1 // SPDX-License-Identifier: GPL-2.0
3 * trace_events_hist - trace event hist triggers
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
22 #include "trace_synth.h"
25 C(NONE, "No error"), \
26 C(DUPLICATE_VAR, "Variable already defined"), \
27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
28 C(TOO_MANY_VARS, "Too many variables defined"), \
29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
51 C(TOO_MANY_PARAMS, "Too many action params"), \
52 C(PARAM_NOT_FOUND, "Couldn't find param"), \
53 C(INVALID_PARAM, "Invalid action param"), \
54 C(ACTION_NOT_FOUND, "No action found"), \
55 C(NO_SAVE_PARAMS, "No params found for save()"), \
56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
57 C(ACTION_MISMATCH, "Handler doesn't support action"), \
58 C(NO_CLOSING_PAREN, "No closing paren found"), \
59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
62 C(VAR_NOT_FOUND, "Couldn't find variable"), \
63 C(FIELD_NOT_FOUND, "Couldn't find field"), \
64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
66 C(EMPTY_SORT_FIELD, "Empty sort field"), \
67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
69 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \
70 C(EXPECT_NUMBER, "Expecting numeric literal"), \
71 C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \
72 C(DIVISION_BY_ZERO, "Division by zero"), \
73 C(NEED_NOHC_VAL, "Non-hitcount value is required for 'nohitcount'"),
/* Expand ERRORS once into an enum of HIST_ERR_* ids, then again into text. */
#undef C
#define C(a, b)		HIST_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };
87 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
88 struct tracing_map_elt
*elt
,
89 struct trace_buffer
*buffer
,
90 struct ring_buffer_event
*rbe
,
#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8
#define HIST_CONST_DIGITS_MAX	21
#define HIST_DIV_SHIFT		20  /* For optimizing division by constants */
103 FIELD_OP_UNARY_MINUS
,
110 HIST_FIELD_FN_VAR_REF
,
111 HIST_FIELD_FN_COUNTER
,
114 HIST_FIELD_FN_BUCKET
,
115 HIST_FIELD_FN_TIMESTAMP
,
118 HIST_FIELD_FN_STRING
,
119 HIST_FIELD_FN_DYNSTRING
,
120 HIST_FIELD_FN_RELDYNSTRING
,
121 HIST_FIELD_FN_PSTRING
,
130 HIST_FIELD_FN_UMINUS
,
135 HIST_FIELD_FN_DIV_POWER2
,
136 HIST_FIELD_FN_DIV_NOT_POWER2
,
137 HIST_FIELD_FN_DIV_MULT_SHIFT
,
138 HIST_FIELD_FN_EXECNAME
,
143 * A hist_var (histogram variable) contains variable information for
144 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
145 * flag set. A hist_var has a variable name e.g. ts0, and is
146 * associated with a given histogram trigger, as specified by
147 * hist_data. The hist_var idx is the unique index assigned to the
148 * variable by the hist trigger's tracing_map. The idx is what is
149 * used to set a variable's value and, by a variable reference, to
154 struct hist_trigger_data
*hist_data
;
159 struct ftrace_event_field
*field
;
161 unsigned long buckets
;
163 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
164 struct hist_trigger_data
*hist_data
;
165 enum hist_field_fn fn_num
;
169 unsigned int is_signed
;
172 * Variable fields contain variable-specific info in var.
175 enum field_op_id
operator;
180 * The name field is used for EXPR and VAR_REF fields. VAR
181 * fields contain the variable name in var.name.
186 * When a histogram trigger is hit, if it has any references
187 * to variables, the values of those variables are collected
188 * into a var_ref_vals array by resolve_var_refs(). The
189 * current value of each variable is read from the tracing_map
190 * using the hist field's hist_var.idx and entered into the
191 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
193 unsigned int var_ref_idx
;
196 unsigned int var_str_idx
;
198 /* Numeric literals are represented as u64 */
200 /* Used to optimize division by constants */
204 static u64
hist_fn_call(struct hist_field
*hist_field
,
205 struct tracing_map_elt
*elt
,
206 struct trace_buffer
*buffer
,
207 struct ring_buffer_event
*rbe
,
210 static u64
hist_field_const(struct hist_field
*field
,
211 struct tracing_map_elt
*elt
,
212 struct trace_buffer
*buffer
,
213 struct ring_buffer_event
*rbe
,
216 return field
->constant
;
219 static u64
hist_field_counter(struct hist_field
*field
,
220 struct tracing_map_elt
*elt
,
221 struct trace_buffer
*buffer
,
222 struct ring_buffer_event
*rbe
,
228 static u64
hist_field_string(struct hist_field
*hist_field
,
229 struct tracing_map_elt
*elt
,
230 struct trace_buffer
*buffer
,
231 struct ring_buffer_event
*rbe
,
234 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
236 return (u64
)(unsigned long)addr
;
239 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
240 struct tracing_map_elt
*elt
,
241 struct trace_buffer
*buffer
,
242 struct ring_buffer_event
*rbe
,
245 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
246 int str_loc
= str_item
& 0xffff;
247 char *addr
= (char *)(event
+ str_loc
);
249 return (u64
)(unsigned long)addr
;
252 static u64
hist_field_reldynstring(struct hist_field
*hist_field
,
253 struct tracing_map_elt
*elt
,
254 struct trace_buffer
*buffer
,
255 struct ring_buffer_event
*rbe
,
258 u32
*item
= event
+ hist_field
->field
->offset
;
259 u32 str_item
= *item
;
260 int str_loc
= str_item
& 0xffff;
261 char *addr
= (char *)&item
[1] + str_loc
;
263 return (u64
)(unsigned long)addr
;
266 static u64
hist_field_pstring(struct hist_field
*hist_field
,
267 struct tracing_map_elt
*elt
,
268 struct trace_buffer
*buffer
,
269 struct ring_buffer_event
*rbe
,
272 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
274 return (u64
)(unsigned long)*addr
;
277 static u64
hist_field_log2(struct hist_field
*hist_field
,
278 struct tracing_map_elt
*elt
,
279 struct trace_buffer
*buffer
,
280 struct ring_buffer_event
*rbe
,
283 struct hist_field
*operand
= hist_field
->operands
[0];
285 u64 val
= hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
287 return (u64
) ilog2(roundup_pow_of_two(val
));
290 static u64
hist_field_bucket(struct hist_field
*hist_field
,
291 struct tracing_map_elt
*elt
,
292 struct trace_buffer
*buffer
,
293 struct ring_buffer_event
*rbe
,
296 struct hist_field
*operand
= hist_field
->operands
[0];
297 unsigned long buckets
= hist_field
->buckets
;
299 u64 val
= hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
301 if (WARN_ON_ONCE(!buckets
))
305 val
= div64_ul(val
, buckets
);
307 val
= (u64
)((unsigned long)val
/ buckets
);
308 return val
* buckets
;
311 static u64
hist_field_plus(struct hist_field
*hist_field
,
312 struct tracing_map_elt
*elt
,
313 struct trace_buffer
*buffer
,
314 struct ring_buffer_event
*rbe
,
317 struct hist_field
*operand1
= hist_field
->operands
[0];
318 struct hist_field
*operand2
= hist_field
->operands
[1];
320 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
321 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
326 static u64
hist_field_minus(struct hist_field
*hist_field
,
327 struct tracing_map_elt
*elt
,
328 struct trace_buffer
*buffer
,
329 struct ring_buffer_event
*rbe
,
332 struct hist_field
*operand1
= hist_field
->operands
[0];
333 struct hist_field
*operand2
= hist_field
->operands
[1];
335 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
336 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
341 static u64
hist_field_div(struct hist_field
*hist_field
,
342 struct tracing_map_elt
*elt
,
343 struct trace_buffer
*buffer
,
344 struct ring_buffer_event
*rbe
,
347 struct hist_field
*operand1
= hist_field
->operands
[0];
348 struct hist_field
*operand2
= hist_field
->operands
[1];
350 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
351 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
353 /* Return -1 for the undefined case */
357 /* Use shift if the divisor is a power of 2 */
358 if (!(val2
& (val2
- 1)))
359 return val1
>> __ffs64(val2
);
361 return div64_u64(val1
, val2
);
364 static u64
div_by_power_of_two(struct hist_field
*hist_field
,
365 struct tracing_map_elt
*elt
,
366 struct trace_buffer
*buffer
,
367 struct ring_buffer_event
*rbe
,
370 struct hist_field
*operand1
= hist_field
->operands
[0];
371 struct hist_field
*operand2
= hist_field
->operands
[1];
373 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
375 return val1
>> __ffs64(operand2
->constant
);
378 static u64
div_by_not_power_of_two(struct hist_field
*hist_field
,
379 struct tracing_map_elt
*elt
,
380 struct trace_buffer
*buffer
,
381 struct ring_buffer_event
*rbe
,
384 struct hist_field
*operand1
= hist_field
->operands
[0];
385 struct hist_field
*operand2
= hist_field
->operands
[1];
387 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
389 return div64_u64(val1
, operand2
->constant
);
392 static u64
div_by_mult_and_shift(struct hist_field
*hist_field
,
393 struct tracing_map_elt
*elt
,
394 struct trace_buffer
*buffer
,
395 struct ring_buffer_event
*rbe
,
398 struct hist_field
*operand1
= hist_field
->operands
[0];
399 struct hist_field
*operand2
= hist_field
->operands
[1];
401 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
404 * If the divisor is a constant, do a multiplication and shift instead.
406 * Choose Z = some power of 2. If Y <= Z, then:
407 * X / Y = (X * (Z / Y)) / Z
409 * (Z / Y) is a constant (mult) which is calculated at parse time, so:
410 * X / Y = (X * mult) / Z
412 * The division by Z can be replaced by a shift since Z is a power of 2:
413 * X / Y = (X * mult) >> HIST_DIV_SHIFT
415 * As long, as X < Z the results will not be off by more than 1.
417 if (val1
< (1 << HIST_DIV_SHIFT
)) {
418 u64 mult
= operand2
->div_multiplier
;
420 return (val1
* mult
+ ((1 << HIST_DIV_SHIFT
) - 1)) >> HIST_DIV_SHIFT
;
423 return div64_u64(val1
, operand2
->constant
);
426 static u64
hist_field_mult(struct hist_field
*hist_field
,
427 struct tracing_map_elt
*elt
,
428 struct trace_buffer
*buffer
,
429 struct ring_buffer_event
*rbe
,
432 struct hist_field
*operand1
= hist_field
->operands
[0];
433 struct hist_field
*operand2
= hist_field
->operands
[1];
435 u64 val1
= hist_fn_call(operand1
, elt
, buffer
, rbe
, event
);
436 u64 val2
= hist_fn_call(operand2
, elt
, buffer
, rbe
, event
);
441 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
442 struct tracing_map_elt
*elt
,
443 struct trace_buffer
*buffer
,
444 struct ring_buffer_event
*rbe
,
447 struct hist_field
*operand
= hist_field
->operands
[0];
449 s64 sval
= (s64
)hist_fn_call(operand
, elt
, buffer
, rbe
, event
);
450 u64 val
= (u64
)-sval
;
455 #define DEFINE_HIST_FIELD_FN(type) \
456 static u64 hist_field_##type(struct hist_field *hist_field, \
457 struct tracing_map_elt *elt, \
458 struct trace_buffer *buffer, \
459 struct ring_buffer_event *rbe, \
462 type *addr = (type *)(event + hist_field->field->offset); \
464 return (u64)(unsigned long)*addr; \
467 DEFINE_HIST_FIELD_FN(s64
);
468 DEFINE_HIST_FIELD_FN(u64
);
469 DEFINE_HIST_FIELD_FN(s32
);
470 DEFINE_HIST_FIELD_FN(u32
);
471 DEFINE_HIST_FIELD_FN(s16
);
472 DEFINE_HIST_FIELD_FN(u16
);
473 DEFINE_HIST_FIELD_FN(s8
);
474 DEFINE_HIST_FIELD_FN(u8
);
/* Iteration helpers: all fields, value fields only, key fields only. */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HITCOUNT_IDX 0

#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/* Per-field flags describing a hist field's role and modifiers. */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
	HIST_FIELD_FL_BUCKET		= 1 << 17,
	HIST_FIELD_FL_CONST		= 1 << 18,
	HIST_FIELD_FL_PERCENT		= 1 << 19,
	HIST_FIELD_FL_GRAPH		= 1 << 20,
	HIST_FIELD_FL_COMM		= 1 << 21,
};
515 char *name
[TRACING_MAP_VARS_MAX
];
516 char *expr
[TRACING_MAP_VARS_MAX
];
519 struct hist_trigger_attrs
{
530 unsigned int map_bits
;
532 char *assignment_str
[TRACING_MAP_VARS_MAX
];
533 unsigned int n_assignments
;
535 char *action_str
[HIST_ACTIONS_MAX
];
536 unsigned int n_actions
;
538 struct var_defs var_defs
;
542 struct hist_field
*var
;
543 struct hist_field
*val
;
546 struct field_var_hist
{
547 struct hist_trigger_data
*hist_data
;
551 struct hist_trigger_data
{
552 struct hist_field
*fields
[HIST_FIELDS_MAX
];
555 unsigned int n_fields
;
557 unsigned int n_var_str
;
558 unsigned int key_size
;
559 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
560 unsigned int n_sort_keys
;
561 struct trace_event_file
*event_file
;
562 struct hist_trigger_attrs
*attrs
;
563 struct tracing_map
*map
;
564 bool enable_timestamps
;
566 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
567 unsigned int n_var_refs
;
569 struct action_data
*actions
[HIST_ACTIONS_MAX
];
570 unsigned int n_actions
;
572 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
573 unsigned int n_field_vars
;
574 unsigned int n_field_var_str
;
575 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
576 unsigned int n_field_var_hists
;
578 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
579 unsigned int n_save_vars
;
580 unsigned int n_save_var_str
;
585 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
586 struct tracing_map_elt
*elt
,
587 struct trace_buffer
*buffer
, void *rec
,
588 struct ring_buffer_event
*rbe
, void *key
,
589 struct action_data
*data
, u64
*var_ref_vals
);
591 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
606 enum handler_id handler
;
607 enum action_id action
;
611 unsigned int n_params
;
612 char *params
[SYNTH_FIELDS_MAX
];
615 * When a histogram trigger is hit, the values of any
616 * references to variables, including variables being passed
617 * as parameters to synthetic events, are collected into a
618 * var_ref_vals array. This var_ref_idx array is an array of
619 * indices into the var_ref_vals array, one for each synthetic
620 * event param, and is passed to the synthetic event
623 unsigned int var_ref_idx
[SYNTH_FIELDS_MAX
];
624 struct synth_event
*synth_event
;
625 bool use_trace_keyword
;
626 char *synth_event_name
;
636 * var_str contains the $-unstripped variable
637 * name referenced by var_ref, and used when
638 * printing the action. Because var_ref
639 * creation is deferred to create_actions(),
640 * we need a per-action way to save it until
641 * then, thus var_str.
646 * var_ref refers to the variable being
647 * tracked e.g onmax($var).
649 struct hist_field
*var_ref
;
652 * track_var contains the 'invisible' tracking
653 * variable created to keep the current
656 struct hist_field
*track_var
;
658 check_track_val_fn_t check_val
;
659 action_fn_t save_data
;
668 unsigned int key_len
;
670 struct tracing_map_elt elt
;
672 struct action_data
*action_data
;
673 struct hist_trigger_data
*hist_data
;
676 struct hist_elt_data
{
679 char **field_var_str
;
683 struct snapshot_context
{
684 struct tracing_map_elt
*elt
;
689 * Returns the specific division function to use if the divisor
690 * is constant. This avoids extra branches when the trigger is hit.
692 static enum hist_field_fn
hist_field_get_div_fn(struct hist_field
*divisor
)
694 u64 div
= divisor
->constant
;
696 if (!(div
& (div
- 1)))
697 return HIST_FIELD_FN_DIV_POWER2
;
699 /* If the divisor is too large, do a regular division */
700 if (div
> (1 << HIST_DIV_SHIFT
))
701 return HIST_FIELD_FN_DIV_NOT_POWER2
;
703 divisor
->div_multiplier
= div64_u64((u64
)(1 << HIST_DIV_SHIFT
), div
);
704 return HIST_FIELD_FN_DIV_MULT_SHIFT
;
707 static void track_data_free(struct track_data
*track_data
)
709 struct hist_elt_data
*elt_data
;
714 kfree(track_data
->key
);
716 elt_data
= track_data
->elt
.private_data
;
718 kfree(elt_data
->comm
);
725 static struct track_data
*track_data_alloc(unsigned int key_len
,
726 struct action_data
*action_data
,
727 struct hist_trigger_data
*hist_data
)
729 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
730 struct hist_elt_data
*elt_data
;
733 return ERR_PTR(-ENOMEM
);
735 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
737 track_data_free(data
);
738 return ERR_PTR(-ENOMEM
);
741 data
->key_len
= key_len
;
742 data
->action_data
= action_data
;
743 data
->hist_data
= hist_data
;
745 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
747 track_data_free(data
);
748 return ERR_PTR(-ENOMEM
);
751 data
->elt
.private_data
= elt_data
;
753 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
754 if (!elt_data
->comm
) {
755 track_data_free(data
);
756 return ERR_PTR(-ENOMEM
);
762 #define HIST_PREFIX "hist:"
764 static char *last_cmd
;
765 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
767 static int errpos(char *str
)
769 if (!str
|| !last_cmd
)
772 return err_pos(last_cmd
, str
);
775 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
777 const char *system
= NULL
, *name
= NULL
;
778 struct trace_event_call
*call
;
785 last_cmd
= kasprintf(GFP_KERNEL
, HIST_PREFIX
"%s", str
);
790 call
= file
->event_call
;
791 system
= call
->class->system
;
793 name
= trace_event_name(call
);
800 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, HIST_PREFIX
"%s:%s", system
, name
);
803 static void hist_err(struct trace_array
*tr
, u8 err_type
, u16 err_pos
)
808 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
812 static void hist_err_clear(void)
816 last_cmd_loc
[0] = '\0';
819 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
820 unsigned int *var_ref_idx
);
822 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
823 unsigned int *var_ref_idx
)
825 struct tracepoint
*tp
= event
->tp
;
827 if (unlikely(static_key_enabled(&tp
->key
))) {
828 struct tracepoint_func
*probe_func_ptr
;
829 synth_probe_func_t probe_func
;
832 if (!(cpu_online(raw_smp_processor_id())))
835 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
836 if (probe_func_ptr
) {
838 probe_func
= probe_func_ptr
->func
;
839 __data
= probe_func_ptr
->data
;
840 probe_func(__data
, var_ref_vals
, var_ref_idx
);
841 } while ((++probe_func_ptr
)->func
);
846 static void action_trace(struct hist_trigger_data
*hist_data
,
847 struct tracing_map_elt
*elt
,
848 struct trace_buffer
*buffer
, void *rec
,
849 struct ring_buffer_event
*rbe
, void *key
,
850 struct action_data
*data
, u64
*var_ref_vals
)
852 struct synth_event
*event
= data
->synth_event
;
854 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
857 struct hist_var_data
{
858 struct list_head list
;
859 struct hist_trigger_data
*hist_data
;
862 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
863 struct tracing_map_elt
*elt
,
864 struct trace_buffer
*buffer
,
865 struct ring_buffer_event
*rbe
,
868 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
869 struct trace_array
*tr
= hist_data
->event_file
->tr
;
871 u64 ts
= ring_buffer_event_time_stamp(buffer
, rbe
);
873 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
879 static u64
hist_field_cpu(struct hist_field
*hist_field
,
880 struct tracing_map_elt
*elt
,
881 struct trace_buffer
*buffer
,
882 struct ring_buffer_event
*rbe
,
885 int cpu
= smp_processor_id();
890 static u64
hist_field_comm(struct hist_field
*hist_field
,
891 struct tracing_map_elt
*elt
,
892 struct trace_buffer
*buffer
,
893 struct ring_buffer_event
*rbe
,
896 return (u64
)(unsigned long)current
->comm
;
900 * check_field_for_var_ref - Check if a VAR_REF field references a variable
901 * @hist_field: The VAR_REF field to check
902 * @var_data: The hist trigger that owns the variable
903 * @var_idx: The trigger variable identifier
905 * Check the given VAR_REF field to see whether or not it references
906 * the given variable associated with the given trigger.
908 * Return: The VAR_REF field if it does reference the variable, NULL if not
910 static struct hist_field
*
911 check_field_for_var_ref(struct hist_field
*hist_field
,
912 struct hist_trigger_data
*var_data
,
913 unsigned int var_idx
)
915 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
917 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
918 hist_field
->var
.hist_data
== var_data
)
925 * find_var_ref - Check if a trigger has a reference to a trigger variable
926 * @hist_data: The hist trigger that might have a reference to the variable
927 * @var_data: The hist trigger that owns the variable
928 * @var_idx: The trigger variable identifier
930 * Check the list of var_refs[] on the first hist trigger to see
931 * whether any of them are references to the variable on the second
934 * Return: The VAR_REF field referencing the variable if so, NULL if not
936 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
937 struct hist_trigger_data
*var_data
,
938 unsigned int var_idx
)
940 struct hist_field
*hist_field
;
943 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
944 hist_field
= hist_data
->var_refs
[i
];
945 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
953 * find_any_var_ref - Check if there is a reference to a given trigger variable
954 * @hist_data: The hist trigger
955 * @var_idx: The trigger variable identifier
957 * Check to see whether the given variable is currently referenced by
960 * The trigger the variable is defined on is explicitly excluded - the
961 * assumption being that a self-reference doesn't prevent a trigger
962 * from being removed.
964 * Return: The VAR_REF field referencing the variable if so, NULL if not
966 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
967 unsigned int var_idx
)
969 struct trace_array
*tr
= hist_data
->event_file
->tr
;
970 struct hist_field
*found
= NULL
;
971 struct hist_var_data
*var_data
;
973 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
974 if (var_data
->hist_data
== hist_data
)
976 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
985 * check_var_refs - Check if there is a reference to any of trigger's variables
986 * @hist_data: The hist trigger
988 * A trigger can define one or more variables. If any one of them is
989 * currently referenced by any other trigger, this function will
992 * Typically used to determine whether or not a trigger can be removed
993 * - if there are any references to a trigger's variables, it cannot.
995 * Return: True if there is a reference to any of trigger's variables
997 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
999 struct hist_field
*field
;
1003 for_each_hist_field(i
, hist_data
) {
1004 field
= hist_data
->fields
[i
];
1005 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
1006 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
1016 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
1018 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1019 struct hist_var_data
*var_data
, *found
= NULL
;
1021 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1022 if (var_data
->hist_data
== hist_data
) {
1031 static bool field_has_hist_vars(struct hist_field
*hist_field
,
1042 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
1043 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1046 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
1047 struct hist_field
*operand
;
1049 operand
= hist_field
->operands
[i
];
1050 if (field_has_hist_vars(operand
, level
+ 1))
1057 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
1059 struct hist_field
*hist_field
;
1062 for_each_hist_field(i
, hist_data
) {
1063 hist_field
= hist_data
->fields
[i
];
1064 if (field_has_hist_vars(hist_field
, 0))
1071 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
1073 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1074 struct hist_var_data
*var_data
;
1076 var_data
= find_hist_vars(hist_data
);
1080 if (tracing_check_open_get_tr(tr
))
1083 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
1085 trace_array_put(tr
);
1089 var_data
->hist_data
= hist_data
;
1090 list_add(&var_data
->list
, &tr
->hist_vars
);
1095 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
1097 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1098 struct hist_var_data
*var_data
;
1100 var_data
= find_hist_vars(hist_data
);
1104 if (WARN_ON(check_var_refs(hist_data
)))
1107 list_del(&var_data
->list
);
1111 trace_array_put(tr
);
1114 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
1115 const char *var_name
)
1117 struct hist_field
*hist_field
, *found
= NULL
;
1120 for_each_hist_field(i
, hist_data
) {
1121 hist_field
= hist_data
->fields
[i
];
1122 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
1123 strcmp(hist_field
->var
.name
, var_name
) == 0) {
1132 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
1133 struct trace_event_file
*file
,
1134 const char *var_name
)
1136 struct hist_trigger_data
*test_data
;
1137 struct event_trigger_data
*test
;
1138 struct hist_field
*hist_field
;
1140 lockdep_assert_held(&event_mutex
);
1142 hist_field
= find_var_field(hist_data
, var_name
);
1146 list_for_each_entry(test
, &file
->triggers
, list
) {
1147 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1148 test_data
= test
->private_data
;
1149 hist_field
= find_var_field(test_data
, var_name
);
1158 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
1163 struct hist_trigger_data
*var_hist_data
;
1164 struct hist_var_data
*var_data
;
1165 struct trace_event_file
*file
, *found
= NULL
;
1168 return find_event_file(tr
, system
, event_name
);
1170 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
1171 var_hist_data
= var_data
->hist_data
;
1172 file
= var_hist_data
->event_file
;
1176 if (find_var_field(var_hist_data
, var_name
)) {
1178 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
1189 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
1190 const char *var_name
)
1192 struct hist_trigger_data
*test_data
;
1193 struct event_trigger_data
*test
;
1194 struct hist_field
*hist_field
;
1196 lockdep_assert_held(&event_mutex
);
1198 list_for_each_entry(test
, &file
->triggers
, list
) {
1199 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1200 test_data
= test
->private_data
;
1201 hist_field
= find_var_field(test_data
, var_name
);
1210 static struct hist_field
*
1211 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
1213 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1214 struct hist_field
*hist_field
, *found
= NULL
;
1215 struct trace_event_file
*file
;
1218 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
1219 struct action_data
*data
= hist_data
->actions
[i
];
1221 if (data
->handler
== HANDLER_ONMATCH
) {
1222 char *system
= data
->match_data
.event_system
;
1223 char *event_name
= data
->match_data
.event
;
1225 file
= find_var_file(tr
, system
, event_name
, var_name
);
1228 hist_field
= find_file_var(file
, var_name
);
1231 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
1233 return ERR_PTR(-EINVAL
);
1243 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1248 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1249 struct hist_field
*hist_field
= NULL
;
1250 struct trace_event_file
*file
;
1252 if (!system
|| !event_name
) {
1253 hist_field
= find_match_var(hist_data
, var_name
);
1254 if (IS_ERR(hist_field
))
1260 file
= find_var_file(tr
, system
, event_name
, var_name
);
1264 hist_field
= find_file_var(file
, var_name
);
1269 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1270 struct tracing_map_elt
*elt
,
1271 struct trace_buffer
*buffer
,
1272 struct ring_buffer_event
*rbe
,
1275 struct hist_elt_data
*elt_data
;
1278 if (WARN_ON_ONCE(!elt
))
1281 elt_data
= elt
->private_data
;
1282 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1287 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1288 u64
*var_ref_vals
, bool self
)
1290 struct hist_trigger_data
*var_data
;
1291 struct tracing_map_elt
*var_elt
;
1292 struct hist_field
*hist_field
;
1293 unsigned int i
, var_idx
;
1294 bool resolved
= true;
1297 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1298 hist_field
= hist_data
->var_refs
[i
];
1299 var_idx
= hist_field
->var
.idx
;
1300 var_data
= hist_field
->var
.hist_data
;
1302 if (var_data
== NULL
) {
1307 if ((self
&& var_data
!= hist_data
) ||
1308 (!self
&& var_data
== hist_data
))
1311 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1317 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1322 if (self
|| !hist_field
->read_once
)
1323 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1325 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1327 var_ref_vals
[i
] = var_val
;
1333 static const char *hist_field_name(struct hist_field
*field
,
1336 const char *field_name
= "";
1338 if (WARN_ON_ONCE(!field
))
1345 field_name
= field
->field
->name
;
1346 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
1347 field
->flags
& HIST_FIELD_FL_ALIAS
||
1348 field
->flags
& HIST_FIELD_FL_BUCKET
)
1349 field_name
= hist_field_name(field
->operands
[0], ++level
);
1350 else if (field
->flags
& HIST_FIELD_FL_CPU
)
1351 field_name
= "common_cpu";
1352 else if (field
->flags
& HIST_FIELD_FL_COMM
)
1353 field_name
= "common_comm";
1354 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
1355 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1356 if (field
->system
) {
1357 static char full_name
[MAX_FILTER_STR_VAL
];
1359 strcat(full_name
, field
->system
);
1360 strcat(full_name
, ".");
1361 strcat(full_name
, field
->event_name
);
1362 strcat(full_name
, ".");
1363 strcat(full_name
, field
->name
);
1364 field_name
= full_name
;
1366 field_name
= field
->name
;
1367 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
1368 field_name
= "common_timestamp";
1369 else if (field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
1370 field_name
= "common_stacktrace";
1371 } else if (field
->flags
& HIST_FIELD_FL_HITCOUNT
)
1372 field_name
= "hitcount";
1374 if (field_name
== NULL
)
1380 static enum hist_field_fn
select_value_fn(int field_size
, int field_is_signed
)
1382 switch (field_size
) {
1384 if (field_is_signed
)
1385 return HIST_FIELD_FN_S64
;
1387 return HIST_FIELD_FN_U64
;
1389 if (field_is_signed
)
1390 return HIST_FIELD_FN_S32
;
1392 return HIST_FIELD_FN_U32
;
1394 if (field_is_signed
)
1395 return HIST_FIELD_FN_S16
;
1397 return HIST_FIELD_FN_U16
;
1399 if (field_is_signed
)
1400 return HIST_FIELD_FN_S8
;
1402 return HIST_FIELD_FN_U8
;
1405 return HIST_FIELD_FN_NOP
;
1408 static int parse_map_size(char *str
)
1410 unsigned long size
, map_bits
;
1413 ret
= kstrtoul(str
, 0, &size
);
1417 map_bits
= ilog2(roundup_pow_of_two(size
));
1418 if (map_bits
< TRACING_MAP_BITS_MIN
||
1419 map_bits
> TRACING_MAP_BITS_MAX
)
1427 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
1434 for (i
= 0; i
< attrs
->n_assignments
; i
++)
1435 kfree(attrs
->assignment_str
[i
]);
1437 for (i
= 0; i
< attrs
->n_actions
; i
++)
1438 kfree(attrs
->action_str
[i
]);
1441 kfree(attrs
->sort_key_str
);
1442 kfree(attrs
->keys_str
);
1443 kfree(attrs
->vals_str
);
1444 kfree(attrs
->clock
);
1448 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
1452 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
1455 if ((str_has_prefix(str
, "onmatch(")) ||
1456 (str_has_prefix(str
, "onmax(")) ||
1457 (str_has_prefix(str
, "onchange("))) {
1458 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
1459 if (!attrs
->action_str
[attrs
->n_actions
]) {
1469 static int parse_assignment(struct trace_array
*tr
,
1470 char *str
, struct hist_trigger_attrs
*attrs
)
1474 if ((len
= str_has_prefix(str
, "key=")) ||
1475 (len
= str_has_prefix(str
, "keys="))) {
1476 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1477 if (!attrs
->keys_str
) {
1481 } else if ((len
= str_has_prefix(str
, "val=")) ||
1482 (len
= str_has_prefix(str
, "vals=")) ||
1483 (len
= str_has_prefix(str
, "values="))) {
1484 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1485 if (!attrs
->vals_str
) {
1489 } else if ((len
= str_has_prefix(str
, "sort="))) {
1490 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1491 if (!attrs
->sort_key_str
) {
1495 } else if (str_has_prefix(str
, "name=")) {
1496 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
1501 } else if ((len
= str_has_prefix(str
, "clock="))) {
1504 str
= strstrip(str
);
1505 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
1506 if (!attrs
->clock
) {
1510 } else if ((len
= str_has_prefix(str
, "size="))) {
1511 int map_bits
= parse_map_size(str
+ len
);
1517 attrs
->map_bits
= map_bits
;
1521 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
1522 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
1527 assignment
= kstrdup(str
, GFP_KERNEL
);
1533 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
1539 static struct hist_trigger_attrs
*
1540 parse_hist_trigger_attrs(struct trace_array
*tr
, char *trigger_str
)
1542 struct hist_trigger_attrs
*attrs
;
1545 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
1547 return ERR_PTR(-ENOMEM
);
1549 while (trigger_str
) {
1550 char *str
= strsep(&trigger_str
, ":");
1553 rhs
= strchr(str
, '=');
1555 if (!strlen(++rhs
)) {
1557 hist_err(tr
, HIST_ERR_EMPTY_ASSIGNMENT
, errpos(str
));
1560 ret
= parse_assignment(tr
, str
, attrs
);
1563 } else if (strcmp(str
, "nohitcount") == 0 ||
1564 strcmp(str
, "NOHC") == 0)
1565 attrs
->no_hitcount
= true;
1566 else if (strcmp(str
, "pause") == 0)
1567 attrs
->pause
= true;
1568 else if ((strcmp(str
, "cont") == 0) ||
1569 (strcmp(str
, "continue") == 0))
1571 else if (strcmp(str
, "clear") == 0)
1572 attrs
->clear
= true;
1574 ret
= parse_action(str
, attrs
);
1580 if (!attrs
->keys_str
) {
1585 if (!attrs
->clock
) {
1586 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
1587 if (!attrs
->clock
) {
1595 destroy_hist_trigger_attrs(attrs
);
1597 return ERR_PTR(ret
);
1600 static inline void save_comm(char *comm
, struct task_struct
*task
)
1603 strcpy(comm
, "<idle>");
1607 if (WARN_ON_ONCE(task
->pid
< 0)) {
1608 strcpy(comm
, "<XXX>");
1612 strscpy(comm
, task
->comm
, TASK_COMM_LEN
);
1615 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
1619 for (i
= 0; i
< elt_data
->n_field_var_str
; i
++)
1620 kfree(elt_data
->field_var_str
[i
]);
1622 kfree(elt_data
->field_var_str
);
1624 kfree(elt_data
->comm
);
1628 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
1630 struct hist_elt_data
*elt_data
= elt
->private_data
;
1632 hist_elt_data_free(elt_data
);
1635 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
1637 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
1638 unsigned int size
= TASK_COMM_LEN
;
1639 struct hist_elt_data
*elt_data
;
1640 struct hist_field
*hist_field
;
1641 unsigned int i
, n_str
;
1643 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
1647 for_each_hist_field(i
, hist_data
) {
1648 hist_field
= hist_data
->fields
[i
];
1650 if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
1651 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
1652 if (!elt_data
->comm
) {
1660 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_save_var_str
+
1661 hist_data
->n_var_str
;
1662 if (n_str
> SYNTH_FIELDS_MAX
) {
1663 hist_elt_data_free(elt_data
);
1667 BUILD_BUG_ON(STR_VAR_LEN_MAX
& (sizeof(u64
) - 1));
1669 size
= STR_VAR_LEN_MAX
;
1671 elt_data
->field_var_str
= kcalloc(n_str
, sizeof(char *), GFP_KERNEL
);
1672 if (!elt_data
->field_var_str
) {
1673 hist_elt_data_free(elt_data
);
1676 elt_data
->n_field_var_str
= n_str
;
1678 for (i
= 0; i
< n_str
; i
++) {
1679 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
1680 if (!elt_data
->field_var_str
[i
]) {
1681 hist_elt_data_free(elt_data
);
1686 elt
->private_data
= elt_data
;
1691 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
1693 struct hist_elt_data
*elt_data
= elt
->private_data
;
1696 save_comm(elt_data
->comm
, current
);
1699 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
1700 .elt_alloc
= hist_trigger_elt_data_alloc
,
1701 .elt_free
= hist_trigger_elt_data_free
,
1702 .elt_init
= hist_trigger_elt_data_init
,
1705 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
1707 const char *flags_str
= NULL
;
1709 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
1711 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
1713 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
1714 flags_str
= "sym-offset";
1715 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
1716 flags_str
= "execname";
1717 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
1718 flags_str
= "syscall";
1719 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
1721 else if (hist_field
->flags
& HIST_FIELD_FL_BUCKET
)
1722 flags_str
= "buckets";
1723 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
1724 flags_str
= "usecs";
1725 else if (hist_field
->flags
& HIST_FIELD_FL_PERCENT
)
1726 flags_str
= "percent";
1727 else if (hist_field
->flags
& HIST_FIELD_FL_GRAPH
)
1728 flags_str
= "graph";
1729 else if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
1730 flags_str
= "stacktrace";
1735 static void expr_field_str(struct hist_field
*field
, char *expr
)
1737 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
1739 else if (field
->flags
& HIST_FIELD_FL_CONST
) {
1740 char str
[HIST_CONST_DIGITS_MAX
];
1742 snprintf(str
, HIST_CONST_DIGITS_MAX
, "%llu", field
->constant
);
1746 strcat(expr
, hist_field_name(field
, 0));
1748 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
1749 const char *flags_str
= get_hist_field_flags(field
);
1753 strcat(expr
, flags_str
);
1758 static char *expr_str(struct hist_field
*field
, unsigned int level
)
1765 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
1769 if (!field
->operands
[0]) {
1770 expr_field_str(field
, expr
);
1774 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
1778 subexpr
= expr_str(field
->operands
[0], ++level
);
1783 strcat(expr
, subexpr
);
1791 expr_field_str(field
->operands
[0], expr
);
1793 switch (field
->operator) {
1794 case FIELD_OP_MINUS
:
1811 expr_field_str(field
->operands
[1], expr
);
1817 * If field_op != FIELD_OP_NONE, *sep points to the root operator
1818 * of the expression tree to be evaluated.
1820 static int contains_operator(char *str
, char **sep
)
1822 enum field_op_id field_op
= FIELD_OP_NONE
;
1823 char *minus_op
, *plus_op
, *div_op
, *mult_op
;
1827 * Report the last occurrence of the operators first, so that the
1828 * expression is evaluated left to right. This is important since
1829 * subtraction and division are not associative.
1832 * 64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2
1833 * 14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2
1837 * First, find lower precedence addition and subtraction
1838 * since the expression will be evaluated recursively.
1840 minus_op
= strrchr(str
, '-');
1843 * Unary minus is not supported in sub-expressions. If
1844 * present, it is always the next root operator.
1846 if (minus_op
== str
) {
1847 field_op
= FIELD_OP_UNARY_MINUS
;
1851 field_op
= FIELD_OP_MINUS
;
1854 plus_op
= strrchr(str
, '+');
1855 if (plus_op
|| minus_op
) {
1857 * For operators of the same precedence use to rightmost as the
1858 * root, so that the expression is evaluated left to right.
1860 if (plus_op
> minus_op
)
1861 field_op
= FIELD_OP_PLUS
;
1866 * Multiplication and division have higher precedence than addition and
1869 div_op
= strrchr(str
, '/');
1871 field_op
= FIELD_OP_DIV
;
1873 mult_op
= strrchr(str
, '*');
1875 * For operators of the same precedence use to rightmost as the
1876 * root, so that the expression is evaluated left to right.
1878 if (mult_op
> div_op
)
1879 field_op
= FIELD_OP_MULT
;
1884 case FIELD_OP_UNARY_MINUS
:
1885 case FIELD_OP_MINUS
:
1907 static void get_hist_field(struct hist_field
*hist_field
)
1912 static void __destroy_hist_field(struct hist_field
*hist_field
)
1914 if (--hist_field
->ref
> 1)
1917 kfree(hist_field
->var
.name
);
1918 kfree(hist_field
->name
);
1920 /* Can likely be a const */
1921 kfree_const(hist_field
->type
);
1923 kfree(hist_field
->system
);
1924 kfree(hist_field
->event_name
);
1929 static void destroy_hist_field(struct hist_field
*hist_field
,
1940 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1941 return; /* var refs will be destroyed separately */
1943 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
1944 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
1946 __destroy_hist_field(hist_field
);
1949 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
1950 struct ftrace_event_field
*field
,
1951 unsigned long flags
,
1954 struct hist_field
*hist_field
;
1956 if (field
&& is_function_field(field
))
1959 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
1963 hist_field
->ref
= 1;
1965 hist_field
->hist_data
= hist_data
;
1967 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
1968 goto out
; /* caller will populate */
1970 if (flags
& HIST_FIELD_FL_VAR_REF
) {
1971 hist_field
->fn_num
= HIST_FIELD_FN_VAR_REF
;
1975 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
1976 hist_field
->fn_num
= HIST_FIELD_FN_COUNTER
;
1977 hist_field
->size
= sizeof(u64
);
1978 hist_field
->type
= "u64";
1982 if (flags
& HIST_FIELD_FL_CONST
) {
1983 hist_field
->fn_num
= HIST_FIELD_FN_CONST
;
1984 hist_field
->size
= sizeof(u64
);
1985 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
1986 if (!hist_field
->type
)
1991 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
1993 hist_field
->fn_num
= HIST_FIELD_FN_STACK
;
1995 hist_field
->fn_num
= HIST_FIELD_FN_NOP
;
1996 hist_field
->size
= HIST_STACKTRACE_SIZE
;
1997 hist_field
->type
= kstrdup_const("unsigned long[]", GFP_KERNEL
);
1998 if (!hist_field
->type
)
2003 if (flags
& (HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
)) {
2004 unsigned long fl
= flags
& ~(HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
);
2005 hist_field
->fn_num
= flags
& HIST_FIELD_FL_LOG2
? HIST_FIELD_FN_LOG2
:
2006 HIST_FIELD_FN_BUCKET
;
2007 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
2008 if (!hist_field
->operands
[0])
2010 hist_field
->size
= hist_field
->operands
[0]->size
;
2011 hist_field
->type
= kstrdup_const(hist_field
->operands
[0]->type
, GFP_KERNEL
);
2012 if (!hist_field
->type
)
2017 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
2018 hist_field
->fn_num
= HIST_FIELD_FN_TIMESTAMP
;
2019 hist_field
->size
= sizeof(u64
);
2020 hist_field
->type
= "u64";
2024 if (flags
& HIST_FIELD_FL_CPU
) {
2025 hist_field
->fn_num
= HIST_FIELD_FN_CPU
;
2026 hist_field
->size
= sizeof(int);
2027 hist_field
->type
= "unsigned int";
2031 if (flags
& HIST_FIELD_FL_COMM
) {
2032 hist_field
->fn_num
= HIST_FIELD_FN_COMM
;
2033 hist_field
->size
= MAX_FILTER_STR_VAL
;
2034 hist_field
->type
= "char[]";
2038 if (WARN_ON_ONCE(!field
))
2041 /* Pointers to strings are just pointers and dangerous to dereference */
2042 if (is_string_field(field
) &&
2043 (field
->filter_type
!= FILTER_PTR_STRING
)) {
2044 flags
|= HIST_FIELD_FL_STRING
;
2046 hist_field
->size
= MAX_FILTER_STR_VAL
;
2047 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
2048 if (!hist_field
->type
)
2051 if (field
->filter_type
== FILTER_STATIC_STRING
) {
2052 hist_field
->fn_num
= HIST_FIELD_FN_STRING
;
2053 hist_field
->size
= field
->size
;
2054 } else if (field
->filter_type
== FILTER_DYN_STRING
) {
2055 hist_field
->fn_num
= HIST_FIELD_FN_DYNSTRING
;
2056 } else if (field
->filter_type
== FILTER_RDYN_STRING
)
2057 hist_field
->fn_num
= HIST_FIELD_FN_RELDYNSTRING
;
2059 hist_field
->fn_num
= HIST_FIELD_FN_PSTRING
;
2061 hist_field
->size
= field
->size
;
2062 hist_field
->is_signed
= field
->is_signed
;
2063 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
2064 if (!hist_field
->type
)
2067 hist_field
->fn_num
= select_value_fn(field
->size
,
2069 if (hist_field
->fn_num
== HIST_FIELD_FN_NOP
) {
2070 destroy_hist_field(hist_field
, 0);
2075 hist_field
->field
= field
;
2076 hist_field
->flags
= flags
;
2079 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
2080 if (!hist_field
->var
.name
)
2086 destroy_hist_field(hist_field
, 0);
2090 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
2094 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
2095 if (hist_data
->fields
[i
]) {
2096 destroy_hist_field(hist_data
->fields
[i
], 0);
2097 hist_data
->fields
[i
] = NULL
;
2101 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2102 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
2103 __destroy_hist_field(hist_data
->var_refs
[i
]);
2104 hist_data
->var_refs
[i
] = NULL
;
2108 static int init_var_ref(struct hist_field
*ref_field
,
2109 struct hist_field
*var_field
,
2110 char *system
, char *event_name
)
2114 ref_field
->var
.idx
= var_field
->var
.idx
;
2115 ref_field
->var
.hist_data
= var_field
->hist_data
;
2116 ref_field
->size
= var_field
->size
;
2117 ref_field
->is_signed
= var_field
->is_signed
;
2118 ref_field
->flags
|= var_field
->flags
&
2119 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2122 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
2123 if (!ref_field
->system
)
2128 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
2129 if (!ref_field
->event_name
) {
2135 if (var_field
->var
.name
) {
2136 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
2137 if (!ref_field
->name
) {
2141 } else if (var_field
->name
) {
2142 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
2143 if (!ref_field
->name
) {
2149 ref_field
->type
= kstrdup_const(var_field
->type
, GFP_KERNEL
);
2150 if (!ref_field
->type
) {
2157 kfree(ref_field
->system
);
2158 ref_field
->system
= NULL
;
2159 kfree(ref_field
->event_name
);
2160 ref_field
->event_name
= NULL
;
2161 kfree(ref_field
->name
);
2162 ref_field
->name
= NULL
;
2167 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
2168 struct hist_field
*var_field
)
2170 struct hist_field
*ref_field
;
2173 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2174 ref_field
= hist_data
->var_refs
[i
];
2175 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
2176 ref_field
->var
.hist_data
== var_field
->hist_data
)
2184 * create_var_ref - Create a variable reference and attach it to trigger
2185 * @hist_data: The trigger that will be referencing the variable
2186 * @var_field: The VAR field to create a reference to
2187 * @system: The optional system string
2188 * @event_name: The optional event_name string
2190 * Given a variable hist_field, create a VAR_REF hist_field that
2191 * represents a reference to it.
2193 * This function also adds the reference to the trigger that
2194 * now references the variable.
2196 * Return: The VAR_REF field if successful, NULL if not
2198 static struct hist_field
*create_var_ref(struct hist_trigger_data
*hist_data
,
2199 struct hist_field
*var_field
,
2200 char *system
, char *event_name
)
2202 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
2203 struct hist_field
*ref_field
;
2206 /* Check if the variable already exists */
2207 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2208 ref_field
= hist_data
->var_refs
[i
];
2209 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
2210 ref_field
->var
.hist_data
== var_field
->hist_data
) {
2211 get_hist_field(ref_field
);
2215 /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
2216 if (hist_data
->n_var_refs
>= TRACING_MAP_VARS_MAX
)
2218 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
2220 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
2221 destroy_hist_field(ref_field
, 0);
2225 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
2226 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
2232 static bool is_var_ref(char *var_name
)
2234 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
2240 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
2246 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
2247 name
= hist_data
->attrs
->var_defs
.name
[i
];
2249 if (strcmp(var_name
, name
) == 0) {
2250 field
= hist_data
->attrs
->var_defs
.expr
[i
];
2251 if (contains_operator(field
, NULL
) || is_var_ref(field
))
2260 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
2261 char *system
, char *event_name
,
2264 struct trace_event_call
*call
;
2266 if (system
&& event_name
) {
2267 call
= hist_data
->event_file
->event_call
;
2269 if (strcmp(system
, call
->class->system
) != 0)
2272 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2276 if (!!system
!= !!event_name
)
2279 if (!is_var_ref(var_name
))
2284 return field_name_from_var(hist_data
, var_name
);
2287 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
2288 char *system
, char *event_name
,
2291 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
2292 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2294 if (!is_var_ref(var_name
))
2299 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
2301 ref_field
= create_var_ref(hist_data
, var_field
,
2302 system
, event_name
);
2305 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
2310 static struct ftrace_event_field
*
2311 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
2312 char *field_str
, unsigned long *flags
, unsigned long *buckets
)
2314 struct ftrace_event_field
*field
= NULL
;
2315 char *field_name
, *modifier
, *str
;
2316 struct trace_array
*tr
= file
->tr
;
2318 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
2320 return ERR_PTR(-ENOMEM
);
2322 field_name
= strsep(&modifier
, ".");
2324 if (strcmp(modifier
, "hex") == 0)
2325 *flags
|= HIST_FIELD_FL_HEX
;
2326 else if (strcmp(modifier
, "sym") == 0)
2327 *flags
|= HIST_FIELD_FL_SYM
;
2329 * 'sym-offset' occurrences in the trigger string are modified
2330 * to 'symXoffset' to simplify arithmetic expression parsing.
2332 else if (strcmp(modifier
, "symXoffset") == 0)
2333 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
2334 else if ((strcmp(modifier
, "execname") == 0) &&
2335 (strcmp(field_name
, "common_pid") == 0))
2336 *flags
|= HIST_FIELD_FL_EXECNAME
;
2337 else if (strcmp(modifier
, "syscall") == 0)
2338 *flags
|= HIST_FIELD_FL_SYSCALL
;
2339 else if (strcmp(modifier
, "stacktrace") == 0)
2340 *flags
|= HIST_FIELD_FL_STACKTRACE
;
2341 else if (strcmp(modifier
, "log2") == 0)
2342 *flags
|= HIST_FIELD_FL_LOG2
;
2343 else if (strcmp(modifier
, "usecs") == 0)
2344 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
2345 else if (strncmp(modifier
, "bucket", 6) == 0) {
2350 if (*modifier
== 's')
2352 if (*modifier
!= '=')
2355 ret
= kstrtoul(modifier
, 0, buckets
);
2356 if (ret
|| !(*buckets
))
2358 *flags
|= HIST_FIELD_FL_BUCKET
;
2359 } else if (strncmp(modifier
, "percent", 7) == 0) {
2360 if (*flags
& (HIST_FIELD_FL_VAR
| HIST_FIELD_FL_KEY
))
2362 *flags
|= HIST_FIELD_FL_PERCENT
;
2363 } else if (strncmp(modifier
, "graph", 5) == 0) {
2364 if (*flags
& (HIST_FIELD_FL_VAR
| HIST_FIELD_FL_KEY
))
2366 *flags
|= HIST_FIELD_FL_GRAPH
;
2369 hist_err(tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(modifier
));
2370 field
= ERR_PTR(-EINVAL
);
2375 if (strcmp(field_name
, "common_timestamp") == 0) {
2376 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
2377 hist_data
->enable_timestamps
= true;
2378 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2379 hist_data
->attrs
->ts_in_usecs
= true;
2380 } else if (strcmp(field_name
, "common_stacktrace") == 0) {
2381 *flags
|= HIST_FIELD_FL_STACKTRACE
;
2382 } else if (strcmp(field_name
, "common_cpu") == 0) {
2383 *flags
|= HIST_FIELD_FL_CPU
;
2384 } else if (strcmp(field_name
, "common_comm") == 0) {
2385 *flags
|= HIST_FIELD_FL_COMM
| HIST_FIELD_FL_STRING
;
2386 } else if (strcmp(field_name
, "hitcount") == 0)
2387 *flags
|= HIST_FIELD_FL_HITCOUNT
;
2389 field
= trace_find_event_field(file
->event_call
, field_name
);
2390 if (!field
|| !field
->size
) {
2392 * For backward compatibility, if field_name
2393 * was "cpu" or "stacktrace", then we treat this
2394 * the same as common_cpu and common_stacktrace
2395 * respectively. This also works for "CPU", and
2398 if (field
&& field
->filter_type
== FILTER_CPU
) {
2399 *flags
|= HIST_FIELD_FL_CPU
;
2400 } else if (field
&& field
->filter_type
== FILTER_STACKTRACE
) {
2401 *flags
|= HIST_FIELD_FL_STACKTRACE
;
2402 } else if (field
&& field
->filter_type
== FILTER_COMM
) {
2403 *flags
|= HIST_FIELD_FL_COMM
| HIST_FIELD_FL_STRING
;
2405 hist_err(tr
, HIST_ERR_FIELD_NOT_FOUND
,
2406 errpos(field_name
));
2407 field
= ERR_PTR(-EINVAL
);
2418 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
2419 struct hist_field
*var_ref
,
2422 struct hist_field
*alias
= NULL
;
2423 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
2425 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2429 alias
->fn_num
= var_ref
->fn_num
;
2430 alias
->operands
[0] = var_ref
;
2432 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2433 destroy_hist_field(alias
, 0);
2437 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
2442 static struct hist_field
*parse_const(struct hist_trigger_data
*hist_data
,
2443 char *str
, char *var_name
,
2444 unsigned long *flags
)
2446 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2447 struct hist_field
*field
= NULL
;
2450 if (kstrtoull(str
, 0, &constant
)) {
2451 hist_err(tr
, HIST_ERR_EXPECT_NUMBER
, errpos(str
));
2455 *flags
|= HIST_FIELD_FL_CONST
;
2456 field
= create_hist_field(hist_data
, NULL
, *flags
, var_name
);
2460 field
->constant
= constant
;
2465 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2466 struct trace_event_file
*file
, char *str
,
2467 unsigned long *flags
, char *var_name
)
2469 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2470 struct ftrace_event_field
*field
= NULL
;
2471 struct hist_field
*hist_field
= NULL
;
2472 unsigned long buckets
= 0;
2475 if (isdigit(str
[0])) {
2476 hist_field
= parse_const(hist_data
, str
, var_name
, flags
);
2484 s
= strchr(str
, '.');
2486 s
= strchr(++s
, '.');
2488 ref_system
= strsep(&str
, ".");
2493 ref_event
= strsep(&str
, ".");
2502 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2504 hist_field
= parse_var_ref(hist_data
, ref_system
,
2505 ref_event
, ref_var
);
2508 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2519 field
= parse_field(hist_data
, file
, str
, flags
, &buckets
);
2520 if (IS_ERR(field
)) {
2521 ret
= PTR_ERR(field
);
2525 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2530 hist_field
->buckets
= buckets
;
2534 return ERR_PTR(ret
);
2537 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2538 struct trace_event_file
*file
,
2539 char *str
, unsigned long flags
,
2540 char *var_name
, unsigned int *n_subexprs
);
2542 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2543 struct trace_event_file
*file
,
2544 char *str
, unsigned long flags
,
2545 char *var_name
, unsigned int *n_subexprs
)
2547 struct hist_field
*operand1
, *expr
= NULL
;
2548 unsigned long operand_flags
;
2552 /* Unary minus operator, increment n_subexprs */
2555 /* we support only -(xxx) i.e. explicit parens required */
2557 if (*n_subexprs
> 3) {
2558 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2563 str
++; /* skip leading '-' */
2565 s
= strchr(str
, '(');
2573 s
= strrchr(str
, ')');
2575 /* unary minus not supported in sub-expressions */
2576 if (*(s
+1) != '\0') {
2577 hist_err(file
->tr
, HIST_ERR_UNARY_MINUS_SUBEXPR
,
2585 ret
= -EINVAL
; /* no closing ')' */
2589 flags
|= HIST_FIELD_FL_EXPR
;
2590 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2597 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, n_subexprs
);
2598 if (IS_ERR(operand1
)) {
2599 ret
= PTR_ERR(operand1
);
2602 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2603 /* String type can not be the operand of unary operator. */
2604 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2605 destroy_hist_field(operand1
, 0);
2610 expr
->flags
|= operand1
->flags
&
2611 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2612 expr
->fn_num
= HIST_FIELD_FN_UMINUS
;
2613 expr
->operands
[0] = operand1
;
2614 expr
->size
= operand1
->size
;
2615 expr
->is_signed
= operand1
->is_signed
;
2616 expr
->operator = FIELD_OP_UNARY_MINUS
;
2617 expr
->name
= expr_str(expr
, 0);
2618 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2626 destroy_hist_field(expr
, 0);
2627 return ERR_PTR(ret
);
2631 * If the operands are var refs, return pointers the
2632 * variable(s) referenced in var1 and var2, else NULL.
2634 static int check_expr_operands(struct trace_array
*tr
,
2635 struct hist_field
*operand1
,
2636 struct hist_field
*operand2
,
2637 struct hist_field
**var1
,
2638 struct hist_field
**var2
)
2640 unsigned long operand1_flags
= operand1
->flags
;
2641 unsigned long operand2_flags
= operand2
->flags
;
2643 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
2644 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
2645 struct hist_field
*var
;
2647 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
2650 operand1_flags
= var
->flags
;
2654 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
2655 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
2656 struct hist_field
*var
;
2658 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
2661 operand2_flags
= var
->flags
;
2665 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
2666 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
2667 hist_err(tr
, HIST_ERR_TIMESTAMP_MISMATCH
, 0);
2674 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2675 struct trace_event_file
*file
,
2676 char *str
, unsigned long flags
,
2677 char *var_name
, unsigned int *n_subexprs
)
2679 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2680 struct hist_field
*var1
= NULL
, *var2
= NULL
;
2681 unsigned long operand_flags
, operand2_flags
;
2682 int field_op
, ret
= -EINVAL
;
2683 char *sep
, *operand1_str
;
2684 enum hist_field_fn op_fn
;
2685 bool combine_consts
;
2687 if (*n_subexprs
> 3) {
2688 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2689 return ERR_PTR(-EINVAL
);
2692 field_op
= contains_operator(str
, &sep
);
2694 if (field_op
== FIELD_OP_NONE
)
2695 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2697 if (field_op
== FIELD_OP_UNARY_MINUS
)
2698 return parse_unary(hist_data
, file
, str
, flags
, var_name
, n_subexprs
);
2700 /* Binary operator found, increment n_subexprs */
2703 /* Split the expression string at the root operator */
2705 return ERR_PTR(-EINVAL
);
2711 /* Binary operator requires both operands */
2712 if (*operand1_str
== '\0' || *str
== '\0')
2713 return ERR_PTR(-EINVAL
);
2717 /* LHS of string is an expression e.g. a+b in a+b+c */
2718 operand1
= parse_expr(hist_data
, file
, operand1_str
, operand_flags
, NULL
, n_subexprs
);
2719 if (IS_ERR(operand1
))
2720 return ERR_CAST(operand1
);
2722 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2723 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(operand1_str
));
2728 /* RHS of string is another expression e.g. c in a+b+c */
2730 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, n_subexprs
);
2731 if (IS_ERR(operand2
)) {
2732 ret
= PTR_ERR(operand2
);
2735 if (operand2
->flags
& HIST_FIELD_FL_STRING
) {
2736 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2742 case FIELD_OP_MINUS
:
2743 op_fn
= HIST_FIELD_FN_MINUS
;
2746 op_fn
= HIST_FIELD_FN_PLUS
;
2749 op_fn
= HIST_FIELD_FN_DIV
;
2752 op_fn
= HIST_FIELD_FN_MULT
;
2759 ret
= check_expr_operands(file
->tr
, operand1
, operand2
, &var1
, &var2
);
2763 operand_flags
= var1
? var1
->flags
: operand1
->flags
;
2764 operand2_flags
= var2
? var2
->flags
: operand2
->flags
;
2767 * If both operands are constant, the expression can be
2768 * collapsed to a single constant.
2770 combine_consts
= operand_flags
& operand2_flags
& HIST_FIELD_FL_CONST
;
2772 flags
|= combine_consts
? HIST_FIELD_FL_CONST
: HIST_FIELD_FL_EXPR
;
2774 flags
|= operand1
->flags
&
2775 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2777 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2783 operand1
->read_once
= true;
2784 operand2
->read_once
= true;
2786 /* The operands are now owned and free'd by 'expr' */
2787 expr
->operands
[0] = operand1
;
2788 expr
->operands
[1] = operand2
;
2790 if (field_op
== FIELD_OP_DIV
&&
2791 operand2_flags
& HIST_FIELD_FL_CONST
) {
2792 u64 divisor
= var2
? var2
->constant
: operand2
->constant
;
2795 hist_err(file
->tr
, HIST_ERR_DIVISION_BY_ZERO
, errpos(str
));
2801 * Copy the divisor here so we don't have to look it up
2802 * later if this is a var ref
2804 operand2
->constant
= divisor
;
2805 op_fn
= hist_field_get_div_fn(operand2
);
2808 expr
->fn_num
= op_fn
;
2810 if (combine_consts
) {
2812 expr
->operands
[0] = var1
;
2814 expr
->operands
[1] = var2
;
2816 expr
->constant
= hist_fn_call(expr
, NULL
, NULL
, NULL
, NULL
);
2817 expr
->fn_num
= HIST_FIELD_FN_CONST
;
2819 expr
->operands
[0] = NULL
;
2820 expr
->operands
[1] = NULL
;
2823 * var refs won't be destroyed immediately
2824 * See: destroy_hist_field()
2826 destroy_hist_field(operand2
, 0);
2827 destroy_hist_field(operand1
, 0);
2829 expr
->name
= expr_str(expr
, 0);
2831 /* The operand sizes should be the same, so just pick one */
2832 expr
->size
= operand1
->size
;
2833 expr
->is_signed
= operand1
->is_signed
;
2835 expr
->operator = field_op
;
2836 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2842 expr
->name
= expr_str(expr
, 0);
2848 destroy_hist_field(operand2
, 0);
2850 destroy_hist_field(operand1
, 0);
2851 return ERR_PTR(ret
);
2854 destroy_hist_field(expr
, 0);
2855 return ERR_PTR(ret
);
2858 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
2859 struct trace_event_file
*file
)
2861 struct event_trigger_data
*test
;
2863 lockdep_assert_held(&event_mutex
);
2865 list_for_each_entry(test
, &file
->triggers
, list
) {
2866 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2867 if (test
->private_data
== hist_data
)
2868 return test
->filter_str
;
2875 static struct event_command trigger_hist_cmd
;
2876 static int event_hist_trigger_parse(struct event_command
*cmd_ops
,
2877 struct trace_event_file
*file
,
2878 char *glob
, char *cmd
,
2879 char *param_and_filter
);
2881 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
2882 struct hist_trigger_data
*hist_data
,
2883 unsigned int n_keys
)
2885 struct hist_field
*target_hist_field
, *hist_field
;
2886 unsigned int n
, i
, j
;
2888 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
2891 i
= hist_data
->n_vals
;
2892 j
= target_hist_data
->n_vals
;
2894 for (n
= 0; n
< n_keys
; n
++) {
2895 hist_field
= hist_data
->fields
[i
+ n
];
2896 target_hist_field
= target_hist_data
->fields
[j
+ n
];
2898 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
2900 if (hist_field
->size
!= target_hist_field
->size
)
2902 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
2909 static struct hist_trigger_data
*
2910 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
2911 struct trace_event_file
*file
)
2913 struct hist_trigger_data
*hist_data
;
2914 struct event_trigger_data
*test
;
2915 unsigned int n_keys
;
2917 lockdep_assert_held(&event_mutex
);
2919 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
2921 list_for_each_entry(test
, &file
->triggers
, list
) {
2922 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2923 hist_data
= test
->private_data
;
2925 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
2933 static struct trace_event_file
*event_file(struct trace_array
*tr
,
2934 char *system
, char *event_name
)
2936 struct trace_event_file
*file
;
2938 file
= __find_event_file(tr
, system
, event_name
);
2940 return ERR_PTR(-EINVAL
);
2945 static struct hist_field
*
2946 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
2947 char *system
, char *event_name
, char *field_name
)
2949 struct hist_field
*event_var
;
2950 char *synthetic_name
;
2952 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2953 if (!synthetic_name
)
2954 return ERR_PTR(-ENOMEM
);
2956 strcpy(synthetic_name
, "synthetic_");
2957 strcat(synthetic_name
, field_name
);
2959 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
2961 kfree(synthetic_name
);
2967 * create_field_var_hist - Automatically create a histogram and var for a field
2968 * @target_hist_data: The target hist trigger
2969 * @subsys_name: Optional subsystem name
2970 * @event_name: Optional event name
2971 * @field_name: The name of the field (and the resulting variable)
2973 * Hist trigger actions fetch data from variables, not directly from
2974 * events. However, for convenience, users are allowed to directly
2975 * specify an event field in an action, which will be automatically
2976 * converted into a variable on their behalf.
2978 * If a user specifies a field on an event that isn't the event the
2979 * histogram currently being defined (the target event histogram), the
2980 * only way that can be accomplished is if a new hist trigger is
2981 * created and the field variable defined on that.
2983 * This function creates a new histogram compatible with the target
2984 * event (meaning a histogram with the same key as the target
2985 * histogram), and creates a variable for the specified field, but
2986 * with 'synthetic_' prepended to the variable name in order to avoid
2987 * collision with normal field variables.
2989 * Return: The variable created for the field.
2991 static struct hist_field
*
2992 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
2993 char *subsys_name
, char *event_name
, char *field_name
)
2995 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
2996 struct hist_trigger_data
*hist_data
;
2997 unsigned int i
, n
, first
= true;
2998 struct field_var_hist
*var_hist
;
2999 struct trace_event_file
*file
;
3000 struct hist_field
*key_field
;
3001 struct hist_field
*event_var
;
3006 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
3007 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
3008 return ERR_PTR(-EINVAL
);
3011 file
= event_file(tr
, subsys_name
, event_name
);
3014 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
3015 ret
= PTR_ERR(file
);
3016 return ERR_PTR(ret
);
3020 * Look for a histogram compatible with target. We'll use the
3021 * found histogram specification to create a new matching
3022 * histogram with our variable on it. target_hist_data is not
3023 * yet a registered histogram so we can't use that.
3025 hist_data
= find_compatible_hist(target_hist_data
, file
);
3027 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
3028 return ERR_PTR(-EINVAL
);
3031 /* See if a synthetic field variable has already been created */
3032 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3033 event_name
, field_name
);
3034 if (!IS_ERR_OR_NULL(event_var
))
3037 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
3039 return ERR_PTR(-ENOMEM
);
3041 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
3044 return ERR_PTR(-ENOMEM
);
3047 /* Use the same keys as the compatible histogram */
3048 strcat(cmd
, "keys=");
3050 for_each_hist_key_field(i
, hist_data
) {
3051 key_field
= hist_data
->fields
[i
];
3054 strcat(cmd
, key_field
->field
->name
);
3058 /* Create the synthetic field variable specification */
3059 strcat(cmd
, ":synthetic_");
3060 strcat(cmd
, field_name
);
3062 strcat(cmd
, field_name
);
3064 /* Use the same filter as the compatible histogram */
3065 saved_filter
= find_trigger_filter(hist_data
, file
);
3067 strcat(cmd
, " if ");
3068 strcat(cmd
, saved_filter
);
3071 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
3072 if (!var_hist
->cmd
) {
3075 return ERR_PTR(-ENOMEM
);
3078 /* Save the compatible histogram information */
3079 var_hist
->hist_data
= hist_data
;
3081 /* Create the new histogram with our variable */
3082 ret
= event_hist_trigger_parse(&trigger_hist_cmd
, file
,
3086 kfree(var_hist
->cmd
);
3088 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
3089 return ERR_PTR(ret
);
3094 /* If we can't find the variable, something went wrong */
3095 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
3096 event_name
, field_name
);
3097 if (IS_ERR_OR_NULL(event_var
)) {
3098 kfree(var_hist
->cmd
);
3100 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
3101 return ERR_PTR(-EINVAL
);
3104 n
= target_hist_data
->n_field_var_hists
;
3105 target_hist_data
->field_var_hists
[n
] = var_hist
;
3106 target_hist_data
->n_field_var_hists
++;
3111 static struct hist_field
*
3112 find_target_event_var(struct hist_trigger_data
*hist_data
,
3113 char *subsys_name
, char *event_name
, char *var_name
)
3115 struct trace_event_file
*file
= hist_data
->event_file
;
3116 struct hist_field
*hist_field
= NULL
;
3119 struct trace_event_call
*call
;
3124 call
= file
->event_call
;
3126 if (strcmp(subsys_name
, call
->class->system
) != 0)
3129 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3133 hist_field
= find_var_field(hist_data
, var_name
);
3138 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
3139 struct trace_buffer
*buffer
,
3140 struct ring_buffer_event
*rbe
,
3142 struct field_var
**field_vars
,
3143 unsigned int n_field_vars
,
3144 unsigned int field_var_str_start
)
3146 struct hist_elt_data
*elt_data
= elt
->private_data
;
3147 unsigned int i
, j
, var_idx
;
3150 /* Make sure stacktrace can fit in the string variable length */
3151 BUILD_BUG_ON((HIST_STACKTRACE_DEPTH
+ 1) * sizeof(long) >= STR_VAR_LEN_MAX
);
3153 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
3154 struct field_var
*field_var
= field_vars
[i
];
3155 struct hist_field
*var
= field_var
->var
;
3156 struct hist_field
*val
= field_var
->val
;
3158 var_val
= hist_fn_call(val
, elt
, buffer
, rbe
, rec
);
3159 var_idx
= var
->var
.idx
;
3161 if (val
->flags
& (HIST_FIELD_FL_STRING
|
3162 HIST_FIELD_FL_STACKTRACE
)) {
3163 char *str
= elt_data
->field_var_str
[j
++];
3164 char *val_str
= (char *)(uintptr_t)var_val
;
3167 if (val
->flags
& HIST_FIELD_FL_STRING
) {
3168 size
= min(val
->size
, STR_VAR_LEN_MAX
);
3169 strscpy(str
, val_str
, size
);
3171 char *stack_start
= str
+ sizeof(unsigned long);
3174 e
= stack_trace_save((void *)stack_start
,
3175 HIST_STACKTRACE_DEPTH
,
3176 HIST_STACKTRACE_SKIP
);
3177 if (e
< HIST_STACKTRACE_DEPTH
- 1)
3178 ((unsigned long *)stack_start
)[e
] = 0;
3179 *((unsigned long *)str
) = e
;
3181 var_val
= (u64
)(uintptr_t)str
;
3183 tracing_map_set_var(elt
, var_idx
, var_val
);
3187 static void update_field_vars(struct hist_trigger_data
*hist_data
,
3188 struct tracing_map_elt
*elt
,
3189 struct trace_buffer
*buffer
,
3190 struct ring_buffer_event
*rbe
,
3193 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->field_vars
,
3194 hist_data
->n_field_vars
, 0);
3197 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
3198 struct tracing_map_elt
*elt
,
3199 struct trace_buffer
*buffer
, void *rec
,
3200 struct ring_buffer_event
*rbe
, void *key
,
3201 struct action_data
*data
, u64
*var_ref_vals
)
3203 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->save_vars
,
3204 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
3207 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
3208 struct trace_event_file
*file
,
3209 char *name
, int size
, const char *type
)
3211 struct hist_field
*var
;
3214 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
3215 var
= ERR_PTR(-EINVAL
);
3219 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3221 var
= ERR_PTR(-ENOMEM
);
3225 idx
= tracing_map_add_var(hist_data
->map
);
3228 var
= ERR_PTR(-EINVAL
);
3233 var
->flags
= HIST_FIELD_FL_VAR
;
3235 var
->var
.hist_data
= var
->hist_data
= hist_data
;
3237 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
3238 var
->type
= kstrdup_const(type
, GFP_KERNEL
);
3239 if (!var
->var
.name
|| !var
->type
) {
3240 kfree_const(var
->type
);
3241 kfree(var
->var
.name
);
3243 var
= ERR_PTR(-ENOMEM
);
3249 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
3250 struct trace_event_file
*file
,
3253 struct hist_field
*val
= NULL
, *var
= NULL
;
3254 unsigned long flags
= HIST_FIELD_FL_VAR
;
3255 struct trace_array
*tr
= file
->tr
;
3256 struct field_var
*field_var
;
3259 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
3260 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
3265 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
3267 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
3272 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
3274 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
3280 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
3288 field_var
->var
= var
;
3289 field_var
->val
= val
;
3293 field_var
= ERR_PTR(ret
);
3298 * create_target_field_var - Automatically create a variable for a field
3299 * @target_hist_data: The target hist trigger
3300 * @subsys_name: Optional subsystem name
3301 * @event_name: Optional event name
3302 * @var_name: The name of the field (and the resulting variable)
3304 * Hist trigger actions fetch data from variables, not directly from
3305 * events. However, for convenience, users are allowed to directly
3306 * specify an event field in an action, which will be automatically
3307 * converted into a variable on their behalf.
3309 * This function creates a field variable with the name var_name on
3310 * the hist trigger currently being defined on the target event. If
3311 * subsys_name and event_name are specified, this function simply
3312 * verifies that they do in fact match the target event subsystem and
3315 * Return: The variable created for the field.
3317 static struct field_var
*
3318 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
3319 char *subsys_name
, char *event_name
, char *var_name
)
3321 struct trace_event_file
*file
= target_hist_data
->event_file
;
3324 struct trace_event_call
*call
;
3329 call
= file
->event_call
;
3331 if (strcmp(subsys_name
, call
->class->system
) != 0)
3334 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3338 return create_field_var(target_hist_data
, file
, var_name
);
3341 static bool check_track_val_max(u64 track_val
, u64 var_val
)
3343 if (var_val
<= track_val
)
3349 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
3351 if (var_val
== track_val
)
3357 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
3358 struct tracing_map_elt
*elt
,
3359 struct action_data
*data
)
3361 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
3364 track_val
= tracing_map_read_var(elt
, track_var_idx
);
3369 static void save_track_val(struct hist_trigger_data
*hist_data
,
3370 struct tracing_map_elt
*elt
,
3371 struct action_data
*data
, u64 var_val
)
3373 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
3375 tracing_map_set_var(elt
, track_var_idx
, var_val
);
3378 static void save_track_data(struct hist_trigger_data
*hist_data
,
3379 struct tracing_map_elt
*elt
,
3380 struct trace_buffer
*buffer
, void *rec
,
3381 struct ring_buffer_event
*rbe
, void *key
,
3382 struct action_data
*data
, u64
*var_ref_vals
)
3384 if (data
->track_data
.save_data
)
3385 data
->track_data
.save_data(hist_data
, elt
, buffer
, rec
, rbe
,
3386 key
, data
, var_ref_vals
);
3389 static bool check_track_val(struct tracing_map_elt
*elt
,
3390 struct action_data
*data
,
3393 struct hist_trigger_data
*hist_data
;
3396 hist_data
= data
->track_data
.track_var
->hist_data
;
3397 track_val
= get_track_val(hist_data
, elt
, data
);
3399 return data
->track_data
.check_val(track_val
, var_val
);
3402 #ifdef CONFIG_TRACER_SNAPSHOT
3403 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
3405 /* called with tr->max_lock held */
3406 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
3407 struct hist_elt_data
*elt_data
, *track_elt_data
;
3408 struct snapshot_context
*context
= cond_data
;
3409 struct action_data
*action
;
3415 action
= track_data
->action_data
;
3417 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
3418 track_data
->action_data
);
3420 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
3423 track_data
->track_val
= track_val
;
3424 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
3426 elt_data
= context
->elt
->private_data
;
3427 track_elt_data
= track_data
->elt
.private_data
;
3429 strscpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
3431 track_data
->updated
= true;
3436 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
3437 struct tracing_map_elt
*elt
,
3438 struct trace_buffer
*buffer
, void *rec
,
3439 struct ring_buffer_event
*rbe
, void *key
,
3440 struct action_data
*data
,
3443 struct trace_event_file
*file
= hist_data
->event_file
;
3444 struct snapshot_context context
;
3449 tracing_snapshot_cond(file
->tr
, &context
);
3452 static void hist_trigger_print_key(struct seq_file
*m
,
3453 struct hist_trigger_data
*hist_data
,
3455 struct tracing_map_elt
*elt
);
3457 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
3461 if (!hist_data
->n_actions
)
3464 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
3465 struct action_data
*data
= hist_data
->actions
[i
];
3467 if (data
->action
== ACTION_SNAPSHOT
)
3474 static void track_data_snapshot_print(struct seq_file
*m
,
3475 struct hist_trigger_data
*hist_data
)
3477 struct trace_event_file
*file
= hist_data
->event_file
;
3478 struct track_data
*track_data
;
3479 struct action_data
*action
;
3481 track_data
= tracing_cond_snapshot_data(file
->tr
);
3485 if (!track_data
->updated
)
3488 action
= snapshot_action(hist_data
);
3492 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3493 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
3494 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
3495 action
->track_data
.var_str
, track_data
->track_val
);
3497 seq_puts(m
, "\ttriggered by event with key: ");
3498 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
3502 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
3506 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
3507 struct tracing_map_elt
*elt
,
3508 struct trace_buffer
*buffer
, void *rec
,
3509 struct ring_buffer_event
*rbe
, void *key
,
3510 struct action_data
*data
,
3511 u64
*var_ref_vals
) {}
3512 static void track_data_snapshot_print(struct seq_file
*m
,
3513 struct hist_trigger_data
*hist_data
) {}
3514 #endif /* CONFIG_TRACER_SNAPSHOT */
3516 static void track_data_print(struct seq_file
*m
,
3517 struct hist_trigger_data
*hist_data
,
3518 struct tracing_map_elt
*elt
,
3519 struct action_data
*data
)
3521 u64 track_val
= get_track_val(hist_data
, elt
, data
);
3522 unsigned int i
, save_var_idx
;
3524 if (data
->handler
== HANDLER_ONMAX
)
3525 seq_printf(m
, "\n\tmax: %10llu", track_val
);
3526 else if (data
->handler
== HANDLER_ONCHANGE
)
3527 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
3529 if (data
->action
== ACTION_SNAPSHOT
)
3532 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
3533 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
3534 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
3537 save_var_idx
= save_var
->var
.idx
;
3539 val
= tracing_map_read_var(elt
, save_var_idx
);
3541 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
3542 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
3543 (char *)(uintptr_t)(val
));
3545 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
3549 static void ontrack_action(struct hist_trigger_data
*hist_data
,
3550 struct tracing_map_elt
*elt
,
3551 struct trace_buffer
*buffer
, void *rec
,
3552 struct ring_buffer_event
*rbe
, void *key
,
3553 struct action_data
*data
, u64
*var_ref_vals
)
3555 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
3557 if (check_track_val(elt
, data
, var_val
)) {
3558 save_track_val(hist_data
, elt
, data
, var_val
);
3559 save_track_data(hist_data
, elt
, buffer
, rec
, rbe
,
3560 key
, data
, var_ref_vals
);
3564 static void action_data_destroy(struct action_data
*data
)
3568 lockdep_assert_held(&event_mutex
);
3570 kfree(data
->action_name
);
3572 for (i
= 0; i
< data
->n_params
; i
++)
3573 kfree(data
->params
[i
]);
3575 if (data
->synth_event
)
3576 data
->synth_event
->ref
--;
3578 kfree(data
->synth_event_name
);
3583 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
3584 struct action_data
*data
)
3586 struct trace_event_file
*file
= hist_data
->event_file
;
3588 destroy_hist_field(data
->track_data
.track_var
, 0);
3590 if (data
->action
== ACTION_SNAPSHOT
) {
3591 struct track_data
*track_data
;
3593 track_data
= tracing_cond_snapshot_data(file
->tr
);
3594 if (track_data
&& track_data
->hist_data
== hist_data
) {
3595 tracing_snapshot_cond_disable(file
->tr
);
3596 track_data_free(track_data
);
3600 kfree(data
->track_data
.var_str
);
3602 action_data_destroy(data
);
3605 static int action_create(struct hist_trigger_data
*hist_data
,
3606 struct action_data
*data
);
3608 static int track_data_create(struct hist_trigger_data
*hist_data
,
3609 struct action_data
*data
)
3611 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
3612 struct trace_event_file
*file
= hist_data
->event_file
;
3613 struct trace_array
*tr
= file
->tr
;
3614 char *track_data_var_str
;
3617 track_data_var_str
= data
->track_data
.var_str
;
3618 if (track_data_var_str
[0] != '$') {
3619 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
3622 track_data_var_str
++;
3624 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
3626 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
3630 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
3634 data
->track_data
.var_ref
= ref_field
;
3636 if (data
->handler
== HANDLER_ONMAX
)
3637 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
3638 if (IS_ERR(track_var
)) {
3639 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3640 ret
= PTR_ERR(track_var
);
3644 if (data
->handler
== HANDLER_ONCHANGE
)
3645 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
3646 if (IS_ERR(track_var
)) {
3647 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3648 ret
= PTR_ERR(track_var
);
3651 data
->track_data
.track_var
= track_var
;
3653 ret
= action_create(hist_data
, data
);
3658 static int parse_action_params(struct trace_array
*tr
, char *params
,
3659 struct action_data
*data
)
3661 char *param
, *saved_param
;
3662 bool first_param
= true;
3666 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
3667 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
3672 param
= strsep(¶ms
, ",");
3674 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
3679 param
= strstrip(param
);
3680 if (strlen(param
) < 2) {
3681 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
3686 saved_param
= kstrdup(param
, GFP_KERNEL
);
3692 if (first_param
&& data
->use_trace_keyword
) {
3693 data
->synth_event_name
= saved_param
;
3694 first_param
= false;
3697 first_param
= false;
3699 data
->params
[data
->n_params
++] = saved_param
;
3705 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
3706 enum handler_id handler
)
3713 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3718 action_name
= strsep(&str
, "(");
3719 if (!action_name
|| !str
) {
3720 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3725 if (str_has_prefix(action_name
, "save")) {
3726 char *params
= strsep(&str
, ")");
3729 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
3734 ret
= parse_action_params(tr
, params
, data
);
3738 if (handler
== HANDLER_ONMAX
)
3739 data
->track_data
.check_val
= check_track_val_max
;
3740 else if (handler
== HANDLER_ONCHANGE
)
3741 data
->track_data
.check_val
= check_track_val_changed
;
3743 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3748 data
->track_data
.save_data
= save_track_data_vars
;
3749 data
->fn
= ontrack_action
;
3750 data
->action
= ACTION_SAVE
;
3751 } else if (str_has_prefix(action_name
, "snapshot")) {
3752 char *params
= strsep(&str
, ")");
3755 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
3760 if (handler
== HANDLER_ONMAX
)
3761 data
->track_data
.check_val
= check_track_val_max
;
3762 else if (handler
== HANDLER_ONCHANGE
)
3763 data
->track_data
.check_val
= check_track_val_changed
;
3765 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3770 data
->track_data
.save_data
= save_track_data_snapshot
;
3771 data
->fn
= ontrack_action
;
3772 data
->action
= ACTION_SNAPSHOT
;
3774 char *params
= strsep(&str
, ")");
3776 if (str_has_prefix(action_name
, "trace"))
3777 data
->use_trace_keyword
= true;
3780 ret
= parse_action_params(tr
, params
, data
);
3785 if (handler
== HANDLER_ONMAX
)
3786 data
->track_data
.check_val
= check_track_val_max
;
3787 else if (handler
== HANDLER_ONCHANGE
)
3788 data
->track_data
.check_val
= check_track_val_changed
;
3790 if (handler
!= HANDLER_ONMATCH
) {
3791 data
->track_data
.save_data
= action_trace
;
3792 data
->fn
= ontrack_action
;
3794 data
->fn
= action_trace
;
3796 data
->action
= ACTION_TRACE
;
3799 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
3800 if (!data
->action_name
) {
3805 data
->handler
= handler
;
3810 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
3811 char *str
, enum handler_id handler
)
3813 struct action_data
*data
;
3817 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3819 return ERR_PTR(-ENOMEM
);
3821 var_str
= strsep(&str
, ")");
3822 if (!var_str
|| !str
) {
3827 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
3828 if (!data
->track_data
.var_str
) {
3833 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
3839 track_data_destroy(hist_data
, data
);
3840 data
= ERR_PTR(ret
);
3844 static void onmatch_destroy(struct action_data
*data
)
3846 kfree(data
->match_data
.event
);
3847 kfree(data
->match_data
.event_system
);
3849 action_data_destroy(data
);
3852 static void destroy_field_var(struct field_var
*field_var
)
3857 destroy_hist_field(field_var
->var
, 0);
3858 destroy_hist_field(field_var
->val
, 0);
3863 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3867 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3868 destroy_field_var(hist_data
->field_vars
[i
]);
3870 for (i
= 0; i
< hist_data
->n_save_vars
; i
++)
3871 destroy_field_var(hist_data
->save_vars
[i
]);
3874 static void save_field_var(struct hist_trigger_data
*hist_data
,
3875 struct field_var
*field_var
)
3877 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3879 /* Stack traces are saved in the string storage too */
3880 if (field_var
->val
->flags
& (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
3881 hist_data
->n_field_var_str
++;
3885 static int check_synth_field(struct synth_event
*event
,
3886 struct hist_field
*hist_field
,
3887 unsigned int field_pos
)
3889 struct synth_field
*field
;
3891 if (field_pos
>= event
->n_fields
)
3894 field
= event
->fields
[field_pos
];
3897 * A dynamic string synth field can accept static or
3898 * dynamic. A static string synth field can only accept a
3899 * same-sized static string, which is checked for later.
3901 if (strstr(hist_field
->type
, "char[") && field
->is_string
3902 && field
->is_dynamic
)
3905 if (strstr(hist_field
->type
, "long[") && field
->is_stack
)
3908 if (strcmp(field
->type
, hist_field
->type
) != 0) {
3909 if (field
->size
!= hist_field
->size
||
3910 (!field
->is_string
&& field
->is_signed
!= hist_field
->is_signed
))
3917 static struct hist_field
*
3918 trace_action_find_var(struct hist_trigger_data
*hist_data
,
3919 struct action_data
*data
,
3920 char *system
, char *event
, char *var
)
3922 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3923 struct hist_field
*hist_field
;
3925 var
++; /* skip '$' */
3927 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3929 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3930 system
= data
->match_data
.event_system
;
3931 event
= data
->match_data
.event
;
3934 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3938 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
3943 static struct hist_field
*
3944 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
3945 struct action_data
*data
, char *system
,
3946 char *event
, char *var
)
3948 struct hist_field
*hist_field
= NULL
;
3949 struct field_var
*field_var
;
3952 * First try to create a field var on the target event (the
3953 * currently being defined). This will create a variable for
3954 * unqualified fields on the target event, or if qualified,
3955 * target fields that have qualified names matching the target.
3957 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3959 if (field_var
&& !IS_ERR(field_var
)) {
3960 save_field_var(hist_data
, field_var
);
3961 hist_field
= field_var
->var
;
3965 * If no explicit system.event is specified, default to
3966 * looking for fields on the onmatch(system.event.xxx)
3969 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3970 system
= data
->match_data
.event_system
;
3971 event
= data
->match_data
.event
;
3977 * At this point, we're looking at a field on another
3978 * event. Because we can't modify a hist trigger on
3979 * another event to add a variable for a field, we need
3980 * to create a new trigger on that event and create the
3981 * variable at the same time.
3983 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3984 if (IS_ERR(hist_field
))
3990 destroy_field_var(field_var
);
3995 static int trace_action_create(struct hist_trigger_data
*hist_data
,
3996 struct action_data
*data
)
3998 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3999 char *event_name
, *param
, *system
= NULL
;
4000 struct hist_field
*hist_field
, *var_ref
;
4002 unsigned int field_pos
= 0;
4003 struct synth_event
*event
;
4004 char *synth_event_name
;
4005 int var_ref_idx
, ret
= 0;
4007 lockdep_assert_held(&event_mutex
);
4009 /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
4010 if (data
->n_params
> SYNTH_FIELDS_MAX
)
4013 if (data
->use_trace_keyword
)
4014 synth_event_name
= data
->synth_event_name
;
4016 synth_event_name
= data
->action_name
;
4018 event
= find_synth_event(synth_event_name
);
4020 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
4026 for (i
= 0; i
< data
->n_params
; i
++) {
4029 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
4035 system
= strsep(¶m
, ".");
4037 param
= (char *)system
;
4038 system
= event_name
= NULL
;
4040 event_name
= strsep(¶m
, ".");
4048 if (param
[0] == '$')
4049 hist_field
= trace_action_find_var(hist_data
, data
,
4053 hist_field
= trace_action_create_field_var(hist_data
,
4065 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
4066 var_ref
= create_var_ref(hist_data
, hist_field
,
4067 system
, event_name
);
4074 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
4075 if (WARN_ON(var_ref_idx
< 0)) {
4081 data
->var_ref_idx
[i
] = var_ref_idx
;
4088 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
4094 if (field_pos
!= event
->n_fields
) {
4095 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
4100 data
->synth_event
= event
;
4109 static int action_create(struct hist_trigger_data
*hist_data
,
4110 struct action_data
*data
)
4112 struct trace_event_file
*file
= hist_data
->event_file
;
4113 struct trace_array
*tr
= file
->tr
;
4114 struct track_data
*track_data
;
4115 struct field_var
*field_var
;
4120 if (data
->action
== ACTION_TRACE
)
4121 return trace_action_create(hist_data
, data
);
4123 if (data
->action
== ACTION_SNAPSHOT
) {
4124 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
4125 if (IS_ERR(track_data
)) {
4126 ret
= PTR_ERR(track_data
);
4130 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
4131 cond_snapshot_update
);
4133 track_data_free(track_data
);
4138 if (data
->action
== ACTION_SAVE
) {
4139 if (hist_data
->n_save_vars
) {
4141 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
4145 for (i
= 0; i
< data
->n_params
; i
++) {
4146 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
4152 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
4153 if (IS_ERR(field_var
)) {
4154 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
4156 ret
= PTR_ERR(field_var
);
4161 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
4162 if (field_var
->val
->flags
&
4163 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
4164 hist_data
->n_save_var_str
++;
4172 static int onmatch_create(struct hist_trigger_data
*hist_data
,
4173 struct action_data
*data
)
4175 return action_create(hist_data
, data
);
4178 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
4180 char *match_event
, *match_event_system
;
4181 struct action_data
*data
;
4184 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
4186 return ERR_PTR(-ENOMEM
);
4188 match_event
= strsep(&str
, ")");
4189 if (!match_event
|| !str
) {
4190 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
4194 match_event_system
= strsep(&match_event
, ".");
4196 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
4200 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
4201 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
4205 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
4206 if (!data
->match_data
.event
) {
4211 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
4212 if (!data
->match_data
.event_system
) {
4217 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
4223 onmatch_destroy(data
);
4224 data
= ERR_PTR(ret
);
4228 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
4230 hist_data
->fields
[HITCOUNT_IDX
] =
4231 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
4232 if (!hist_data
->fields
[HITCOUNT_IDX
])
4235 hist_data
->n_vals
++;
4236 hist_data
->n_fields
++;
4238 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
4244 static int __create_val_field(struct hist_trigger_data
*hist_data
,
4245 unsigned int val_idx
,
4246 struct trace_event_file
*file
,
4247 char *var_name
, char *field_str
,
4248 unsigned long flags
)
4250 struct hist_field
*hist_field
;
4251 int ret
= 0, n_subexprs
= 0;
4253 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, &n_subexprs
);
4254 if (IS_ERR(hist_field
)) {
4255 ret
= PTR_ERR(hist_field
);
4259 /* values and variables should not have some modifiers */
4260 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4262 if (hist_field
->flags
& (HIST_FIELD_FL_GRAPH
| HIST_FIELD_FL_PERCENT
|
4263 HIST_FIELD_FL_BUCKET
| HIST_FIELD_FL_LOG2
))
4267 if (hist_field
->flags
& (HIST_FIELD_FL_GRAPH
| HIST_FIELD_FL_PERCENT
|
4268 HIST_FIELD_FL_BUCKET
| HIST_FIELD_FL_LOG2
|
4269 HIST_FIELD_FL_SYM
| HIST_FIELD_FL_SYM_OFFSET
|
4270 HIST_FIELD_FL_SYSCALL
| HIST_FIELD_FL_STACKTRACE
))
4274 hist_data
->fields
[val_idx
] = hist_field
;
4276 ++hist_data
->n_vals
;
4277 ++hist_data
->n_fields
;
4279 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
4284 hist_err(file
->tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(field_str
));
4288 static int create_val_field(struct hist_trigger_data
*hist_data
,
4289 unsigned int val_idx
,
4290 struct trace_event_file
*file
,
4293 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
4296 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
4299 static const char no_comm
[] = "(no comm)";
4301 static u64
hist_field_execname(struct hist_field
*hist_field
,
4302 struct tracing_map_elt
*elt
,
4303 struct trace_buffer
*buffer
,
4304 struct ring_buffer_event
*rbe
,
4307 struct hist_elt_data
*elt_data
;
4309 if (WARN_ON_ONCE(!elt
))
4310 return (u64
)(unsigned long)no_comm
;
4312 elt_data
= elt
->private_data
;
4314 if (WARN_ON_ONCE(!elt_data
->comm
))
4315 return (u64
)(unsigned long)no_comm
;
4317 return (u64
)(unsigned long)(elt_data
->comm
);
4320 static u64
hist_field_stack(struct hist_field
*hist_field
,
4321 struct tracing_map_elt
*elt
,
4322 struct trace_buffer
*buffer
,
4323 struct ring_buffer_event
*rbe
,
4326 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
4327 int str_loc
= str_item
& 0xffff;
4328 char *addr
= (char *)(event
+ str_loc
);
4330 return (u64
)(unsigned long)addr
;
4333 static u64
hist_fn_call(struct hist_field
*hist_field
,
4334 struct tracing_map_elt
*elt
,
4335 struct trace_buffer
*buffer
,
4336 struct ring_buffer_event
*rbe
,
4339 switch (hist_field
->fn_num
) {
4340 case HIST_FIELD_FN_VAR_REF
:
4341 return hist_field_var_ref(hist_field
, elt
, buffer
, rbe
, event
);
4342 case HIST_FIELD_FN_COUNTER
:
4343 return hist_field_counter(hist_field
, elt
, buffer
, rbe
, event
);
4344 case HIST_FIELD_FN_CONST
:
4345 return hist_field_const(hist_field
, elt
, buffer
, rbe
, event
);
4346 case HIST_FIELD_FN_LOG2
:
4347 return hist_field_log2(hist_field
, elt
, buffer
, rbe
, event
);
4348 case HIST_FIELD_FN_BUCKET
:
4349 return hist_field_bucket(hist_field
, elt
, buffer
, rbe
, event
);
4350 case HIST_FIELD_FN_TIMESTAMP
:
4351 return hist_field_timestamp(hist_field
, elt
, buffer
, rbe
, event
);
4352 case HIST_FIELD_FN_CPU
:
4353 return hist_field_cpu(hist_field
, elt
, buffer
, rbe
, event
);
4354 case HIST_FIELD_FN_COMM
:
4355 return hist_field_comm(hist_field
, elt
, buffer
, rbe
, event
);
4356 case HIST_FIELD_FN_STRING
:
4357 return hist_field_string(hist_field
, elt
, buffer
, rbe
, event
);
4358 case HIST_FIELD_FN_DYNSTRING
:
4359 return hist_field_dynstring(hist_field
, elt
, buffer
, rbe
, event
);
4360 case HIST_FIELD_FN_RELDYNSTRING
:
4361 return hist_field_reldynstring(hist_field
, elt
, buffer
, rbe
, event
);
4362 case HIST_FIELD_FN_PSTRING
:
4363 return hist_field_pstring(hist_field
, elt
, buffer
, rbe
, event
);
4364 case HIST_FIELD_FN_S64
:
4365 return hist_field_s64(hist_field
, elt
, buffer
, rbe
, event
);
4366 case HIST_FIELD_FN_U64
:
4367 return hist_field_u64(hist_field
, elt
, buffer
, rbe
, event
);
4368 case HIST_FIELD_FN_S32
:
4369 return hist_field_s32(hist_field
, elt
, buffer
, rbe
, event
);
4370 case HIST_FIELD_FN_U32
:
4371 return hist_field_u32(hist_field
, elt
, buffer
, rbe
, event
);
4372 case HIST_FIELD_FN_S16
:
4373 return hist_field_s16(hist_field
, elt
, buffer
, rbe
, event
);
4374 case HIST_FIELD_FN_U16
:
4375 return hist_field_u16(hist_field
, elt
, buffer
, rbe
, event
);
4376 case HIST_FIELD_FN_S8
:
4377 return hist_field_s8(hist_field
, elt
, buffer
, rbe
, event
);
4378 case HIST_FIELD_FN_U8
:
4379 return hist_field_u8(hist_field
, elt
, buffer
, rbe
, event
);
4380 case HIST_FIELD_FN_UMINUS
:
4381 return hist_field_unary_minus(hist_field
, elt
, buffer
, rbe
, event
);
4382 case HIST_FIELD_FN_MINUS
:
4383 return hist_field_minus(hist_field
, elt
, buffer
, rbe
, event
);
4384 case HIST_FIELD_FN_PLUS
:
4385 return hist_field_plus(hist_field
, elt
, buffer
, rbe
, event
);
4386 case HIST_FIELD_FN_DIV
:
4387 return hist_field_div(hist_field
, elt
, buffer
, rbe
, event
);
4388 case HIST_FIELD_FN_MULT
:
4389 return hist_field_mult(hist_field
, elt
, buffer
, rbe
, event
);
4390 case HIST_FIELD_FN_DIV_POWER2
:
4391 return div_by_power_of_two(hist_field
, elt
, buffer
, rbe
, event
);
4392 case HIST_FIELD_FN_DIV_NOT_POWER2
:
4393 return div_by_not_power_of_two(hist_field
, elt
, buffer
, rbe
, event
);
4394 case HIST_FIELD_FN_DIV_MULT_SHIFT
:
4395 return div_by_mult_and_shift(hist_field
, elt
, buffer
, rbe
, event
);
4396 case HIST_FIELD_FN_EXECNAME
:
4397 return hist_field_execname(hist_field
, elt
, buffer
, rbe
, event
);
4398 case HIST_FIELD_FN_STACK
:
4399 return hist_field_stack(hist_field
, elt
, buffer
, rbe
, event
);
4405 /* Convert a var that points to common_pid.execname to a string */
4406 static void update_var_execname(struct hist_field
*hist_field
)
4408 hist_field
->flags
= HIST_FIELD_FL_STRING
| HIST_FIELD_FL_VAR
|
4409 HIST_FIELD_FL_EXECNAME
;
4410 hist_field
->size
= MAX_FILTER_STR_VAL
;
4411 hist_field
->is_signed
= 0;
4413 kfree_const(hist_field
->type
);
4414 hist_field
->type
= "char[]";
4416 hist_field
->fn_num
= HIST_FIELD_FN_EXECNAME
;
4419 static int create_var_field(struct hist_trigger_data
*hist_data
,
4420 unsigned int val_idx
,
4421 struct trace_event_file
*file
,
4422 char *var_name
, char *expr_str
)
4424 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4425 unsigned long flags
= 0;
4428 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
4431 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
4432 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
4436 flags
|= HIST_FIELD_FL_VAR
;
4437 hist_data
->n_vars
++;
4438 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
4441 ret
= __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
4443 if (!ret
&& hist_data
->fields
[val_idx
]->flags
& HIST_FIELD_FL_EXECNAME
)
4444 update_var_execname(hist_data
->fields
[val_idx
]);
4446 if (!ret
&& hist_data
->fields
[val_idx
]->flags
&
4447 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
))
4448 hist_data
->fields
[val_idx
]->var_str_idx
= hist_data
->n_var_str
++;
4453 static int create_val_fields(struct hist_trigger_data
*hist_data
,
4454 struct trace_event_file
*file
)
4456 unsigned int i
, j
= 1, n_hitcount
= 0;
4457 char *fields_str
, *field_str
;
4460 ret
= create_hitcount_val(hist_data
);
4464 fields_str
= hist_data
->attrs
->vals_str
;
4468 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
4469 j
< TRACING_MAP_VALS_MAX
; i
++) {
4470 field_str
= strsep(&fields_str
, ",");
4474 if (strcmp(field_str
, "hitcount") == 0) {
4479 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
4484 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
4487 /* There is only raw hitcount but nohitcount suppresses it. */
4488 if (j
== 1 && hist_data
->attrs
->no_hitcount
) {
4489 hist_err(hist_data
->event_file
->tr
, HIST_ERR_NEED_NOHC_VAL
, 0);
4496 static int create_key_field(struct hist_trigger_data
*hist_data
,
4497 unsigned int key_idx
,
4498 unsigned int key_offset
,
4499 struct trace_event_file
*file
,
4502 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4503 struct hist_field
*hist_field
= NULL
;
4504 unsigned long flags
= 0;
4505 unsigned int key_size
;
4506 int ret
= 0, n_subexprs
= 0;
4508 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
4511 flags
|= HIST_FIELD_FL_KEY
;
4513 if (strcmp(field_str
, "stacktrace") == 0) {
4514 flags
|= HIST_FIELD_FL_STACKTRACE
;
4515 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
4516 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
4518 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
4520 if (IS_ERR(hist_field
)) {
4521 ret
= PTR_ERR(hist_field
);
4525 if (field_has_hist_vars(hist_field
, 0)) {
4526 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
4527 destroy_hist_field(hist_field
, 0);
4532 key_size
= hist_field
->size
;
4535 hist_data
->fields
[key_idx
] = hist_field
;
4537 key_size
= ALIGN(key_size
, sizeof(u64
));
4538 hist_data
->fields
[key_idx
]->size
= key_size
;
4539 hist_data
->fields
[key_idx
]->offset
= key_offset
;
4541 hist_data
->key_size
+= key_size
;
4543 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
4548 hist_data
->n_keys
++;
4549 hist_data
->n_fields
++;
4551 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
4559 static int create_key_fields(struct hist_trigger_data
*hist_data
,
4560 struct trace_event_file
*file
)
4562 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
4563 char *fields_str
, *field_str
;
4566 fields_str
= hist_data
->attrs
->keys_str
;
4570 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
4571 field_str
= strsep(&fields_str
, ",");
4574 ret
= create_key_field(hist_data
, i
, key_offset
,
4589 static int create_var_fields(struct hist_trigger_data
*hist_data
,
4590 struct trace_event_file
*file
)
4592 unsigned int i
, j
= hist_data
->n_vals
;
4595 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
4597 for (i
= 0; i
< n_vars
; i
++) {
4598 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
4599 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
4601 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
4609 static void free_var_defs(struct hist_trigger_data
*hist_data
)
4613 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
4614 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
4615 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
4618 hist_data
->attrs
->var_defs
.n_vars
= 0;
4621 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
4623 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4624 char *s
, *str
, *var_name
, *field_str
;
4625 unsigned int i
, j
, n_vars
= 0;
4628 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
4629 str
= hist_data
->attrs
->assignment_str
[i
];
4630 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
4631 field_str
= strsep(&str
, ",");
4635 var_name
= strsep(&field_str
, "=");
4636 if (!var_name
|| !field_str
) {
4637 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
4643 if (n_vars
== TRACING_MAP_VARS_MAX
) {
4644 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
4649 s
= kstrdup(var_name
, GFP_KERNEL
);
4654 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
4656 s
= kstrdup(field_str
, GFP_KERNEL
);
4658 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
4659 hist_data
->attrs
->var_defs
.name
[n_vars
] = NULL
;
4663 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
4665 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
4671 free_var_defs(hist_data
);
4676 static int create_hist_fields(struct hist_trigger_data
*hist_data
,
4677 struct trace_event_file
*file
)
4681 ret
= parse_var_defs(hist_data
);
4685 ret
= create_val_fields(hist_data
, file
);
4689 ret
= create_var_fields(hist_data
, file
);
4693 ret
= create_key_fields(hist_data
, file
);
4696 free_var_defs(hist_data
);
4701 static int is_descending(struct trace_array
*tr
, const char *str
)
4706 if (strcmp(str
, "descending") == 0)
4709 if (strcmp(str
, "ascending") == 0)
4712 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
4717 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
4719 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4720 char *fields_str
= hist_data
->attrs
->sort_key_str
;
4721 struct tracing_map_sort_key
*sort_key
;
4722 int descending
, ret
= 0;
4723 unsigned int i
, j
, k
;
4725 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
4730 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
4731 struct hist_field
*hist_field
;
4732 char *field_str
, *field_name
;
4733 const char *test_name
;
4735 sort_key
= &hist_data
->sort_keys
[i
];
4737 field_str
= strsep(&fields_str
, ",");
4743 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4747 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
4748 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
4753 field_name
= strsep(&field_str
, ".");
4754 if (!field_name
|| !*field_name
) {
4756 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4760 if (strcmp(field_name
, "hitcount") == 0) {
4761 descending
= is_descending(tr
, field_str
);
4762 if (descending
< 0) {
4766 sort_key
->descending
= descending
;
4770 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4773 hist_field
= hist_data
->fields
[j
];
4774 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4779 test_name
= hist_field_name(hist_field
, 0);
4781 if (strcmp(field_name
, test_name
) == 0) {
4782 sort_key
->field_idx
= idx
;
4783 descending
= is_descending(tr
, field_str
);
4784 if (descending
< 0) {
4788 sort_key
->descending
= descending
;
4792 if (j
== hist_data
->n_fields
) {
4794 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
4799 hist_data
->n_sort_keys
= i
;
4804 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4808 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4809 struct action_data
*data
= hist_data
->actions
[i
];
4811 if (data
->handler
== HANDLER_ONMATCH
)
4812 onmatch_destroy(data
);
4813 else if (data
->handler
== HANDLER_ONMAX
||
4814 data
->handler
== HANDLER_ONCHANGE
)
4815 track_data_destroy(hist_data
, data
);
4821 static int parse_actions(struct hist_trigger_data
*hist_data
)
4823 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4824 struct action_data
*data
;
4830 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4831 enum handler_id hid
= 0;
4834 str
= hist_data
->attrs
->action_str
[i
];
4836 if ((len
= str_has_prefix(str
, "onmatch(")))
4837 hid
= HANDLER_ONMATCH
;
4838 else if ((len
= str_has_prefix(str
, "onmax(")))
4839 hid
= HANDLER_ONMAX
;
4840 else if ((len
= str_has_prefix(str
, "onchange(")))
4841 hid
= HANDLER_ONCHANGE
;
4843 action_str
= str
+ len
;
4846 case HANDLER_ONMATCH
:
4847 data
= onmatch_parse(tr
, action_str
);
4850 case HANDLER_ONCHANGE
:
4851 data
= track_data_parse(hist_data
, action_str
, hid
);
4854 data
= ERR_PTR(-EINVAL
);
4859 ret
= PTR_ERR(data
);
4863 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4869 static int create_actions(struct hist_trigger_data
*hist_data
)
4871 struct action_data
*data
;
4875 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4876 data
= hist_data
->actions
[i
];
4878 if (data
->handler
== HANDLER_ONMATCH
) {
4879 ret
= onmatch_create(hist_data
, data
);
4882 } else if (data
->handler
== HANDLER_ONMAX
||
4883 data
->handler
== HANDLER_ONCHANGE
) {
4884 ret
= track_data_create(hist_data
, data
);
4896 static void print_actions(struct seq_file
*m
,
4897 struct hist_trigger_data
*hist_data
,
4898 struct tracing_map_elt
*elt
)
4902 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4903 struct action_data
*data
= hist_data
->actions
[i
];
4905 if (data
->action
== ACTION_SNAPSHOT
)
4908 if (data
->handler
== HANDLER_ONMAX
||
4909 data
->handler
== HANDLER_ONCHANGE
)
4910 track_data_print(m
, hist_data
, elt
, data
);
4914 static void print_action_spec(struct seq_file
*m
,
4915 struct hist_trigger_data
*hist_data
,
4916 struct action_data
*data
)
4920 if (data
->action
== ACTION_SAVE
) {
4921 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4922 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
4923 if (i
< hist_data
->n_save_vars
- 1)
4926 } else if (data
->action
== ACTION_TRACE
) {
4927 if (data
->use_trace_keyword
)
4928 seq_printf(m
, "%s", data
->synth_event_name
);
4929 for (i
= 0; i
< data
->n_params
; i
++) {
4930 if (i
|| data
->use_trace_keyword
)
4932 seq_printf(m
, "%s", data
->params
[i
]);
4937 static void print_track_data_spec(struct seq_file
*m
,
4938 struct hist_trigger_data
*hist_data
,
4939 struct action_data
*data
)
4941 if (data
->handler
== HANDLER_ONMAX
)
4942 seq_puts(m
, ":onmax(");
4943 else if (data
->handler
== HANDLER_ONCHANGE
)
4944 seq_puts(m
, ":onchange(");
4945 seq_printf(m
, "%s", data
->track_data
.var_str
);
4946 seq_printf(m
, ").%s(", data
->action_name
);
4948 print_action_spec(m
, hist_data
, data
);
4953 static void print_onmatch_spec(struct seq_file
*m
,
4954 struct hist_trigger_data
*hist_data
,
4955 struct action_data
*data
)
4957 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
4958 data
->match_data
.event
);
4960 seq_printf(m
, "%s(", data
->action_name
);
4962 print_action_spec(m
, hist_data
, data
);
4967 static bool actions_match(struct hist_trigger_data
*hist_data
,
4968 struct hist_trigger_data
*hist_data_test
)
4972 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4975 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4976 struct action_data
*data
= hist_data
->actions
[i
];
4977 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4978 char *action_name
, *action_name_test
;
4980 if (data
->handler
!= data_test
->handler
)
4982 if (data
->action
!= data_test
->action
)
4985 if (data
->n_params
!= data_test
->n_params
)
4988 for (j
= 0; j
< data
->n_params
; j
++) {
4989 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4993 if (data
->use_trace_keyword
)
4994 action_name
= data
->synth_event_name
;
4996 action_name
= data
->action_name
;
4998 if (data_test
->use_trace_keyword
)
4999 action_name_test
= data_test
->synth_event_name
;
5001 action_name_test
= data_test
->action_name
;
5003 if (strcmp(action_name
, action_name_test
) != 0)
5006 if (data
->handler
== HANDLER_ONMATCH
) {
5007 if (strcmp(data
->match_data
.event_system
,
5008 data_test
->match_data
.event_system
) != 0)
5010 if (strcmp(data
->match_data
.event
,
5011 data_test
->match_data
.event
) != 0)
5013 } else if (data
->handler
== HANDLER_ONMAX
||
5014 data
->handler
== HANDLER_ONCHANGE
) {
5015 if (strcmp(data
->track_data
.var_str
,
5016 data_test
->track_data
.var_str
) != 0)
5025 static void print_actions_spec(struct seq_file
*m
,
5026 struct hist_trigger_data
*hist_data
)
5030 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5031 struct action_data
*data
= hist_data
->actions
[i
];
5033 if (data
->handler
== HANDLER_ONMATCH
)
5034 print_onmatch_spec(m
, hist_data
, data
);
5035 else if (data
->handler
== HANDLER_ONMAX
||
5036 data
->handler
== HANDLER_ONCHANGE
)
5037 print_track_data_spec(m
, hist_data
, data
);
5041 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
5045 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5046 kfree(hist_data
->field_var_hists
[i
]->cmd
);
5047 kfree(hist_data
->field_var_hists
[i
]);
5051 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
5056 destroy_hist_trigger_attrs(hist_data
->attrs
);
5057 destroy_hist_fields(hist_data
);
5058 tracing_map_destroy(hist_data
->map
);
5060 destroy_actions(hist_data
);
5061 destroy_field_vars(hist_data
);
5062 destroy_field_var_hists(hist_data
);
5067 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
5069 struct tracing_map
*map
= hist_data
->map
;
5070 struct ftrace_event_field
*field
;
5071 struct hist_field
*hist_field
;
5074 for_each_hist_field(i
, hist_data
) {
5075 hist_field
= hist_data
->fields
[i
];
5076 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
5077 tracing_map_cmp_fn_t cmp_fn
;
5079 field
= hist_field
->field
;
5081 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
5082 cmp_fn
= tracing_map_cmp_none
;
5083 else if (!field
|| hist_field
->flags
& HIST_FIELD_FL_CPU
)
5084 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
5085 hist_field
->is_signed
);
5086 else if (is_string_field(field
))
5087 cmp_fn
= tracing_map_cmp_string
;
5089 cmp_fn
= tracing_map_cmp_num(field
->size
,
5091 idx
= tracing_map_add_key_field(map
,
5094 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
5095 idx
= tracing_map_add_sum_field(map
);
5100 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5101 idx
= tracing_map_add_var(map
);
5104 hist_field
->var
.idx
= idx
;
5105 hist_field
->var
.hist_data
= hist_data
;
5112 static struct hist_trigger_data
*
5113 create_hist_data(unsigned int map_bits
,
5114 struct hist_trigger_attrs
*attrs
,
5115 struct trace_event_file
*file
,
5118 const struct tracing_map_ops
*map_ops
= NULL
;
5119 struct hist_trigger_data
*hist_data
;
5122 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
5124 return ERR_PTR(-ENOMEM
);
5126 hist_data
->attrs
= attrs
;
5127 hist_data
->remove
= remove
;
5128 hist_data
->event_file
= file
;
5130 ret
= parse_actions(hist_data
);
5134 ret
= create_hist_fields(hist_data
, file
);
5138 ret
= create_sort_keys(hist_data
);
5142 map_ops
= &hist_trigger_elt_data_ops
;
5144 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
5145 map_ops
, hist_data
);
5146 if (IS_ERR(hist_data
->map
)) {
5147 ret
= PTR_ERR(hist_data
->map
);
5148 hist_data
->map
= NULL
;
5152 ret
= create_tracing_map_fields(hist_data
);
5158 hist_data
->attrs
= NULL
;
5160 destroy_hist_data(hist_data
);
5162 hist_data
= ERR_PTR(ret
);
5167 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
5168 struct tracing_map_elt
*elt
,
5169 struct trace_buffer
*buffer
, void *rec
,
5170 struct ring_buffer_event
*rbe
,
5173 struct hist_elt_data
*elt_data
;
5174 struct hist_field
*hist_field
;
5175 unsigned int i
, var_idx
;
5178 elt_data
= elt
->private_data
;
5179 elt_data
->var_ref_vals
= var_ref_vals
;
5181 for_each_hist_val_field(i
, hist_data
) {
5182 hist_field
= hist_data
->fields
[i
];
5183 hist_val
= hist_fn_call(hist_field
, elt
, buffer
, rbe
, rec
);
5184 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5185 var_idx
= hist_field
->var
.idx
;
5187 if (hist_field
->flags
&
5188 (HIST_FIELD_FL_STRING
| HIST_FIELD_FL_STACKTRACE
)) {
5189 unsigned int str_start
, var_str_idx
, idx
;
5190 char *str
, *val_str
;
5193 str_start
= hist_data
->n_field_var_str
+
5194 hist_data
->n_save_var_str
;
5195 var_str_idx
= hist_field
->var_str_idx
;
5196 idx
= str_start
+ var_str_idx
;
5198 str
= elt_data
->field_var_str
[idx
];
5199 val_str
= (char *)(uintptr_t)hist_val
;
5201 if (hist_field
->flags
& HIST_FIELD_FL_STRING
) {
5202 size
= min(hist_field
->size
, STR_VAR_LEN_MAX
);
5203 strscpy(str
, val_str
, size
);
5205 char *stack_start
= str
+ sizeof(unsigned long);
5208 e
= stack_trace_save((void *)stack_start
,
5209 HIST_STACKTRACE_DEPTH
,
5210 HIST_STACKTRACE_SKIP
);
5211 if (e
< HIST_STACKTRACE_DEPTH
- 1)
5212 ((unsigned long *)stack_start
)[e
] = 0;
5213 *((unsigned long *)str
) = e
;
5215 hist_val
= (u64
)(uintptr_t)str
;
5217 tracing_map_set_var(elt
, var_idx
, hist_val
);
5220 tracing_map_update_sum(elt
, i
, hist_val
);
5223 for_each_hist_key_field(i
, hist_data
) {
5224 hist_field
= hist_data
->fields
[i
];
5225 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5226 hist_val
= hist_fn_call(hist_field
, elt
, buffer
, rbe
, rec
);
5227 var_idx
= hist_field
->var
.idx
;
5228 tracing_map_set_var(elt
, var_idx
, hist_val
);
5232 update_field_vars(hist_data
, elt
, buffer
, rbe
, rec
);
5235 static inline void add_to_key(char *compound_key
, void *key
,
5236 struct hist_field
*key_field
, void *rec
)
5238 size_t size
= key_field
->size
;
5240 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5242 if (key_field
->flags
& HIST_FIELD_FL_COMM
) {
5243 size
= strlen((char *)key
);
5245 struct ftrace_event_field
*field
;
5247 field
= key_field
->field
;
5248 if (field
->filter_type
== FILTER_DYN_STRING
||
5249 field
->filter_type
== FILTER_RDYN_STRING
)
5250 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
5251 else if (field
->filter_type
== FILTER_STATIC_STRING
)
5255 /* ensure NULL-termination */
5256 if (size
> key_field
->size
- 1)
5257 size
= key_field
->size
- 1;
5259 memcpy(compound_key
+ key_field
->offset
, key
, size
);
5263 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
5264 struct tracing_map_elt
*elt
,
5265 struct trace_buffer
*buffer
, void *rec
,
5266 struct ring_buffer_event
*rbe
, void *key
,
5269 struct action_data
*data
;
5272 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5273 data
= hist_data
->actions
[i
];
5274 data
->fn(hist_data
, elt
, buffer
, rec
, rbe
, key
, data
, var_ref_vals
);
5279 * The hist_pad structure is used to save information to create
5280 * a histogram from the histogram trigger. It's too big to store
5281 * on the stack, so when the histogram trigger is initialized
5282 * a percpu array of 4 hist_pad structures is allocated.
5283 * This will cover every context from normal, softirq, irq and NMI
5284 * in the very unlikely event that a tigger happens at each of
5285 * these contexts and interrupts a currently active trigger.
5288 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
5289 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
5290 char compound_key
[HIST_KEY_SIZE_MAX
];
5293 static struct hist_pad __percpu
*hist_pads
;
5294 static DEFINE_PER_CPU(int, hist_pad_cnt
);
5295 static refcount_t hist_pad_ref
;
5297 /* One hist_pad for every context (normal, softirq, irq, NMI) */
5298 #define MAX_HIST_CNT 4
5300 static int alloc_hist_pad(void)
5302 lockdep_assert_held(&event_mutex
);
5304 if (refcount_read(&hist_pad_ref
)) {
5305 refcount_inc(&hist_pad_ref
);
5309 hist_pads
= __alloc_percpu(sizeof(struct hist_pad
) * MAX_HIST_CNT
,
5310 __alignof__(struct hist_pad
));
5314 refcount_set(&hist_pad_ref
, 1);
5318 static void free_hist_pad(void)
5320 lockdep_assert_held(&event_mutex
);
5322 if (!refcount_dec_and_test(&hist_pad_ref
))
5325 free_percpu(hist_pads
);
5329 static struct hist_pad
*get_hist_pad(void)
5331 struct hist_pad
*hist_pad
;
5334 if (WARN_ON_ONCE(!hist_pads
))
5339 hist_pad
= per_cpu_ptr(hist_pads
, smp_processor_id());
5341 if (this_cpu_read(hist_pad_cnt
) == MAX_HIST_CNT
) {
5346 cnt
= this_cpu_inc_return(hist_pad_cnt
) - 1;
5348 return &hist_pad
[cnt
];
5351 static void put_hist_pad(void)
5353 this_cpu_dec(hist_pad_cnt
);
5357 static void event_hist_trigger(struct event_trigger_data
*data
,
5358 struct trace_buffer
*buffer
, void *rec
,
5359 struct ring_buffer_event
*rbe
)
5361 struct hist_trigger_data
*hist_data
= data
->private_data
;
5362 bool use_compound_key
= (hist_data
->n_keys
> 1);
5363 struct tracing_map_elt
*elt
= NULL
;
5364 struct hist_field
*key_field
;
5365 struct hist_pad
*hist_pad
;
5373 hist_pad
= get_hist_pad();
5377 memset(hist_pad
->compound_key
, 0, hist_data
->key_size
);
5379 for_each_hist_key_field(i
, hist_data
) {
5380 key_field
= hist_data
->fields
[i
];
5382 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5383 unsigned long *entries
= hist_pad
->entries
;
5385 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
5386 if (key_field
->field
) {
5387 unsigned long *stack
, n_entries
;
5389 field_contents
= hist_fn_call(key_field
, elt
, buffer
, rbe
, rec
);
5390 stack
= (unsigned long *)(long)field_contents
;
5392 memcpy(entries
, ++stack
, n_entries
* sizeof(unsigned long));
5394 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
5395 HIST_STACKTRACE_SKIP
);
5399 field_contents
= hist_fn_call(key_field
, elt
, buffer
, rbe
, rec
);
5400 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5401 key
= (void *)(unsigned long)field_contents
;
5402 use_compound_key
= true;
5404 key
= (void *)&field_contents
;
5407 if (use_compound_key
)
5408 add_to_key(hist_pad
->compound_key
, key
, key_field
, rec
);
5411 if (use_compound_key
)
5412 key
= hist_pad
->compound_key
;
5414 if (hist_data
->n_var_refs
&&
5415 !resolve_var_refs(hist_data
, key
, hist_pad
->var_ref_vals
, false))
5418 elt
= tracing_map_insert(hist_data
->map
, key
);
5422 hist_trigger_elt_update(hist_data
, elt
, buffer
, rec
, rbe
, hist_pad
->var_ref_vals
);
5424 if (resolve_var_refs(hist_data
, key
, hist_pad
->var_ref_vals
, true)) {
5425 hist_trigger_actions(hist_data
, elt
, buffer
, rec
, rbe
,
5426 key
, hist_pad
->var_ref_vals
);
5435 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
5436 unsigned long *stacktrace_entries
,
5437 unsigned int max_entries
)
5439 unsigned int spaces
= 8;
5442 for (i
= 0; i
< max_entries
; i
++) {
5443 if (!stacktrace_entries
[i
])
5446 seq_printf(m
, "%*c", 1 + spaces
, ' ');
5447 seq_printf(m
, "%pS\n", (void*)stacktrace_entries
[i
]);
5451 static void hist_trigger_print_key(struct seq_file
*m
,
5452 struct hist_trigger_data
*hist_data
,
5454 struct tracing_map_elt
*elt
)
5456 struct hist_field
*key_field
;
5457 bool multiline
= false;
5458 const char *field_name
;
5464 for_each_hist_key_field(i
, hist_data
) {
5465 key_field
= hist_data
->fields
[i
];
5467 if (i
> hist_data
->n_vals
)
5470 field_name
= hist_field_name(key_field
, 0);
5472 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
5473 uval
= *(u64
*)(key
+ key_field
->offset
);
5474 seq_printf(m
, "%s: %llx", field_name
, uval
);
5475 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
5476 uval
= *(u64
*)(key
+ key_field
->offset
);
5477 seq_printf(m
, "%s: [%llx] %-45ps", field_name
,
5478 uval
, (void *)(uintptr_t)uval
);
5479 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
5480 uval
= *(u64
*)(key
+ key_field
->offset
);
5481 seq_printf(m
, "%s: [%llx] %-55pS", field_name
,
5482 uval
, (void *)(uintptr_t)uval
);
5483 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
5484 struct hist_elt_data
*elt_data
= elt
->private_data
;
5487 if (WARN_ON_ONCE(!elt_data
))
5490 comm
= elt_data
->comm
;
5492 uval
= *(u64
*)(key
+ key_field
->offset
);
5493 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
5495 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
5496 const char *syscall_name
;
5498 uval
= *(u64
*)(key
+ key_field
->offset
);
5499 syscall_name
= get_syscall_name(uval
);
5501 syscall_name
= "unknown_syscall";
5503 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
5504 syscall_name
, uval
);
5505 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
5506 if (key_field
->field
)
5507 seq_printf(m
, "%s.stacktrace", key_field
->field
->name
);
5509 seq_puts(m
, "common_stacktrace:\n");
5510 hist_trigger_stacktrace_print(m
,
5511 key
+ key_field
->offset
,
5512 HIST_STACKTRACE_DEPTH
);
5514 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
5515 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
5516 *(u64
*)(key
+ key_field
->offset
));
5517 } else if (key_field
->flags
& HIST_FIELD_FL_BUCKET
) {
5518 unsigned long buckets
= key_field
->buckets
;
5519 uval
= *(u64
*)(key
+ key_field
->offset
);
5520 seq_printf(m
, "%s: ~ %llu-%llu", field_name
,
5521 uval
, uval
+ buckets
-1);
5522 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
5523 seq_printf(m
, "%s: %-50s", field_name
,
5524 (char *)(key
+ key_field
->offset
));
5526 uval
= *(u64
*)(key
+ key_field
->offset
);
5527 seq_printf(m
, "%s: %10llu", field_name
, uval
);
5537 /* Get the 100 times of the percentage of @val in @total */
5538 static inline unsigned int __get_percentage(u64 val
, u64 total
)
5543 if (val
< (U64_MAX
/ 10000))
5544 return (unsigned int)div64_ul(val
* 10000, total
);
5546 total
= div64_u64(total
, 10000);
5550 return (unsigned int)div64_ul(val
, total
);
5552 return val
? UINT_MAX
: 0;
5555 #define BAR_CHAR '#'
5557 static inline const char *__fill_bar_str(char *buf
, int size
, u64 val
, u64 max
)
5559 unsigned int len
= __get_percentage(val
, max
);
5562 if (len
== UINT_MAX
) {
5563 snprintf(buf
, size
, "[ERROR]");
5567 len
= len
* size
/ 10000;
5568 for (i
= 0; i
< len
&& i
< size
; i
++)
5577 struct hist_val_stat
{
5582 static void hist_trigger_print_val(struct seq_file
*m
, unsigned int idx
,
5583 const char *field_name
, unsigned long flags
,
5584 struct hist_val_stat
*stats
,
5585 struct tracing_map_elt
*elt
)
5587 u64 val
= tracing_map_read_sum(elt
, idx
);
5591 if (flags
& HIST_FIELD_FL_PERCENT
) {
5592 pc
= __get_percentage(val
, stats
[idx
].total
);
5594 seq_printf(m
, " %s (%%):[ERROR]", field_name
);
5596 seq_printf(m
, " %s (%%): %3u.%02u", field_name
,
5597 pc
/ 100, pc
% 100);
5598 } else if (flags
& HIST_FIELD_FL_GRAPH
) {
5599 seq_printf(m
, " %s: %20s", field_name
,
5600 __fill_bar_str(bar
, 20, val
, stats
[idx
].max
));
5601 } else if (flags
& HIST_FIELD_FL_HEX
) {
5602 seq_printf(m
, " %s: %10llx", field_name
, val
);
5604 seq_printf(m
, " %s: %10llu", field_name
, val
);
5608 static void hist_trigger_entry_print(struct seq_file
*m
,
5609 struct hist_trigger_data
*hist_data
,
5610 struct hist_val_stat
*stats
,
5612 struct tracing_map_elt
*elt
)
5614 const char *field_name
;
5615 unsigned int i
= HITCOUNT_IDX
;
5616 unsigned long flags
;
5618 hist_trigger_print_key(m
, hist_data
, key
, elt
);
5620 /* At first, show the raw hitcount if !nohitcount */
5621 if (!hist_data
->attrs
->no_hitcount
)
5622 hist_trigger_print_val(m
, i
, "hitcount", 0, stats
, elt
);
5624 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
5625 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
5626 flags
= hist_data
->fields
[i
]->flags
;
5627 if (flags
& HIST_FIELD_FL_VAR
|| flags
& HIST_FIELD_FL_EXPR
)
5631 hist_trigger_print_val(m
, i
, field_name
, flags
, stats
, elt
);
5634 print_actions(m
, hist_data
, elt
);
5639 static int print_entries(struct seq_file
*m
,
5640 struct hist_trigger_data
*hist_data
)
5642 struct tracing_map_sort_entry
**sort_entries
= NULL
;
5643 struct tracing_map
*map
= hist_data
->map
;
5644 int i
, j
, n_entries
;
5645 struct hist_val_stat
*stats
= NULL
;
5648 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
5649 hist_data
->n_sort_keys
,
5654 /* Calculate the max and the total for each field if needed. */
5655 for (j
= 0; j
< hist_data
->n_vals
; j
++) {
5656 if (!(hist_data
->fields
[j
]->flags
&
5657 (HIST_FIELD_FL_PERCENT
| HIST_FIELD_FL_GRAPH
)))
5660 stats
= kcalloc(hist_data
->n_vals
, sizeof(*stats
),
5663 n_entries
= -ENOMEM
;
5667 for (i
= 0; i
< n_entries
; i
++) {
5668 val
= tracing_map_read_sum(sort_entries
[i
]->elt
, j
);
5669 stats
[j
].total
+= val
;
5670 if (stats
[j
].max
< val
)
5675 for (i
= 0; i
< n_entries
; i
++)
5676 hist_trigger_entry_print(m
, hist_data
, stats
,
5677 sort_entries
[i
]->key
,
5678 sort_entries
[i
]->elt
);
5682 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
5687 static void hist_trigger_show(struct seq_file
*m
,
5688 struct event_trigger_data
*data
, int n
)
5690 struct hist_trigger_data
*hist_data
;
5694 seq_puts(m
, "\n\n");
5696 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
5697 data
->ops
->print(m
, data
);
5698 seq_puts(m
, "#\n\n");
5700 hist_data
= data
->private_data
;
5701 n_entries
= print_entries(m
, hist_data
);
5705 track_data_snapshot_print(m
, hist_data
);
5707 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
5708 (u64
)atomic64_read(&hist_data
->map
->hits
),
5709 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
5712 struct hist_file_data
{
5718 static u64
get_hist_hit_count(struct trace_event_file
*event_file
)
5720 struct hist_trigger_data
*hist_data
;
5721 struct event_trigger_data
*data
;
5724 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5725 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5726 hist_data
= data
->private_data
;
5727 ret
+= atomic64_read(&hist_data
->map
->hits
);
5733 static int hist_show(struct seq_file
*m
, void *v
)
5735 struct hist_file_data
*hist_file
= m
->private;
5736 struct event_trigger_data
*data
;
5737 struct trace_event_file
*event_file
;
5740 guard(mutex
)(&event_mutex
);
5742 event_file
= event_file_file(hist_file
->file
);
5743 if (unlikely(!event_file
))
5746 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5747 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5748 hist_trigger_show(m
, data
, n
++);
5750 hist_file
->last_read
= get_hist_hit_count(event_file
);
5752 * Update last_act too so that poll()/POLLPRI can wait for the next
5753 * event after any syscall on hist file.
5755 hist_file
->last_act
= hist_file
->last_read
;
5760 static __poll_t
event_hist_poll(struct file
*file
, struct poll_table_struct
*wait
)
5762 struct trace_event_file
*event_file
;
5763 struct seq_file
*m
= file
->private_data
;
5764 struct hist_file_data
*hist_file
= m
->private;
5768 guard(mutex
)(&event_mutex
);
5770 event_file
= event_file_data(file
);
5774 hist_poll_wait(file
, wait
);
5776 cnt
= get_hist_hit_count(event_file
);
5777 if (hist_file
->last_read
!= cnt
)
5778 ret
|= EPOLLIN
| EPOLLRDNORM
;
5779 if (hist_file
->last_act
!= cnt
) {
5780 hist_file
->last_act
= cnt
;
5787 static int event_hist_release(struct inode
*inode
, struct file
*file
)
5789 struct seq_file
*m
= file
->private_data
;
5790 struct hist_file_data
*hist_file
= m
->private;
5793 return tracing_single_release_file_tr(inode
, file
);
5796 static int event_hist_open(struct inode
*inode
, struct file
*file
)
5798 struct trace_event_file
*event_file
;
5799 struct hist_file_data
*hist_file
;
5802 ret
= tracing_open_file_tr(inode
, file
);
5806 guard(mutex
)(&event_mutex
);
5808 event_file
= event_file_data(file
);
5814 hist_file
= kzalloc(sizeof(*hist_file
), GFP_KERNEL
);
5820 hist_file
->file
= file
;
5821 hist_file
->last_act
= get_hist_hit_count(event_file
);
5823 /* Clear private_data to avoid warning in single_open() */
5824 file
->private_data
= NULL
;
5825 ret
= single_open(file
, hist_show
, hist_file
);
5833 tracing_release_file_tr(inode
, file
);
5837 const struct file_operations event_hist_fops
= {
5838 .open
= event_hist_open
,
5840 .llseek
= seq_lseek
,
5841 .release
= event_hist_release
,
5842 .poll
= event_hist_poll
,
5845 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
/*
 * Decode a hist_field flags word for the "hist_debug" file.  The field
 * roles (key, hitcount, variable, variable reference, plain val) are
 * mutually exclusive, hence the else-if chain; ALIAS/CONST are
 * additional attributes reported separately.
 */
static void hist_field_debug_show_flags(struct seq_file *m,
					unsigned long flags)
{
	seq_puts(m, "      flags:\n");

	if (flags & HIST_FIELD_FL_KEY)
		seq_puts(m, "        HIST_FIELD_FL_KEY\n");
	else if (flags & HIST_FIELD_FL_HITCOUNT)
		seq_puts(m, "        VAL: HIST_FIELD_FL_HITCOUNT\n");
	else if (flags & HIST_FIELD_FL_VAR)
		seq_puts(m, "        HIST_FIELD_FL_VAR\n");
	else if (flags & HIST_FIELD_FL_VAR_REF)
		seq_puts(m, "        HIST_FIELD_FL_VAR_REF\n");
	else
		seq_puts(m, "        VAL: normal u64 value\n");

	if (flags & HIST_FIELD_FL_ALIAS)
		seq_puts(m, "        HIST_FIELD_FL_ALIAS\n");
	else if (flags & HIST_FIELD_FL_CONST)
		seq_puts(m, "        HIST_FIELD_FL_CONST\n");
}
5868 static int hist_field_debug_show(struct seq_file
*m
,
5869 struct hist_field
*field
, unsigned long flags
)
5871 if ((field
->flags
& flags
) != flags
) {
5872 seq_printf(m
, "ERROR: bad flags - %lx\n", flags
);
5876 hist_field_debug_show_flags(m
, field
->flags
);
5878 seq_printf(m
, " ftrace_event_field name: %s\n",
5879 field
->field
->name
);
5881 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5882 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5883 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5887 if (field
->flags
& HIST_FIELD_FL_CONST
)
5888 seq_printf(m
, " constant: %llu\n", field
->constant
);
5890 if (field
->flags
& HIST_FIELD_FL_ALIAS
)
5891 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
5892 field
->var_ref_idx
);
5894 if (field
->flags
& HIST_FIELD_FL_VAR_REF
) {
5895 seq_printf(m
, " name: %s\n", field
->name
);
5896 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5898 seq_printf(m
, " var.hist_data: %p\n", field
->var
.hist_data
);
5899 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
5900 field
->var_ref_idx
);
5902 seq_printf(m
, " system: %s\n", field
->system
);
5903 if (field
->event_name
)
5904 seq_printf(m
, " event_name: %s\n", field
->event_name
);
5907 seq_printf(m
, " type: %s\n", field
->type
);
5908 seq_printf(m
, " size: %u\n", field
->size
);
5909 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5914 static int field_var_debug_show(struct seq_file
*m
,
5915 struct field_var
*field_var
, unsigned int i
,
5918 const char *vars_name
= save_vars
? "save_vars" : "field_vars";
5919 struct hist_field
*field
;
5922 seq_printf(m
, "\n hist_data->%s[%d]:\n", vars_name
, i
);
5924 field
= field_var
->var
;
5926 seq_printf(m
, "\n %s[%d].var:\n", vars_name
, i
);
5928 hist_field_debug_show_flags(m
, field
->flags
);
5929 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5930 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5933 field
= field_var
->val
;
5935 seq_printf(m
, "\n %s[%d].val:\n", vars_name
, i
);
5937 seq_printf(m
, " ftrace_event_field name: %s\n",
5938 field
->field
->name
);
5944 seq_printf(m
, " type: %s\n", field
->type
);
5945 seq_printf(m
, " size: %u\n", field
->size
);
5946 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5951 static int hist_action_debug_show(struct seq_file
*m
,
5952 struct action_data
*data
, int i
)
5956 if (data
->handler
== HANDLER_ONMAX
||
5957 data
->handler
== HANDLER_ONCHANGE
) {
5958 seq_printf(m
, "\n hist_data->actions[%d].track_data.var_ref:\n", i
);
5959 ret
= hist_field_debug_show(m
, data
->track_data
.var_ref
,
5960 HIST_FIELD_FL_VAR_REF
);
5964 seq_printf(m
, "\n hist_data->actions[%d].track_data.track_var:\n", i
);
5965 ret
= hist_field_debug_show(m
, data
->track_data
.track_var
,
5971 if (data
->handler
== HANDLER_ONMATCH
) {
5972 seq_printf(m
, "\n hist_data->actions[%d].match_data.event_system: %s\n",
5973 i
, data
->match_data
.event_system
);
5974 seq_printf(m
, " hist_data->actions[%d].match_data.event: %s\n",
5975 i
, data
->match_data
.event
);
5981 static int hist_actions_debug_show(struct seq_file
*m
,
5982 struct hist_trigger_data
*hist_data
)
5986 if (hist_data
->n_actions
)
5987 seq_puts(m
, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
5989 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5990 struct action_data
*action
= hist_data
->actions
[i
];
5992 ret
= hist_action_debug_show(m
, action
, i
);
5997 if (hist_data
->n_save_vars
)
5998 seq_puts(m
, "\n save action variables (save() params):\n");
6000 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
6001 ret
= field_var_debug_show(m
, hist_data
->save_vars
[i
], i
, true);
6009 static void hist_trigger_debug_show(struct seq_file
*m
,
6010 struct event_trigger_data
*data
, int n
)
6012 struct hist_trigger_data
*hist_data
;
6016 seq_puts(m
, "\n\n");
6018 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
6019 data
->ops
->print(m
, data
);
6020 seq_puts(m
, "#\n\n");
6022 hist_data
= data
->private_data
;
6024 seq_printf(m
, "hist_data: %p\n\n", hist_data
);
6025 seq_printf(m
, " n_vals: %u\n", hist_data
->n_vals
);
6026 seq_printf(m
, " n_keys: %u\n", hist_data
->n_keys
);
6027 seq_printf(m
, " n_fields: %u\n", hist_data
->n_fields
);
6029 seq_puts(m
, "\n val fields:\n\n");
6031 seq_puts(m
, " hist_data->fields[0]:\n");
6032 ret
= hist_field_debug_show(m
, hist_data
->fields
[0],
6033 HIST_FIELD_FL_HITCOUNT
);
6037 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
6038 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
6039 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
], 0);
6044 seq_puts(m
, "\n key fields:\n");
6046 for (i
= hist_data
->n_vals
; i
< hist_data
->n_fields
; i
++) {
6047 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
6048 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
],
6054 if (hist_data
->n_var_refs
)
6055 seq_puts(m
, "\n variable reference fields:\n");
6057 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
6058 seq_printf(m
, "\n hist_data->var_refs[%d]:\n", i
);
6059 ret
= hist_field_debug_show(m
, hist_data
->var_refs
[i
],
6060 HIST_FIELD_FL_VAR_REF
);
6065 if (hist_data
->n_field_vars
)
6066 seq_puts(m
, "\n field variables:\n");
6068 for (i
= 0; i
< hist_data
->n_field_vars
; i
++) {
6069 ret
= field_var_debug_show(m
, hist_data
->field_vars
[i
], i
, false);
6074 ret
= hist_actions_debug_show(m
, hist_data
);
6079 static int hist_debug_show(struct seq_file
*m
, void *v
)
6081 struct event_trigger_data
*data
;
6082 struct trace_event_file
*event_file
;
6085 guard(mutex
)(&event_mutex
);
6087 event_file
= event_file_file(m
->private);
6088 if (unlikely(!event_file
))
6091 list_for_each_entry(data
, &event_file
->triggers
, list
) {
6092 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
6093 hist_trigger_debug_show(m
, data
, n
++);
6098 static int event_hist_debug_open(struct inode
*inode
, struct file
*file
)
6102 ret
= tracing_open_file_tr(inode
, file
);
6106 /* Clear private_data to avoid warning in single_open() */
6107 file
->private_data
= NULL
;
6108 ret
= single_open(file
, hist_debug_show
, file
);
6110 tracing_release_file_tr(inode
, file
);
6114 const struct file_operations event_hist_debug_fops
= {
6115 .open
= event_hist_debug_open
,
6117 .llseek
= seq_lseek
,
6118 .release
= tracing_single_release_file_tr
,
6122 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
6124 const char *field_name
= hist_field_name(hist_field
, 0);
6126 if (hist_field
->var
.name
)
6127 seq_printf(m
, "%s=", hist_field
->var
.name
);
6129 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
6130 seq_puts(m
, "common_cpu");
6131 if (hist_field
->flags
& HIST_FIELD_FL_COMM
)
6132 seq_puts(m
, "common_comm");
6133 else if (hist_field
->flags
& HIST_FIELD_FL_CONST
)
6134 seq_printf(m
, "%llu", hist_field
->constant
);
6135 else if (field_name
) {
6136 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
6137 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
6139 seq_printf(m
, "%s", field_name
);
6140 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
6141 seq_puts(m
, "common_timestamp");
6143 if (hist_field
->flags
) {
6144 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
6145 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
) &&
6146 !(hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)) {
6147 const char *flags
= get_hist_field_flags(hist_field
);
6150 seq_printf(m
, ".%s", flags
);
6153 if (hist_field
->buckets
)
6154 seq_printf(m
, "=%ld", hist_field
->buckets
);
6157 static int event_hist_trigger_print(struct seq_file
*m
,
6158 struct event_trigger_data
*data
)
6160 struct hist_trigger_data
*hist_data
= data
->private_data
;
6161 struct hist_field
*field
;
6162 bool have_var
= false;
6163 bool show_val
= false;
6166 seq_puts(m
, HIST_PREFIX
);
6169 seq_printf(m
, "%s:", data
->name
);
6171 seq_puts(m
, "keys=");
6173 for_each_hist_key_field(i
, hist_data
) {
6174 field
= hist_data
->fields
[i
];
6176 if (i
> hist_data
->n_vals
)
6179 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6181 seq_printf(m
, "%s.stacktrace", field
->field
->name
);
6183 seq_puts(m
, "common_stacktrace");
6185 hist_field_print(m
, field
);
6188 seq_puts(m
, ":vals=");
6190 for_each_hist_val_field(i
, hist_data
) {
6191 field
= hist_data
->fields
[i
];
6192 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6197 if (i
== HITCOUNT_IDX
) {
6198 if (hist_data
->attrs
->no_hitcount
)
6200 seq_puts(m
, "hitcount");
6204 hist_field_print(m
, field
);
6214 for_each_hist_val_field(i
, hist_data
) {
6215 field
= hist_data
->fields
[i
];
6217 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6220 hist_field_print(m
, field
);
6225 seq_puts(m
, ":sort=");
6227 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6228 struct tracing_map_sort_key
*sort_key
;
6229 unsigned int idx
, first_key_idx
;
6232 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
6234 sort_key
= &hist_data
->sort_keys
[i
];
6235 idx
= sort_key
->field_idx
;
6237 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
6243 if (idx
== HITCOUNT_IDX
)
6244 seq_puts(m
, "hitcount");
6246 if (idx
>= first_key_idx
)
6247 idx
+= hist_data
->n_vars
;
6248 hist_field_print(m
, hist_data
->fields
[idx
]);
6251 if (sort_key
->descending
)
6252 seq_puts(m
, ".descending");
6254 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
6255 if (hist_data
->enable_timestamps
)
6256 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
6257 if (hist_data
->attrs
->no_hitcount
)
6258 seq_puts(m
, ":nohitcount");
6260 print_actions_spec(m
, hist_data
);
6262 if (data
->filter_str
)
6263 seq_printf(m
, " if %s", data
->filter_str
);
6266 seq_puts(m
, " [paused]");
6268 seq_puts(m
, " [active]");
6275 static int event_hist_trigger_init(struct event_trigger_data
*data
)
6277 struct hist_trigger_data
*hist_data
= data
->private_data
;
6279 if (alloc_hist_pad() < 0)
6282 if (!data
->ref
&& hist_data
->attrs
->name
)
6283 save_named_trigger(hist_data
->attrs
->name
, data
);
6290 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
6292 struct trace_event_file
*file
;
6297 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6298 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
6299 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
6300 ret
= event_hist_trigger_parse(&trigger_hist_cmd
, file
,
6301 "!hist", "hist", cmd
);
6302 WARN_ON_ONCE(ret
< 0);
6306 static void event_hist_trigger_free(struct event_trigger_data
*data
)
6308 struct hist_trigger_data
*hist_data
= data
->private_data
;
6310 if (WARN_ON_ONCE(data
->ref
<= 0))
6316 del_named_trigger(data
);
6318 trigger_data_free(data
);
6320 remove_hist_vars(hist_data
);
6322 unregister_field_var_hists(hist_data
);
6324 destroy_hist_data(hist_data
);
6329 static const struct event_trigger_ops event_hist_trigger_ops
= {
6330 .trigger
= event_hist_trigger
,
6331 .print
= event_hist_trigger_print
,
6332 .init
= event_hist_trigger_init
,
6333 .free
= event_hist_trigger_free
,
6336 static int event_hist_trigger_named_init(struct event_trigger_data
*data
)
6340 save_named_trigger(data
->named_data
->name
, data
);
6342 return event_hist_trigger_init(data
->named_data
);
6345 static void event_hist_trigger_named_free(struct event_trigger_data
*data
)
6347 if (WARN_ON_ONCE(data
->ref
<= 0))
6350 event_hist_trigger_free(data
->named_data
);
6354 del_named_trigger(data
);
6355 trigger_data_free(data
);
6359 static const struct event_trigger_ops event_hist_trigger_named_ops
= {
6360 .trigger
= event_hist_trigger
,
6361 .print
= event_hist_trigger_print
,
6362 .init
= event_hist_trigger_named_init
,
6363 .free
= event_hist_trigger_named_free
,
6366 static const struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
6369 return &event_hist_trigger_ops
;
6372 static void hist_clear(struct event_trigger_data
*data
)
6374 struct hist_trigger_data
*hist_data
= data
->private_data
;
6377 pause_named_trigger(data
);
6379 tracepoint_synchronize_unregister();
6381 tracing_map_clear(hist_data
->map
);
6384 unpause_named_trigger(data
);
6387 static bool compatible_field(struct ftrace_event_field
*field
,
6388 struct ftrace_event_field
*test_field
)
6390 if (field
== test_field
)
6392 if (field
== NULL
|| test_field
== NULL
)
6394 if (strcmp(field
->name
, test_field
->name
) != 0)
6396 if (strcmp(field
->type
, test_field
->type
) != 0)
6398 if (field
->size
!= test_field
->size
)
6400 if (field
->is_signed
!= test_field
->is_signed
)
6406 static bool hist_trigger_match(struct event_trigger_data
*data
,
6407 struct event_trigger_data
*data_test
,
6408 struct event_trigger_data
*named_data
,
6411 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
6412 struct hist_trigger_data
*hist_data
, *hist_data_test
;
6413 struct hist_field
*key_field
, *key_field_test
;
6416 if (named_data
&& (named_data
!= data_test
) &&
6417 (named_data
!= data_test
->named_data
))
6420 if (!named_data
&& is_named_trigger(data_test
))
6423 hist_data
= data
->private_data
;
6424 hist_data_test
= data_test
->private_data
;
6426 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
6427 hist_data
->n_fields
!= hist_data_test
->n_fields
||
6428 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
6431 if (!ignore_filter
) {
6432 if ((data
->filter_str
&& !data_test
->filter_str
) ||
6433 (!data
->filter_str
&& data_test
->filter_str
))
6437 for_each_hist_field(i
, hist_data
) {
6438 key_field
= hist_data
->fields
[i
];
6439 key_field_test
= hist_data_test
->fields
[i
];
6441 if (key_field
->flags
!= key_field_test
->flags
)
6443 if (!compatible_field(key_field
->field
, key_field_test
->field
))
6445 if (key_field
->offset
!= key_field_test
->offset
)
6447 if (key_field
->size
!= key_field_test
->size
)
6449 if (key_field
->is_signed
!= key_field_test
->is_signed
)
6451 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
6453 if (key_field
->var
.name
&&
6454 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
6458 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6459 sort_key
= &hist_data
->sort_keys
[i
];
6460 sort_key_test
= &hist_data_test
->sort_keys
[i
];
6462 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
6463 sort_key
->descending
!= sort_key_test
->descending
)
6467 if (!ignore_filter
&& data
->filter_str
&&
6468 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
6471 if (!actions_match(hist_data
, hist_data_test
))
6477 static bool existing_hist_update_only(char *glob
,
6478 struct event_trigger_data
*data
,
6479 struct trace_event_file
*file
)
6481 struct hist_trigger_data
*hist_data
= data
->private_data
;
6482 struct event_trigger_data
*test
, *named_data
= NULL
;
6483 bool updated
= false;
6485 if (!hist_data
->attrs
->pause
&& !hist_data
->attrs
->cont
&&
6486 !hist_data
->attrs
->clear
)
6489 if (hist_data
->attrs
->name
) {
6490 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6492 if (!hist_trigger_match(data
, named_data
, named_data
,
6498 if (hist_data
->attrs
->name
&& !named_data
)
6501 list_for_each_entry(test
, &file
->triggers
, list
) {
6502 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6503 if (!hist_trigger_match(data
, test
, named_data
, false))
6505 if (hist_data
->attrs
->pause
)
6506 test
->paused
= true;
6507 else if (hist_data
->attrs
->cont
)
6508 test
->paused
= false;
6509 else if (hist_data
->attrs
->clear
)
6519 static int hist_register_trigger(char *glob
,
6520 struct event_trigger_data
*data
,
6521 struct trace_event_file
*file
)
6523 struct hist_trigger_data
*hist_data
= data
->private_data
;
6524 struct event_trigger_data
*test
, *named_data
= NULL
;
6525 struct trace_array
*tr
= file
->tr
;
6528 if (hist_data
->attrs
->name
) {
6529 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6531 if (!hist_trigger_match(data
, named_data
, named_data
,
6533 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
6540 if (hist_data
->attrs
->name
&& !named_data
)
6543 lockdep_assert_held(&event_mutex
);
6545 list_for_each_entry(test
, &file
->triggers
, list
) {
6546 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6547 if (hist_trigger_match(data
, test
, named_data
, false)) {
6548 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
6555 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
6556 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
6561 if (hist_data
->attrs
->pause
)
6562 data
->paused
= true;
6565 data
->private_data
= named_data
->private_data
;
6566 set_named_trigger_data(data
, named_data
);
6567 data
->ops
= &event_hist_trigger_named_ops
;
6570 if (data
->ops
->init
) {
6571 ret
= data
->ops
->init(data
);
6576 if (hist_data
->enable_timestamps
) {
6577 char *clock
= hist_data
->attrs
->clock
;
6579 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
6581 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
6585 tracing_set_filter_buffering(file
->tr
, true);
6589 destroy_hist_data(hist_data
);
6594 static int hist_trigger_enable(struct event_trigger_data
*data
,
6595 struct trace_event_file
*file
)
6599 list_add_tail_rcu(&data
->list
, &file
->triggers
);
6601 update_cond_flag(file
);
6603 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
6604 list_del_rcu(&data
->list
);
6605 update_cond_flag(file
);
6612 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
6613 struct trace_event_file
*file
)
6615 struct hist_trigger_data
*hist_data
= data
->private_data
;
6616 struct event_trigger_data
*test
, *named_data
= NULL
;
6619 lockdep_assert_held(&event_mutex
);
6621 if (hist_data
->attrs
->name
)
6622 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6624 list_for_each_entry(test
, &file
->triggers
, list
) {
6625 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6626 if (hist_trigger_match(data
, test
, named_data
, false)) {
6636 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
6637 struct trace_event_file
*file
)
6639 struct hist_trigger_data
*hist_data
= data
->private_data
;
6640 struct event_trigger_data
*test
, *named_data
= NULL
;
6642 lockdep_assert_held(&event_mutex
);
6644 if (hist_data
->attrs
->name
)
6645 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6647 list_for_each_entry(test
, &file
->triggers
, list
) {
6648 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6649 if (!hist_trigger_match(data
, test
, named_data
, false))
6651 hist_data
= test
->private_data
;
6652 if (check_var_refs(hist_data
))
6661 static void hist_unregister_trigger(char *glob
,
6662 struct event_trigger_data
*data
,
6663 struct trace_event_file
*file
)
6665 struct event_trigger_data
*test
= NULL
, *iter
, *named_data
= NULL
;
6666 struct hist_trigger_data
*hist_data
= data
->private_data
;
6668 lockdep_assert_held(&event_mutex
);
6670 if (hist_data
->attrs
->name
)
6671 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6673 list_for_each_entry(iter
, &file
->triggers
, list
) {
6674 if (iter
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6675 if (!hist_trigger_match(data
, iter
, named_data
, false))
6678 list_del_rcu(&test
->list
);
6679 trace_event_trigger_enable_disable(file
, 0);
6680 update_cond_flag(file
);
6685 if (test
&& test
->ops
->free
)
6686 test
->ops
->free(test
);
6688 if (hist_data
->enable_timestamps
) {
6689 if (!hist_data
->remove
|| test
)
6690 tracing_set_filter_buffering(file
->tr
, false);
6694 static bool hist_file_check_refs(struct trace_event_file
*file
)
6696 struct hist_trigger_data
*hist_data
;
6697 struct event_trigger_data
*test
;
6699 lockdep_assert_held(&event_mutex
);
6701 list_for_each_entry(test
, &file
->triggers
, list
) {
6702 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6703 hist_data
= test
->private_data
;
6704 if (check_var_refs(hist_data
))
6712 static void hist_unreg_all(struct trace_event_file
*file
)
6714 struct event_trigger_data
*test
, *n
;
6715 struct hist_trigger_data
*hist_data
;
6716 struct synth_event
*se
;
6717 const char *se_name
;
6719 lockdep_assert_held(&event_mutex
);
6721 if (hist_file_check_refs(file
))
6724 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6725 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6726 hist_data
= test
->private_data
;
6727 list_del_rcu(&test
->list
);
6728 trace_event_trigger_enable_disable(file
, 0);
6730 se_name
= trace_event_name(file
->event_call
);
6731 se
= find_synth_event(se_name
);
6735 update_cond_flag(file
);
6736 if (hist_data
->enable_timestamps
)
6737 tracing_set_filter_buffering(file
->tr
, false);
6738 if (test
->ops
->free
)
6739 test
->ops
->free(test
);
6744 static int event_hist_trigger_parse(struct event_command
*cmd_ops
,
6745 struct trace_event_file
*file
,
6746 char *glob
, char *cmd
,
6747 char *param_and_filter
)
6749 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
6750 struct event_trigger_data
*trigger_data
;
6751 struct hist_trigger_attrs
*attrs
;
6752 struct hist_trigger_data
*hist_data
;
6753 char *param
, *filter
, *p
, *start
;
6754 struct synth_event
*se
;
6755 const char *se_name
;
6759 lockdep_assert_held(&event_mutex
);
6766 last_cmd_set(file
, param_and_filter
);
6769 remove
= event_trigger_check_remove(glob
);
6771 if (event_trigger_empty_param(param_and_filter
))
6775 * separate the trigger from the filter (k:v [if filter])
6776 * allowing for whitespace in the trigger
6778 p
= param
= param_and_filter
;
6780 p
= strstr(p
, "if");
6783 if (p
== param_and_filter
)
6785 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
6789 if (p
>= param_and_filter
+ strlen(param_and_filter
) - (sizeof("if") - 1) - 1)
6791 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
6802 filter
= strstrip(p
);
6803 param
= strstrip(param
);
6807 * To simplify arithmetic expression parsing, replace occurrences of
6808 * '.sym-offset' modifier with '.symXoffset'
6810 start
= strstr(param
, ".sym-offset");
6813 start
= strstr(start
+ 11, ".sym-offset");
6816 attrs
= parse_hist_trigger_attrs(file
->tr
, param
);
6818 return PTR_ERR(attrs
);
6820 if (attrs
->map_bits
)
6821 hist_trigger_bits
= attrs
->map_bits
;
6823 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
6824 if (IS_ERR(hist_data
)) {
6825 destroy_hist_trigger_attrs(attrs
);
6826 return PTR_ERR(hist_data
);
6829 trigger_data
= trigger_data_alloc(cmd_ops
, cmd
, param
, hist_data
);
6830 if (!trigger_data
) {
6835 ret
= event_trigger_set_filter(cmd_ops
, file
, filter
, trigger_data
);
6840 if (!have_hist_trigger_match(trigger_data
, file
))
6843 if (hist_trigger_check_refs(trigger_data
, file
)) {
6848 event_trigger_unregister(cmd_ops
, file
, glob
+1, trigger_data
);
6849 se_name
= trace_event_name(file
->event_call
);
6850 se
= find_synth_event(se_name
);
6857 if (existing_hist_update_only(glob
, trigger_data
, file
))
6860 if (!get_named_trigger_data(trigger_data
)) {
6862 ret
= create_actions(hist_data
);
6866 if (has_hist_vars(hist_data
) || hist_data
->n_var_refs
) {
6867 ret
= save_hist_vars(hist_data
);
6872 ret
= tracing_map_init(hist_data
->map
);
6877 ret
= event_trigger_register(cmd_ops
, file
, glob
, trigger_data
);
6881 ret
= hist_trigger_enable(trigger_data
, file
);
6885 se_name
= trace_event_name(file
->event_call
);
6886 se
= find_synth_event(se_name
);
6890 if (ret
== 0 && glob
[0])
6895 event_trigger_unregister(cmd_ops
, file
, glob
+1, trigger_data
);
6897 event_trigger_reset_filter(cmd_ops
, trigger_data
);
6899 remove_hist_vars(hist_data
);
6901 kfree(trigger_data
);
6903 destroy_hist_data(hist_data
);
6907 static struct event_command trigger_hist_cmd
= {
6909 .trigger_type
= ETT_EVENT_HIST
,
6910 .flags
= EVENT_CMD_FL_NEEDS_REC
,
6911 .parse
= event_hist_trigger_parse
,
6912 .reg
= hist_register_trigger
,
6913 .unreg
= hist_unregister_trigger
,
6914 .unreg_all
= hist_unreg_all
,
6915 .get_trigger_ops
= event_hist_get_trigger_ops
,
6916 .set_filter
= set_trigger_filter
,
/*
 * register_trigger_hist_cmd - boot-time registration of the "hist" command.
 * Registers trigger_hist_cmd with the event trigger core.
 * NOTE(review): lines between orig. 6923 and the function end (presumably an
 * error check and "return ret;") were dropped by extraction — confirm upstream.
 */
6919 __init
int register_trigger_hist_cmd(void)
6923 ret
= register_event_command(&trigger_hist_cmd
);
/*
 * hist_enable_trigger - pause or unpause hist triggers on a target event.
 * Trigger callback invoked when the triggering event fires; buffer/rec/event
 * are unused in the visible body (presumably required by the common trigger
 * callback signature — confirm against the ops struct's .trigger type).
 */
6930 hist_enable_trigger(struct event_trigger_data
*data
,
6931 struct trace_buffer
*buffer
, void *rec
,
6932 struct ring_buffer_event
*event
)
/* State attached by the enable/disable command; holds target file + sense. */
6934 struct enable_trigger_data
*enable_data
= data
->private_data
;
6935 struct event_trigger_data
*test
;
/*
 * Walk the target file's trigger list under RCU; the lockdep expression
 * documents that holding event_mutex is an accepted alternative.
 */
6937 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
,
6938 lockdep_is_held(&event_mutex
)) {
/* Only histogram triggers on the target file are affected. */
6939 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6940 if (enable_data
->enable
)
6941 test
->paused
= false;
/* NOTE(review): an 'else' (orig. line 6942) appears dropped by extraction. */
6943 test
->paused
= true;
/*
 * hist_enable_count_trigger - counted variant of hist_enable_trigger.
 * Delegates to hist_enable_trigger once the count test passes; -1 is the
 * "unlimited" sentinel for data->count.
 * NOTE(review): original lines 6952-6955 and 6957-6958 (presumably the
 * zero-count early return and the count decrement) were dropped by
 * extraction — confirm upstream.
 */
6949 hist_enable_count_trigger(struct event_trigger_data
*data
,
6950 struct trace_buffer
*buffer
, void *rec
,
6951 struct ring_buffer_event
*event
)
/* Only decrement a finite count; -1 means fire every time. */
6956 if (data
->count
!= -1)
6959 hist_enable_trigger(data
, buffer
, rec
, event
);
/* Ops for an uncounted "enable_hist" trigger (no :count modifier). */
6962 static const struct event_trigger_ops hist_enable_trigger_ops
= {
6963 .trigger
= hist_enable_trigger
,
6964 .print
= event_enable_trigger_print
,
6965 .init
= event_trigger_init
,
6966 .free
= event_enable_trigger_free
,
/* Ops for a counted "enable_hist:N" trigger; only .trigger differs. */
6969 static const struct event_trigger_ops hist_enable_count_trigger_ops
= {
6970 .trigger
= hist_enable_count_trigger
,
6971 .print
= event_enable_trigger_print
,
6972 .init
= event_trigger_init
,
6973 .free
= event_enable_trigger_free
,
/*
 * Ops for an uncounted "disable_hist" trigger. Shares hist_enable_trigger:
 * the enable/disable sense is carried in enable_data->enable, not in the
 * callback (see hist_enable_trigger and hist_enable_get_trigger_ops).
 */
6976 static const struct event_trigger_ops hist_disable_trigger_ops
= {
6977 .trigger
= hist_enable_trigger
,
6978 .print
= event_enable_trigger_print
,
6979 .init
= event_trigger_init
,
6980 .free
= event_enable_trigger_free
,
/* Ops for a counted "disable_hist:N" trigger; reuses the counted callback. */
6983 static const struct event_trigger_ops hist_disable_count_trigger_ops
= {
6984 .trigger
= hist_enable_count_trigger
,
6985 .print
= event_enable_trigger_print
,
6986 .init
= event_trigger_init
,
6987 .free
= event_enable_trigger_free
,
/*
 * hist_enable_get_trigger_ops - pick the ops table for an
 * enable_hist/disable_hist command. cmd selects enable vs disable
 * (by comparison against ENABLE_HIST_STR); a non-NULL param (the :N count)
 * selects the counted variant.
 * NOTE(review): the declaration of 'enable', the if/else around the two
 * assignments, and the trailing "return ops;" appear dropped by
 * extraction — confirm upstream.
 */
6990 static const struct event_trigger_ops
*
6991 hist_enable_get_trigger_ops(char *cmd
, char *param
)
6993 const struct event_trigger_ops
*ops
;
/* True when the command string is the enable form. */
6996 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
/* Enable path: counted vs uncounted chosen by presence of param. */
6999 ops
= param
? &hist_enable_count_trigger_ops
:
7000 &hist_enable_trigger_ops
;
/* Disable path, same counted/uncounted choice. */
7002 ops
= param
? &hist_disable_count_trigger_ops
:
7003 &hist_disable_trigger_ops
;
/*
 * hist_enable_unreg_all - remove every hist-enable/disable trigger from a
 * trace event file. Uses the _safe list walk because entries are deleted
 * during iteration.
 */
7008 static void hist_enable_unreg_all(struct trace_event_file
*file
)
7010 struct event_trigger_data
*test
, *n
;
7012 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
/* Only this command's triggers (ETT_HIST_ENABLE) are removed. */
7013 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
7014 list_del_rcu(&test
->list
);
/* Recompute the file's condition flag now the trigger is gone. */
7015 update_cond_flag(file
);
/* Drop the trigger's reference on event recording. */
7016 trace_event_trigger_enable_disable(file
, 0);
/* Free via the per-trigger ops hook when one is provided. */
7017 if (test
->ops
->free
)
7018 test
->ops
->free(test
);
/*
 * event_command descriptor for the "enable_hist" trigger: unpauses hist
 * triggers on another event when this event fires. Reuses the generic
 * event-enable parse/register helpers; only unreg_all and get_trigger_ops
 * are hist-specific.
 */
7023 static struct event_command trigger_hist_enable_cmd
= {
7024 .name
= ENABLE_HIST_STR
,
7025 .trigger_type
= ETT_HIST_ENABLE
,
7026 .parse
= event_enable_trigger_parse
,
7027 .reg
= event_enable_register_trigger
,
7028 .unreg
= event_enable_unregister_trigger
,
7029 .unreg_all
= hist_enable_unreg_all
,
7030 .get_trigger_ops
= hist_enable_get_trigger_ops
,
7031 .set_filter
= set_trigger_filter
,
/*
 * event_command descriptor for the "disable_hist" trigger. Identical to
 * trigger_hist_enable_cmd except for .name; hist_enable_get_trigger_ops
 * distinguishes the two by comparing the command string.
 */
7034 static struct event_command trigger_hist_disable_cmd
= {
7035 .name
= DISABLE_HIST_STR
,
7036 .trigger_type
= ETT_HIST_ENABLE
,
7037 .parse
= event_enable_trigger_parse
,
7038 .reg
= event_enable_register_trigger
,
7039 .unreg
= event_enable_unregister_trigger
,
7040 .unreg_all
= hist_enable_unreg_all
,
7041 .get_trigger_ops
= hist_enable_get_trigger_ops
,
7042 .set_filter
= set_trigger_filter
,
/*
 * unregister_trigger_hist_enable_disable_cmds - undo registration of both
 * the enable_hist and disable_hist commands (error-path cleanup for the
 * register function that follows).
 */
7045 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
7047 unregister_event_command(&trigger_hist_enable_cmd
);
7048 unregister_event_command(&trigger_hist_disable_cmd
);
7051 __init
int register_trigger_hist_enable_disable_cmds(void)
7055 ret
= register_event_command(&trigger_hist_enable_cmd
);
7056 if (WARN_ON(ret
< 0))
7058 ret
= register_event_command(&trigger_hist_disable_cmd
);
7059 if (WARN_ON(ret
< 0))
7060 unregister_trigger_hist_enable_disable_cmds();