// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
23 #include "trace_dynevent.h"
25 #define SYNTH_SYSTEM "synthetic"
26 #define SYNTH_FIELDS_MAX 32
28 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
31 C(NONE, "No error"), \
32 C(DUPLICATE_VAR, "Variable already defined"), \
33 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 C(TOO_MANY_VARS, "Too many variables defined"), \
35 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
36 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
38 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
39 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
40 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
41 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
42 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
43 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
44 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
45 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
46 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
47 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
48 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
49 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
50 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
51 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
52 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
53 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
54 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
55 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
56 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
57 C(TOO_MANY_PARAMS, "Too many action params"), \
58 C(PARAM_NOT_FOUND, "Couldn't find param"), \
59 C(INVALID_PARAM, "Invalid action param"), \
60 C(ACTION_NOT_FOUND, "No action found"), \
61 C(NO_SAVE_PARAMS, "No params found for save()"), \
62 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 C(ACTION_MISMATCH, "Handler doesn't support action"), \
64 C(NO_CLOSING_PAREN, "No closing paren found"), \
65 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
66 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
67 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
68 C(VAR_NOT_FOUND, "Couldn't find variable"), \
69 C(FIELD_NOT_FOUND, "Couldn't find field"), \
70 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
71 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
72 C(EMPTY_SORT_FIELD, "Empty sort field"), \
73 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
74 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"),
77 #define C(a, b) HIST_ERR_##a
84 static const char *err_text
[] = { ERRORS
};
88 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
89 struct tracing_map_elt
*elt
,
90 struct ring_buffer_event
*rbe
,
93 #define HIST_FIELD_OPERANDS_MAX 2
94 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX 8
101 FIELD_OP_UNARY_MINUS
,
105 * A hist_var (histogram variable) contains variable information for
106 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
107 * flag set. A hist_var has a variable name e.g. ts0, and is
108 * associated with a given histogram trigger, as specified by
109 * hist_data. The hist_var idx is the unique index assigned to the
110 * variable by the hist trigger's tracing_map. The idx is what is
111 * used to set a variable's value and, by a variable reference, to
116 struct hist_trigger_data
*hist_data
;
121 struct ftrace_event_field
*field
;
127 unsigned int is_signed
;
129 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
130 struct hist_trigger_data
*hist_data
;
133 * Variable fields contain variable-specific info in var.
136 enum field_op_id
operator;
141 * The name field is used for EXPR and VAR_REF fields. VAR
142 * fields contain the variable name in var.name.
147 * When a histogram trigger is hit, if it has any references
148 * to variables, the values of those variables are collected
149 * into a var_ref_vals array by resolve_var_refs(). The
150 * current value of each variable is read from the tracing_map
151 * using the hist field's hist_var.idx and entered into the
152 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
154 unsigned int var_ref_idx
;
158 static u64
hist_field_none(struct hist_field
*field
,
159 struct tracing_map_elt
*elt
,
160 struct ring_buffer_event
*rbe
,
166 static u64
hist_field_counter(struct hist_field
*field
,
167 struct tracing_map_elt
*elt
,
168 struct ring_buffer_event
*rbe
,
174 static u64
hist_field_string(struct hist_field
*hist_field
,
175 struct tracing_map_elt
*elt
,
176 struct ring_buffer_event
*rbe
,
179 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
181 return (u64
)(unsigned long)addr
;
184 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
185 struct tracing_map_elt
*elt
,
186 struct ring_buffer_event
*rbe
,
189 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
190 int str_loc
= str_item
& 0xffff;
191 char *addr
= (char *)(event
+ str_loc
);
193 return (u64
)(unsigned long)addr
;
196 static u64
hist_field_pstring(struct hist_field
*hist_field
,
197 struct tracing_map_elt
*elt
,
198 struct ring_buffer_event
*rbe
,
201 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
203 return (u64
)(unsigned long)*addr
;
206 static u64
hist_field_log2(struct hist_field
*hist_field
,
207 struct tracing_map_elt
*elt
,
208 struct ring_buffer_event
*rbe
,
211 struct hist_field
*operand
= hist_field
->operands
[0];
213 u64 val
= operand
->fn(operand
, elt
, rbe
, event
);
215 return (u64
) ilog2(roundup_pow_of_two(val
));
218 static u64
hist_field_plus(struct hist_field
*hist_field
,
219 struct tracing_map_elt
*elt
,
220 struct ring_buffer_event
*rbe
,
223 struct hist_field
*operand1
= hist_field
->operands
[0];
224 struct hist_field
*operand2
= hist_field
->operands
[1];
226 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
227 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
232 static u64
hist_field_minus(struct hist_field
*hist_field
,
233 struct tracing_map_elt
*elt
,
234 struct ring_buffer_event
*rbe
,
237 struct hist_field
*operand1
= hist_field
->operands
[0];
238 struct hist_field
*operand2
= hist_field
->operands
[1];
240 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
241 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
246 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
247 struct tracing_map_elt
*elt
,
248 struct ring_buffer_event
*rbe
,
251 struct hist_field
*operand
= hist_field
->operands
[0];
253 s64 sval
= (s64
)operand
->fn(operand
, elt
, rbe
, event
);
254 u64 val
= (u64
)-sval
;
259 #define DEFINE_HIST_FIELD_FN(type) \
260 static u64 hist_field_##type(struct hist_field *hist_field, \
261 struct tracing_map_elt *elt, \
262 struct ring_buffer_event *rbe, \
265 type *addr = (type *)(event + hist_field->field->offset); \
267 return (u64)(unsigned long)*addr; \
270 DEFINE_HIST_FIELD_FN(s64
);
271 DEFINE_HIST_FIELD_FN(u64
);
272 DEFINE_HIST_FIELD_FN(s32
);
273 DEFINE_HIST_FIELD_FN(u32
);
274 DEFINE_HIST_FIELD_FN(s16
);
275 DEFINE_HIST_FIELD_FN(u16
);
276 DEFINE_HIST_FIELD_FN(s8
);
277 DEFINE_HIST_FIELD_FN(u8
);
279 #define for_each_hist_field(i, hist_data) \
280 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
282 #define for_each_hist_val_field(i, hist_data) \
283 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
285 #define for_each_hist_key_field(i, hist_data) \
286 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
288 #define HIST_STACKTRACE_DEPTH 16
289 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
290 #define HIST_STACKTRACE_SKIP 5
292 #define HITCOUNT_IDX 0
293 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
295 enum hist_field_flags
{
296 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
297 HIST_FIELD_FL_KEY
= 1 << 1,
298 HIST_FIELD_FL_STRING
= 1 << 2,
299 HIST_FIELD_FL_HEX
= 1 << 3,
300 HIST_FIELD_FL_SYM
= 1 << 4,
301 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
302 HIST_FIELD_FL_EXECNAME
= 1 << 6,
303 HIST_FIELD_FL_SYSCALL
= 1 << 7,
304 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
305 HIST_FIELD_FL_LOG2
= 1 << 9,
306 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
307 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
308 HIST_FIELD_FL_VAR
= 1 << 12,
309 HIST_FIELD_FL_EXPR
= 1 << 13,
310 HIST_FIELD_FL_VAR_REF
= 1 << 14,
311 HIST_FIELD_FL_CPU
= 1 << 15,
312 HIST_FIELD_FL_ALIAS
= 1 << 16,
317 char *name
[TRACING_MAP_VARS_MAX
];
318 char *expr
[TRACING_MAP_VARS_MAX
];
321 struct hist_trigger_attrs
{
331 unsigned int map_bits
;
333 char *assignment_str
[TRACING_MAP_VARS_MAX
];
334 unsigned int n_assignments
;
336 char *action_str
[HIST_ACTIONS_MAX
];
337 unsigned int n_actions
;
339 struct var_defs var_defs
;
343 struct hist_field
*var
;
344 struct hist_field
*val
;
347 struct field_var_hist
{
348 struct hist_trigger_data
*hist_data
;
352 struct hist_trigger_data
{
353 struct hist_field
*fields
[HIST_FIELDS_MAX
];
356 unsigned int n_fields
;
358 unsigned int key_size
;
359 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
360 unsigned int n_sort_keys
;
361 struct trace_event_file
*event_file
;
362 struct hist_trigger_attrs
*attrs
;
363 struct tracing_map
*map
;
364 bool enable_timestamps
;
366 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
367 unsigned int n_var_refs
;
369 struct action_data
*actions
[HIST_ACTIONS_MAX
];
370 unsigned int n_actions
;
372 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
373 unsigned int n_field_vars
;
374 unsigned int n_field_var_str
;
375 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
376 unsigned int n_field_var_hists
;
378 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
379 unsigned int n_save_vars
;
380 unsigned int n_save_var_str
;
383 static int create_synth_event(int argc
, const char **argv
);
384 static int synth_event_show(struct seq_file
*m
, struct dyn_event
*ev
);
385 static int synth_event_release(struct dyn_event
*ev
);
386 static bool synth_event_is_busy(struct dyn_event
*ev
);
387 static bool synth_event_match(const char *system
, const char *event
,
388 int argc
, const char **argv
, struct dyn_event
*ev
);
390 static struct dyn_event_operations synth_event_ops
= {
391 .create
= create_synth_event
,
392 .show
= synth_event_show
,
393 .is_busy
= synth_event_is_busy
,
394 .free
= synth_event_release
,
395 .match
= synth_event_match
,
408 struct dyn_event devent
;
411 struct synth_field
**fields
;
412 unsigned int n_fields
;
414 struct trace_event_class
class;
415 struct trace_event_call call
;
416 struct tracepoint
*tp
;
420 static bool is_synth_event(struct dyn_event
*ev
)
422 return ev
->ops
== &synth_event_ops
;
425 static struct synth_event
*to_synth_event(struct dyn_event
*ev
)
427 return container_of(ev
, struct synth_event
, devent
);
430 static bool synth_event_is_busy(struct dyn_event
*ev
)
432 struct synth_event
*event
= to_synth_event(ev
);
434 return event
->ref
!= 0;
437 static bool synth_event_match(const char *system
, const char *event
,
438 int argc
, const char **argv
, struct dyn_event
*ev
)
440 struct synth_event
*sev
= to_synth_event(ev
);
442 return strcmp(sev
->name
, event
) == 0 &&
443 (!system
|| strcmp(system
, SYNTH_SYSTEM
) == 0);
448 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
449 struct tracing_map_elt
*elt
, void *rec
,
450 struct ring_buffer_event
*rbe
, void *key
,
451 struct action_data
*data
, u64
*var_ref_vals
);
453 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
468 enum handler_id handler
;
469 enum action_id action
;
473 unsigned int n_params
;
474 char *params
[SYNTH_FIELDS_MAX
];
477 * When a histogram trigger is hit, the values of any
478 * references to variables, including variables being passed
479 * as parameters to synthetic events, are collected into a
480 * var_ref_vals array. This var_ref_idx array is an array of
481 * indices into the var_ref_vals array, one for each synthetic
482 * event param, and is passed to the synthetic event
485 unsigned int var_ref_idx
[TRACING_MAP_VARS_MAX
];
486 struct synth_event
*synth_event
;
487 bool use_trace_keyword
;
488 char *synth_event_name
;
498 * var_str contains the $-unstripped variable
499 * name referenced by var_ref, and used when
500 * printing the action. Because var_ref
501 * creation is deferred to create_actions(),
502 * we need a per-action way to save it until
503 * then, thus var_str.
508 * var_ref refers to the variable being
509 * tracked e.g onmax($var).
511 struct hist_field
*var_ref
;
514 * track_var contains the 'invisible' tracking
515 * variable created to keep the current
518 struct hist_field
*track_var
;
520 check_track_val_fn_t check_val
;
521 action_fn_t save_data
;
530 unsigned int key_len
;
532 struct tracing_map_elt elt
;
534 struct action_data
*action_data
;
535 struct hist_trigger_data
*hist_data
;
538 struct hist_elt_data
{
541 char *field_var_str
[SYNTH_FIELDS_MAX
];
544 struct snapshot_context
{
545 struct tracing_map_elt
*elt
;
549 static void track_data_free(struct track_data
*track_data
)
551 struct hist_elt_data
*elt_data
;
556 kfree(track_data
->key
);
558 elt_data
= track_data
->elt
.private_data
;
560 kfree(elt_data
->comm
);
567 static struct track_data
*track_data_alloc(unsigned int key_len
,
568 struct action_data
*action_data
,
569 struct hist_trigger_data
*hist_data
)
571 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
572 struct hist_elt_data
*elt_data
;
575 return ERR_PTR(-ENOMEM
);
577 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
579 track_data_free(data
);
580 return ERR_PTR(-ENOMEM
);
583 data
->key_len
= key_len
;
584 data
->action_data
= action_data
;
585 data
->hist_data
= hist_data
;
587 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
589 track_data_free(data
);
590 return ERR_PTR(-ENOMEM
);
592 data
->elt
.private_data
= elt_data
;
594 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
595 if (!elt_data
->comm
) {
596 track_data_free(data
);
597 return ERR_PTR(-ENOMEM
);
603 static char last_cmd
[MAX_FILTER_STR_VAL
];
604 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
606 static int errpos(char *str
)
608 return err_pos(last_cmd
, str
);
611 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
613 const char *system
= NULL
, *name
= NULL
;
614 struct trace_event_call
*call
;
619 strcpy(last_cmd
, "hist:");
620 strncat(last_cmd
, str
, MAX_FILTER_STR_VAL
- 1 - sizeof("hist:"));
623 call
= file
->event_call
;
625 system
= call
->class->system
;
627 name
= trace_event_name(call
);
634 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, "hist:%s:%s", system
, name
);
637 static void hist_err(struct trace_array
*tr
, u8 err_type
, u8 err_pos
)
639 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
643 static void hist_err_clear(void)
646 last_cmd_loc
[0] = '\0';
649 struct synth_trace_event
{
650 struct trace_entry ent
;
654 static int synth_event_define_fields(struct trace_event_call
*call
)
656 struct synth_trace_event trace
;
657 int offset
= offsetof(typeof(trace
), fields
);
658 struct synth_event
*event
= call
->data
;
659 unsigned int i
, size
, n_u64
;
664 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
665 size
= event
->fields
[i
]->size
;
666 is_signed
= event
->fields
[i
]->is_signed
;
667 type
= event
->fields
[i
]->type
;
668 name
= event
->fields
[i
]->name
;
669 ret
= trace_define_field(call
, type
, name
, offset
, size
,
670 is_signed
, FILTER_OTHER
);
674 event
->fields
[i
]->offset
= n_u64
;
676 if (event
->fields
[i
]->is_string
) {
677 offset
+= STR_VAR_LEN_MAX
;
678 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
680 offset
+= sizeof(u64
);
685 event
->n_u64
= n_u64
;
690 static bool synth_field_signed(char *type
)
692 if (str_has_prefix(type
, "u"))
694 if (strcmp(type
, "gfp_t") == 0)
/* A synthetic field is a string iff its type contains "char[". */
static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}
708 static int synth_field_string_size(char *type
)
710 char buf
[4], *end
, *start
;
714 start
= strstr(type
, "char[");
717 start
+= sizeof("char[") - 1;
719 end
= strchr(type
, ']');
720 if (!end
|| end
< start
)
727 strncpy(buf
, start
, len
);
730 err
= kstrtouint(buf
, 0, &size
);
734 if (size
> STR_VAR_LEN_MAX
)
740 static int synth_field_size(char *type
)
744 if (strcmp(type
, "s64") == 0)
746 else if (strcmp(type
, "u64") == 0)
748 else if (strcmp(type
, "s32") == 0)
750 else if (strcmp(type
, "u32") == 0)
752 else if (strcmp(type
, "s16") == 0)
754 else if (strcmp(type
, "u16") == 0)
756 else if (strcmp(type
, "s8") == 0)
758 else if (strcmp(type
, "u8") == 0)
760 else if (strcmp(type
, "char") == 0)
762 else if (strcmp(type
, "unsigned char") == 0)
763 size
= sizeof(unsigned char);
764 else if (strcmp(type
, "int") == 0)
766 else if (strcmp(type
, "unsigned int") == 0)
767 size
= sizeof(unsigned int);
768 else if (strcmp(type
, "long") == 0)
770 else if (strcmp(type
, "unsigned long") == 0)
771 size
= sizeof(unsigned long);
772 else if (strcmp(type
, "pid_t") == 0)
773 size
= sizeof(pid_t
);
774 else if (strcmp(type
, "gfp_t") == 0)
775 size
= sizeof(gfp_t
);
776 else if (synth_field_is_string(type
))
777 size
= synth_field_string_size(type
);
/*
 * Map a synthetic-field type name to the printf format used when the
 * event is printed.  Unrecognized types fall back to "%llu".
 */
static const char *synth_field_fmt(char *type)
{
	const char *fmt = "%llu";

	if (strcmp(type, "s64") == 0)
		fmt = "%lld";
	else if (strcmp(type, "u64") == 0)
		fmt = "%llu";
	else if (strcmp(type, "s32") == 0)
		fmt = "%d";
	else if (strcmp(type, "u32") == 0)
		fmt = "%u";
	else if (strcmp(type, "s16") == 0)
		fmt = "%d";
	else if (strcmp(type, "u16") == 0)
		fmt = "%u";
	else if (strcmp(type, "s8") == 0)
		fmt = "%d";
	else if (strcmp(type, "u8") == 0)
		fmt = "%u";
	else if (strcmp(type, "char") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned char") == 0)
		fmt = "%u";
	else if (strcmp(type, "int") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned int") == 0)
		fmt = "%u";
	else if (strcmp(type, "long") == 0)
		fmt = "%ld";
	else if (strcmp(type, "unsigned long") == 0)
		fmt = "%lu";
	else if (strcmp(type, "pid_t") == 0)
		fmt = "%d";
	else if (strcmp(type, "gfp_t") == 0)
		fmt = "%x";
	else if (synth_field_is_string(type))
		fmt = "%s";

	return fmt;
}
824 static void print_synth_event_num_val(struct trace_seq
*s
,
825 char *print_fmt
, char *name
,
826 int size
, u64 val
, char *space
)
830 trace_seq_printf(s
, print_fmt
, name
, (u8
)val
, space
);
834 trace_seq_printf(s
, print_fmt
, name
, (u16
)val
, space
);
838 trace_seq_printf(s
, print_fmt
, name
, (u32
)val
, space
);
842 trace_seq_printf(s
, print_fmt
, name
, val
, space
);
847 static enum print_line_t
print_synth_event(struct trace_iterator
*iter
,
849 struct trace_event
*event
)
851 struct trace_array
*tr
= iter
->tr
;
852 struct trace_seq
*s
= &iter
->seq
;
853 struct synth_trace_event
*entry
;
854 struct synth_event
*se
;
855 unsigned int i
, n_u64
;
859 entry
= (struct synth_trace_event
*)iter
->ent
;
860 se
= container_of(event
, struct synth_event
, call
.event
);
862 trace_seq_printf(s
, "%s: ", se
->name
);
864 for (i
= 0, n_u64
= 0; i
< se
->n_fields
; i
++) {
865 if (trace_seq_has_overflowed(s
))
868 fmt
= synth_field_fmt(se
->fields
[i
]->type
);
870 /* parameter types */
871 if (tr
&& tr
->trace_flags
& TRACE_ITER_VERBOSE
)
872 trace_seq_printf(s
, "%s ", fmt
);
874 snprintf(print_fmt
, sizeof(print_fmt
), "%%s=%s%%s", fmt
);
876 /* parameter values */
877 if (se
->fields
[i
]->is_string
) {
878 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
879 (char *)&entry
->fields
[n_u64
],
880 i
== se
->n_fields
- 1 ? "" : " ");
881 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
883 struct trace_print_flags __flags
[] = {
884 __def_gfpflag_names
, {-1, NULL
} };
885 char *space
= (i
== se
->n_fields
- 1 ? "" : " ");
887 print_synth_event_num_val(s
, print_fmt
,
890 entry
->fields
[n_u64
],
893 if (strcmp(se
->fields
[i
]->type
, "gfp_t") == 0) {
894 trace_seq_puts(s
, " (");
895 trace_print_flags_seq(s
, "|",
896 entry
->fields
[n_u64
],
898 trace_seq_putc(s
, ')');
904 trace_seq_putc(s
, '\n');
906 return trace_handle_return(s
);
909 static struct trace_event_functions synth_event_funcs
= {
910 .trace
= print_synth_event
913 static notrace
void trace_event_raw_event_synth(void *__data
,
915 unsigned int *var_ref_idx
)
917 struct trace_event_file
*trace_file
= __data
;
918 struct synth_trace_event
*entry
;
919 struct trace_event_buffer fbuffer
;
920 struct trace_buffer
*buffer
;
921 struct synth_event
*event
;
922 unsigned int i
, n_u64
, val_idx
;
925 event
= trace_file
->event_call
->data
;
927 if (trace_trigger_soft_disabled(trace_file
))
930 fields_size
= event
->n_u64
* sizeof(u64
);
933 * Avoid ring buffer recursion detection, as this event
934 * is being performed within another event.
936 buffer
= trace_file
->tr
->array_buffer
.buffer
;
937 ring_buffer_nest_start(buffer
);
939 entry
= trace_event_buffer_reserve(&fbuffer
, trace_file
,
940 sizeof(*entry
) + fields_size
);
944 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
945 val_idx
= var_ref_idx
[i
];
946 if (event
->fields
[i
]->is_string
) {
947 char *str_val
= (char *)(long)var_ref_vals
[val_idx
];
948 char *str_field
= (char *)&entry
->fields
[n_u64
];
950 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
951 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
953 struct synth_field
*field
= event
->fields
[i
];
954 u64 val
= var_ref_vals
[val_idx
];
956 switch (field
->size
) {
958 *(u8
*)&entry
->fields
[n_u64
] = (u8
)val
;
962 *(u16
*)&entry
->fields
[n_u64
] = (u16
)val
;
966 *(u32
*)&entry
->fields
[n_u64
] = (u32
)val
;
970 entry
->fields
[n_u64
] = val
;
977 trace_event_buffer_commit(&fbuffer
);
979 ring_buffer_nest_end(buffer
);
982 static void free_synth_event_print_fmt(struct trace_event_call
*call
)
985 kfree(call
->print_fmt
);
986 call
->print_fmt
= NULL
;
990 static int __set_synth_event_print_fmt(struct synth_event
*event
,
997 /* When len=0, we just calculate the needed length */
998 #define LEN_OR_ZERO (len ? len - pos : 0)
1000 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
1001 for (i
= 0; i
< event
->n_fields
; i
++) {
1002 fmt
= synth_field_fmt(event
->fields
[i
]->type
);
1003 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "%s=%s%s",
1004 event
->fields
[i
]->name
, fmt
,
1005 i
== event
->n_fields
- 1 ? "" : ", ");
1007 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
1009 for (i
= 0; i
< event
->n_fields
; i
++) {
1010 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
,
1011 ", REC->%s", event
->fields
[i
]->name
);
1016 /* return the length of print_fmt */
1020 static int set_synth_event_print_fmt(struct trace_event_call
*call
)
1022 struct synth_event
*event
= call
->data
;
1026 /* First: called with 0 length to calculate the needed length */
1027 len
= __set_synth_event_print_fmt(event
, NULL
, 0);
1029 print_fmt
= kmalloc(len
+ 1, GFP_KERNEL
);
1033 /* Second: actually write the @print_fmt */
1034 __set_synth_event_print_fmt(event
, print_fmt
, len
+ 1);
1035 call
->print_fmt
= print_fmt
;
1040 static void free_synth_field(struct synth_field
*field
)
1047 static struct synth_field
*parse_synth_field(int argc
, const char **argv
,
1050 struct synth_field
*field
;
1051 const char *prefix
= NULL
, *field_type
= argv
[0], *field_name
, *array
;
1054 if (field_type
[0] == ';')
1057 if (!strcmp(field_type
, "unsigned")) {
1059 return ERR_PTR(-EINVAL
);
1060 prefix
= "unsigned ";
1061 field_type
= argv
[1];
1062 field_name
= argv
[2];
1065 field_name
= argv
[1];
1069 field
= kzalloc(sizeof(*field
), GFP_KERNEL
);
1071 return ERR_PTR(-ENOMEM
);
1073 len
= strlen(field_name
);
1074 array
= strchr(field_name
, '[');
1076 len
-= strlen(array
);
1077 else if (field_name
[len
- 1] == ';')
1080 field
->name
= kmemdup_nul(field_name
, len
, GFP_KERNEL
);
1086 if (field_type
[0] == ';')
1088 len
= strlen(field_type
) + 1;
1090 len
+= strlen(array
);
1092 len
+= strlen(prefix
);
1094 field
->type
= kzalloc(len
, GFP_KERNEL
);
1100 strcat(field
->type
, prefix
);
1101 strcat(field
->type
, field_type
);
1103 strcat(field
->type
, array
);
1104 if (field
->type
[len
- 1] == ';')
1105 field
->type
[len
- 1] = '\0';
1108 field
->size
= synth_field_size(field
->type
);
1114 if (synth_field_is_string(field
->type
))
1115 field
->is_string
= true;
1117 field
->is_signed
= synth_field_signed(field
->type
);
1122 free_synth_field(field
);
1123 field
= ERR_PTR(ret
);
1127 static void free_synth_tracepoint(struct tracepoint
*tp
)
1136 static struct tracepoint
*alloc_synth_tracepoint(char *name
)
1138 struct tracepoint
*tp
;
1140 tp
= kzalloc(sizeof(*tp
), GFP_KERNEL
);
1142 return ERR_PTR(-ENOMEM
);
1144 tp
->name
= kstrdup(name
, GFP_KERNEL
);
1147 return ERR_PTR(-ENOMEM
);
1153 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
1154 unsigned int *var_ref_idx
);
1156 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
1157 unsigned int *var_ref_idx
)
1159 struct tracepoint
*tp
= event
->tp
;
1161 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
1162 struct tracepoint_func
*probe_func_ptr
;
1163 synth_probe_func_t probe_func
;
1166 if (!(cpu_online(raw_smp_processor_id())))
1169 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
1170 if (probe_func_ptr
) {
1172 probe_func
= probe_func_ptr
->func
;
1173 __data
= probe_func_ptr
->data
;
1174 probe_func(__data
, var_ref_vals
, var_ref_idx
);
1175 } while ((++probe_func_ptr
)->func
);
1180 static struct synth_event
*find_synth_event(const char *name
)
1182 struct dyn_event
*pos
;
1183 struct synth_event
*event
;
1185 for_each_dyn_event(pos
) {
1186 if (!is_synth_event(pos
))
1188 event
= to_synth_event(pos
);
1189 if (strcmp(event
->name
, name
) == 0)
1196 static struct trace_event_fields synth_event_fields_array
[] = {
1197 { .type
= TRACE_FUNCTION_TYPE
,
1198 .define_fields
= synth_event_define_fields
},
1202 static int register_synth_event(struct synth_event
*event
)
1204 struct trace_event_call
*call
= &event
->call
;
1207 event
->call
.class = &event
->class;
1208 event
->class.system
= kstrdup(SYNTH_SYSTEM
, GFP_KERNEL
);
1209 if (!event
->class.system
) {
1214 event
->tp
= alloc_synth_tracepoint(event
->name
);
1215 if (IS_ERR(event
->tp
)) {
1216 ret
= PTR_ERR(event
->tp
);
1221 INIT_LIST_HEAD(&call
->class->fields
);
1222 call
->event
.funcs
= &synth_event_funcs
;
1223 call
->class->fields_array
= synth_event_fields_array
;
1225 ret
= register_trace_event(&call
->event
);
1230 call
->flags
= TRACE_EVENT_FL_TRACEPOINT
;
1231 call
->class->reg
= trace_event_reg
;
1232 call
->class->probe
= trace_event_raw_event_synth
;
1234 call
->tp
= event
->tp
;
1236 ret
= trace_add_event_call(call
);
1238 pr_warn("Failed to register synthetic event: %s\n",
1239 trace_event_name(call
));
1243 ret
= set_synth_event_print_fmt(call
);
1245 trace_remove_event_call(call
);
1251 unregister_trace_event(&call
->event
);
1255 static int unregister_synth_event(struct synth_event
*event
)
1257 struct trace_event_call
*call
= &event
->call
;
1260 ret
= trace_remove_event_call(call
);
1265 static void free_synth_event(struct synth_event
*event
)
1272 for (i
= 0; i
< event
->n_fields
; i
++)
1273 free_synth_field(event
->fields
[i
]);
1275 kfree(event
->fields
);
1277 kfree(event
->class.system
);
1278 free_synth_tracepoint(event
->tp
);
1279 free_synth_event_print_fmt(&event
->call
);
1283 static struct synth_event
*alloc_synth_event(const char *name
, int n_fields
,
1284 struct synth_field
**fields
)
1286 struct synth_event
*event
;
1289 event
= kzalloc(sizeof(*event
), GFP_KERNEL
);
1291 event
= ERR_PTR(-ENOMEM
);
1295 event
->name
= kstrdup(name
, GFP_KERNEL
);
1298 event
= ERR_PTR(-ENOMEM
);
1302 event
->fields
= kcalloc(n_fields
, sizeof(*event
->fields
), GFP_KERNEL
);
1303 if (!event
->fields
) {
1304 free_synth_event(event
);
1305 event
= ERR_PTR(-ENOMEM
);
1309 dyn_event_init(&event
->devent
, &synth_event_ops
);
1311 for (i
= 0; i
< n_fields
; i
++)
1312 event
->fields
[i
] = fields
[i
];
1314 event
->n_fields
= n_fields
;
1319 static void action_trace(struct hist_trigger_data
*hist_data
,
1320 struct tracing_map_elt
*elt
, void *rec
,
1321 struct ring_buffer_event
*rbe
, void *key
,
1322 struct action_data
*data
, u64
*var_ref_vals
)
1324 struct synth_event
*event
= data
->synth_event
;
1326 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
1329 struct hist_var_data
{
1330 struct list_head list
;
1331 struct hist_trigger_data
*hist_data
;
1334 static int synth_event_check_arg_fn(void *data
)
1336 struct dynevent_arg_pair
*arg_pair
= data
;
1339 size
= synth_field_size((char *)arg_pair
->lhs
);
1341 return size
? 0 : -EINVAL
;
1345 * synth_event_add_field - Add a new field to a synthetic event cmd
1346 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1347 * @type: The type of the new field to add
1348 * @name: The name of the new field to add
1350 * Add a new field to a synthetic event cmd object. Field ordering is in
1351 * the same order the fields are added.
1353 * See synth_field_size() for available types. If field_name contains
1354 * [n] the field is considered to be an array.
1356 * Return: 0 if successful, error otherwise.
1358 int synth_event_add_field(struct dynevent_cmd
*cmd
, const char *type
,
1361 struct dynevent_arg_pair arg_pair
;
1364 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1370 dynevent_arg_pair_init(&arg_pair
, 0, ';');
1372 arg_pair
.lhs
= type
;
1373 arg_pair
.rhs
= name
;
1375 ret
= dynevent_arg_pair_add(cmd
, &arg_pair
, synth_event_check_arg_fn
);
1379 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
)
1384 EXPORT_SYMBOL_GPL(synth_event_add_field
);
1387 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1388 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1389 * @type_name: The type and name of the new field to add, as a single string
1391 * Add a new field to a synthetic event cmd object, as a single
1392 * string. The @type_name string is expected to be of the form 'type
1393 * name', which will be appended by ';'. No sanity checking is done -
1394 * what's passed in is assumed to already be well-formed. Field
1395 * ordering is in the same order the fields are added.
1397 * See synth_field_size() for available types. If field_name contains
1398 * [n] the field is considered to be an array.
1400 * Return: 0 if successful, error otherwise.
1402 int synth_event_add_field_str(struct dynevent_cmd
*cmd
, const char *type_name
)
1404 struct dynevent_arg arg
;
1407 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1413 dynevent_arg_init(&arg
, ';');
1415 arg
.str
= type_name
;
1417 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1421 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
)
1426 EXPORT_SYMBOL_GPL(synth_event_add_field_str
);
1429 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1430 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1431 * @fields: An array of type/name field descriptions
1432 * @n_fields: The number of field descriptions contained in the fields array
1434 * Add a new set of fields to a synthetic event cmd object. The event
1435 * fields that will be defined for the event should be passed in as an
1436 * array of struct synth_field_desc, and the number of elements in the
1437 * array passed in as n_fields. Field ordering will retain the
1438 * ordering given in the fields array.
1440 * See synth_field_size() for available types. If field_name contains
1441 * [n] the field is considered to be an array.
1443 * Return: 0 if successful, error otherwise.
1445 int synth_event_add_fields(struct dynevent_cmd
*cmd
,
1446 struct synth_field_desc
*fields
,
1447 unsigned int n_fields
)
1452 for (i
= 0; i
< n_fields
; i
++) {
1453 if (fields
[i
].type
== NULL
|| fields
[i
].name
== NULL
) {
1458 ret
= synth_event_add_field(cmd
, fields
[i
].type
, fields
[i
].name
);
1465 EXPORT_SYMBOL_GPL(synth_event_add_fields
);
1468 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1469 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1470 * @name: The name of the synthetic event
1471 * @mod: The module creating the event, NULL if not created from a module
1472 * @args: Variable number of arg (pairs), one pair for each field
1474 * NOTE: Users normally won't want to call this function directly, but
1475 * rather use the synth_event_gen_cmd_start() wrapper, which
1476 * automatically adds a NULL to the end of the arg list. If this
1477 * function is used directly, make sure the last arg in the variable
1480 * Generate a synthetic event command to be executed by
1481 * synth_event_gen_cmd_end(). This function can be used to generate
1482 * the complete command or only the first part of it; in the latter
1483 * case, synth_event_add_field(), synth_event_add_field_str(), or
1484 * synth_event_add_fields() can be used to add more fields following
1487 * There should be an even number variable args, each pair consisting
1488 * of a type followed by a field name.
1490 * See synth_field_size() for available types. If field_name contains
1491 * [n] the field is considered to be an array.
1493 * Return: 0 if successful, error otherwise.
1495 int __synth_event_gen_cmd_start(struct dynevent_cmd
*cmd
, const char *name
,
1496 struct module
*mod
, ...)
1498 struct dynevent_arg arg
;
1502 cmd
->event_name
= name
;
1503 cmd
->private_data
= mod
;
1505 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1508 dynevent_arg_init(&arg
, 0);
1510 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1514 va_start(args
, mod
);
1516 const char *type
, *name
;
1518 type
= va_arg(args
, const char *);
1521 name
= va_arg(args
, const char *);
1525 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
) {
1530 ret
= synth_event_add_field(cmd
, type
, name
);
1538 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start
);
1541 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1542 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1543 * @name: The name of the synthetic event
1544 * @fields: An array of type/name field descriptions
1545 * @n_fields: The number of field descriptions contained in the fields array
1547 * Generate a synthetic event command to be executed by
1548 * synth_event_gen_cmd_end(). This function can be used to generate
1549 * the complete command or only the first part of it; in the latter
1550 * case, synth_event_add_field(), synth_event_add_field_str(), or
1551 * synth_event_add_fields() can be used to add more fields following
1554 * The event fields that will be defined for the event should be
1555 * passed in as an array of struct synth_field_desc, and the number of
1556 * elements in the array passed in as n_fields. Field ordering will
1557 * retain the ordering given in the fields array.
1559 * See synth_field_size() for available types. If field_name contains
1560 * [n] the field is considered to be an array.
1562 * Return: 0 if successful, error otherwise.
1564 int synth_event_gen_cmd_array_start(struct dynevent_cmd
*cmd
, const char *name
,
1566 struct synth_field_desc
*fields
,
1567 unsigned int n_fields
)
1569 struct dynevent_arg arg
;
1573 cmd
->event_name
= name
;
1574 cmd
->private_data
= mod
;
1576 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1579 if (n_fields
> SYNTH_FIELDS_MAX
)
1582 dynevent_arg_init(&arg
, 0);
1584 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1588 for (i
= 0; i
< n_fields
; i
++) {
1589 if (fields
[i
].type
== NULL
|| fields
[i
].name
== NULL
)
1592 ret
= synth_event_add_field(cmd
, fields
[i
].type
, fields
[i
].name
);
1599 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start
);
1601 static int __create_synth_event(int argc
, const char *name
, const char **argv
)
1603 struct synth_field
*field
, *fields
[SYNTH_FIELDS_MAX
];
1604 struct synth_event
*event
= NULL
;
1605 int i
, consumed
= 0, n_fields
= 0, ret
= 0;
1609 * - Add synthetic event: <event_name> field[;field] ...
1610 * - Remove synthetic event: !<event_name> field[;field] ...
1611 * where 'field' = type field_name
1614 if (name
[0] == '\0' || argc
< 1)
1617 mutex_lock(&event_mutex
);
1619 event
= find_synth_event(name
);
1625 for (i
= 0; i
< argc
- 1; i
++) {
1626 if (strcmp(argv
[i
], ";") == 0)
1628 if (n_fields
== SYNTH_FIELDS_MAX
) {
1633 field
= parse_synth_field(argc
- i
, &argv
[i
], &consumed
);
1634 if (IS_ERR(field
)) {
1635 ret
= PTR_ERR(field
);
1638 fields
[n_fields
++] = field
;
1642 if (i
< argc
&& strcmp(argv
[i
], ";") != 0) {
1647 event
= alloc_synth_event(name
, n_fields
, fields
);
1648 if (IS_ERR(event
)) {
1649 ret
= PTR_ERR(event
);
1653 ret
= register_synth_event(event
);
1655 dyn_event_add(&event
->devent
);
1657 free_synth_event(event
);
1659 mutex_unlock(&event_mutex
);
1663 for (i
= 0; i
< n_fields
; i
++)
1664 free_synth_field(fields
[i
]);
1670 * synth_event_create - Create a new synthetic event
1671 * @name: The name of the new sythetic event
1672 * @fields: An array of type/name field descriptions
1673 * @n_fields: The number of field descriptions contained in the fields array
1674 * @mod: The module creating the event, NULL if not created from a module
1676 * Create a new synthetic event with the given name under the
1677 * trace/events/synthetic/ directory. The event fields that will be
1678 * defined for the event should be passed in as an array of struct
1679 * synth_field_desc, and the number elements in the array passed in as
1680 * n_fields. Field ordering will retain the ordering given in the
1683 * If the new synthetic event is being created from a module, the mod
1684 * param must be non-NULL. This will ensure that the trace buffer
1685 * won't contain unreadable events.
1687 * The new synth event should be deleted using synth_event_delete()
1688 * function. The new synthetic event can be generated from modules or
1689 * other kernel code using trace_synth_event() and related functions.
1691 * Return: 0 if successful, error otherwise.
1693 int synth_event_create(const char *name
, struct synth_field_desc
*fields
,
1694 unsigned int n_fields
, struct module
*mod
)
1696 struct dynevent_cmd cmd
;
1700 buf
= kzalloc(MAX_DYNEVENT_CMD_LEN
, GFP_KERNEL
);
1704 synth_event_cmd_init(&cmd
, buf
, MAX_DYNEVENT_CMD_LEN
);
1706 ret
= synth_event_gen_cmd_array_start(&cmd
, name
, mod
,
1711 ret
= synth_event_gen_cmd_end(&cmd
);
1717 EXPORT_SYMBOL_GPL(synth_event_create
);
1719 static int destroy_synth_event(struct synth_event
*se
)
1726 ret
= unregister_synth_event(se
);
1728 dyn_event_remove(&se
->devent
);
1729 free_synth_event(se
);
1737 * synth_event_delete - Delete a synthetic event
1738 * @event_name: The name of the new sythetic event
1740 * Delete a synthetic event that was created with synth_event_create().
1742 * Return: 0 if successful, error otherwise.
1744 int synth_event_delete(const char *event_name
)
1746 struct synth_event
*se
= NULL
;
1747 struct module
*mod
= NULL
;
1750 mutex_lock(&event_mutex
);
1751 se
= find_synth_event(event_name
);
1754 ret
= destroy_synth_event(se
);
1756 mutex_unlock(&event_mutex
);
1759 mutex_lock(&trace_types_lock
);
1761 * It is safest to reset the ring buffer if the module
1762 * being unloaded registered any events that were
1763 * used. The only worry is if a new module gets
1764 * loaded, and takes on the same id as the events of
1765 * this module. When printing out the buffer, traced
1766 * events left over from this module may be passed to
1767 * the new module events and unexpected results may
1770 tracing_reset_all_online_cpus();
1771 mutex_unlock(&trace_types_lock
);
1776 EXPORT_SYMBOL_GPL(synth_event_delete
);
1778 static int create_or_delete_synth_event(int argc
, char **argv
)
1780 const char *name
= argv
[0];
1783 /* trace_run_command() ensures argc != 0 */
1784 if (name
[0] == '!') {
1785 ret
= synth_event_delete(name
+ 1);
1789 ret
= __create_synth_event(argc
- 1, name
, (const char **)argv
+ 1);
1790 return ret
== -ECANCELED
? -EINVAL
: ret
;
1793 static int synth_event_run_command(struct dynevent_cmd
*cmd
)
1795 struct synth_event
*se
;
1798 ret
= trace_run_command(cmd
->seq
.buffer
, create_or_delete_synth_event
);
1802 se
= find_synth_event(cmd
->event_name
);
1806 se
->mod
= cmd
->private_data
;
1812 * synth_event_cmd_init - Initialize a synthetic event command object
1813 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1814 * @buf: A pointer to the buffer used to build the command
1815 * @maxlen: The length of the buffer passed in @buf
1817 * Initialize a synthetic event command object. Use this before
1818 * calling any of the other dyenvent_cmd functions.
1820 void synth_event_cmd_init(struct dynevent_cmd
*cmd
, char *buf
, int maxlen
)
1822 dynevent_cmd_init(cmd
, buf
, maxlen
, DYNEVENT_TYPE_SYNTH
,
1823 synth_event_run_command
);
1825 EXPORT_SYMBOL_GPL(synth_event_cmd_init
);
1828 __synth_event_trace_start(struct trace_event_file
*file
,
1829 struct synth_event_trace_state
*trace_state
)
1831 int entry_size
, fields_size
= 0;
1834 memset(trace_state
, '\0', sizeof(*trace_state
));
1837 * Normal event tracing doesn't get called at all unless the
1838 * ENABLED bit is set (which attaches the probe thus allowing
1839 * this code to be called, etc). Because this is called
1840 * directly by the user, we don't have that but we still need
1841 * to honor not logging when disabled. For the the iterated
1842 * trace case, we save the enabed state upon start and just
1843 * ignore the following data calls.
1845 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
) ||
1846 trace_trigger_soft_disabled(file
)) {
1847 trace_state
->disabled
= true;
1852 trace_state
->event
= file
->event_call
->data
;
1854 fields_size
= trace_state
->event
->n_u64
* sizeof(u64
);
1857 * Avoid ring buffer recursion detection, as this event
1858 * is being performed within another event.
1860 trace_state
->buffer
= file
->tr
->array_buffer
.buffer
;
1861 ring_buffer_nest_start(trace_state
->buffer
);
1863 entry_size
= sizeof(*trace_state
->entry
) + fields_size
;
1864 trace_state
->entry
= trace_event_buffer_reserve(&trace_state
->fbuffer
,
1867 if (!trace_state
->entry
) {
1868 ring_buffer_nest_end(trace_state
->buffer
);
1876 __synth_event_trace_end(struct synth_event_trace_state
*trace_state
)
1878 trace_event_buffer_commit(&trace_state
->fbuffer
);
1880 ring_buffer_nest_end(trace_state
->buffer
);
1884 * synth_event_trace - Trace a synthetic event
1885 * @file: The trace_event_file representing the synthetic event
1886 * @n_vals: The number of values in vals
1887 * @args: Variable number of args containing the event values
1889 * Trace a synthetic event using the values passed in the variable
1892 * The argument list should be a list 'n_vals' u64 values. The number
1893 * of vals must match the number of field in the synthetic event, and
1894 * must be in the same order as the synthetic event fields.
1896 * All vals should be cast to u64, and string vals are just pointers
1897 * to strings, cast to u64. Strings will be copied into space
1898 * reserved in the event for the string, using these pointers.
1900 * Return: 0 on success, err otherwise.
1902 int synth_event_trace(struct trace_event_file
*file
, unsigned int n_vals
, ...)
1904 struct synth_event_trace_state state
;
1905 unsigned int i
, n_u64
;
1909 ret
= __synth_event_trace_start(file
, &state
);
1912 ret
= 0; /* just disabled, not really an error */
1916 if (n_vals
!= state
.event
->n_fields
) {
1921 va_start(args
, n_vals
);
1922 for (i
= 0, n_u64
= 0; i
< state
.event
->n_fields
; i
++) {
1925 val
= va_arg(args
, u64
);
1927 if (state
.event
->fields
[i
]->is_string
) {
1928 char *str_val
= (char *)(long)val
;
1929 char *str_field
= (char *)&state
.entry
->fields
[n_u64
];
1931 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
1932 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
1934 struct synth_field
*field
= state
.event
->fields
[i
];
1936 switch (field
->size
) {
1938 *(u8
*)&state
.entry
->fields
[n_u64
] = (u8
)val
;
1942 *(u16
*)&state
.entry
->fields
[n_u64
] = (u16
)val
;
1946 *(u32
*)&state
.entry
->fields
[n_u64
] = (u32
)val
;
1950 state
.entry
->fields
[n_u64
] = val
;
1958 __synth_event_trace_end(&state
);
1962 EXPORT_SYMBOL_GPL(synth_event_trace
);
1965 * synth_event_trace_array - Trace a synthetic event from an array
1966 * @file: The trace_event_file representing the synthetic event
1967 * @vals: Array of values
1968 * @n_vals: The number of values in vals
1970 * Trace a synthetic event using the values passed in as 'vals'.
1972 * The 'vals' array is just an array of 'n_vals' u64. The number of
1973 * vals must match the number of field in the synthetic event, and
1974 * must be in the same order as the synthetic event fields.
1976 * All vals should be cast to u64, and string vals are just pointers
1977 * to strings, cast to u64. Strings will be copied into space
1978 * reserved in the event for the string, using these pointers.
1980 * Return: 0 on success, err otherwise.
1982 int synth_event_trace_array(struct trace_event_file
*file
, u64
*vals
,
1983 unsigned int n_vals
)
1985 struct synth_event_trace_state state
;
1986 unsigned int i
, n_u64
;
1989 ret
= __synth_event_trace_start(file
, &state
);
1992 ret
= 0; /* just disabled, not really an error */
1996 if (n_vals
!= state
.event
->n_fields
) {
2001 for (i
= 0, n_u64
= 0; i
< state
.event
->n_fields
; i
++) {
2002 if (state
.event
->fields
[i
]->is_string
) {
2003 char *str_val
= (char *)(long)vals
[i
];
2004 char *str_field
= (char *)&state
.entry
->fields
[n_u64
];
2006 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
2007 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
2009 struct synth_field
*field
= state
.event
->fields
[i
];
2012 switch (field
->size
) {
2014 *(u8
*)&state
.entry
->fields
[n_u64
] = (u8
)val
;
2018 *(u16
*)&state
.entry
->fields
[n_u64
] = (u16
)val
;
2022 *(u32
*)&state
.entry
->fields
[n_u64
] = (u32
)val
;
2026 state
.entry
->fields
[n_u64
] = val
;
2033 __synth_event_trace_end(&state
);
2037 EXPORT_SYMBOL_GPL(synth_event_trace_array
);
2040 * synth_event_trace_start - Start piecewise synthetic event trace
2041 * @file: The trace_event_file representing the synthetic event
2042 * @trace_state: A pointer to object tracking the piecewise trace state
2044 * Start the trace of a synthetic event field-by-field rather than all
2047 * This function 'opens' an event trace, which means space is reserved
2048 * for the event in the trace buffer, after which the event's
2049 * individual field values can be set through either
2050 * synth_event_add_next_val() or synth_event_add_val().
2052 * A pointer to a trace_state object is passed in, which will keep
2053 * track of the current event trace state until the event trace is
2054 * closed (and the event finally traced) using
2055 * synth_event_trace_end().
2057 * Note that synth_event_trace_end() must be called after all values
2058 * have been added for each event trace, regardless of whether adding
2059 * all field values succeeded or not.
2061 * Note also that for a given event trace, all fields must be added
2062 * using either synth_event_add_next_val() or synth_event_add_val()
2063 * but not both together or interleaved.
2065 * Return: 0 on success, err otherwise.
2067 int synth_event_trace_start(struct trace_event_file
*file
,
2068 struct synth_event_trace_state
*trace_state
)
2075 ret
= __synth_event_trace_start(file
, trace_state
);
2077 ret
= 0; /* just disabled, not really an error */
2081 EXPORT_SYMBOL_GPL(synth_event_trace_start
);
2083 static int __synth_event_add_val(const char *field_name
, u64 val
,
2084 struct synth_event_trace_state
*trace_state
)
2086 struct synth_field
*field
= NULL
;
2087 struct synth_trace_event
*entry
;
2088 struct synth_event
*event
;
2096 /* can't mix add_next_synth_val() with add_synth_val() */
2098 if (trace_state
->add_next
) {
2102 trace_state
->add_name
= true;
2104 if (trace_state
->add_name
) {
2108 trace_state
->add_next
= true;
2111 if (trace_state
->disabled
)
2114 event
= trace_state
->event
;
2115 if (trace_state
->add_name
) {
2116 for (i
= 0; i
< event
->n_fields
; i
++) {
2117 field
= event
->fields
[i
];
2118 if (strcmp(field
->name
, field_name
) == 0)
2126 if (trace_state
->cur_field
>= event
->n_fields
) {
2130 field
= event
->fields
[trace_state
->cur_field
++];
2133 entry
= trace_state
->entry
;
2134 if (field
->is_string
) {
2135 char *str_val
= (char *)(long)val
;
2143 str_field
= (char *)&entry
->fields
[field
->offset
];
2144 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
2146 switch (field
->size
) {
2148 *(u8
*)&trace_state
->entry
->fields
[field
->offset
] = (u8
)val
;
2152 *(u16
*)&trace_state
->entry
->fields
[field
->offset
] = (u16
)val
;
2156 *(u32
*)&trace_state
->entry
->fields
[field
->offset
] = (u32
)val
;
2160 trace_state
->entry
->fields
[field
->offset
] = val
;
2169 * synth_event_add_next_val - Add the next field's value to an open synth trace
2170 * @val: The value to set the next field to
2171 * @trace_state: A pointer to object tracking the piecewise trace state
2173 * Set the value of the next field in an event that's been opened by
2174 * synth_event_trace_start().
2176 * The val param should be the value cast to u64. If the value points
2177 * to a string, the val param should be a char * cast to u64.
2179 * This function assumes all the fields in an event are to be set one
2180 * after another - successive calls to this function are made, one for
2181 * each field, in the order of the fields in the event, until all
2182 * fields have been set. If you'd rather set each field individually
2183 * without regard to ordering, synth_event_add_val() can be used
2186 * Note however that synth_event_add_next_val() and
2187 * synth_event_add_val() can't be intermixed for a given event trace -
2188 * one or the other but not both can be used at the same time.
2190 * Note also that synth_event_trace_end() must be called after all
2191 * values have been added for each event trace, regardless of whether
2192 * adding all field values succeeded or not.
2194 * Return: 0 on success, err otherwise.
2196 int synth_event_add_next_val(u64 val
,
2197 struct synth_event_trace_state
*trace_state
)
2199 return __synth_event_add_val(NULL
, val
, trace_state
);
2201 EXPORT_SYMBOL_GPL(synth_event_add_next_val
);
2204 * synth_event_add_val - Add a named field's value to an open synth trace
2205 * @field_name: The name of the synthetic event field value to set
2206 * @val: The value to set the next field to
2207 * @trace_state: A pointer to object tracking the piecewise trace state
2209 * Set the value of the named field in an event that's been opened by
2210 * synth_event_trace_start().
2212 * The val param should be the value cast to u64. If the value points
2213 * to a string, the val param should be a char * cast to u64.
2215 * This function looks up the field name, and if found, sets the field
2216 * to the specified value. This lookup makes this function more
2217 * expensive than synth_event_add_next_val(), so use that or the
2218 * none-piecewise synth_event_trace() instead if efficiency is more
2221 * Note however that synth_event_add_next_val() and
2222 * synth_event_add_val() can't be intermixed for a given event trace -
2223 * one or the other but not both can be used at the same time.
2225 * Note also that synth_event_trace_end() must be called after all
2226 * values have been added for each event trace, regardless of whether
2227 * adding all field values succeeded or not.
2229 * Return: 0 on success, err otherwise.
2231 int synth_event_add_val(const char *field_name
, u64 val
,
2232 struct synth_event_trace_state
*trace_state
)
2234 return __synth_event_add_val(field_name
, val
, trace_state
);
2236 EXPORT_SYMBOL_GPL(synth_event_add_val
);
2239 * synth_event_trace_end - End piecewise synthetic event trace
2240 * @trace_state: A pointer to object tracking the piecewise trace state
2242 * End the trace of a synthetic event opened by
2243 * synth_event_trace__start().
2245 * This function 'closes' an event trace, which basically means that
2246 * it commits the reserved event and cleans up other loose ends.
2248 * A pointer to a trace_state object is passed in, which will keep
2249 * track of the current event trace state opened with
2250 * synth_event_trace_start().
2252 * Note that this function must be called after all values have been
2253 * added for each event trace, regardless of whether adding all field
2254 * values succeeded or not.
2256 * Return: 0 on success, err otherwise.
2258 int synth_event_trace_end(struct synth_event_trace_state
*trace_state
)
2263 __synth_event_trace_end(trace_state
);
2267 EXPORT_SYMBOL_GPL(synth_event_trace_end
);
2269 static int create_synth_event(int argc
, const char **argv
)
2271 const char *name
= argv
[0];
2274 if (name
[0] != 's' || name
[1] != ':')
2278 /* This interface accepts group name prefix */
2279 if (strchr(name
, '/')) {
2280 len
= str_has_prefix(name
, SYNTH_SYSTEM
"/");
2285 return __create_synth_event(argc
- 1, name
, argv
+ 1);
2288 static int synth_event_release(struct dyn_event
*ev
)
2290 struct synth_event
*event
= to_synth_event(ev
);
2296 ret
= unregister_synth_event(event
);
2300 dyn_event_remove(ev
);
2301 free_synth_event(event
);
2305 static int __synth_event_show(struct seq_file
*m
, struct synth_event
*event
)
2307 struct synth_field
*field
;
2310 seq_printf(m
, "%s\t", event
->name
);
2312 for (i
= 0; i
< event
->n_fields
; i
++) {
2313 field
= event
->fields
[i
];
2315 /* parameter values */
2316 seq_printf(m
, "%s %s%s", field
->type
, field
->name
,
2317 i
== event
->n_fields
- 1 ? "" : "; ");
2325 static int synth_event_show(struct seq_file
*m
, struct dyn_event
*ev
)
2327 struct synth_event
*event
= to_synth_event(ev
);
2329 seq_printf(m
, "s:%s/", event
->class.system
);
2331 return __synth_event_show(m
, event
);
2334 static int synth_events_seq_show(struct seq_file
*m
, void *v
)
2336 struct dyn_event
*ev
= v
;
2338 if (!is_synth_event(ev
))
2341 return __synth_event_show(m
, to_synth_event(ev
));
2344 static const struct seq_operations synth_events_seq_op
= {
2345 .start
= dyn_event_seq_start
,
2346 .next
= dyn_event_seq_next
,
2347 .stop
= dyn_event_seq_stop
,
2348 .show
= synth_events_seq_show
,
2351 static int synth_events_open(struct inode
*inode
, struct file
*file
)
2355 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
2359 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
2360 ret
= dyn_events_release_all(&synth_event_ops
);
2365 return seq_open(file
, &synth_events_seq_op
);
2368 static ssize_t
synth_events_write(struct file
*file
,
2369 const char __user
*buffer
,
2370 size_t count
, loff_t
*ppos
)
2372 return trace_parse_run_command(file
, buffer
, count
, ppos
,
2373 create_or_delete_synth_event
);
2376 static const struct file_operations synth_events_fops
= {
2377 .open
= synth_events_open
,
2378 .write
= synth_events_write
,
2380 .llseek
= seq_lseek
,
2381 .release
= seq_release
,
2384 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
2385 struct tracing_map_elt
*elt
,
2386 struct ring_buffer_event
*rbe
,
2389 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
2390 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2392 u64 ts
= ring_buffer_event_time_stamp(rbe
);
2394 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
2400 static u64
hist_field_cpu(struct hist_field
*hist_field
,
2401 struct tracing_map_elt
*elt
,
2402 struct ring_buffer_event
*rbe
,
2405 int cpu
= smp_processor_id();
2411 * check_field_for_var_ref - Check if a VAR_REF field references a variable
2412 * @hist_field: The VAR_REF field to check
2413 * @var_data: The hist trigger that owns the variable
2414 * @var_idx: The trigger variable identifier
2416 * Check the given VAR_REF field to see whether or not it references
2417 * the given variable associated with the given trigger.
2419 * Return: The VAR_REF field if it does reference the variable, NULL if not
2421 static struct hist_field
*
2422 check_field_for_var_ref(struct hist_field
*hist_field
,
2423 struct hist_trigger_data
*var_data
,
2424 unsigned int var_idx
)
2426 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
2428 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
2429 hist_field
->var
.hist_data
== var_data
)
2436 * find_var_ref - Check if a trigger has a reference to a trigger variable
2437 * @hist_data: The hist trigger that might have a reference to the variable
2438 * @var_data: The hist trigger that owns the variable
2439 * @var_idx: The trigger variable identifier
2441 * Check the list of var_refs[] on the first hist trigger to see
2442 * whether any of them are references to the variable on the second
2445 * Return: The VAR_REF field referencing the variable if so, NULL if not
2447 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
2448 struct hist_trigger_data
*var_data
,
2449 unsigned int var_idx
)
2451 struct hist_field
*hist_field
;
2454 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2455 hist_field
= hist_data
->var_refs
[i
];
2456 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
2464 * find_any_var_ref - Check if there is a reference to a given trigger variable
2465 * @hist_data: The hist trigger
2466 * @var_idx: The trigger variable identifier
2468 * Check to see whether the given variable is currently referenced by
2469 * any other trigger.
2471 * The trigger the variable is defined on is explicitly excluded - the
2472 * assumption being that a self-reference doesn't prevent a trigger
2473 * from being removed.
2475 * Return: The VAR_REF field referencing the variable if so, NULL if not
2477 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
2478 unsigned int var_idx
)
2480 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2481 struct hist_field
*found
= NULL
;
2482 struct hist_var_data
*var_data
;
2484 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2485 if (var_data
->hist_data
== hist_data
)
2487 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
2496 * check_var_refs - Check if there is a reference to any of trigger's variables
2497 * @hist_data: The hist trigger
2499 * A trigger can define one or more variables. If any one of them is
2500 * currently referenced by any other trigger, this function will
2503 * Typically used to determine whether or not a trigger can be removed
2504 * - if there are any references to a trigger's variables, it cannot.
2506 * Return: True if there is a reference to any of trigger's variables
2508 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
2510 struct hist_field
*field
;
2514 for_each_hist_field(i
, hist_data
) {
2515 field
= hist_data
->fields
[i
];
2516 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
2517 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
2527 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
2529 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2530 struct hist_var_data
*var_data
, *found
= NULL
;
2532 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2533 if (var_data
->hist_data
== hist_data
) {
2542 static bool field_has_hist_vars(struct hist_field
*hist_field
,
2553 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
2554 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
2557 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
2558 struct hist_field
*operand
;
2560 operand
= hist_field
->operands
[i
];
2561 if (field_has_hist_vars(operand
, level
+ 1))
2568 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
2570 struct hist_field
*hist_field
;
2573 for_each_hist_field(i
, hist_data
) {
2574 hist_field
= hist_data
->fields
[i
];
2575 if (field_has_hist_vars(hist_field
, 0))
2582 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
2584 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2585 struct hist_var_data
*var_data
;
2587 var_data
= find_hist_vars(hist_data
);
2591 if (tracing_check_open_get_tr(tr
))
2594 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
2596 trace_array_put(tr
);
2600 var_data
->hist_data
= hist_data
;
2601 list_add(&var_data
->list
, &tr
->hist_vars
);
2606 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
2608 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2609 struct hist_var_data
*var_data
;
2611 var_data
= find_hist_vars(hist_data
);
2615 if (WARN_ON(check_var_refs(hist_data
)))
2618 list_del(&var_data
->list
);
2622 trace_array_put(tr
);
2625 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
2626 const char *var_name
)
2628 struct hist_field
*hist_field
, *found
= NULL
;
2631 for_each_hist_field(i
, hist_data
) {
2632 hist_field
= hist_data
->fields
[i
];
2633 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
2634 strcmp(hist_field
->var
.name
, var_name
) == 0) {
2643 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
2644 struct trace_event_file
*file
,
2645 const char *var_name
)
2647 struct hist_trigger_data
*test_data
;
2648 struct event_trigger_data
*test
;
2649 struct hist_field
*hist_field
;
2651 lockdep_assert_held(&event_mutex
);
2653 hist_field
= find_var_field(hist_data
, var_name
);
2657 list_for_each_entry(test
, &file
->triggers
, list
) {
2658 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2659 test_data
= test
->private_data
;
2660 hist_field
= find_var_field(test_data
, var_name
);
2669 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
2674 struct hist_trigger_data
*var_hist_data
;
2675 struct hist_var_data
*var_data
;
2676 struct trace_event_file
*file
, *found
= NULL
;
2679 return find_event_file(tr
, system
, event_name
);
2681 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2682 var_hist_data
= var_data
->hist_data
;
2683 file
= var_hist_data
->event_file
;
2687 if (find_var_field(var_hist_data
, var_name
)) {
2689 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
2700 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
2701 const char *var_name
)
2703 struct hist_trigger_data
*test_data
;
2704 struct event_trigger_data
*test
;
2705 struct hist_field
*hist_field
;
2707 lockdep_assert_held(&event_mutex
);
2709 list_for_each_entry(test
, &file
->triggers
, list
) {
2710 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2711 test_data
= test
->private_data
;
2712 hist_field
= find_var_field(test_data
, var_name
);
2721 static struct hist_field
*
2722 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
2724 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2725 struct hist_field
*hist_field
, *found
= NULL
;
2726 struct trace_event_file
*file
;
2729 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
2730 struct action_data
*data
= hist_data
->actions
[i
];
2732 if (data
->handler
== HANDLER_ONMATCH
) {
2733 char *system
= data
->match_data
.event_system
;
2734 char *event_name
= data
->match_data
.event
;
2736 file
= find_var_file(tr
, system
, event_name
, var_name
);
2739 hist_field
= find_file_var(file
, var_name
);
2742 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
2744 return ERR_PTR(-EINVAL
);
2754 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
2759 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2760 struct hist_field
*hist_field
= NULL
;
2761 struct trace_event_file
*file
;
2763 if (!system
|| !event_name
) {
2764 hist_field
= find_match_var(hist_data
, var_name
);
2765 if (IS_ERR(hist_field
))
2771 file
= find_var_file(tr
, system
, event_name
, var_name
);
2775 hist_field
= find_file_var(file
, var_name
);
2780 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
2781 struct tracing_map_elt
*elt
,
2782 struct ring_buffer_event
*rbe
,
2785 struct hist_elt_data
*elt_data
;
2788 if (WARN_ON_ONCE(!elt
))
2791 elt_data
= elt
->private_data
;
2792 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
2797 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
2798 u64
*var_ref_vals
, bool self
)
2800 struct hist_trigger_data
*var_data
;
2801 struct tracing_map_elt
*var_elt
;
2802 struct hist_field
*hist_field
;
2803 unsigned int i
, var_idx
;
2804 bool resolved
= true;
2807 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2808 hist_field
= hist_data
->var_refs
[i
];
2809 var_idx
= hist_field
->var
.idx
;
2810 var_data
= hist_field
->var
.hist_data
;
2812 if (var_data
== NULL
) {
2817 if ((self
&& var_data
!= hist_data
) ||
2818 (!self
&& var_data
== hist_data
))
2821 var_elt
= tracing_map_lookup(var_data
->map
, key
);
2827 if (!tracing_map_var_set(var_elt
, var_idx
)) {
2832 if (self
|| !hist_field
->read_once
)
2833 var_val
= tracing_map_read_var(var_elt
, var_idx
);
2835 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
2837 var_ref_vals
[i
] = var_val
;
2843 static const char *hist_field_name(struct hist_field
*field
,
2846 const char *field_name
= "";
2852 field_name
= field
->field
->name
;
2853 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
2854 field
->flags
& HIST_FIELD_FL_ALIAS
)
2855 field_name
= hist_field_name(field
->operands
[0], ++level
);
2856 else if (field
->flags
& HIST_FIELD_FL_CPU
)
2858 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
2859 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
2860 if (field
->system
) {
2861 static char full_name
[MAX_FILTER_STR_VAL
];
2863 strcat(full_name
, field
->system
);
2864 strcat(full_name
, ".");
2865 strcat(full_name
, field
->event_name
);
2866 strcat(full_name
, ".");
2867 strcat(full_name
, field
->name
);
2868 field_name
= full_name
;
2870 field_name
= field
->name
;
2871 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
2872 field_name
= "common_timestamp";
2874 if (field_name
== NULL
)
2880 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
2882 hist_field_fn_t fn
= NULL
;
2884 switch (field_size
) {
2886 if (field_is_signed
)
2887 fn
= hist_field_s64
;
2889 fn
= hist_field_u64
;
2892 if (field_is_signed
)
2893 fn
= hist_field_s32
;
2895 fn
= hist_field_u32
;
2898 if (field_is_signed
)
2899 fn
= hist_field_s16
;
2901 fn
= hist_field_u16
;
2904 if (field_is_signed
)
2914 static int parse_map_size(char *str
)
2916 unsigned long size
, map_bits
;
2919 ret
= kstrtoul(str
, 0, &size
);
2923 map_bits
= ilog2(roundup_pow_of_two(size
));
2924 if (map_bits
< TRACING_MAP_BITS_MIN
||
2925 map_bits
> TRACING_MAP_BITS_MAX
)
2933 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
2940 for (i
= 0; i
< attrs
->n_assignments
; i
++)
2941 kfree(attrs
->assignment_str
[i
]);
2943 for (i
= 0; i
< attrs
->n_actions
; i
++)
2944 kfree(attrs
->action_str
[i
]);
2947 kfree(attrs
->sort_key_str
);
2948 kfree(attrs
->keys_str
);
2949 kfree(attrs
->vals_str
);
2950 kfree(attrs
->clock
);
2954 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
2958 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
2961 if ((str_has_prefix(str
, "onmatch(")) ||
2962 (str_has_prefix(str
, "onmax(")) ||
2963 (str_has_prefix(str
, "onchange("))) {
2964 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
2965 if (!attrs
->action_str
[attrs
->n_actions
]) {
2975 static int parse_assignment(struct trace_array
*tr
,
2976 char *str
, struct hist_trigger_attrs
*attrs
)
2980 if ((len
= str_has_prefix(str
, "key=")) ||
2981 (len
= str_has_prefix(str
, "keys="))) {
2982 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2983 if (!attrs
->keys_str
) {
2987 } else if ((len
= str_has_prefix(str
, "val=")) ||
2988 (len
= str_has_prefix(str
, "vals=")) ||
2989 (len
= str_has_prefix(str
, "values="))) {
2990 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2991 if (!attrs
->vals_str
) {
2995 } else if ((len
= str_has_prefix(str
, "sort="))) {
2996 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2997 if (!attrs
->sort_key_str
) {
3001 } else if (str_has_prefix(str
, "name=")) {
3002 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
3007 } else if ((len
= str_has_prefix(str
, "clock="))) {
3010 str
= strstrip(str
);
3011 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
3012 if (!attrs
->clock
) {
3016 } else if ((len
= str_has_prefix(str
, "size="))) {
3017 int map_bits
= parse_map_size(str
+ len
);
3023 attrs
->map_bits
= map_bits
;
3027 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
3028 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
3033 assignment
= kstrdup(str
, GFP_KERNEL
);
3039 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
3045 static struct hist_trigger_attrs
*
3046 parse_hist_trigger_attrs(struct trace_array
*tr
, char *trigger_str
)
3048 struct hist_trigger_attrs
*attrs
;
3051 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
3053 return ERR_PTR(-ENOMEM
);
3055 while (trigger_str
) {
3056 char *str
= strsep(&trigger_str
, ":");
3059 rhs
= strchr(str
, '=');
3061 if (!strlen(++rhs
)) {
3063 hist_err(tr
, HIST_ERR_EMPTY_ASSIGNMENT
, errpos(str
));
3066 ret
= parse_assignment(tr
, str
, attrs
);
3069 } else if (strcmp(str
, "pause") == 0)
3070 attrs
->pause
= true;
3071 else if ((strcmp(str
, "cont") == 0) ||
3072 (strcmp(str
, "continue") == 0))
3074 else if (strcmp(str
, "clear") == 0)
3075 attrs
->clear
= true;
3077 ret
= parse_action(str
, attrs
);
3083 if (!attrs
->keys_str
) {
3088 if (!attrs
->clock
) {
3089 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
3090 if (!attrs
->clock
) {
3098 destroy_hist_trigger_attrs(attrs
);
3100 return ERR_PTR(ret
);
3103 static inline void save_comm(char *comm
, struct task_struct
*task
)
3106 strcpy(comm
, "<idle>");
3110 if (WARN_ON_ONCE(task
->pid
< 0)) {
3111 strcpy(comm
, "<XXX>");
3115 strncpy(comm
, task
->comm
, TASK_COMM_LEN
);
3118 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
3122 for (i
= 0; i
< SYNTH_FIELDS_MAX
; i
++)
3123 kfree(elt_data
->field_var_str
[i
]);
3125 kfree(elt_data
->comm
);
3129 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
3131 struct hist_elt_data
*elt_data
= elt
->private_data
;
3133 hist_elt_data_free(elt_data
);
3136 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
3138 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
3139 unsigned int size
= TASK_COMM_LEN
;
3140 struct hist_elt_data
*elt_data
;
3141 struct hist_field
*key_field
;
3142 unsigned int i
, n_str
;
3144 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
3148 for_each_hist_key_field(i
, hist_data
) {
3149 key_field
= hist_data
->fields
[i
];
3151 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
3152 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
3153 if (!elt_data
->comm
) {
3161 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_save_var_str
;
3163 size
= STR_VAR_LEN_MAX
;
3165 for (i
= 0; i
< n_str
; i
++) {
3166 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
3167 if (!elt_data
->field_var_str
[i
]) {
3168 hist_elt_data_free(elt_data
);
3173 elt
->private_data
= elt_data
;
3178 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
3180 struct hist_elt_data
*elt_data
= elt
->private_data
;
3183 save_comm(elt_data
->comm
, current
);
3186 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
3187 .elt_alloc
= hist_trigger_elt_data_alloc
,
3188 .elt_free
= hist_trigger_elt_data_free
,
3189 .elt_init
= hist_trigger_elt_data_init
,
3192 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
3194 const char *flags_str
= NULL
;
3196 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
3198 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
3200 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
3201 flags_str
= "sym-offset";
3202 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
3203 flags_str
= "execname";
3204 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
3205 flags_str
= "syscall";
3206 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
3208 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
3209 flags_str
= "usecs";
3214 static void expr_field_str(struct hist_field
*field
, char *expr
)
3216 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
3219 strcat(expr
, hist_field_name(field
, 0));
3221 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
3222 const char *flags_str
= get_hist_field_flags(field
);
3226 strcat(expr
, flags_str
);
3231 static char *expr_str(struct hist_field
*field
, unsigned int level
)
3238 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
3242 if (!field
->operands
[0]) {
3243 expr_field_str(field
, expr
);
3247 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
3251 subexpr
= expr_str(field
->operands
[0], ++level
);
3256 strcat(expr
, subexpr
);
3264 expr_field_str(field
->operands
[0], expr
);
3266 switch (field
->operator) {
3267 case FIELD_OP_MINUS
:
3278 expr_field_str(field
->operands
[1], expr
);
3283 static int contains_operator(char *str
)
3285 enum field_op_id field_op
= FIELD_OP_NONE
;
3288 op
= strpbrk(str
, "+-");
3290 return FIELD_OP_NONE
;
3295 field_op
= FIELD_OP_UNARY_MINUS
;
3297 field_op
= FIELD_OP_MINUS
;
3300 field_op
= FIELD_OP_PLUS
;
3309 static void get_hist_field(struct hist_field
*hist_field
)
3314 static void __destroy_hist_field(struct hist_field
*hist_field
)
3316 if (--hist_field
->ref
> 1)
3319 kfree(hist_field
->var
.name
);
3320 kfree(hist_field
->name
);
3321 kfree(hist_field
->type
);
3326 static void destroy_hist_field(struct hist_field
*hist_field
,
3337 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
3338 return; /* var refs will be destroyed separately */
3340 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
3341 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
3343 __destroy_hist_field(hist_field
);
3346 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
3347 struct ftrace_event_field
*field
,
3348 unsigned long flags
,
3351 struct hist_field
*hist_field
;
3353 if (field
&& is_function_field(field
))
3356 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3360 hist_field
->ref
= 1;
3362 hist_field
->hist_data
= hist_data
;
3364 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
3365 goto out
; /* caller will populate */
3367 if (flags
& HIST_FIELD_FL_VAR_REF
) {
3368 hist_field
->fn
= hist_field_var_ref
;
3372 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
3373 hist_field
->fn
= hist_field_counter
;
3374 hist_field
->size
= sizeof(u64
);
3375 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
3376 if (!hist_field
->type
)
3381 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
3382 hist_field
->fn
= hist_field_none
;
3386 if (flags
& HIST_FIELD_FL_LOG2
) {
3387 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
3388 hist_field
->fn
= hist_field_log2
;
3389 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
3390 hist_field
->size
= hist_field
->operands
[0]->size
;
3391 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
3392 if (!hist_field
->type
)
3397 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
3398 hist_field
->fn
= hist_field_timestamp
;
3399 hist_field
->size
= sizeof(u64
);
3400 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
3401 if (!hist_field
->type
)
3406 if (flags
& HIST_FIELD_FL_CPU
) {
3407 hist_field
->fn
= hist_field_cpu
;
3408 hist_field
->size
= sizeof(int);
3409 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
3410 if (!hist_field
->type
)
3415 if (WARN_ON_ONCE(!field
))
3418 if (is_string_field(field
)) {
3419 flags
|= HIST_FIELD_FL_STRING
;
3421 hist_field
->size
= MAX_FILTER_STR_VAL
;
3422 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
3423 if (!hist_field
->type
)
3426 if (field
->filter_type
== FILTER_STATIC_STRING
)
3427 hist_field
->fn
= hist_field_string
;
3428 else if (field
->filter_type
== FILTER_DYN_STRING
)
3429 hist_field
->fn
= hist_field_dynstring
;
3431 hist_field
->fn
= hist_field_pstring
;
3433 hist_field
->size
= field
->size
;
3434 hist_field
->is_signed
= field
->is_signed
;
3435 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
3436 if (!hist_field
->type
)
3439 hist_field
->fn
= select_value_fn(field
->size
,
3441 if (!hist_field
->fn
) {
3442 destroy_hist_field(hist_field
, 0);
3447 hist_field
->field
= field
;
3448 hist_field
->flags
= flags
;
3451 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
3452 if (!hist_field
->var
.name
)
3458 destroy_hist_field(hist_field
, 0);
3462 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
3466 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
3467 if (hist_data
->fields
[i
]) {
3468 destroy_hist_field(hist_data
->fields
[i
], 0);
3469 hist_data
->fields
[i
] = NULL
;
3473 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3474 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
3475 __destroy_hist_field(hist_data
->var_refs
[i
]);
3476 hist_data
->var_refs
[i
] = NULL
;
3480 static int init_var_ref(struct hist_field
*ref_field
,
3481 struct hist_field
*var_field
,
3482 char *system
, char *event_name
)
3486 ref_field
->var
.idx
= var_field
->var
.idx
;
3487 ref_field
->var
.hist_data
= var_field
->hist_data
;
3488 ref_field
->size
= var_field
->size
;
3489 ref_field
->is_signed
= var_field
->is_signed
;
3490 ref_field
->flags
|= var_field
->flags
&
3491 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3494 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
3495 if (!ref_field
->system
)
3500 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
3501 if (!ref_field
->event_name
) {
3507 if (var_field
->var
.name
) {
3508 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
3509 if (!ref_field
->name
) {
3513 } else if (var_field
->name
) {
3514 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
3515 if (!ref_field
->name
) {
3521 ref_field
->type
= kstrdup(var_field
->type
, GFP_KERNEL
);
3522 if (!ref_field
->type
) {
3529 kfree(ref_field
->system
);
3530 kfree(ref_field
->event_name
);
3531 kfree(ref_field
->name
);
3536 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
3537 struct hist_field
*var_field
)
3539 struct hist_field
*ref_field
;
3542 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3543 ref_field
= hist_data
->var_refs
[i
];
3544 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
3545 ref_field
->var
.hist_data
== var_field
->hist_data
)
3553 * create_var_ref - Create a variable reference and attach it to trigger
3554 * @hist_data: The trigger that will be referencing the variable
3555 * @var_field: The VAR field to create a reference to
3556 * @system: The optional system string
3557 * @event_name: The optional event_name string
3559 * Given a variable hist_field, create a VAR_REF hist_field that
3560 * represents a reference to it.
3562 * This function also adds the reference to the trigger that
3563 * now references the variable.
3565 * Return: The VAR_REF field if successful, NULL if not
3567 static struct hist_field
*create_var_ref(struct hist_trigger_data
*hist_data
,
3568 struct hist_field
*var_field
,
3569 char *system
, char *event_name
)
3571 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
3572 struct hist_field
*ref_field
;
3575 /* Check if the variable already exists */
3576 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3577 ref_field
= hist_data
->var_refs
[i
];
3578 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
3579 ref_field
->var
.hist_data
== var_field
->hist_data
) {
3580 get_hist_field(ref_field
);
3585 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
3587 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
3588 destroy_hist_field(ref_field
, 0);
3592 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
3593 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
3599 static bool is_var_ref(char *var_name
)
3601 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
3607 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
3613 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
3614 name
= hist_data
->attrs
->var_defs
.name
[i
];
3616 if (strcmp(var_name
, name
) == 0) {
3617 field
= hist_data
->attrs
->var_defs
.expr
[i
];
3618 if (contains_operator(field
) || is_var_ref(field
))
3627 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
3628 char *system
, char *event_name
,
3631 struct trace_event_call
*call
;
3633 if (system
&& event_name
) {
3634 call
= hist_data
->event_file
->event_call
;
3636 if (strcmp(system
, call
->class->system
) != 0)
3639 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3643 if (!!system
!= !!event_name
)
3646 if (!is_var_ref(var_name
))
3651 return field_name_from_var(hist_data
, var_name
);
3654 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
3655 char *system
, char *event_name
,
3658 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
3659 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3661 if (!is_var_ref(var_name
))
3666 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
3668 ref_field
= create_var_ref(hist_data
, var_field
,
3669 system
, event_name
);
3672 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
3677 static struct ftrace_event_field
*
3678 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
3679 char *field_str
, unsigned long *flags
)
3681 struct ftrace_event_field
*field
= NULL
;
3682 char *field_name
, *modifier
, *str
;
3683 struct trace_array
*tr
= file
->tr
;
3685 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
3687 return ERR_PTR(-ENOMEM
);
3689 field_name
= strsep(&modifier
, ".");
3691 if (strcmp(modifier
, "hex") == 0)
3692 *flags
|= HIST_FIELD_FL_HEX
;
3693 else if (strcmp(modifier
, "sym") == 0)
3694 *flags
|= HIST_FIELD_FL_SYM
;
3695 else if (strcmp(modifier
, "sym-offset") == 0)
3696 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
3697 else if ((strcmp(modifier
, "execname") == 0) &&
3698 (strcmp(field_name
, "common_pid") == 0))
3699 *flags
|= HIST_FIELD_FL_EXECNAME
;
3700 else if (strcmp(modifier
, "syscall") == 0)
3701 *flags
|= HIST_FIELD_FL_SYSCALL
;
3702 else if (strcmp(modifier
, "log2") == 0)
3703 *flags
|= HIST_FIELD_FL_LOG2
;
3704 else if (strcmp(modifier
, "usecs") == 0)
3705 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
3707 hist_err(tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(modifier
));
3708 field
= ERR_PTR(-EINVAL
);
3713 if (strcmp(field_name
, "common_timestamp") == 0) {
3714 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
3715 hist_data
->enable_timestamps
= true;
3716 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
3717 hist_data
->attrs
->ts_in_usecs
= true;
3718 } else if (strcmp(field_name
, "cpu") == 0)
3719 *flags
|= HIST_FIELD_FL_CPU
;
3721 field
= trace_find_event_field(file
->event_call
, field_name
);
3722 if (!field
|| !field
->size
) {
3723 hist_err(tr
, HIST_ERR_FIELD_NOT_FOUND
, errpos(field_name
));
3724 field
= ERR_PTR(-EINVAL
);
3734 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
3735 struct hist_field
*var_ref
,
3738 struct hist_field
*alias
= NULL
;
3739 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
3741 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3745 alias
->fn
= var_ref
->fn
;
3746 alias
->operands
[0] = var_ref
;
3748 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
3749 destroy_hist_field(alias
, 0);
3753 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
3758 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
3759 struct trace_event_file
*file
, char *str
,
3760 unsigned long *flags
, char *var_name
)
3762 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
3763 struct ftrace_event_field
*field
= NULL
;
3764 struct hist_field
*hist_field
= NULL
;
3767 s
= strchr(str
, '.');
3769 s
= strchr(++s
, '.');
3771 ref_system
= strsep(&str
, ".");
3776 ref_event
= strsep(&str
, ".");
3785 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
3787 hist_field
= parse_var_ref(hist_data
, ref_system
,
3788 ref_event
, ref_var
);
3791 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
3802 field
= parse_field(hist_data
, file
, str
, flags
);
3803 if (IS_ERR(field
)) {
3804 ret
= PTR_ERR(field
);
3808 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
3816 return ERR_PTR(ret
);
3819 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
3820 struct trace_event_file
*file
,
3821 char *str
, unsigned long flags
,
3822 char *var_name
, unsigned int level
);
3824 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
3825 struct trace_event_file
*file
,
3826 char *str
, unsigned long flags
,
3827 char *var_name
, unsigned int level
)
3829 struct hist_field
*operand1
, *expr
= NULL
;
3830 unsigned long operand_flags
;
3834 /* we support only -(xxx) i.e. explicit parens required */
3837 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
3842 str
++; /* skip leading '-' */
3844 s
= strchr(str
, '(');
3852 s
= strrchr(str
, ')');
3856 ret
= -EINVAL
; /* no closing ')' */
3860 flags
|= HIST_FIELD_FL_EXPR
;
3861 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3868 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
3869 if (IS_ERR(operand1
)) {
3870 ret
= PTR_ERR(operand1
);
3874 expr
->flags
|= operand1
->flags
&
3875 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3876 expr
->fn
= hist_field_unary_minus
;
3877 expr
->operands
[0] = operand1
;
3878 expr
->operator = FIELD_OP_UNARY_MINUS
;
3879 expr
->name
= expr_str(expr
, 0);
3880 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
3888 destroy_hist_field(expr
, 0);
3889 return ERR_PTR(ret
);
3892 static int check_expr_operands(struct trace_array
*tr
,
3893 struct hist_field
*operand1
,
3894 struct hist_field
*operand2
)
3896 unsigned long operand1_flags
= operand1
->flags
;
3897 unsigned long operand2_flags
= operand2
->flags
;
3899 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
3900 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
3901 struct hist_field
*var
;
3903 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
3906 operand1_flags
= var
->flags
;
3909 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
3910 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
3911 struct hist_field
*var
;
3913 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
3916 operand2_flags
= var
->flags
;
3919 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
3920 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
3921 hist_err(tr
, HIST_ERR_TIMESTAMP_MISMATCH
, 0);
3928 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
3929 struct trace_event_file
*file
,
3930 char *str
, unsigned long flags
,
3931 char *var_name
, unsigned int level
)
3933 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
3934 unsigned long operand_flags
;
3935 int field_op
, ret
= -EINVAL
;
3936 char *sep
, *operand1_str
;
3939 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
3940 return ERR_PTR(-EINVAL
);
3943 field_op
= contains_operator(str
);
3945 if (field_op
== FIELD_OP_NONE
)
3946 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
3948 if (field_op
== FIELD_OP_UNARY_MINUS
)
3949 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
3952 case FIELD_OP_MINUS
:
3962 operand1_str
= strsep(&str
, sep
);
3963 if (!operand1_str
|| !str
)
3967 operand1
= parse_atom(hist_data
, file
, operand1_str
,
3968 &operand_flags
, NULL
);
3969 if (IS_ERR(operand1
)) {
3970 ret
= PTR_ERR(operand1
);
3975 /* rest of string could be another expression e.g. b+c in a+b+c */
3977 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
3978 if (IS_ERR(operand2
)) {
3979 ret
= PTR_ERR(operand2
);
3984 ret
= check_expr_operands(file
->tr
, operand1
, operand2
);
3988 flags
|= HIST_FIELD_FL_EXPR
;
3990 flags
|= operand1
->flags
&
3991 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3993 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3999 operand1
->read_once
= true;
4000 operand2
->read_once
= true;
4002 expr
->operands
[0] = operand1
;
4003 expr
->operands
[1] = operand2
;
4004 expr
->operator = field_op
;
4005 expr
->name
= expr_str(expr
, 0);
4006 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
4013 case FIELD_OP_MINUS
:
4014 expr
->fn
= hist_field_minus
;
4017 expr
->fn
= hist_field_plus
;
4026 destroy_hist_field(operand1
, 0);
4027 destroy_hist_field(operand2
, 0);
4028 destroy_hist_field(expr
, 0);
4030 return ERR_PTR(ret
);
4033 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
4034 struct trace_event_file
*file
)
4036 struct event_trigger_data
*test
;
4038 lockdep_assert_held(&event_mutex
);
4040 list_for_each_entry(test
, &file
->triggers
, list
) {
4041 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
4042 if (test
->private_data
== hist_data
)
4043 return test
->filter_str
;
4050 static struct event_command trigger_hist_cmd
;
4051 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
4052 struct trace_event_file
*file
,
4053 char *glob
, char *cmd
, char *param
);
4055 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
4056 struct hist_trigger_data
*hist_data
,
4057 unsigned int n_keys
)
4059 struct hist_field
*target_hist_field
, *hist_field
;
4060 unsigned int n
, i
, j
;
4062 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
4065 i
= hist_data
->n_vals
;
4066 j
= target_hist_data
->n_vals
;
4068 for (n
= 0; n
< n_keys
; n
++) {
4069 hist_field
= hist_data
->fields
[i
+ n
];
4070 target_hist_field
= target_hist_data
->fields
[j
+ n
];
4072 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
4074 if (hist_field
->size
!= target_hist_field
->size
)
4076 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
4083 static struct hist_trigger_data
*
4084 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
4085 struct trace_event_file
*file
)
4087 struct hist_trigger_data
*hist_data
;
4088 struct event_trigger_data
*test
;
4089 unsigned int n_keys
;
4091 lockdep_assert_held(&event_mutex
);
4093 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
4095 list_for_each_entry(test
, &file
->triggers
, list
) {
4096 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
4097 hist_data
= test
->private_data
;
4099 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
4107 static struct trace_event_file
*event_file(struct trace_array
*tr
,
4108 char *system
, char *event_name
)
4110 struct trace_event_file
*file
;
4112 file
= __find_event_file(tr
, system
, event_name
);
4114 return ERR_PTR(-EINVAL
);
4119 static struct hist_field
*
4120 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
4121 char *system
, char *event_name
, char *field_name
)
4123 struct hist_field
*event_var
;
4124 char *synthetic_name
;
4126 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
4127 if (!synthetic_name
)
4128 return ERR_PTR(-ENOMEM
);
4130 strcpy(synthetic_name
, "synthetic_");
4131 strcat(synthetic_name
, field_name
);
4133 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
4135 kfree(synthetic_name
);
4141 * create_field_var_hist - Automatically create a histogram and var for a field
4142 * @target_hist_data: The target hist trigger
4143 * @subsys_name: Optional subsystem name
4144 * @event_name: Optional event name
4145 * @field_name: The name of the field (and the resulting variable)
4147 * Hist trigger actions fetch data from variables, not directly from
4148 * events. However, for convenience, users are allowed to directly
4149 * specify an event field in an action, which will be automatically
4150 * converted into a variable on their behalf.
4152 * If a user specifies a field on an event that isn't the event the
4153 * histogram currently being defined (the target event histogram), the
4154 * only way that can be accomplished is if a new hist trigger is
4155 * created and the field variable defined on that.
4157 * This function creates a new histogram compatible with the target
4158 * event (meaning a histogram with the same key as the target
4159 * histogram), and creates a variable for the specified field, but
4160 * with 'synthetic_' prepended to the variable name in order to avoid
4161 * collision with normal field variables.
4163 * Return: The variable created for the field.
4165 static struct hist_field
*
4166 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
4167 char *subsys_name
, char *event_name
, char *field_name
)
4169 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
4170 struct hist_field
*event_var
= ERR_PTR(-EINVAL
);
4171 struct hist_trigger_data
*hist_data
;
4172 unsigned int i
, n
, first
= true;
4173 struct field_var_hist
*var_hist
;
4174 struct trace_event_file
*file
;
4175 struct hist_field
*key_field
;
4180 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
4181 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
4182 return ERR_PTR(-EINVAL
);
4185 file
= event_file(tr
, subsys_name
, event_name
);
4188 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
4189 ret
= PTR_ERR(file
);
4190 return ERR_PTR(ret
);
4194 * Look for a histogram compatible with target. We'll use the
4195 * found histogram specification to create a new matching
4196 * histogram with our variable on it. target_hist_data is not
4197 * yet a registered histogram so we can't use that.
4199 hist_data
= find_compatible_hist(target_hist_data
, file
);
4201 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
4202 return ERR_PTR(-EINVAL
);
4205 /* See if a synthetic field variable has already been created */
4206 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
4207 event_name
, field_name
);
4208 if (!IS_ERR_OR_NULL(event_var
))
4211 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
4213 return ERR_PTR(-ENOMEM
);
4215 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
4218 return ERR_PTR(-ENOMEM
);
4221 /* Use the same keys as the compatible histogram */
4222 strcat(cmd
, "keys=");
4224 for_each_hist_key_field(i
, hist_data
) {
4225 key_field
= hist_data
->fields
[i
];
4228 strcat(cmd
, key_field
->field
->name
);
4232 /* Create the synthetic field variable specification */
4233 strcat(cmd
, ":synthetic_");
4234 strcat(cmd
, field_name
);
4236 strcat(cmd
, field_name
);
4238 /* Use the same filter as the compatible histogram */
4239 saved_filter
= find_trigger_filter(hist_data
, file
);
4241 strcat(cmd
, " if ");
4242 strcat(cmd
, saved_filter
);
4245 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
4246 if (!var_hist
->cmd
) {
4249 return ERR_PTR(-ENOMEM
);
4252 /* Save the compatible histogram information */
4253 var_hist
->hist_data
= hist_data
;
4255 /* Create the new histogram with our variable */
4256 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
4260 kfree(var_hist
->cmd
);
4262 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
4263 return ERR_PTR(ret
);
4268 /* If we can't find the variable, something went wrong */
4269 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
4270 event_name
, field_name
);
4271 if (IS_ERR_OR_NULL(event_var
)) {
4272 kfree(var_hist
->cmd
);
4274 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
4275 return ERR_PTR(-EINVAL
);
4278 n
= target_hist_data
->n_field_var_hists
;
4279 target_hist_data
->field_var_hists
[n
] = var_hist
;
4280 target_hist_data
->n_field_var_hists
++;
4285 static struct hist_field
*
4286 find_target_event_var(struct hist_trigger_data
*hist_data
,
4287 char *subsys_name
, char *event_name
, char *var_name
)
4289 struct trace_event_file
*file
= hist_data
->event_file
;
4290 struct hist_field
*hist_field
= NULL
;
4293 struct trace_event_call
*call
;
4298 call
= file
->event_call
;
4300 if (strcmp(subsys_name
, call
->class->system
) != 0)
4303 if (strcmp(event_name
, trace_event_name(call
)) != 0)
4307 hist_field
= find_var_field(hist_data
, var_name
);
4312 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
4313 struct ring_buffer_event
*rbe
,
4315 struct field_var
**field_vars
,
4316 unsigned int n_field_vars
,
4317 unsigned int field_var_str_start
)
4319 struct hist_elt_data
*elt_data
= elt
->private_data
;
4320 unsigned int i
, j
, var_idx
;
4323 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
4324 struct field_var
*field_var
= field_vars
[i
];
4325 struct hist_field
*var
= field_var
->var
;
4326 struct hist_field
*val
= field_var
->val
;
4328 var_val
= val
->fn(val
, elt
, rbe
, rec
);
4329 var_idx
= var
->var
.idx
;
4331 if (val
->flags
& HIST_FIELD_FL_STRING
) {
4332 char *str
= elt_data
->field_var_str
[j
++];
4333 char *val_str
= (char *)(uintptr_t)var_val
;
4335 strscpy(str
, val_str
, STR_VAR_LEN_MAX
);
4336 var_val
= (u64
)(uintptr_t)str
;
4338 tracing_map_set_var(elt
, var_idx
, var_val
);
4342 static void update_field_vars(struct hist_trigger_data
*hist_data
,
4343 struct tracing_map_elt
*elt
,
4344 struct ring_buffer_event
*rbe
,
4347 __update_field_vars(elt
, rbe
, rec
, hist_data
->field_vars
,
4348 hist_data
->n_field_vars
, 0);
4351 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
4352 struct tracing_map_elt
*elt
, void *rec
,
4353 struct ring_buffer_event
*rbe
, void *key
,
4354 struct action_data
*data
, u64
*var_ref_vals
)
4356 __update_field_vars(elt
, rbe
, rec
, hist_data
->save_vars
,
4357 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
4360 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
4361 struct trace_event_file
*file
,
4362 char *name
, int size
, const char *type
)
4364 struct hist_field
*var
;
4367 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
4368 var
= ERR_PTR(-EINVAL
);
4372 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
4374 var
= ERR_PTR(-ENOMEM
);
4378 idx
= tracing_map_add_var(hist_data
->map
);
4381 var
= ERR_PTR(-EINVAL
);
4385 var
->flags
= HIST_FIELD_FL_VAR
;
4387 var
->var
.hist_data
= var
->hist_data
= hist_data
;
4389 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
4390 var
->type
= kstrdup(type
, GFP_KERNEL
);
4391 if (!var
->var
.name
|| !var
->type
) {
4392 kfree(var
->var
.name
);
4395 var
= ERR_PTR(-ENOMEM
);
4401 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
4402 struct trace_event_file
*file
,
4405 struct hist_field
*val
= NULL
, *var
= NULL
;
4406 unsigned long flags
= HIST_FIELD_FL_VAR
;
4407 struct trace_array
*tr
= file
->tr
;
4408 struct field_var
*field_var
;
4411 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
4412 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
4417 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
4419 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
4424 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
4426 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
4432 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
4440 field_var
->var
= var
;
4441 field_var
->val
= val
;
4445 field_var
= ERR_PTR(ret
);
4450 * create_target_field_var - Automatically create a variable for a field
4451 * @target_hist_data: The target hist trigger
4452 * @subsys_name: Optional subsystem name
4453 * @event_name: Optional event name
4454 * @var_name: The name of the field (and the resulting variable)
4456 * Hist trigger actions fetch data from variables, not directly from
4457 * events. However, for convenience, users are allowed to directly
4458 * specify an event field in an action, which will be automatically
4459 * converted into a variable on their behalf.
4461 * This function creates a field variable with the name var_name on
4462 * the hist trigger currently being defined on the target event. If
4463 * subsys_name and event_name are specified, this function simply
4464 * verifies that they do in fact match the target event subsystem and
4467 * Return: The variable created for the field.
4469 static struct field_var
*
4470 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
4471 char *subsys_name
, char *event_name
, char *var_name
)
4473 struct trace_event_file
*file
= target_hist_data
->event_file
;
4476 struct trace_event_call
*call
;
4481 call
= file
->event_call
;
4483 if (strcmp(subsys_name
, call
->class->system
) != 0)
4486 if (strcmp(event_name
, trace_event_name(call
)) != 0)
4490 return create_field_var(target_hist_data
, file
, var_name
);
4493 static bool check_track_val_max(u64 track_val
, u64 var_val
)
4495 if (var_val
<= track_val
)
4501 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
4503 if (var_val
== track_val
)
4509 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
4510 struct tracing_map_elt
*elt
,
4511 struct action_data
*data
)
4513 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
4516 track_val
= tracing_map_read_var(elt
, track_var_idx
);
4521 static void save_track_val(struct hist_trigger_data
*hist_data
,
4522 struct tracing_map_elt
*elt
,
4523 struct action_data
*data
, u64 var_val
)
4525 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
4527 tracing_map_set_var(elt
, track_var_idx
, var_val
);
4530 static void save_track_data(struct hist_trigger_data
*hist_data
,
4531 struct tracing_map_elt
*elt
, void *rec
,
4532 struct ring_buffer_event
*rbe
, void *key
,
4533 struct action_data
*data
, u64
*var_ref_vals
)
4535 if (data
->track_data
.save_data
)
4536 data
->track_data
.save_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4539 static bool check_track_val(struct tracing_map_elt
*elt
,
4540 struct action_data
*data
,
4543 struct hist_trigger_data
*hist_data
;
4546 hist_data
= data
->track_data
.track_var
->hist_data
;
4547 track_val
= get_track_val(hist_data
, elt
, data
);
4549 return data
->track_data
.check_val(track_val
, var_val
);
4552 #ifdef CONFIG_TRACER_SNAPSHOT
4553 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
4555 /* called with tr->max_lock held */
4556 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
4557 struct hist_elt_data
*elt_data
, *track_elt_data
;
4558 struct snapshot_context
*context
= cond_data
;
4559 struct action_data
*action
;
4565 action
= track_data
->action_data
;
4567 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
4568 track_data
->action_data
);
4570 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
4573 track_data
->track_val
= track_val
;
4574 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
4576 elt_data
= context
->elt
->private_data
;
4577 track_elt_data
= track_data
->elt
.private_data
;
4579 strncpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
4581 track_data
->updated
= true;
4586 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
4587 struct tracing_map_elt
*elt
, void *rec
,
4588 struct ring_buffer_event
*rbe
, void *key
,
4589 struct action_data
*data
,
4592 struct trace_event_file
*file
= hist_data
->event_file
;
4593 struct snapshot_context context
;
4598 tracing_snapshot_cond(file
->tr
, &context
);
4601 static void hist_trigger_print_key(struct seq_file
*m
,
4602 struct hist_trigger_data
*hist_data
,
4604 struct tracing_map_elt
*elt
);
4606 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
4610 if (!hist_data
->n_actions
)
4613 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4614 struct action_data
*data
= hist_data
->actions
[i
];
4616 if (data
->action
== ACTION_SNAPSHOT
)
4623 static void track_data_snapshot_print(struct seq_file
*m
,
4624 struct hist_trigger_data
*hist_data
)
4626 struct trace_event_file
*file
= hist_data
->event_file
;
4627 struct track_data
*track_data
;
4628 struct action_data
*action
;
4630 track_data
= tracing_cond_snapshot_data(file
->tr
);
4634 if (!track_data
->updated
)
4637 action
= snapshot_action(hist_data
);
4641 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
4642 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
4643 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
4644 action
->track_data
.var_str
, track_data
->track_val
);
4646 seq_puts(m
, "\ttriggered by event with key: ");
4647 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
4651 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
4655 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
4656 struct tracing_map_elt
*elt
, void *rec
,
4657 struct ring_buffer_event
*rbe
, void *key
,
4658 struct action_data
*data
,
4659 u64
*var_ref_vals
) {}
4660 static void track_data_snapshot_print(struct seq_file
*m
,
4661 struct hist_trigger_data
*hist_data
) {}
4662 #endif /* CONFIG_TRACER_SNAPSHOT */
4664 static void track_data_print(struct seq_file
*m
,
4665 struct hist_trigger_data
*hist_data
,
4666 struct tracing_map_elt
*elt
,
4667 struct action_data
*data
)
4669 u64 track_val
= get_track_val(hist_data
, elt
, data
);
4670 unsigned int i
, save_var_idx
;
4672 if (data
->handler
== HANDLER_ONMAX
)
4673 seq_printf(m
, "\n\tmax: %10llu", track_val
);
4674 else if (data
->handler
== HANDLER_ONCHANGE
)
4675 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
4677 if (data
->action
== ACTION_SNAPSHOT
)
4680 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4681 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
4682 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
4685 save_var_idx
= save_var
->var
.idx
;
4687 val
= tracing_map_read_var(elt
, save_var_idx
);
4689 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
4690 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
4691 (char *)(uintptr_t)(val
));
4693 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
4697 static void ontrack_action(struct hist_trigger_data
*hist_data
,
4698 struct tracing_map_elt
*elt
, void *rec
,
4699 struct ring_buffer_event
*rbe
, void *key
,
4700 struct action_data
*data
, u64
*var_ref_vals
)
4702 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
4704 if (check_track_val(elt
, data
, var_val
)) {
4705 save_track_val(hist_data
, elt
, data
, var_val
);
4706 save_track_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4710 static void action_data_destroy(struct action_data
*data
)
4714 lockdep_assert_held(&event_mutex
);
4716 kfree(data
->action_name
);
4718 for (i
= 0; i
< data
->n_params
; i
++)
4719 kfree(data
->params
[i
]);
4721 if (data
->synth_event
)
4722 data
->synth_event
->ref
--;
4724 kfree(data
->synth_event_name
);
4729 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
4730 struct action_data
*data
)
4732 struct trace_event_file
*file
= hist_data
->event_file
;
4734 destroy_hist_field(data
->track_data
.track_var
, 0);
4736 if (data
->action
== ACTION_SNAPSHOT
) {
4737 struct track_data
*track_data
;
4739 track_data
= tracing_cond_snapshot_data(file
->tr
);
4740 if (track_data
&& track_data
->hist_data
== hist_data
) {
4741 tracing_snapshot_cond_disable(file
->tr
);
4742 track_data_free(track_data
);
4746 kfree(data
->track_data
.var_str
);
4748 action_data_destroy(data
);
4751 static int action_create(struct hist_trigger_data
*hist_data
,
4752 struct action_data
*data
);
4754 static int track_data_create(struct hist_trigger_data
*hist_data
,
4755 struct action_data
*data
)
4757 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
4758 struct trace_event_file
*file
= hist_data
->event_file
;
4759 struct trace_array
*tr
= file
->tr
;
4760 char *track_data_var_str
;
4763 track_data_var_str
= data
->track_data
.var_str
;
4764 if (track_data_var_str
[0] != '$') {
4765 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
4768 track_data_var_str
++;
4770 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
4772 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
4776 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
4780 data
->track_data
.var_ref
= ref_field
;
4782 if (data
->handler
== HANDLER_ONMAX
)
4783 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
4784 if (IS_ERR(track_var
)) {
4785 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
4786 ret
= PTR_ERR(track_var
);
4790 if (data
->handler
== HANDLER_ONCHANGE
)
4791 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
4792 if (IS_ERR(track_var
)) {
4793 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
4794 ret
= PTR_ERR(track_var
);
4797 data
->track_data
.track_var
= track_var
;
4799 ret
= action_create(hist_data
, data
);
4804 static int parse_action_params(struct trace_array
*tr
, char *params
,
4805 struct action_data
*data
)
4807 char *param
, *saved_param
;
4808 bool first_param
= true;
4812 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
4813 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
4817 param
= strsep(¶ms
, ",");
4819 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
4824 param
= strstrip(param
);
4825 if (strlen(param
) < 2) {
4826 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
4831 saved_param
= kstrdup(param
, GFP_KERNEL
);
4837 if (first_param
&& data
->use_trace_keyword
) {
4838 data
->synth_event_name
= saved_param
;
4839 first_param
= false;
4842 first_param
= false;
4844 data
->params
[data
->n_params
++] = saved_param
;
4850 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
4851 enum handler_id handler
)
4858 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
4863 action_name
= strsep(&str
, "(");
4864 if (!action_name
|| !str
) {
4865 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
4870 if (str_has_prefix(action_name
, "save")) {
4871 char *params
= strsep(&str
, ")");
4874 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
4879 ret
= parse_action_params(tr
, params
, data
);
4883 if (handler
== HANDLER_ONMAX
)
4884 data
->track_data
.check_val
= check_track_val_max
;
4885 else if (handler
== HANDLER_ONCHANGE
)
4886 data
->track_data
.check_val
= check_track_val_changed
;
4888 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
4893 data
->track_data
.save_data
= save_track_data_vars
;
4894 data
->fn
= ontrack_action
;
4895 data
->action
= ACTION_SAVE
;
4896 } else if (str_has_prefix(action_name
, "snapshot")) {
4897 char *params
= strsep(&str
, ")");
4900 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
4905 if (handler
== HANDLER_ONMAX
)
4906 data
->track_data
.check_val
= check_track_val_max
;
4907 else if (handler
== HANDLER_ONCHANGE
)
4908 data
->track_data
.check_val
= check_track_val_changed
;
4910 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
4915 data
->track_data
.save_data
= save_track_data_snapshot
;
4916 data
->fn
= ontrack_action
;
4917 data
->action
= ACTION_SNAPSHOT
;
4919 char *params
= strsep(&str
, ")");
4921 if (str_has_prefix(action_name
, "trace"))
4922 data
->use_trace_keyword
= true;
4925 ret
= parse_action_params(tr
, params
, data
);
4930 if (handler
== HANDLER_ONMAX
)
4931 data
->track_data
.check_val
= check_track_val_max
;
4932 else if (handler
== HANDLER_ONCHANGE
)
4933 data
->track_data
.check_val
= check_track_val_changed
;
4935 if (handler
!= HANDLER_ONMATCH
) {
4936 data
->track_data
.save_data
= action_trace
;
4937 data
->fn
= ontrack_action
;
4939 data
->fn
= action_trace
;
4941 data
->action
= ACTION_TRACE
;
4944 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
4945 if (!data
->action_name
) {
4950 data
->handler
= handler
;
4955 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
4956 char *str
, enum handler_id handler
)
4958 struct action_data
*data
;
4962 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
4964 return ERR_PTR(-ENOMEM
);
4966 var_str
= strsep(&str
, ")");
4967 if (!var_str
|| !str
) {
4972 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
4973 if (!data
->track_data
.var_str
) {
4978 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
4984 track_data_destroy(hist_data
, data
);
4985 data
= ERR_PTR(ret
);
4989 static void onmatch_destroy(struct action_data
*data
)
4991 kfree(data
->match_data
.event
);
4992 kfree(data
->match_data
.event_system
);
4994 action_data_destroy(data
);
4997 static void destroy_field_var(struct field_var
*field_var
)
5002 destroy_hist_field(field_var
->var
, 0);
5003 destroy_hist_field(field_var
->val
, 0);
5008 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
5012 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
5013 destroy_field_var(hist_data
->field_vars
[i
]);
5016 static void save_field_var(struct hist_trigger_data
*hist_data
,
5017 struct field_var
*field_var
)
5019 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
5021 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
5022 hist_data
->n_field_var_str
++;
5026 static int check_synth_field(struct synth_event
*event
,
5027 struct hist_field
*hist_field
,
5028 unsigned int field_pos
)
5030 struct synth_field
*field
;
5032 if (field_pos
>= event
->n_fields
)
5035 field
= event
->fields
[field_pos
];
5037 if (strcmp(field
->type
, hist_field
->type
) != 0) {
5038 if (field
->size
!= hist_field
->size
||
5039 field
->is_signed
!= hist_field
->is_signed
)
5046 static struct hist_field
*
5047 trace_action_find_var(struct hist_trigger_data
*hist_data
,
5048 struct action_data
*data
,
5049 char *system
, char *event
, char *var
)
5051 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5052 struct hist_field
*hist_field
;
5054 var
++; /* skip '$' */
5056 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
5058 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
5059 system
= data
->match_data
.event_system
;
5060 event
= data
->match_data
.event
;
5063 hist_field
= find_event_var(hist_data
, system
, event
, var
);
5067 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
5072 static struct hist_field
*
5073 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
5074 struct action_data
*data
, char *system
,
5075 char *event
, char *var
)
5077 struct hist_field
*hist_field
= NULL
;
5078 struct field_var
*field_var
;
5081 * First try to create a field var on the target event (the
5082 * currently being defined). This will create a variable for
5083 * unqualified fields on the target event, or if qualified,
5084 * target fields that have qualified names matching the target.
5086 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
5088 if (field_var
&& !IS_ERR(field_var
)) {
5089 save_field_var(hist_data
, field_var
);
5090 hist_field
= field_var
->var
;
5094 * If no explicit system.event is specfied, default to
5095 * looking for fields on the onmatch(system.event.xxx)
5098 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
5099 system
= data
->match_data
.event_system
;
5100 event
= data
->match_data
.event
;
5104 * At this point, we're looking at a field on another
5105 * event. Because we can't modify a hist trigger on
5106 * another event to add a variable for a field, we need
5107 * to create a new trigger on that event and create the
5108 * variable at the same time.
5110 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
5111 if (IS_ERR(hist_field
))
5117 destroy_field_var(field_var
);
5122 static int trace_action_create(struct hist_trigger_data
*hist_data
,
5123 struct action_data
*data
)
5125 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5126 char *event_name
, *param
, *system
= NULL
;
5127 struct hist_field
*hist_field
, *var_ref
;
5129 unsigned int field_pos
= 0;
5130 struct synth_event
*event
;
5131 char *synth_event_name
;
5132 int var_ref_idx
, ret
= 0;
5134 lockdep_assert_held(&event_mutex
);
5136 if (data
->use_trace_keyword
)
5137 synth_event_name
= data
->synth_event_name
;
5139 synth_event_name
= data
->action_name
;
5141 event
= find_synth_event(synth_event_name
);
5143 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
5149 for (i
= 0; i
< data
->n_params
; i
++) {
5152 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
5158 system
= strsep(¶m
, ".");
5160 param
= (char *)system
;
5161 system
= event_name
= NULL
;
5163 event_name
= strsep(¶m
, ".");
5171 if (param
[0] == '$')
5172 hist_field
= trace_action_find_var(hist_data
, data
,
5176 hist_field
= trace_action_create_field_var(hist_data
,
5188 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
5189 var_ref
= create_var_ref(hist_data
, hist_field
,
5190 system
, event_name
);
5197 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
5198 if (WARN_ON(var_ref_idx
< 0)) {
5203 data
->var_ref_idx
[i
] = var_ref_idx
;
5210 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
5216 if (field_pos
!= event
->n_fields
) {
5217 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
5222 data
->synth_event
= event
;
5231 static int action_create(struct hist_trigger_data
*hist_data
,
5232 struct action_data
*data
)
5234 struct trace_event_file
*file
= hist_data
->event_file
;
5235 struct trace_array
*tr
= file
->tr
;
5236 struct track_data
*track_data
;
5237 struct field_var
*field_var
;
5242 if (data
->action
== ACTION_TRACE
)
5243 return trace_action_create(hist_data
, data
);
5245 if (data
->action
== ACTION_SNAPSHOT
) {
5246 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
5247 if (IS_ERR(track_data
)) {
5248 ret
= PTR_ERR(track_data
);
5252 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
5253 cond_snapshot_update
);
5255 track_data_free(track_data
);
5260 if (data
->action
== ACTION_SAVE
) {
5261 if (hist_data
->n_save_vars
) {
5263 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
5267 for (i
= 0; i
< data
->n_params
; i
++) {
5268 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
5274 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
5275 if (IS_ERR(field_var
)) {
5276 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
5278 ret
= PTR_ERR(field_var
);
5283 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
5284 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
5285 hist_data
->n_save_var_str
++;
/*
 * onmatch() creation is entirely handled by the generic action
 * creation path.
 */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
5299 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
5301 char *match_event
, *match_event_system
;
5302 struct action_data
*data
;
5305 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
5307 return ERR_PTR(-ENOMEM
);
5309 match_event
= strsep(&str
, ")");
5310 if (!match_event
|| !str
) {
5311 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
5315 match_event_system
= strsep(&match_event
, ".");
5317 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
5321 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
5322 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
5326 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
5327 if (!data
->match_data
.event
) {
5332 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
5333 if (!data
->match_data
.event_system
) {
5338 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
5344 onmatch_destroy(data
);
5345 data
= ERR_PTR(ret
);
5349 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
5351 hist_data
->fields
[HITCOUNT_IDX
] =
5352 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
5353 if (!hist_data
->fields
[HITCOUNT_IDX
])
5356 hist_data
->n_vals
++;
5357 hist_data
->n_fields
++;
5359 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
5365 static int __create_val_field(struct hist_trigger_data
*hist_data
,
5366 unsigned int val_idx
,
5367 struct trace_event_file
*file
,
5368 char *var_name
, char *field_str
,
5369 unsigned long flags
)
5371 struct hist_field
*hist_field
;
5374 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
5375 if (IS_ERR(hist_field
)) {
5376 ret
= PTR_ERR(hist_field
);
5380 hist_data
->fields
[val_idx
] = hist_field
;
5382 ++hist_data
->n_vals
;
5383 ++hist_data
->n_fields
;
5385 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
5391 static int create_val_field(struct hist_trigger_data
*hist_data
,
5392 unsigned int val_idx
,
5393 struct trace_event_file
*file
,
5396 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
5399 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
5402 static int create_var_field(struct hist_trigger_data
*hist_data
,
5403 unsigned int val_idx
,
5404 struct trace_event_file
*file
,
5405 char *var_name
, char *expr_str
)
5407 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5408 unsigned long flags
= 0;
5410 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
5413 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
5414 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
5418 flags
|= HIST_FIELD_FL_VAR
;
5419 hist_data
->n_vars
++;
5420 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
5423 return __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
5426 static int create_val_fields(struct hist_trigger_data
*hist_data
,
5427 struct trace_event_file
*file
)
5429 char *fields_str
, *field_str
;
5430 unsigned int i
, j
= 1;
5433 ret
= create_hitcount_val(hist_data
);
5437 fields_str
= hist_data
->attrs
->vals_str
;
5441 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
5442 j
< TRACING_MAP_VALS_MAX
; i
++) {
5443 field_str
= strsep(&fields_str
, ",");
5447 if (strcmp(field_str
, "hitcount") == 0)
5450 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
5455 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
5461 static int create_key_field(struct hist_trigger_data
*hist_data
,
5462 unsigned int key_idx
,
5463 unsigned int key_offset
,
5464 struct trace_event_file
*file
,
5467 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5468 struct hist_field
*hist_field
= NULL
;
5469 unsigned long flags
= 0;
5470 unsigned int key_size
;
5473 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
5476 flags
|= HIST_FIELD_FL_KEY
;
5478 if (strcmp(field_str
, "stacktrace") == 0) {
5479 flags
|= HIST_FIELD_FL_STACKTRACE
;
5480 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
5481 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
5483 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
5485 if (IS_ERR(hist_field
)) {
5486 ret
= PTR_ERR(hist_field
);
5490 if (field_has_hist_vars(hist_field
, 0)) {
5491 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
5492 destroy_hist_field(hist_field
, 0);
5497 key_size
= hist_field
->size
;
5500 hist_data
->fields
[key_idx
] = hist_field
;
5502 key_size
= ALIGN(key_size
, sizeof(u64
));
5503 hist_data
->fields
[key_idx
]->size
= key_size
;
5504 hist_data
->fields
[key_idx
]->offset
= key_offset
;
5506 hist_data
->key_size
+= key_size
;
5508 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
5513 hist_data
->n_keys
++;
5514 hist_data
->n_fields
++;
5516 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
5524 static int create_key_fields(struct hist_trigger_data
*hist_data
,
5525 struct trace_event_file
*file
)
5527 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
5528 char *fields_str
, *field_str
;
5531 fields_str
= hist_data
->attrs
->keys_str
;
5535 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
5536 field_str
= strsep(&fields_str
, ",");
5539 ret
= create_key_field(hist_data
, i
, key_offset
,
5554 static int create_var_fields(struct hist_trigger_data
*hist_data
,
5555 struct trace_event_file
*file
)
5557 unsigned int i
, j
= hist_data
->n_vals
;
5560 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
5562 for (i
= 0; i
< n_vars
; i
++) {
5563 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
5564 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
5566 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
5574 static void free_var_defs(struct hist_trigger_data
*hist_data
)
5578 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
5579 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
5580 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
5583 hist_data
->attrs
->var_defs
.n_vars
= 0;
5586 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
5588 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5589 char *s
, *str
, *var_name
, *field_str
;
5590 unsigned int i
, j
, n_vars
= 0;
5593 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
5594 str
= hist_data
->attrs
->assignment_str
[i
];
5595 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
5596 field_str
= strsep(&str
, ",");
5600 var_name
= strsep(&field_str
, "=");
5601 if (!var_name
|| !field_str
) {
5602 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
5608 if (n_vars
== TRACING_MAP_VARS_MAX
) {
5609 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
5614 s
= kstrdup(var_name
, GFP_KERNEL
);
5619 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
5621 s
= kstrdup(field_str
, GFP_KERNEL
);
5623 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
5627 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
5629 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
5635 free_var_defs(hist_data
);
/*
 * Build all histogram fields in order: parse variable definitions,
 * then create value fields, variable fields, and key fields.  The
 * temporary var-def strings are freed on both success and failure.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
 out:
	free_var_defs(hist_data);

	return ret;
}
5666 static int is_descending(struct trace_array
*tr
, const char *str
)
5671 if (strcmp(str
, "descending") == 0)
5674 if (strcmp(str
, "ascending") == 0)
5677 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
5682 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
5684 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5685 char *fields_str
= hist_data
->attrs
->sort_key_str
;
5686 struct tracing_map_sort_key
*sort_key
;
5687 int descending
, ret
= 0;
5688 unsigned int i
, j
, k
;
5690 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
5695 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
5696 struct hist_field
*hist_field
;
5697 char *field_str
, *field_name
;
5698 const char *test_name
;
5700 sort_key
= &hist_data
->sort_keys
[i
];
5702 field_str
= strsep(&fields_str
, ",");
5708 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
5712 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
5713 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
5718 field_name
= strsep(&field_str
, ".");
5719 if (!field_name
|| !*field_name
) {
5721 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
5725 if (strcmp(field_name
, "hitcount") == 0) {
5726 descending
= is_descending(tr
, field_str
);
5727 if (descending
< 0) {
5731 sort_key
->descending
= descending
;
5735 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
5738 hist_field
= hist_data
->fields
[j
];
5739 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
5744 test_name
= hist_field_name(hist_field
, 0);
5746 if (strcmp(field_name
, test_name
) == 0) {
5747 sort_key
->field_idx
= idx
;
5748 descending
= is_descending(tr
, field_str
);
5749 if (descending
< 0) {
5753 sort_key
->descending
= descending
;
5757 if (j
== hist_data
->n_fields
) {
5759 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
5764 hist_data
->n_sort_keys
= i
;
5769 static void destroy_actions(struct hist_trigger_data
*hist_data
)
5773 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5774 struct action_data
*data
= hist_data
->actions
[i
];
5776 if (data
->handler
== HANDLER_ONMATCH
)
5777 onmatch_destroy(data
);
5778 else if (data
->handler
== HANDLER_ONMAX
||
5779 data
->handler
== HANDLER_ONCHANGE
)
5780 track_data_destroy(hist_data
, data
);
5786 static int parse_actions(struct hist_trigger_data
*hist_data
)
5788 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5789 struct action_data
*data
;
5795 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
5796 str
= hist_data
->attrs
->action_str
[i
];
5798 if ((len
= str_has_prefix(str
, "onmatch("))) {
5799 char *action_str
= str
+ len
;
5801 data
= onmatch_parse(tr
, action_str
);
5803 ret
= PTR_ERR(data
);
5806 } else if ((len
= str_has_prefix(str
, "onmax("))) {
5807 char *action_str
= str
+ len
;
5809 data
= track_data_parse(hist_data
, action_str
,
5812 ret
= PTR_ERR(data
);
5815 } else if ((len
= str_has_prefix(str
, "onchange("))) {
5816 char *action_str
= str
+ len
;
5818 data
= track_data_parse(hist_data
, action_str
,
5821 ret
= PTR_ERR(data
);
5829 hist_data
->actions
[hist_data
->n_actions
++] = data
;
5835 static int create_actions(struct hist_trigger_data
*hist_data
)
5837 struct action_data
*data
;
5841 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
5842 data
= hist_data
->actions
[i
];
5844 if (data
->handler
== HANDLER_ONMATCH
) {
5845 ret
= onmatch_create(hist_data
, data
);
5848 } else if (data
->handler
== HANDLER_ONMAX
||
5849 data
->handler
== HANDLER_ONCHANGE
) {
5850 ret
= track_data_create(hist_data
, data
);
5862 static void print_actions(struct seq_file
*m
,
5863 struct hist_trigger_data
*hist_data
,
5864 struct tracing_map_elt
*elt
)
5868 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5869 struct action_data
*data
= hist_data
->actions
[i
];
5871 if (data
->action
== ACTION_SNAPSHOT
)
5874 if (data
->handler
== HANDLER_ONMAX
||
5875 data
->handler
== HANDLER_ONCHANGE
)
5876 track_data_print(m
, hist_data
, elt
, data
);
5880 static void print_action_spec(struct seq_file
*m
,
5881 struct hist_trigger_data
*hist_data
,
5882 struct action_data
*data
)
5886 if (data
->action
== ACTION_SAVE
) {
5887 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
5888 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
5889 if (i
< hist_data
->n_save_vars
- 1)
5892 } else if (data
->action
== ACTION_TRACE
) {
5893 if (data
->use_trace_keyword
)
5894 seq_printf(m
, "%s", data
->synth_event_name
);
5895 for (i
= 0; i
< data
->n_params
; i
++) {
5896 if (i
|| data
->use_trace_keyword
)
5898 seq_printf(m
, "%s", data
->params
[i
]);
5903 static void print_track_data_spec(struct seq_file
*m
,
5904 struct hist_trigger_data
*hist_data
,
5905 struct action_data
*data
)
5907 if (data
->handler
== HANDLER_ONMAX
)
5908 seq_puts(m
, ":onmax(");
5909 else if (data
->handler
== HANDLER_ONCHANGE
)
5910 seq_puts(m
, ":onchange(");
5911 seq_printf(m
, "%s", data
->track_data
.var_str
);
5912 seq_printf(m
, ").%s(", data
->action_name
);
5914 print_action_spec(m
, hist_data
, data
);
5919 static void print_onmatch_spec(struct seq_file
*m
,
5920 struct hist_trigger_data
*hist_data
,
5921 struct action_data
*data
)
5923 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
5924 data
->match_data
.event
);
5926 seq_printf(m
, "%s(", data
->action_name
);
5928 print_action_spec(m
, hist_data
, data
);
5933 static bool actions_match(struct hist_trigger_data
*hist_data
,
5934 struct hist_trigger_data
*hist_data_test
)
5938 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
5941 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5942 struct action_data
*data
= hist_data
->actions
[i
];
5943 struct action_data
*data_test
= hist_data_test
->actions
[i
];
5944 char *action_name
, *action_name_test
;
5946 if (data
->handler
!= data_test
->handler
)
5948 if (data
->action
!= data_test
->action
)
5951 if (data
->n_params
!= data_test
->n_params
)
5954 for (j
= 0; j
< data
->n_params
; j
++) {
5955 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
5959 if (data
->use_trace_keyword
)
5960 action_name
= data
->synth_event_name
;
5962 action_name
= data
->action_name
;
5964 if (data_test
->use_trace_keyword
)
5965 action_name_test
= data_test
->synth_event_name
;
5967 action_name_test
= data_test
->action_name
;
5969 if (strcmp(action_name
, action_name_test
) != 0)
5972 if (data
->handler
== HANDLER_ONMATCH
) {
5973 if (strcmp(data
->match_data
.event_system
,
5974 data_test
->match_data
.event_system
) != 0)
5976 if (strcmp(data
->match_data
.event
,
5977 data_test
->match_data
.event
) != 0)
5979 } else if (data
->handler
== HANDLER_ONMAX
||
5980 data
->handler
== HANDLER_ONCHANGE
) {
5981 if (strcmp(data
->track_data
.var_str
,
5982 data_test
->track_data
.var_str
) != 0)
5991 static void print_actions_spec(struct seq_file
*m
,
5992 struct hist_trigger_data
*hist_data
)
5996 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5997 struct action_data
*data
= hist_data
->actions
[i
];
5999 if (data
->handler
== HANDLER_ONMATCH
)
6000 print_onmatch_spec(m
, hist_data
, data
);
6001 else if (data
->handler
== HANDLER_ONMAX
||
6002 data
->handler
== HANDLER_ONCHANGE
)
6003 print_track_data_spec(m
, hist_data
, data
);
6007 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
6011 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6012 kfree(hist_data
->field_var_hists
[i
]->cmd
);
6013 kfree(hist_data
->field_var_hists
[i
]);
6017 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
6022 destroy_hist_trigger_attrs(hist_data
->attrs
);
6023 destroy_hist_fields(hist_data
);
6024 tracing_map_destroy(hist_data
->map
);
6026 destroy_actions(hist_data
);
6027 destroy_field_vars(hist_data
);
6028 destroy_field_var_hists(hist_data
);
6033 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
6035 struct tracing_map
*map
= hist_data
->map
;
6036 struct ftrace_event_field
*field
;
6037 struct hist_field
*hist_field
;
6040 for_each_hist_field(i
, hist_data
) {
6041 hist_field
= hist_data
->fields
[i
];
6042 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
6043 tracing_map_cmp_fn_t cmp_fn
;
6045 field
= hist_field
->field
;
6047 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
6048 cmp_fn
= tracing_map_cmp_none
;
6050 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
6051 hist_field
->is_signed
);
6052 else if (is_string_field(field
))
6053 cmp_fn
= tracing_map_cmp_string
;
6055 cmp_fn
= tracing_map_cmp_num(field
->size
,
6057 idx
= tracing_map_add_key_field(map
,
6060 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
6061 idx
= tracing_map_add_sum_field(map
);
6066 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6067 idx
= tracing_map_add_var(map
);
6070 hist_field
->var
.idx
= idx
;
6071 hist_field
->var
.hist_data
= hist_data
;
6078 static struct hist_trigger_data
*
6079 create_hist_data(unsigned int map_bits
,
6080 struct hist_trigger_attrs
*attrs
,
6081 struct trace_event_file
*file
,
6084 const struct tracing_map_ops
*map_ops
= NULL
;
6085 struct hist_trigger_data
*hist_data
;
6088 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
6090 return ERR_PTR(-ENOMEM
);
6092 hist_data
->attrs
= attrs
;
6093 hist_data
->remove
= remove
;
6094 hist_data
->event_file
= file
;
6096 ret
= parse_actions(hist_data
);
6100 ret
= create_hist_fields(hist_data
, file
);
6104 ret
= create_sort_keys(hist_data
);
6108 map_ops
= &hist_trigger_elt_data_ops
;
6110 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
6111 map_ops
, hist_data
);
6112 if (IS_ERR(hist_data
->map
)) {
6113 ret
= PTR_ERR(hist_data
->map
);
6114 hist_data
->map
= NULL
;
6118 ret
= create_tracing_map_fields(hist_data
);
6124 hist_data
->attrs
= NULL
;
6126 destroy_hist_data(hist_data
);
6128 hist_data
= ERR_PTR(ret
);
6133 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
6134 struct tracing_map_elt
*elt
, void *rec
,
6135 struct ring_buffer_event
*rbe
,
6138 struct hist_elt_data
*elt_data
;
6139 struct hist_field
*hist_field
;
6140 unsigned int i
, var_idx
;
6143 elt_data
= elt
->private_data
;
6144 elt_data
->var_ref_vals
= var_ref_vals
;
6146 for_each_hist_val_field(i
, hist_data
) {
6147 hist_field
= hist_data
->fields
[i
];
6148 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
6149 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6150 var_idx
= hist_field
->var
.idx
;
6151 tracing_map_set_var(elt
, var_idx
, hist_val
);
6154 tracing_map_update_sum(elt
, i
, hist_val
);
6157 for_each_hist_key_field(i
, hist_data
) {
6158 hist_field
= hist_data
->fields
[i
];
6159 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6160 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
6161 var_idx
= hist_field
->var
.idx
;
6162 tracing_map_set_var(elt
, var_idx
, hist_val
);
6166 update_field_vars(hist_data
, elt
, rbe
, rec
);
6169 static inline void add_to_key(char *compound_key
, void *key
,
6170 struct hist_field
*key_field
, void *rec
)
6172 size_t size
= key_field
->size
;
6174 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6175 struct ftrace_event_field
*field
;
6177 field
= key_field
->field
;
6178 if (field
->filter_type
== FILTER_DYN_STRING
)
6179 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
6180 else if (field
->filter_type
== FILTER_PTR_STRING
)
6182 else if (field
->filter_type
== FILTER_STATIC_STRING
)
6185 /* ensure NULL-termination */
6186 if (size
> key_field
->size
- 1)
6187 size
= key_field
->size
- 1;
6189 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
6191 memcpy(compound_key
+ key_field
->offset
, key
, size
);
6195 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
6196 struct tracing_map_elt
*elt
, void *rec
,
6197 struct ring_buffer_event
*rbe
, void *key
,
6200 struct action_data
*data
;
6203 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
6204 data
= hist_data
->actions
[i
];
6205 data
->fn(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
6209 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
,
6210 struct ring_buffer_event
*rbe
)
6212 struct hist_trigger_data
*hist_data
= data
->private_data
;
6213 bool use_compound_key
= (hist_data
->n_keys
> 1);
6214 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
6215 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
6216 char compound_key
[HIST_KEY_SIZE_MAX
];
6217 struct tracing_map_elt
*elt
= NULL
;
6218 struct hist_field
*key_field
;
6223 memset(compound_key
, 0, hist_data
->key_size
);
6225 for_each_hist_key_field(i
, hist_data
) {
6226 key_field
= hist_data
->fields
[i
];
6228 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6229 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
6230 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
6231 HIST_STACKTRACE_SKIP
);
6234 field_contents
= key_field
->fn(key_field
, elt
, rbe
, rec
);
6235 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6236 key
= (void *)(unsigned long)field_contents
;
6237 use_compound_key
= true;
6239 key
= (void *)&field_contents
;
6242 if (use_compound_key
)
6243 add_to_key(compound_key
, key
, key_field
, rec
);
6246 if (use_compound_key
)
6249 if (hist_data
->n_var_refs
&&
6250 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
6253 elt
= tracing_map_insert(hist_data
->map
, key
);
6257 hist_trigger_elt_update(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
6259 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
6260 hist_trigger_actions(hist_data
, elt
, rec
, rbe
, key
, var_ref_vals
);
6263 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
6264 unsigned long *stacktrace_entries
,
6265 unsigned int max_entries
)
6267 char str
[KSYM_SYMBOL_LEN
];
6268 unsigned int spaces
= 8;
6271 for (i
= 0; i
< max_entries
; i
++) {
6272 if (!stacktrace_entries
[i
])
6275 seq_printf(m
, "%*c", 1 + spaces
, ' ');
6276 sprint_symbol(str
, stacktrace_entries
[i
]);
6277 seq_printf(m
, "%s\n", str
);
6281 static void hist_trigger_print_key(struct seq_file
*m
,
6282 struct hist_trigger_data
*hist_data
,
6284 struct tracing_map_elt
*elt
)
6286 struct hist_field
*key_field
;
6287 char str
[KSYM_SYMBOL_LEN
];
6288 bool multiline
= false;
6289 const char *field_name
;
6295 for_each_hist_key_field(i
, hist_data
) {
6296 key_field
= hist_data
->fields
[i
];
6298 if (i
> hist_data
->n_vals
)
6301 field_name
= hist_field_name(key_field
, 0);
6303 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
6304 uval
= *(u64
*)(key
+ key_field
->offset
);
6305 seq_printf(m
, "%s: %llx", field_name
, uval
);
6306 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
6307 uval
= *(u64
*)(key
+ key_field
->offset
);
6308 sprint_symbol_no_offset(str
, uval
);
6309 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
6311 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
6312 uval
= *(u64
*)(key
+ key_field
->offset
);
6313 sprint_symbol(str
, uval
);
6314 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
6316 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
6317 struct hist_elt_data
*elt_data
= elt
->private_data
;
6320 if (WARN_ON_ONCE(!elt_data
))
6323 comm
= elt_data
->comm
;
6325 uval
= *(u64
*)(key
+ key_field
->offset
);
6326 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
6328 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
6329 const char *syscall_name
;
6331 uval
= *(u64
*)(key
+ key_field
->offset
);
6332 syscall_name
= get_syscall_name(uval
);
6334 syscall_name
= "unknown_syscall";
6336 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
6337 syscall_name
, uval
);
6338 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6339 seq_puts(m
, "stacktrace:\n");
6340 hist_trigger_stacktrace_print(m
,
6341 key
+ key_field
->offset
,
6342 HIST_STACKTRACE_DEPTH
);
6344 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
6345 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
6346 *(u64
*)(key
+ key_field
->offset
));
6347 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6348 seq_printf(m
, "%s: %-50s", field_name
,
6349 (char *)(key
+ key_field
->offset
));
6351 uval
= *(u64
*)(key
+ key_field
->offset
);
6352 seq_printf(m
, "%s: %10llu", field_name
, uval
);
6362 static void hist_trigger_entry_print(struct seq_file
*m
,
6363 struct hist_trigger_data
*hist_data
,
6365 struct tracing_map_elt
*elt
)
6367 const char *field_name
;
6370 hist_trigger_print_key(m
, hist_data
, key
, elt
);
6372 seq_printf(m
, " hitcount: %10llu",
6373 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
6375 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
6376 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
6378 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
6379 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
6382 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
6383 seq_printf(m
, " %s: %10llx", field_name
,
6384 tracing_map_read_sum(elt
, i
));
6386 seq_printf(m
, " %s: %10llu", field_name
,
6387 tracing_map_read_sum(elt
, i
));
6391 print_actions(m
, hist_data
, elt
);
6396 static int print_entries(struct seq_file
*m
,
6397 struct hist_trigger_data
*hist_data
)
6399 struct tracing_map_sort_entry
**sort_entries
= NULL
;
6400 struct tracing_map
*map
= hist_data
->map
;
6403 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
6404 hist_data
->n_sort_keys
,
6409 for (i
= 0; i
< n_entries
; i
++)
6410 hist_trigger_entry_print(m
, hist_data
,
6411 sort_entries
[i
]->key
,
6412 sort_entries
[i
]->elt
);
6414 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
6419 static void hist_trigger_show(struct seq_file
*m
,
6420 struct event_trigger_data
*data
, int n
)
6422 struct hist_trigger_data
*hist_data
;
6426 seq_puts(m
, "\n\n");
6428 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
6429 data
->ops
->print(m
, data
->ops
, data
);
6430 seq_puts(m
, "#\n\n");
6432 hist_data
= data
->private_data
;
6433 n_entries
= print_entries(m
, hist_data
);
6437 track_data_snapshot_print(m
, hist_data
);
6439 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
6440 (u64
)atomic64_read(&hist_data
->map
->hits
),
6441 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
6444 static int hist_show(struct seq_file
*m
, void *v
)
6446 struct event_trigger_data
*data
;
6447 struct trace_event_file
*event_file
;
6450 mutex_lock(&event_mutex
);
6452 event_file
= event_file_data(m
->private);
6453 if (unlikely(!event_file
)) {
6458 list_for_each_entry(data
, &event_file
->triggers
, list
) {
6459 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
6460 hist_trigger_show(m
, data
, n
++);
6464 mutex_unlock(&event_mutex
);
6469 static int event_hist_open(struct inode
*inode
, struct file
*file
)
6473 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
6477 return single_open(file
, hist_show
, file
);
6480 const struct file_operations event_hist_fops
= {
6481 .open
= event_hist_open
,
6483 .llseek
= seq_lseek
,
6484 .release
= single_release
,
6487 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
6489 const char *field_name
= hist_field_name(hist_field
, 0);
6491 if (hist_field
->var
.name
)
6492 seq_printf(m
, "%s=", hist_field
->var
.name
);
6494 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
6496 else if (field_name
) {
6497 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
6498 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
6500 seq_printf(m
, "%s", field_name
);
6501 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
6502 seq_puts(m
, "common_timestamp");
6504 if (hist_field
->flags
) {
6505 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
6506 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
)) {
6507 const char *flags
= get_hist_field_flags(hist_field
);
6510 seq_printf(m
, ".%s", flags
);
6515 static int event_hist_trigger_print(struct seq_file
*m
,
6516 struct event_trigger_ops
*ops
,
6517 struct event_trigger_data
*data
)
6519 struct hist_trigger_data
*hist_data
= data
->private_data
;
6520 struct hist_field
*field
;
6521 bool have_var
= false;
6524 seq_puts(m
, "hist:");
6527 seq_printf(m
, "%s:", data
->name
);
6529 seq_puts(m
, "keys=");
6531 for_each_hist_key_field(i
, hist_data
) {
6532 field
= hist_data
->fields
[i
];
6534 if (i
> hist_data
->n_vals
)
6537 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
)
6538 seq_puts(m
, "stacktrace");
6540 hist_field_print(m
, field
);
6543 seq_puts(m
, ":vals=");
6545 for_each_hist_val_field(i
, hist_data
) {
6546 field
= hist_data
->fields
[i
];
6547 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6552 if (i
== HITCOUNT_IDX
)
6553 seq_puts(m
, "hitcount");
6556 hist_field_print(m
, field
);
6565 for_each_hist_val_field(i
, hist_data
) {
6566 field
= hist_data
->fields
[i
];
6568 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6571 hist_field_print(m
, field
);
6576 seq_puts(m
, ":sort=");
6578 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6579 struct tracing_map_sort_key
*sort_key
;
6580 unsigned int idx
, first_key_idx
;
6583 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
6585 sort_key
= &hist_data
->sort_keys
[i
];
6586 idx
= sort_key
->field_idx
;
6588 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
6594 if (idx
== HITCOUNT_IDX
)
6595 seq_puts(m
, "hitcount");
6597 if (idx
>= first_key_idx
)
6598 idx
+= hist_data
->n_vars
;
6599 hist_field_print(m
, hist_data
->fields
[idx
]);
6602 if (sort_key
->descending
)
6603 seq_puts(m
, ".descending");
6605 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
6606 if (hist_data
->enable_timestamps
)
6607 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
6609 print_actions_spec(m
, hist_data
);
6611 if (data
->filter_str
)
6612 seq_printf(m
, " if %s", data
->filter_str
);
6615 seq_puts(m
, " [paused]");
6617 seq_puts(m
, " [active]");
6624 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
6625 struct event_trigger_data
*data
)
6627 struct hist_trigger_data
*hist_data
= data
->private_data
;
6629 if (!data
->ref
&& hist_data
->attrs
->name
)
6630 save_named_trigger(hist_data
->attrs
->name
, data
);
6637 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
6639 struct trace_event_file
*file
;
6644 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6645 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
6646 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
6647 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
6648 "!hist", "hist", cmd
);
6652 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
6653 struct event_trigger_data
*data
)
6655 struct hist_trigger_data
*hist_data
= data
->private_data
;
6657 if (WARN_ON_ONCE(data
->ref
<= 0))
6663 del_named_trigger(data
);
6665 trigger_data_free(data
);
6667 remove_hist_vars(hist_data
);
6669 unregister_field_var_hists(hist_data
);
6671 destroy_hist_data(hist_data
);
6675 static struct event_trigger_ops event_hist_trigger_ops
= {
6676 .func
= event_hist_trigger
,
6677 .print
= event_hist_trigger_print
,
6678 .init
= event_hist_trigger_init
,
6679 .free
= event_hist_trigger_free
,
6682 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
6683 struct event_trigger_data
*data
)
6687 save_named_trigger(data
->named_data
->name
, data
);
6689 event_hist_trigger_init(ops
, data
->named_data
);
6694 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
6695 struct event_trigger_data
*data
)
6697 if (WARN_ON_ONCE(data
->ref
<= 0))
6700 event_hist_trigger_free(ops
, data
->named_data
);
6704 del_named_trigger(data
);
6705 trigger_data_free(data
);
6709 static struct event_trigger_ops event_hist_trigger_named_ops
= {
6710 .func
= event_hist_trigger
,
6711 .print
= event_hist_trigger_print
,
6712 .init
= event_hist_trigger_named_init
,
6713 .free
= event_hist_trigger_named_free
,
6716 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
6719 return &event_hist_trigger_ops
;
6722 static void hist_clear(struct event_trigger_data
*data
)
6724 struct hist_trigger_data
*hist_data
= data
->private_data
;
6727 pause_named_trigger(data
);
6729 tracepoint_synchronize_unregister();
6731 tracing_map_clear(hist_data
->map
);
6734 unpause_named_trigger(data
);
6737 static bool compatible_field(struct ftrace_event_field
*field
,
6738 struct ftrace_event_field
*test_field
)
6740 if (field
== test_field
)
6742 if (field
== NULL
|| test_field
== NULL
)
6744 if (strcmp(field
->name
, test_field
->name
) != 0)
6746 if (strcmp(field
->type
, test_field
->type
) != 0)
6748 if (field
->size
!= test_field
->size
)
6750 if (field
->is_signed
!= test_field
->is_signed
)
6756 static bool hist_trigger_match(struct event_trigger_data
*data
,
6757 struct event_trigger_data
*data_test
,
6758 struct event_trigger_data
*named_data
,
6761 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
6762 struct hist_trigger_data
*hist_data
, *hist_data_test
;
6763 struct hist_field
*key_field
, *key_field_test
;
6766 if (named_data
&& (named_data
!= data_test
) &&
6767 (named_data
!= data_test
->named_data
))
6770 if (!named_data
&& is_named_trigger(data_test
))
6773 hist_data
= data
->private_data
;
6774 hist_data_test
= data_test
->private_data
;
6776 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
6777 hist_data
->n_fields
!= hist_data_test
->n_fields
||
6778 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
6781 if (!ignore_filter
) {
6782 if ((data
->filter_str
&& !data_test
->filter_str
) ||
6783 (!data
->filter_str
&& data_test
->filter_str
))
6787 for_each_hist_field(i
, hist_data
) {
6788 key_field
= hist_data
->fields
[i
];
6789 key_field_test
= hist_data_test
->fields
[i
];
6791 if (key_field
->flags
!= key_field_test
->flags
)
6793 if (!compatible_field(key_field
->field
, key_field_test
->field
))
6795 if (key_field
->offset
!= key_field_test
->offset
)
6797 if (key_field
->size
!= key_field_test
->size
)
6799 if (key_field
->is_signed
!= key_field_test
->is_signed
)
6801 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
6803 if (key_field
->var
.name
&&
6804 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
6808 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6809 sort_key
= &hist_data
->sort_keys
[i
];
6810 sort_key_test
= &hist_data_test
->sort_keys
[i
];
6812 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
6813 sort_key
->descending
!= sort_key_test
->descending
)
6817 if (!ignore_filter
&& data
->filter_str
&&
6818 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
6821 if (!actions_match(hist_data
, hist_data_test
))
6827 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
6828 struct event_trigger_data
*data
,
6829 struct trace_event_file
*file
)
6831 struct hist_trigger_data
*hist_data
= data
->private_data
;
6832 struct event_trigger_data
*test
, *named_data
= NULL
;
6833 struct trace_array
*tr
= file
->tr
;
6836 if (hist_data
->attrs
->name
) {
6837 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6839 if (!hist_trigger_match(data
, named_data
, named_data
,
6841 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
6848 if (hist_data
->attrs
->name
&& !named_data
)
6851 lockdep_assert_held(&event_mutex
);
6853 list_for_each_entry(test
, &file
->triggers
, list
) {
6854 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6855 if (!hist_trigger_match(data
, test
, named_data
, false))
6857 if (hist_data
->attrs
->pause
)
6858 test
->paused
= true;
6859 else if (hist_data
->attrs
->cont
)
6860 test
->paused
= false;
6861 else if (hist_data
->attrs
->clear
)
6864 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
6871 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
6872 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
6877 if (hist_data
->attrs
->pause
)
6878 data
->paused
= true;
6881 data
->private_data
= named_data
->private_data
;
6882 set_named_trigger_data(data
, named_data
);
6883 data
->ops
= &event_hist_trigger_named_ops
;
6886 if (data
->ops
->init
) {
6887 ret
= data
->ops
->init(data
->ops
, data
);
6892 if (hist_data
->enable_timestamps
) {
6893 char *clock
= hist_data
->attrs
->clock
;
6895 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
6897 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
6901 tracing_set_time_stamp_abs(file
->tr
, true);
6905 destroy_hist_data(hist_data
);
6912 static int hist_trigger_enable(struct event_trigger_data
*data
,
6913 struct trace_event_file
*file
)
6917 list_add_tail_rcu(&data
->list
, &file
->triggers
);
6919 update_cond_flag(file
);
6921 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
6922 list_del_rcu(&data
->list
);
6923 update_cond_flag(file
);
6930 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
6931 struct trace_event_file
*file
)
6933 struct hist_trigger_data
*hist_data
= data
->private_data
;
6934 struct event_trigger_data
*test
, *named_data
= NULL
;
6937 lockdep_assert_held(&event_mutex
);
6939 if (hist_data
->attrs
->name
)
6940 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6942 list_for_each_entry(test
, &file
->triggers
, list
) {
6943 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6944 if (hist_trigger_match(data
, test
, named_data
, false)) {
6954 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
6955 struct trace_event_file
*file
)
6957 struct hist_trigger_data
*hist_data
= data
->private_data
;
6958 struct event_trigger_data
*test
, *named_data
= NULL
;
6960 lockdep_assert_held(&event_mutex
);
6962 if (hist_data
->attrs
->name
)
6963 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6965 list_for_each_entry(test
, &file
->triggers
, list
) {
6966 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6967 if (!hist_trigger_match(data
, test
, named_data
, false))
6969 hist_data
= test
->private_data
;
6970 if (check_var_refs(hist_data
))
6979 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
6980 struct event_trigger_data
*data
,
6981 struct trace_event_file
*file
)
6983 struct hist_trigger_data
*hist_data
= data
->private_data
;
6984 struct event_trigger_data
*test
, *named_data
= NULL
;
6985 bool unregistered
= false;
6987 lockdep_assert_held(&event_mutex
);
6989 if (hist_data
->attrs
->name
)
6990 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6992 list_for_each_entry(test
, &file
->triggers
, list
) {
6993 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6994 if (!hist_trigger_match(data
, test
, named_data
, false))
6996 unregistered
= true;
6997 list_del_rcu(&test
->list
);
6998 trace_event_trigger_enable_disable(file
, 0);
6999 update_cond_flag(file
);
7004 if (unregistered
&& test
->ops
->free
)
7005 test
->ops
->free(test
->ops
, test
);
7007 if (hist_data
->enable_timestamps
) {
7008 if (!hist_data
->remove
|| unregistered
)
7009 tracing_set_time_stamp_abs(file
->tr
, false);
7013 static bool hist_file_check_refs(struct trace_event_file
*file
)
7015 struct hist_trigger_data
*hist_data
;
7016 struct event_trigger_data
*test
;
7018 lockdep_assert_held(&event_mutex
);
7020 list_for_each_entry(test
, &file
->triggers
, list
) {
7021 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
7022 hist_data
= test
->private_data
;
7023 if (check_var_refs(hist_data
))
7031 static void hist_unreg_all(struct trace_event_file
*file
)
7033 struct event_trigger_data
*test
, *n
;
7034 struct hist_trigger_data
*hist_data
;
7035 struct synth_event
*se
;
7036 const char *se_name
;
7038 lockdep_assert_held(&event_mutex
);
7040 if (hist_file_check_refs(file
))
7043 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
7044 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
7045 hist_data
= test
->private_data
;
7046 list_del_rcu(&test
->list
);
7047 trace_event_trigger_enable_disable(file
, 0);
7049 se_name
= trace_event_name(file
->event_call
);
7050 se
= find_synth_event(se_name
);
7054 update_cond_flag(file
);
7055 if (hist_data
->enable_timestamps
)
7056 tracing_set_time_stamp_abs(file
->tr
, false);
7057 if (test
->ops
->free
)
7058 test
->ops
->free(test
->ops
, test
);
7063 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
7064 struct trace_event_file
*file
,
7065 char *glob
, char *cmd
, char *param
)
7067 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
7068 struct event_trigger_data
*trigger_data
;
7069 struct hist_trigger_attrs
*attrs
;
7070 struct event_trigger_ops
*trigger_ops
;
7071 struct hist_trigger_data
*hist_data
;
7072 struct synth_event
*se
;
7073 const char *se_name
;
7074 bool remove
= false;
7078 lockdep_assert_held(&event_mutex
);
7080 if (glob
&& strlen(glob
)) {
7082 last_cmd_set(file
, param
);
7092 * separate the trigger from the filter (k:v [if filter])
7093 * allowing for whitespace in the trigger
7095 p
= trigger
= param
;
7097 p
= strstr(p
, "if");
7102 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
7106 if (p
>= param
+ strlen(param
) - (sizeof("if") - 1) - 1)
7108 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
7119 param
= strstrip(p
);
7120 trigger
= strstrip(trigger
);
7123 attrs
= parse_hist_trigger_attrs(file
->tr
, trigger
);
7125 return PTR_ERR(attrs
);
7127 if (attrs
->map_bits
)
7128 hist_trigger_bits
= attrs
->map_bits
;
7130 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
7131 if (IS_ERR(hist_data
)) {
7132 destroy_hist_trigger_attrs(attrs
);
7133 return PTR_ERR(hist_data
);
7136 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
7138 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
7139 if (!trigger_data
) {
7144 trigger_data
->count
= -1;
7145 trigger_data
->ops
= trigger_ops
;
7146 trigger_data
->cmd_ops
= cmd_ops
;
7148 INIT_LIST_HEAD(&trigger_data
->list
);
7149 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
7151 trigger_data
->private_data
= hist_data
;
7153 /* if param is non-empty, it's supposed to be a filter */
7154 if (param
&& cmd_ops
->set_filter
) {
7155 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
7161 if (!have_hist_trigger_match(trigger_data
, file
))
7164 if (hist_trigger_check_refs(trigger_data
, file
)) {
7169 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
7170 se_name
= trace_event_name(file
->event_call
);
7171 se
= find_synth_event(se_name
);
7178 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
7180 * The above returns on success the # of triggers registered,
7181 * but if it didn't register any it returns zero. Consider no
7182 * triggers registered a failure too.
7185 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
7191 if (get_named_trigger_data(trigger_data
))
7194 if (has_hist_vars(hist_data
))
7195 save_hist_vars(hist_data
);
7197 ret
= create_actions(hist_data
);
7201 ret
= tracing_map_init(hist_data
->map
);
7205 ret
= hist_trigger_enable(trigger_data
, file
);
7209 se_name
= trace_event_name(file
->event_call
);
7210 se
= find_synth_event(se_name
);
7213 /* Just return zero, not the number of registered triggers */
7221 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
7223 if (cmd_ops
->set_filter
)
7224 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
7226 remove_hist_vars(hist_data
);
7228 kfree(trigger_data
);
7230 destroy_hist_data(hist_data
);
7234 static struct event_command trigger_hist_cmd
= {
7236 .trigger_type
= ETT_EVENT_HIST
,
7237 .flags
= EVENT_CMD_FL_NEEDS_REC
,
7238 .func
= event_hist_trigger_func
,
7239 .reg
= hist_register_trigger
,
7240 .unreg
= hist_unregister_trigger
,
7241 .unreg_all
= hist_unreg_all
,
7242 .get_trigger_ops
= event_hist_get_trigger_ops
,
7243 .set_filter
= set_trigger_filter
,
7246 __init
int register_trigger_hist_cmd(void)
7250 ret
= register_event_command(&trigger_hist_cmd
);
7257 hist_enable_trigger(struct event_trigger_data
*data
, void *rec
,
7258 struct ring_buffer_event
*event
)
7260 struct enable_trigger_data
*enable_data
= data
->private_data
;
7261 struct event_trigger_data
*test
;
7263 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
,
7264 lockdep_is_held(&event_mutex
)) {
7265 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
7266 if (enable_data
->enable
)
7267 test
->paused
= false;
7269 test
->paused
= true;
7275 hist_enable_count_trigger(struct event_trigger_data
*data
, void *rec
,
7276 struct ring_buffer_event
*event
)
7281 if (data
->count
!= -1)
7284 hist_enable_trigger(data
, rec
, event
);
7287 static struct event_trigger_ops hist_enable_trigger_ops
= {
7288 .func
= hist_enable_trigger
,
7289 .print
= event_enable_trigger_print
,
7290 .init
= event_trigger_init
,
7291 .free
= event_enable_trigger_free
,
7294 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
7295 .func
= hist_enable_count_trigger
,
7296 .print
= event_enable_trigger_print
,
7297 .init
= event_trigger_init
,
7298 .free
= event_enable_trigger_free
,
7301 static struct event_trigger_ops hist_disable_trigger_ops
= {
7302 .func
= hist_enable_trigger
,
7303 .print
= event_enable_trigger_print
,
7304 .init
= event_trigger_init
,
7305 .free
= event_enable_trigger_free
,
7308 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
7309 .func
= hist_enable_count_trigger
,
7310 .print
= event_enable_trigger_print
,
7311 .init
= event_trigger_init
,
7312 .free
= event_enable_trigger_free
,
7315 static struct event_trigger_ops
*
7316 hist_enable_get_trigger_ops(char *cmd
, char *param
)
7318 struct event_trigger_ops
*ops
;
7321 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
7324 ops
= param
? &hist_enable_count_trigger_ops
:
7325 &hist_enable_trigger_ops
;
7327 ops
= param
? &hist_disable_count_trigger_ops
:
7328 &hist_disable_trigger_ops
;
7333 static void hist_enable_unreg_all(struct trace_event_file
*file
)
7335 struct event_trigger_data
*test
, *n
;
7337 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
7338 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
7339 list_del_rcu(&test
->list
);
7340 update_cond_flag(file
);
7341 trace_event_trigger_enable_disable(file
, 0);
7342 if (test
->ops
->free
)
7343 test
->ops
->free(test
->ops
, test
);
7348 static struct event_command trigger_hist_enable_cmd
= {
7349 .name
= ENABLE_HIST_STR
,
7350 .trigger_type
= ETT_HIST_ENABLE
,
7351 .func
= event_enable_trigger_func
,
7352 .reg
= event_enable_register_trigger
,
7353 .unreg
= event_enable_unregister_trigger
,
7354 .unreg_all
= hist_enable_unreg_all
,
7355 .get_trigger_ops
= hist_enable_get_trigger_ops
,
7356 .set_filter
= set_trigger_filter
,
7359 static struct event_command trigger_hist_disable_cmd
= {
7360 .name
= DISABLE_HIST_STR
,
7361 .trigger_type
= ETT_HIST_ENABLE
,
7362 .func
= event_enable_trigger_func
,
7363 .reg
= event_enable_register_trigger
,
7364 .unreg
= event_enable_unregister_trigger
,
7365 .unreg_all
= hist_enable_unreg_all
,
7366 .get_trigger_ops
= hist_enable_get_trigger_ops
,
7367 .set_filter
= set_trigger_filter
,
7370 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
7372 unregister_event_command(&trigger_hist_enable_cmd
);
7373 unregister_event_command(&trigger_hist_disable_cmd
);
7376 __init
int register_trigger_hist_enable_disable_cmds(void)
7380 ret
= register_event_command(&trigger_hist_enable_cmd
);
7381 if (WARN_ON(ret
< 0))
7383 ret
= register_event_command(&trigger_hist_disable_cmd
);
7384 if (WARN_ON(ret
< 0))
7385 unregister_trigger_hist_enable_disable_cmds();
7390 static __init
int trace_events_hist_init(void)
7392 struct dentry
*entry
= NULL
;
7393 struct dentry
*d_tracer
;
7396 err
= dyn_event_register(&synth_event_ops
);
7398 pr_warn("Could not register synth_event_ops\n");
7402 d_tracer
= tracing_init_dentry();
7403 if (IS_ERR(d_tracer
)) {
7404 err
= PTR_ERR(d_tracer
);
7408 entry
= tracefs_create_file("synthetic_events", 0644, d_tracer
,
7409 NULL
, &synth_events_fops
);
7417 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
7422 fs_initcall(trace_events_hist_init
);