// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>

#include "tracing_map.h"
#include "trace_synth.h"

#define ERRORS \
	C(NONE, "No error"), \
	C(DUPLICATE_VAR, "Variable already defined"), \
	C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
	C(TOO_MANY_VARS, "Too many variables defined"), \
	C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
	C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
	C(TRIGGER_EEXIST, "Hist trigger already exists"), \
	C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
	C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
	C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
	C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
	C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
	C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
	C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
	C(HIST_NOT_FOUND, "Matching event histogram not found"), \
	C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
	C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
	C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
	C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
	C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
	C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
	C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
	C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
	C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
	C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
	C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
	C(TOO_MANY_PARAMS, "Too many action params"), \
	C(PARAM_NOT_FOUND, "Couldn't find param"), \
	C(INVALID_PARAM, "Invalid action param"), \
	C(ACTION_NOT_FOUND, "No action found"), \
	C(NO_SAVE_PARAMS, "No params found for save()"), \
	C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
	C(ACTION_MISMATCH, "Handler doesn't support action"), \
	C(NO_CLOSING_PAREN, "No closing paren found"), \
	C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
	C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
	C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
	C(VAR_NOT_FOUND, "Couldn't find variable"), \
	C(FIELD_NOT_FOUND, "Couldn't find field"), \
	C(EMPTY_ASSIGNMENT, "Empty assignment"), \
	C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
	C(EMPTY_SORT_FIELD, "Empty sort field"), \
	C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
	C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
	C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \
	C(EXPECT_NUMBER, "Expecting numeric literal"), \
	C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \
	C(DIVISION_BY_ZERO, "Division by zero"), \
	C(NEED_NOHC_VAL, "Non-hitcount value is required for 'nohitcount'"),

#undef C
#define C(a, b) HIST_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b) b

static const char *err_text[] = { ERRORS };
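
/*
 * Illustrative note (not part of the original source): the C() X-macro
 * above is expanded twice so that the error enum and its text table
 * cannot drift apart. Conceptually, the first expansion yields
 * something like:
 *
 *	enum { HIST_ERR_NONE, HIST_ERR_DUPLICATE_VAR, ... };
 *
 * and the second yields:
 *
 *	static const char *err_text[] = { "No error",
 *					  "Variable already defined", ... };
 *
 * so err_text[HIST_ERR_x] is always the message for error x.
 */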

struct hist_field;

typedef u64 (*hist_field_fn_t) (struct hist_field *field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event);

#define HIST_FIELD_OPERANDS_MAX 2
#define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX 8
#define HIST_CONST_DIGITS_MAX 21
#define HIST_DIV_SHIFT 20 /* For optimizing division by constants */

enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
	FIELD_OP_DIV,
	FIELD_OP_MULT,
};

enum hist_field_fn {
	HIST_FIELD_FN_NOP,
	HIST_FIELD_FN_VAR_REF,
	HIST_FIELD_FN_COUNTER,
	HIST_FIELD_FN_CONST,
	HIST_FIELD_FN_LOG2,
	HIST_FIELD_FN_BUCKET,
	HIST_FIELD_FN_TIMESTAMP,
	HIST_FIELD_FN_CPU,
	HIST_FIELD_FN_COMM,
	HIST_FIELD_FN_STRING,
	HIST_FIELD_FN_DYNSTRING,
	HIST_FIELD_FN_RELDYNSTRING,
	HIST_FIELD_FN_PSTRING,
	HIST_FIELD_FN_S64,
	HIST_FIELD_FN_U64,
	HIST_FIELD_FN_S32,
	HIST_FIELD_FN_U32,
	HIST_FIELD_FN_S16,
	HIST_FIELD_FN_U16,
	HIST_FIELD_FN_S8,
	HIST_FIELD_FN_U8,
	HIST_FIELD_FN_UMINUS,
	HIST_FIELD_FN_MINUS,
	HIST_FIELD_FN_PLUS,
	HIST_FIELD_FN_DIV,
	HIST_FIELD_FN_MULT,
	HIST_FIELD_FN_DIV_POWER2,
	HIST_FIELD_FN_DIV_NOT_POWER2,
	HIST_FIELD_FN_DIV_MULT_SHIFT,
	HIST_FIELD_FN_EXECNAME,
	HIST_FIELD_FN_STACK,
};

/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set. A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data. The hist_var idx is the unique index assigned to the
 * variable by the hist trigger's tracing_map. The idx is what is
 * used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char *name;
	struct hist_trigger_data *hist_data;
	unsigned int idx;
};
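
/*
 * Illustrative note (not part of the original source): a hist_var comes
 * from an assignment in a hist trigger command. For example, a trigger
 * along the lines of
 *
 *	hist:keys=next_pid:ts0=common_timestamp.usecs
 *
 * (the sched_wakeup form used in the histogram documentation) defines a
 * variable named "ts0"; its hist_var.idx is the tracing_map slot handed
 * back for it, and later references such as $ts0 read that slot.
 */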

struct hist_field {
	struct ftrace_event_field *field;
	unsigned long flags;
	unsigned long buckets;
	const char *type;
	struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data *hist_data;
	enum hist_field_fn fn_num;
	unsigned int ref;
	unsigned int size;
	unsigned int offset;
	unsigned int is_signed;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var var;
	enum field_op_id operator;
	char *system;
	char *event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields. VAR
	 * fields contain the variable name in var.name.
	 */
	char *name;

	/*
	 * When a histogram trigger is hit, if it has any references
	 * to variables, the values of those variables are collected
	 * into a var_ref_vals array by resolve_var_refs(). The
	 * current value of each variable is read from the tracing_map
	 * using the hist field's hist_var.idx and entered into the
	 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	 */
	unsigned int var_ref_idx;
	bool read_once;

	unsigned int var_str_idx;

	/* Numeric literals are represented as u64 */
	u64 constant;
	/* Used to optimize division by constants */
	u64 div_multiplier;
};

static u64 hist_fn_call(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event);

static u64 hist_field_const(struct hist_field *field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	return field->constant;
}

static u64 hist_field_counter(struct hist_field *field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	return 1;
}

static u64 hist_field_string(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_dynstring(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}

static u64 hist_field_reldynstring(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	u32 *item = event + hist_field->field->offset;
	u32 str_item = *item;
	int str_loc = str_item & 0xffff;
	char *addr = (char *)&item[1] + str_loc;

	return (u64)(unsigned long)addr;
}

static u64 hist_field_pstring(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}

static u64 hist_field_log2(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = hist_fn_call(operand, elt, buffer, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}
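
/*
 * Worked example (added for illustration): for the .log2 modifier the
 * function above returns the bucket exponent, e.g. an operand value of
 * 9 gives roundup_pow_of_two(9) = 16 and ilog2(16) = 4, so values
 * 9..16 all land in the same "2^4" bucket.
 */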

static u64 hist_field_bucket(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand = hist_field->operands[0];
	unsigned long buckets = hist_field->buckets;

	u64 val = hist_fn_call(operand, elt, buffer, rbe, event);

	if (WARN_ON_ONCE(!buckets))
		return val;

	if (val >= LONG_MAX)
		val = div64_ul(val, buckets);
	else
		val = (u64)((unsigned long)val / buckets);
	return val * buckets;
}
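
/*
 * Worked example (added for illustration): with .buckets=100 the
 * function above rounds a value down to the start of its bucket,
 * e.g. val = 234 gives 234 / 100 = 2 and 2 * 100 = 200, so values
 * 200..299 are all reported under bucket 200.
 */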

static u64 hist_field_plus(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	return val1 + val2;
}

static u64 hist_field_minus(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	return val1 - val2;
}

static u64 hist_field_div(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	/* Return -1 for the undefined case */
	if (!val2)
		return -1;

	/* Use shift if the divisor is a power of 2 */
	if (!(val2 & (val2 - 1)))
		return val1 >> __ffs64(val2);

	return div64_u64(val1, val2);
}

static u64 div_by_power_of_two(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);

	return val1 >> __ffs64(operand2->constant);
}

static u64 div_by_not_power_of_two(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);

	return div64_u64(val1, operand2->constant);
}

static u64 div_by_mult_and_shift(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);

	/*
	 * If the divisor is a constant, do a multiplication and shift instead.
	 *
	 * Choose Z = some power of 2. If Y <= Z, then:
	 * X / Y = (X * (Z / Y)) / Z
	 *
	 * (Z / Y) is a constant (mult) which is calculated at parse time, so:
	 * X / Y = (X * mult) / Z
	 *
	 * The division by Z can be replaced by a shift since Z is a power of 2:
	 * X / Y = (X * mult) >> HIST_DIV_SHIFT
	 *
	 * As long as X < Z, the results will not be off by more than 1.
	 */
	if (val1 < (1 << HIST_DIV_SHIFT)) {
		u64 mult = operand2->div_multiplier;

		return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT;
	}

	return div64_u64(val1, operand2->constant);
}
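
/*
 * Worked example (added for illustration): with HIST_DIV_SHIFT = 20,
 * Z = 1 << 20 = 1048576. For a constant divisor of 3, parse time
 * computes mult = 1048576 / 3 = 349525. Dividing val1 = 1000 then
 * becomes (1000 * 349525 + 1048575) >> 20 = 334, within 1 of the
 * exact 1000 / 3 = 333, matching the error bound described above.
 */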

static u64 hist_field_mult(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
	u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);

	return val1 * val2;
}

static u64 hist_field_unary_minus(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)hist_fn_call(operand, elt, buffer, rbe, event);
	u64 val = (u64)-sval;

	return val;
}

#define DEFINE_HIST_FIELD_FN(type) \
static u64 hist_field_##type(struct hist_field *hist_field, \
		struct tracing_map_elt *elt, \
		struct trace_buffer *buffer, \
		struct ring_buffer_event *rbe, \
		void *event) \
{ \
	type *addr = (type *)(event + hist_field->field->offset); \
	\
	return (u64)(unsigned long)*addr; \
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);

#define for_each_hist_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data) \
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data) \
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HITCOUNT_IDX 0
#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)

enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT = 1 << 0,
	HIST_FIELD_FL_KEY = 1 << 1,
	HIST_FIELD_FL_STRING = 1 << 2,
	HIST_FIELD_FL_HEX = 1 << 3,
	HIST_FIELD_FL_SYM = 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
	HIST_FIELD_FL_EXECNAME = 1 << 6,
	HIST_FIELD_FL_SYSCALL = 1 << 7,
	HIST_FIELD_FL_STACKTRACE = 1 << 8,
	HIST_FIELD_FL_LOG2 = 1 << 9,
	HIST_FIELD_FL_TIMESTAMP = 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
	HIST_FIELD_FL_VAR = 1 << 12,
	HIST_FIELD_FL_EXPR = 1 << 13,
	HIST_FIELD_FL_VAR_REF = 1 << 14,
	HIST_FIELD_FL_CPU = 1 << 15,
	HIST_FIELD_FL_ALIAS = 1 << 16,
	HIST_FIELD_FL_BUCKET = 1 << 17,
	HIST_FIELD_FL_CONST = 1 << 18,
	HIST_FIELD_FL_PERCENT = 1 << 19,
	HIST_FIELD_FL_GRAPH = 1 << 20,
	HIST_FIELD_FL_COMM = 1 << 21,
};
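
/*
 * Illustrative note (not part of the original source): most of these
 * flags correspond directly to field modifiers in a trigger command,
 * e.g. "keys=call_site.sym-offset" sets HIST_FIELD_FL_SYM_OFFSET on the
 * key field and a ".percent" value modifier sets HIST_FIELD_FL_PERCENT;
 * others (VAR, VAR_REF, EXPR, CONST) mark what kind of hist_field was
 * built rather than how it is printed.
 */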

struct var_defs {
	unsigned int n_vars;
	char *name[TRACING_MAP_VARS_MAX];
	char *expr[TRACING_MAP_VARS_MAX];
};

struct hist_trigger_attrs {
	char *keys_str;
	char *vals_str;
	char *sort_key_str;
	char *name;
	char *clock;
	bool pause;
	bool cont;
	bool clear;
	bool ts_in_usecs;
	bool no_hitcount;
	unsigned int map_bits;

	char *assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int n_assignments;

	char *action_str[HIST_ACTIONS_MAX];
	unsigned int n_actions;

	struct var_defs var_defs;
};

struct field_var {
	struct hist_field *var;
	struct hist_field *val;
};

struct field_var_hist {
	struct hist_trigger_data *hist_data;
	char *cmd;
};

struct hist_trigger_data {
	struct hist_field *fields[HIST_FIELDS_MAX];
	unsigned int n_vals;
	unsigned int n_keys;
	unsigned int n_fields;
	unsigned int n_vars;
	unsigned int n_var_str;
	unsigned int key_size;
	struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int n_sort_keys;
	struct trace_event_file *event_file;
	struct hist_trigger_attrs *attrs;
	struct tracing_map *map;
	bool enable_timestamps;
	bool remove;
	struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
	unsigned int n_var_refs;

	struct action_data *actions[HIST_ACTIONS_MAX];
	unsigned int n_actions;

	struct field_var *field_vars[SYNTH_FIELDS_MAX];
	unsigned int n_field_vars;
	unsigned int n_field_var_str;
	struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int n_field_var_hists;

	struct field_var *save_vars[SYNTH_FIELDS_MAX];
	unsigned int n_save_vars;
	unsigned int n_save_var_str;
};

struct action_data;

typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer, void *rec,
		struct ring_buffer_event *rbe, void *key,
		struct action_data *data, u64 *var_ref_vals);

typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);

enum handler_id {
	HANDLER_ONMATCH = 1,
	HANDLER_ONMAX,
	HANDLER_ONCHANGE,
};

enum action_id {
	ACTION_SAVE = 1,
	ACTION_TRACE,
	ACTION_SNAPSHOT,
};

struct action_data {
	enum handler_id handler;
	enum action_id action;
	char *action_name;
	action_fn_t fn;

	unsigned int n_params;
	char *params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array. This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
	struct synth_event *synth_event;
	bool use_trace_keyword;
	char *synth_event_name;

	union {
		struct {
			char *event;
			char *event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action. Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char *var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked, e.g. onmax($var).
			 */
			struct hist_field *var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * e.g. max value.
			 */
			struct hist_field *track_var;

			check_track_val_fn_t check_val;
			action_fn_t save_data;
		} track_data;
	};
};

struct track_data {
	u64 track_val;
	bool updated;

	unsigned int key_len;
	void *key;
	struct tracing_map_elt elt;

	struct action_data *action_data;
	struct hist_trigger_data *hist_data;
};

struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char **field_var_str;
	int n_field_var_str;
};

struct snapshot_context {
	struct tracing_map_elt *elt;
	void *key;
};

/*
 * Returns the specific division function to use if the divisor
 * is constant. This avoids extra branches when the trigger is hit.
 */
static enum hist_field_fn hist_field_get_div_fn(struct hist_field *divisor)
{
	u64 div = divisor->constant;

	if (!(div & (div - 1)))
		return HIST_FIELD_FN_DIV_POWER2;

	/* If the divisor is too large, do a regular division */
	if (div > (1 << HIST_DIV_SHIFT))
		return HIST_FIELD_FN_DIV_NOT_POWER2;

	divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div);
	return HIST_FIELD_FN_DIV_MULT_SHIFT;
}
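
/*
 * Illustrative note (not part of the original source): for a divisor
 * of 8 the function above picks HIST_FIELD_FN_DIV_POWER2 (a plain
 * shift), for 3 it picks HIST_FIELD_FN_DIV_MULT_SHIFT and precomputes
 * div_multiplier = (1 << 20) / 3, and for anything larger than
 * 1 << HIST_DIV_SHIFT it falls back to HIST_FIELD_FN_DIV_NOT_POWER2.
 */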

static void track_data_free(struct track_data *track_data)
{
	struct hist_elt_data *elt_data;

	if (!track_data)
		return;

	kfree(track_data->key);

	elt_data = track_data->elt.private_data;
	if (elt_data) {
		kfree(elt_data->comm);
		kfree(elt_data);
	}

	kfree(track_data);
}

static struct track_data *track_data_alloc(unsigned int key_len,
		struct action_data *action_data,
		struct hist_trigger_data *hist_data)
{
	struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	struct hist_elt_data *elt_data;

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->key = kzalloc(key_len, GFP_KERNEL);
	if (!data->key) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->key_len = key_len;
	data->action_data = action_data;
	data->hist_data = hist_data;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	data->elt.private_data = elt_data;

	elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!elt_data->comm) {
		track_data_free(data);
		return ERR_PTR(-ENOMEM);
	}

	return data;
}

#define HIST_PREFIX "hist:"

static char *last_cmd;
static char last_cmd_loc[MAX_FILTER_STR_VAL];

static int errpos(char *str)
{
	if (!str || !last_cmd)
		return 0;

	return err_pos(last_cmd, str);
}

static void last_cmd_set(struct trace_event_file *file, char *str)
{
	const char *system = NULL, *name = NULL;
	struct trace_event_call *call;

	if (!str)
		return;

	kfree(last_cmd);

	last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str);
	if (!last_cmd)
		return;

	if (file) {
		call = file->event_call;
		system = call->class->system;
		if (system) {
			name = trace_event_name(call);
			if (!name)
				system = NULL;
		}
	}

	if (system)
		snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, HIST_PREFIX "%s:%s", system, name);
}

static void hist_err(struct trace_array *tr, u8 err_type, u16 err_pos)
{
	if (!last_cmd)
		return;

	tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
			err_type, err_pos);
}

static void hist_err_clear(void)
{
	if (last_cmd)
		last_cmd[0] = '\0';
	last_cmd_loc[0] = '\0';
}

typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
		unsigned int *var_ref_idx);

static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
		unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(static_key_enabled(&tp->key))) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}

static void action_trace(struct hist_trigger_data *hist_data,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer, void *rec,
		struct ring_buffer_event *rbe, void *key,
		struct action_data *data, u64 *var_ref_vals)
{
	struct synth_event *event = data->synth_event;

	trace_synth(event, var_ref_vals, data->var_ref_idx);
}

struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};

static u64 hist_field_timestamp(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;

	u64 ts = ring_buffer_event_time_stamp(buffer, rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}
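
/*
 * Illustrative note (not part of the original source): with the .usecs
 * modifier on common_timestamp, attrs->ts_in_usecs is set and a
 * nanosecond ring-buffer timestamp such as 1234567890 ns is reported
 * as 1234567 us; without it, the raw ns value is used as-is.
 */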

static u64 hist_field_cpu(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	int cpu = smp_processor_id();

	return cpu;
}

static u64 hist_field_comm(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	return (u64)(unsigned long)current->comm;
}

/**
 * check_field_for_var_ref - Check if a VAR_REF field references a variable
 * @hist_field: The VAR_REF field to check
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the given VAR_REF field to see whether or not it references
 * the given variable associated with the given trigger.
 *
 * Return: The VAR_REF field if it does reference the variable, NULL if not
 */
static struct hist_field *
check_field_for_var_ref(struct hist_field *hist_field,
		struct hist_trigger_data *var_data,
		unsigned int var_idx)
{
	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));

	if (hist_field && hist_field->var.idx == var_idx &&
	    hist_field->var.hist_data == var_data)
		return hist_field;

	return NULL;
}

/**
 * find_var_ref - Check if a trigger has a reference to a trigger variable
 * @hist_data: The hist trigger that might have a reference to the variable
 * @var_data: The hist trigger that owns the variable
 * @var_idx: The trigger variable identifier
 *
 * Check the list of var_refs[] on the first hist trigger to see
 * whether any of them are references to the variable on the second
 * trigger.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
		struct hist_trigger_data *var_data,
		unsigned int var_idx)
{
	struct hist_field *hist_field;
	unsigned int i;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		if (check_field_for_var_ref(hist_field, var_data, var_idx))
			return hist_field;
	}

	return NULL;
}

/**
 * find_any_var_ref - Check if there is a reference to a given trigger variable
 * @hist_data: The hist trigger
 * @var_idx: The trigger variable identifier
 *
 * Check to see whether the given variable is currently referenced by
 * any other trigger.
 *
 * The trigger the variable is defined on is explicitly excluded - the
 * assumption being that a self-reference doesn't prevent a trigger
 * from being removed.
 *
 * Return: The VAR_REF field referencing the variable if so, NULL if not
 */
static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
		unsigned int var_idx)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *found = NULL;
	struct hist_var_data *var_data;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data)
			continue;
		found = find_var_ref(var_data->hist_data, hist_data, var_idx);
		if (found)
			break;
	}

	return found;
}

/**
 * check_var_refs - Check if there is a reference to any of trigger's variables
 * @hist_data: The hist trigger
 *
 * A trigger can define one or more variables. If any one of them is
 * currently referenced by any other trigger, this function will
 * determine that.
 *
 * Typically used to determine whether or not a trigger can be removed
 * - if there are any references to a trigger's variables, it cannot.
 *
 * Return: True if there is a reference to any of trigger's variables
 */
static bool check_var_refs(struct hist_trigger_data *hist_data)
{
	struct hist_field *field;
	bool found = false;
	int i;

	for_each_hist_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field && field->flags & HIST_FIELD_FL_VAR) {
			if (find_any_var_ref(hist_data, field->var.idx)) {
				found = true;
				break;
			}
		}
	}

	return found;
}

static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data, *found = NULL;

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		if (var_data->hist_data == hist_data) {
			found = var_data;
			break;
		}
	}

	return found;
}

static bool field_has_hist_vars(struct hist_field *hist_field,
		unsigned int level)
{
	int i;

	if (level > 3)
		return false;

	if (!hist_field)
		return false;

	if (hist_field->flags & HIST_FIELD_FL_VAR ||
	    hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return true;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
		struct hist_field *operand;

		operand = hist_field->operands[i];
		if (field_has_hist_vars(operand, level + 1))
			return true;
	}

	return false;
}

static bool has_hist_vars(struct hist_trigger_data *hist_data)
{
	struct hist_field *hist_field;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (field_has_hist_vars(hist_field, 0))
			return true;
	}

	return false;
}

static int save_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (var_data)
		return 0;

	if (tracing_check_open_get_tr(tr))
		return -ENODEV;

	var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
	if (!var_data) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	var_data->hist_data = hist_data;
	list_add(&var_data->list, &tr->hist_vars);

	return 0;
}

static void remove_hist_vars(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_var_data *var_data;

	var_data = find_hist_vars(hist_data);
	if (!var_data)
		return;

	if (WARN_ON(check_var_refs(hist_data)))
		return;

	list_del(&var_data->list);

	kfree(var_data);

	trace_array_put(tr);
}

static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
		const char *var_name)
{
	struct hist_field *hist_field, *found = NULL;
	int i;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
		    strcmp(hist_field->var.name, var_name) == 0) {
			found = hist_field;
			break;
		}
	}

	return found;
}

static struct hist_field *find_var(struct hist_trigger_data *hist_data,
		struct trace_event_file *file,
		const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	hist_field = find_var_field(hist_data, var_name);
	if (hist_field)
		return hist_field;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}

static struct trace_event_file *find_var_file(struct trace_array *tr,
		char *system,
		char *event_name,
		char *var_name)
{
	struct hist_trigger_data *var_hist_data;
	struct hist_var_data *var_data;
	struct trace_event_file *file, *found = NULL;

	if (system)
		return find_event_file(tr, system, event_name);

	list_for_each_entry(var_data, &tr->hist_vars, list) {
		var_hist_data = var_data->hist_data;
		file = var_hist_data->event_file;
		if (file == found)
			continue;

		if (find_var_field(var_hist_data, var_name)) {
			if (found) {
				hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
				return NULL;
			}

			found = file;
		}
	}

	return found;
}

static struct hist_field *find_file_var(struct trace_event_file *file,
		const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}

static struct hist_field *
find_match_var(struct hist_trigger_data *hist_data, char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field, *found = NULL;
	struct trace_event_file *file;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			char *system = data->match_data.event_system;
			char *event_name = data->match_data.event;

			file = find_var_file(tr, system, event_name, var_name);
			if (!file)
				continue;
			hist_field = find_file_var(file, var_name);
			if (hist_field) {
				if (found) {
					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
						 errpos(var_name));
					return ERR_PTR(-EINVAL);
				}

				found = hist_field;
			}
		}
	}
	return found;
}

static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
		char *system,
		char *event_name,
		char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	struct trace_event_file *file;

	if (!system || !event_name) {
		hist_field = find_match_var(hist_data, var_name);
		if (IS_ERR(hist_field))
			return NULL;
		if (hist_field)
			return hist_field;
	}

	file = find_var_file(tr, system, event_name, var_name);
	if (!file)
		return NULL;

	hist_field = find_file_var(file, var_name);

	return hist_field;
}

static u64 hist_field_var_ref(struct hist_field *hist_field,
		struct tracing_map_elt *elt,
		struct trace_buffer *buffer,
		struct ring_buffer_event *rbe,
		void *event)
{
	struct hist_elt_data *elt_data;
	u64 var_val = 0;

	if (WARN_ON_ONCE(!elt))
		return var_val;

	elt_data = elt->private_data;
	var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];

	return var_val;
}

static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
		u64 *var_ref_vals, bool self)
{
	struct hist_trigger_data *var_data;
	struct tracing_map_elt *var_elt;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	bool resolved = true;
	u64 var_val = 0;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		var_idx = hist_field->var.idx;
		var_data = hist_field->var.hist_data;

		if (var_data == NULL) {
			resolved = false;
			break;
		}

		if ((self && var_data != hist_data) ||
		    (!self && var_data == hist_data))
			continue;

		var_elt = tracing_map_lookup(var_data->map, key);
		if (!var_elt) {
			resolved = false;
			break;
		}

		if (!tracing_map_var_set(var_elt, var_idx)) {
			resolved = false;
			break;
		}

		if (self || !hist_field->read_once)
			var_val = tracing_map_read_var(var_elt, var_idx);
		else
			var_val = tracing_map_read_var_once(var_elt, var_idx);

		var_ref_vals[i] = var_val;
	}

	return resolved;
}

static const char *hist_field_name(struct hist_field *field,
		unsigned int level)
{
	const char *field_name = "";

	if (WARN_ON_ONCE(!field))
		return field_name;

	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2 ||
		 field->flags & HIST_FIELD_FL_ALIAS ||
		 field->flags & HIST_FIELD_FL_BUCKET)
		field_name = hist_field_name(field->operands[0], ++level);
	else if (field->flags & HIST_FIELD_FL_CPU)
		field_name = "common_cpu";
	else if (field->flags & HIST_FIELD_FL_COMM)
		field_name = "common_comm";
	else if (field->flags & HIST_FIELD_FL_EXPR ||
		 field->flags & HIST_FIELD_FL_VAR_REF) {
		if (field->system) {
			static char full_name[MAX_FILTER_STR_VAL];

			strcat(full_name, field->system);
			strcat(full_name, ".");
			strcat(full_name, field->event_name);
			strcat(full_name, ".");
			strcat(full_name, field->name);
			field_name = full_name;
		} else
			field_name = field->name;
	} else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
		field_name = "common_timestamp";
	else if (field->flags & HIST_FIELD_FL_STACKTRACE) {
		field_name = "common_stacktrace";
	} else if (field->flags & HIST_FIELD_FL_HITCOUNT)
		field_name = "hitcount";

	if (field_name == NULL)
		field_name = "";

	return field_name;
}

static enum hist_field_fn select_value_fn(int field_size, int field_is_signed)
{
	switch (field_size) {
	case 8:
		if (field_is_signed)
			return HIST_FIELD_FN_S64;
		else
			return HIST_FIELD_FN_U64;
	case 4:
		if (field_is_signed)
			return HIST_FIELD_FN_S32;
		else
			return HIST_FIELD_FN_U32;
	case 2:
		if (field_is_signed)
			return HIST_FIELD_FN_S16;
		else
			return HIST_FIELD_FN_U16;
	case 1:
		if (field_is_signed)
			return HIST_FIELD_FN_S8;
		else
			return HIST_FIELD_FN_U8;
	}

	return HIST_FIELD_FN_NOP;
}

static int parse_map_size(char *str)
{
	unsigned long size, map_bits;
	int ret;

	ret = kstrtoul(str, 0, &size);
	if (ret)
		goto out;

	map_bits = ilog2(roundup_pow_of_two(size));
	if (map_bits < TRACING_MAP_BITS_MIN ||
	    map_bits > TRACING_MAP_BITS_MAX)
		ret = -EINVAL;
	else
		ret = map_bits;
 out:
	return ret;
}
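
/*
 * Illustrative note (not part of the original source): parse_map_size()
 * converts the "size=" attribute into a power-of-2 bit count for the
 * tracing_map, e.g. "size=2048" gives ilog2(roundup_pow_of_two(2048))
 * = 11, and "size=3000" rounds up to 4096 and gives 12; bit counts
 * outside TRACING_MAP_BITS_MIN..TRACING_MAP_BITS_MAX are rejected
 * with -EINVAL.
 */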

static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	unsigned int i;

	if (!attrs)
		return;

	for (i = 0; i < attrs->n_assignments; i++)
		kfree(attrs->assignment_str[i]);

	for (i = 0; i < attrs->n_actions; i++)
		kfree(attrs->action_str[i]);

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs->clock);
	kfree(attrs);
}

static int parse_action(char *str, struct hist_trigger_attrs *attrs)
{
	int ret = -EINVAL;

	if (attrs->n_actions >= HIST_ACTIONS_MAX)
		return ret;

	if ((str_has_prefix(str, "onmatch(")) ||
	    (str_has_prefix(str, "onmax(")) ||
	    (str_has_prefix(str, "onchange("))) {
		attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
		if (!attrs->action_str[attrs->n_actions]) {
			ret = -ENOMEM;
			return ret;
		}
		attrs->n_actions++;
		ret = 0;
	}
	return ret;
}

static int parse_assignment(struct trace_array *tr,
		char *str, struct hist_trigger_attrs *attrs)
{
	int len, ret = 0;

	if ((len = str_has_prefix(str, "key=")) ||
	    (len = str_has_prefix(str, "keys="))) {
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "val=")) ||
		   (len = str_has_prefix(str, "vals=")) ||
		   (len = str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "sort="))) {
		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "clock="))) {
		str += len;

		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "size="))) {
		int map_bits = parse_map_size(str + len);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}

static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "nohitcount") == 0 ||
			   strcmp(str, "NOHC") == 0)
			attrs->no_hitcount = true;
		else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}
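
/*
 * Illustrative note (not part of the original source): the trigger_str
 * seen here is the attribute portion of a command such as
 *
 *	keys=pid:vals=hitcount:sort=hitcount:size=2048:pause
 *
 * It is split on ':' above; each "x=y" piece goes through
 * parse_assignment(), bare keywords like "pause" or "clear" set the
 * corresponding flag, and on*() handlers are collected as action
 * strings by parse_action(). A missing "keys="/"key=" assignment
 * makes the whole command invalid.
 */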

static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	strscpy(comm, task->comm, TASK_COMM_LEN);
}

static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	unsigned int i;

	for (i = 0; i < elt_data->n_field_var_str; i++)
		kfree(elt_data->field_var_str[i]);

	kfree(elt_data->field_var_str);

	kfree(elt_data->comm);
	kfree(elt_data);
}

static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	hist_elt_data_free(elt_data);
}

static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
{
	struct hist_trigger_data *hist_data = elt->map->private_data;
	unsigned int size = TASK_COMM_LEN;
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, n_str;

	elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
	if (!elt_data)
		return -ENOMEM;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];

		if (hist_field->flags & HIST_FIELD_FL_EXECNAME) {
			elt_data->comm = kzalloc(size, GFP_KERNEL);
			if (!elt_data->comm) {
				kfree(elt_data);
				return -ENOMEM;
			}
			break;
		}
	}

	n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
		hist_data->n_var_str;
	if (n_str > SYNTH_FIELDS_MAX) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}

	BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));

	size = STR_VAR_LEN_MAX;

	elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
	if (!elt_data->field_var_str) {
		hist_elt_data_free(elt_data);
		return -EINVAL;
	}
	elt_data->n_field_var_str = n_str;

	for (i = 0; i < n_str; i++) {
		elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
		if (!elt_data->field_var_str[i]) {
			hist_elt_data_free(elt_data);
			return -ENOMEM;
		}
	}

	elt->private_data = elt_data;

	return 0;
}

static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}

static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc = hist_trigger_elt_data_alloc,
	.elt_free = hist_trigger_elt_data_free,
	.elt_init = hist_trigger_elt_data_init,
};

static const char *get_hist_field_flags(struct hist_field *hist_field)
{
	const char *flags_str = NULL;

	if (hist_field->flags & HIST_FIELD_FL_HEX)
		flags_str = "hex";
	else if (hist_field->flags & HIST_FIELD_FL_SYM)
		flags_str = "sym";
	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
		flags_str = "sym-offset";
	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
		flags_str = "execname";
	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
		flags_str = "syscall";
	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
		flags_str = "log2";
	else if (hist_field->flags & HIST_FIELD_FL_BUCKET)
		flags_str = "buckets";
	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
		flags_str = "usecs";
	else if (hist_field->flags & HIST_FIELD_FL_PERCENT)
		flags_str = "percent";
	else if (hist_field->flags & HIST_FIELD_FL_GRAPH)
		flags_str = "graph";
	else if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
		flags_str = "stacktrace";

	return flags_str;
}

static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");
	else if (field->flags & HIST_FIELD_FL_CONST) {
		char str[HIST_CONST_DIGITS_MAX];

		snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant);
		strcat(expr, str);
	}

	strcat(expr, hist_field_name(field, 0));

	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}

static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
1799 | break; | |
1800 | case FIELD_OP_DIV: | |
1801 | strcat(expr, "/"); | |
1802 | break; | |
1803 | case FIELD_OP_MULT: | |
1804 | strcat(expr, "*"); | |
1805 | break; | |
1806 | default: | |
1807 | kfree(expr); | |
1808 | return NULL; | |
1809 | } | |
1810 | ||
1811 | expr_field_str(field->operands[1], expr); | |
1812 | ||
1813 | return expr; | |
1814 | } | |
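| ||
| /* | |
| * For reference, a couple of strings this builds, assuming a trigger | |
| * that defined variables named ts0 and lat (hypothetical names): | |
| * | |
| * binary expression: "common_timestamp.usecs-$ts0" | |
| * unary minus: "-($lat)" | |
| * | |
| * Var refs keep their leading '$'; plain fields get at most one | |
| * ".modifier" suffix, as emitted by expr_field_str() above. | |
| */ | |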
1815 | ||
1816 | /* | |
1817 | * If field_op != FIELD_OP_NONE, *sep points to the root operator | |
1818 | * of the expression tree to be evaluated. | |
1819 | */ | |
1820 | static int contains_operator(char *str, char **sep) | |
1821 | { | |
1822 | enum field_op_id field_op = FIELD_OP_NONE; | |
1823 | char *minus_op, *plus_op, *div_op, *mult_op; | |
1824 | ||
1825 | ||
1826 | /* | |
1827 | * Report the last occurrence of the operators first, so that the | |
1828 | * expression is evaluated left to right. This is important since | |
1829 | * subtraction and division are not associative. | |
1830 | * | |
1831 | * e.g. | |
1832 | * 64/8/4/2 is 1, i.e. 64/8/4/2 = ((64/8)/4)/2 | |
1833 | * 14-7-5-2 is 0, i.e. 14-7-5-2 = ((14-7)-5)-2 | |
1834 | */ | |
1835 | ||
1836 | /* | |
1837 | * First, find lower precedence addition and subtraction | |
1838 | * since the expression will be evaluated recursively. | |
1839 | */ | |
1840 | minus_op = strrchr(str, '-'); | |
1841 | if (minus_op) { | |
1842 | /* | |
1843 | * Unary minus is not supported in sub-expressions. If | |
1844 | * present, it is always the next root operator. | |
1845 | */ | |
1846 | if (minus_op == str) { | |
1847 | field_op = FIELD_OP_UNARY_MINUS; | |
1848 | goto out; | |
1849 | } | |
1850 | ||
1851 | field_op = FIELD_OP_MINUS; | |
1852 | } | |
1853 | ||
1854 | plus_op = strrchr(str, '+'); | |
1855 | if (plus_op || minus_op) { | |
1856 | /* | |
1857 | * For operators of the same precedence, use the rightmost as the | |
1858 | * root, so that the expression is evaluated left to right. | |
1859 | */ | |
1860 | if (plus_op > minus_op) | |
1861 | field_op = FIELD_OP_PLUS; | |
1862 | goto out; | |
1863 | } | |
1864 | ||
1865 | /* | |
1866 | * Multiplication and division have higher precedence than addition and | |
1867 | * subtraction. | |
1868 | */ | |
1869 | div_op = strrchr(str, '/'); | |
1870 | if (div_op) | |
1871 | field_op = FIELD_OP_DIV; | |
1872 | ||
1873 | mult_op = strrchr(str, '*'); | |
1874 | /* | |
1875 | * For operators of the same precedence, use the rightmost as the | |
1876 | * root, so that the expression is evaluated left to right. | |
1877 | */ | |
1878 | if (mult_op > div_op) | |
1879 | field_op = FIELD_OP_MULT; | |
1880 | ||
1881 | out: | |
1882 | if (sep) { | |
1883 | switch (field_op) { | |
1884 | case FIELD_OP_UNARY_MINUS: | |
1885 | case FIELD_OP_MINUS: | |
1886 | *sep = minus_op; | |
1887 | break; | |
1888 | case FIELD_OP_PLUS: | |
1889 | *sep = plus_op; | |
1890 | break; | |
1891 | case FIELD_OP_DIV: | |
1892 | *sep = div_op; | |
1893 | break; | |
1894 | case FIELD_OP_MULT: | |
1895 | *sep = mult_op; | |
1896 | break; | |
1897 | case FIELD_OP_NONE: | |
1898 | default: | |
1899 | *sep = NULL; | |
1900 | break; | |
1901 | } | |
1902 | } | |
1903 | ||
1904 | return field_op; | |
1905 | } | |
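| ||
| /* | |
| * Worked examples of the splits chosen above (a, b and c stand in for | |
| * arbitrary operands, not fields of any particular event): | |
| * | |
| * "a-b+c" -> FIELD_OP_PLUS, *sep at the '+' => (a-b)+c | |
| * "a*b-c" -> FIELD_OP_MINUS, *sep at the '-' => (a*b)-c | |
| * "a/b*c" -> FIELD_OP_MULT, *sep at the '*' => (a/b)*c | |
| * "-(a)" -> FIELD_OP_UNARY_MINUS, handled by parse_unary() | |
| */ | |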
1906 | ||
1907 | static void get_hist_field(struct hist_field *hist_field) | |
1908 | { | |
1909 | hist_field->ref++; | |
1910 | } | |
1911 | ||
1912 | static void __destroy_hist_field(struct hist_field *hist_field) | |
1913 | { | |
1914 | if (--hist_field->ref > 1) | |
1915 | return; | |
1916 | ||
1917 | kfree(hist_field->var.name); | |
1918 | kfree(hist_field->name); | |
1919 | ||
1920 | /* Can likely be a const */ | |
1921 | kfree_const(hist_field->type); | |
1922 | ||
1923 | kfree(hist_field->system); | |
1924 | kfree(hist_field->event_name); | |
1925 | ||
1926 | kfree(hist_field); | |
1927 | } | |
1928 | ||
1929 | static void destroy_hist_field(struct hist_field *hist_field, | |
1930 | unsigned int level) | |
1931 | { | |
1932 | unsigned int i; | |
1933 | ||
1934 | if (level > 3) | |
1935 | return; | |
1936 | ||
1937 | if (!hist_field) | |
1938 | return; | |
1939 | ||
1940 | if (hist_field->flags & HIST_FIELD_FL_VAR_REF) | |
1941 | return; /* var refs will be destroyed separately */ | |
1942 | ||
1943 | for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) | |
1944 | destroy_hist_field(hist_field->operands[i], level + 1); | |
1945 | ||
1946 | __destroy_hist_field(hist_field); | |
1947 | } | |
1948 | ||
1949 | static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, | |
1950 | struct ftrace_event_field *field, | |
1951 | unsigned long flags, | |
1952 | char *var_name) | |
1953 | { | |
1954 | struct hist_field *hist_field; | |
1955 | ||
1956 | if (field && is_function_field(field)) | |
1957 | return NULL; | |
1958 | ||
1959 | hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); | |
1960 | if (!hist_field) | |
1961 | return NULL; | |
1962 | ||
1963 | hist_field->ref = 1; | |
1964 | ||
1965 | hist_field->hist_data = hist_data; | |
1966 | ||
1967 | if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) | |
1968 | goto out; /* caller will populate */ | |
1969 | ||
1970 | if (flags & HIST_FIELD_FL_VAR_REF) { | |
1971 | hist_field->fn_num = HIST_FIELD_FN_VAR_REF; | |
1972 | goto out; | |
1973 | } | |
1974 | ||
1975 | if (flags & HIST_FIELD_FL_HITCOUNT) { | |
1976 | hist_field->fn_num = HIST_FIELD_FN_COUNTER; | |
1977 | hist_field->size = sizeof(u64); | |
1978 | hist_field->type = "u64"; | |
1979 | goto out; | |
1980 | } | |
1981 | ||
1982 | if (flags & HIST_FIELD_FL_CONST) { | |
1983 | hist_field->fn_num = HIST_FIELD_FN_CONST; | |
1984 | hist_field->size = sizeof(u64); | |
1985 | hist_field->type = kstrdup("u64", GFP_KERNEL); | |
1986 | if (!hist_field->type) | |
1987 | goto free; | |
1988 | goto out; | |
1989 | } | |
1990 | ||
1991 | if (flags & HIST_FIELD_FL_STACKTRACE) { | |
1992 | if (field) | |
1993 | hist_field->fn_num = HIST_FIELD_FN_STACK; | |
1994 | else | |
1995 | hist_field->fn_num = HIST_FIELD_FN_NOP; | |
1996 | hist_field->size = HIST_STACKTRACE_SIZE; | |
1997 | hist_field->type = kstrdup_const("unsigned long[]", GFP_KERNEL); | |
1998 | if (!hist_field->type) | |
1999 | goto free; | |
2000 | goto out; | |
2001 | } | |
2002 | ||
2003 | if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) { | |
2004 | unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET); | |
2005 | hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 : | |
2006 | HIST_FIELD_FN_BUCKET; | |
2007 | hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); | |
2008 | if (!hist_field->operands[0]) | |
2009 | goto free; | |
2010 | hist_field->size = hist_field->operands[0]->size; | |
2011 | hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL); | |
2012 | if (!hist_field->type) | |
2013 | goto free; | |
2014 | goto out; | |
2015 | } | |
2016 | ||
2017 | if (flags & HIST_FIELD_FL_TIMESTAMP) { | |
2018 | hist_field->fn_num = HIST_FIELD_FN_TIMESTAMP; | |
2019 | hist_field->size = sizeof(u64); | |
2020 | hist_field->type = "u64"; | |
2021 | goto out; | |
2022 | } | |
2023 | ||
2024 | if (flags & HIST_FIELD_FL_CPU) { | |
2025 | hist_field->fn_num = HIST_FIELD_FN_CPU; | |
2026 | hist_field->size = sizeof(int); | |
2027 | hist_field->type = "unsigned int"; | |
2028 | goto out; | |
2029 | } | |
2030 | ||
2031 | if (flags & HIST_FIELD_FL_COMM) { | |
2032 | hist_field->fn_num = HIST_FIELD_FN_COMM; | |
2033 | hist_field->size = MAX_FILTER_STR_VAL; | |
2034 | hist_field->type = "char[]"; | |
2035 | goto out; | |
2036 | } | |
2037 | ||
2038 | if (WARN_ON_ONCE(!field)) | |
2039 | goto out; | |
2040 | ||
2041 | /* Pointers to strings are just pointers and dangerous to dereference */ | |
2042 | if (is_string_field(field) && | |
2043 | (field->filter_type != FILTER_PTR_STRING)) { | |
2044 | flags |= HIST_FIELD_FL_STRING; | |
2045 | ||
2046 | hist_field->size = MAX_FILTER_STR_VAL; | |
2047 | hist_field->type = kstrdup_const(field->type, GFP_KERNEL); | |
2048 | if (!hist_field->type) | |
2049 | goto free; | |
2050 | ||
2051 | if (field->filter_type == FILTER_STATIC_STRING) { | |
2052 | hist_field->fn_num = HIST_FIELD_FN_STRING; | |
2053 | hist_field->size = field->size; | |
2054 | } else if (field->filter_type == FILTER_DYN_STRING) { | |
2055 | hist_field->fn_num = HIST_FIELD_FN_DYNSTRING; | |
2056 | } else if (field->filter_type == FILTER_RDYN_STRING) | |
2057 | hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING; | |
2058 | else | |
2059 | hist_field->fn_num = HIST_FIELD_FN_PSTRING; | |
2060 | } else { | |
2061 | hist_field->size = field->size; | |
2062 | hist_field->is_signed = field->is_signed; | |
2063 | hist_field->type = kstrdup_const(field->type, GFP_KERNEL); | |
2064 | if (!hist_field->type) | |
2065 | goto free; | |
2066 | ||
2067 | hist_field->fn_num = select_value_fn(field->size, | |
2068 | field->is_signed); | |
2069 | if (hist_field->fn_num == HIST_FIELD_FN_NOP) { | |
2070 | destroy_hist_field(hist_field, 0); | |
2071 | return NULL; | |
2072 | } | |
2073 | } | |
2074 | out: | |
2075 | hist_field->field = field; | |
2076 | hist_field->flags = flags; | |
2077 | ||
2078 | if (var_name) { | |
2079 | hist_field->var.name = kstrdup(var_name, GFP_KERNEL); | |
2080 | if (!hist_field->var.name) | |
2081 | goto free; | |
2082 | } | |
2083 | ||
2084 | return hist_field; | |
2085 | free: | |
2086 | destroy_hist_field(hist_field, 0); | |
2087 | return NULL; | |
2088 | } | |
2089 | ||
2090 | static void destroy_hist_fields(struct hist_trigger_data *hist_data) | |
2091 | { | |
2092 | unsigned int i; | |
2093 | ||
2094 | for (i = 0; i < HIST_FIELDS_MAX; i++) { | |
2095 | if (hist_data->fields[i]) { | |
2096 | destroy_hist_field(hist_data->fields[i], 0); | |
2097 | hist_data->fields[i] = NULL; | |
2098 | } | |
2099 | } | |
2100 | ||
2101 | for (i = 0; i < hist_data->n_var_refs; i++) { | |
2102 | WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF)); | |
2103 | __destroy_hist_field(hist_data->var_refs[i]); | |
2104 | hist_data->var_refs[i] = NULL; | |
2105 | } | |
2106 | } | |
2107 | ||
2108 | static int init_var_ref(struct hist_field *ref_field, | |
2109 | struct hist_field *var_field, | |
2110 | char *system, char *event_name) | |
2111 | { | |
2112 | int err = 0; | |
2113 | ||
2114 | ref_field->var.idx = var_field->var.idx; | |
2115 | ref_field->var.hist_data = var_field->hist_data; | |
2116 | ref_field->size = var_field->size; | |
2117 | ref_field->is_signed = var_field->is_signed; | |
2118 | ref_field->flags |= var_field->flags & | |
2119 | (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); | |
2120 | ||
2121 | if (system) { | |
2122 | ref_field->system = kstrdup(system, GFP_KERNEL); | |
2123 | if (!ref_field->system) | |
2124 | return -ENOMEM; | |
2125 | } | |
2126 | ||
2127 | if (event_name) { | |
2128 | ref_field->event_name = kstrdup(event_name, GFP_KERNEL); | |
2129 | if (!ref_field->event_name) { | |
2130 | err = -ENOMEM; | |
2131 | goto free; | |
2132 | } | |
2133 | } | |
2134 | ||
2135 | if (var_field->var.name) { | |
2136 | ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL); | |
2137 | if (!ref_field->name) { | |
2138 | err = -ENOMEM; | |
2139 | goto free; | |
2140 | } | |
2141 | } else if (var_field->name) { | |
2142 | ref_field->name = kstrdup(var_field->name, GFP_KERNEL); | |
2143 | if (!ref_field->name) { | |
2144 | err = -ENOMEM; | |
2145 | goto free; | |
2146 | } | |
2147 | } | |
2148 | ||
2149 | ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL); | |
2150 | if (!ref_field->type) { | |
2151 | err = -ENOMEM; | |
2152 | goto free; | |
2153 | } | |
2154 | out: | |
2155 | return err; | |
2156 | free: | |
2157 | kfree(ref_field->system); | |
2158 | ref_field->system = NULL; | |
2159 | kfree(ref_field->event_name); | |
2160 | ref_field->event_name = NULL; | |
2161 | kfree(ref_field->name); | |
2162 | ref_field->name = NULL; | |
2163 | ||
2164 | goto out; | |
2165 | } | |
2166 | ||
2167 | static int find_var_ref_idx(struct hist_trigger_data *hist_data, | |
2168 | struct hist_field *var_field) | |
2169 | { | |
2170 | struct hist_field *ref_field; | |
2171 | int i; | |
2172 | ||
2173 | for (i = 0; i < hist_data->n_var_refs; i++) { | |
2174 | ref_field = hist_data->var_refs[i]; | |
2175 | if (ref_field->var.idx == var_field->var.idx && | |
2176 | ref_field->var.hist_data == var_field->hist_data) | |
2177 | return i; | |
2178 | } | |
2179 | ||
2180 | return -ENOENT; | |
2181 | } | |
2182 | ||
2183 | /** | |
2184 | * create_var_ref - Create a variable reference and attach it to trigger | |
2185 | * @hist_data: The trigger that will be referencing the variable | |
2186 | * @var_field: The VAR field to create a reference to | |
2187 | * @system: The optional system string | |
2188 | * @event_name: The optional event_name string | |
2189 | * | |
2190 | * Given a variable hist_field, create a VAR_REF hist_field that | |
2191 | * represents a reference to it. | |
2192 | * | |
2193 | * This function also adds the reference to the trigger that | |
2194 | * now references the variable. | |
2195 | * | |
2196 | * Return: The VAR_REF field if successful, NULL if not | |
2197 | */ | |
2198 | static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, | |
2199 | struct hist_field *var_field, | |
2200 | char *system, char *event_name) | |
2201 | { | |
2202 | unsigned long flags = HIST_FIELD_FL_VAR_REF; | |
2203 | struct hist_field *ref_field; | |
2204 | int i; | |
2205 | ||
2206 | /* Check if a reference to the variable already exists */ | |
2207 | for (i = 0; i < hist_data->n_var_refs; i++) { | |
2208 | ref_field = hist_data->var_refs[i]; | |
2209 | if (ref_field->var.idx == var_field->var.idx && | |
2210 | ref_field->var.hist_data == var_field->hist_data) { | |
2211 | get_hist_field(ref_field); | |
2212 | return ref_field; | |
2213 | } | |
2214 | } | |
2215 | /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */ | |
2216 | if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX) | |
2217 | return NULL; | |
2218 | ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); | |
2219 | if (ref_field) { | |
2220 | if (init_var_ref(ref_field, var_field, system, event_name)) { | |
2221 | destroy_hist_field(ref_field, 0); | |
2222 | return NULL; | |
2223 | } | |
2224 | ||
2225 | hist_data->var_refs[hist_data->n_var_refs] = ref_field; | |
2226 | ref_field->var_ref_idx = hist_data->n_var_refs++; | |
2227 | } | |
2228 | ||
2229 | return ref_field; | |
2230 | } | |
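| ||
| /* | |
| * Note that references are shared: a second reference to the same | |
| * variable from this trigger returns the existing VAR_REF field with | |
| * its refcount bumped rather than allocating a new one. | |
| */ | |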
2231 | ||
2232 | static bool is_var_ref(char *var_name) | |
2233 | { | |
2234 | if (!var_name || strlen(var_name) < 2 || var_name[0] != '$') | |
2235 | return false; | |
2236 | ||
2237 | return true; | |
2238 | } | |
2239 | ||
2240 | static char *field_name_from_var(struct hist_trigger_data *hist_data, | |
2241 | char *var_name) | |
2242 | { | |
2243 | char *name, *field; | |
2244 | unsigned int i; | |
2245 | ||
2246 | for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { | |
2247 | name = hist_data->attrs->var_defs.name[i]; | |
2248 | ||
2249 | if (strcmp(var_name, name) == 0) { | |
2250 | field = hist_data->attrs->var_defs.expr[i]; | |
2251 | if (contains_operator(field, NULL) || is_var_ref(field)) | |
2252 | continue; | |
2253 | return field; | |
2254 | } | |
2255 | } | |
2256 | ||
2257 | return NULL; | |
2258 | } | |
2259 | ||
2260 | static char *local_field_var_ref(struct hist_trigger_data *hist_data, | |
2261 | char *system, char *event_name, | |
2262 | char *var_name) | |
2263 | { | |
2264 | struct trace_event_call *call; | |
2265 | ||
2266 | if (system && event_name) { | |
2267 | call = hist_data->event_file->event_call; | |
2268 | ||
2269 | if (strcmp(system, call->class->system) != 0) | |
2270 | return NULL; | |
2271 | ||
2272 | if (strcmp(event_name, trace_event_name(call)) != 0) | |
2273 | return NULL; | |
2274 | } | |
2275 | ||
2276 | if (!!system != !!event_name) | |
2277 | return NULL; | |
2278 | ||
2279 | if (!is_var_ref(var_name)) | |
2280 | return NULL; | |
2281 | ||
2282 | var_name++; | |
2283 | ||
2284 | return field_name_from_var(hist_data, var_name); | |
2285 | } | |
2286 | ||
2287 | static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, | |
2288 | char *system, char *event_name, | |
2289 | char *var_name) | |
2290 | { | |
2291 | struct hist_field *var_field = NULL, *ref_field = NULL; | |
2292 | struct trace_array *tr = hist_data->event_file->tr; | |
2293 | ||
2294 | if (!is_var_ref(var_name)) | |
2295 | return NULL; | |
2296 | ||
2297 | var_name++; | |
2298 | ||
2299 | var_field = find_event_var(hist_data, system, event_name, var_name); | |
2300 | if (var_field) | |
2301 | ref_field = create_var_ref(hist_data, var_field, | |
2302 | system, event_name); | |
2303 | ||
2304 | if (!ref_field) | |
2305 | hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); | |
2306 | ||
2307 | return ref_field; | |
2308 | } | |
2309 | ||
2310 | static struct ftrace_event_field * | |
2311 | parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, | |
2312 | char *field_str, unsigned long *flags, unsigned long *buckets) | |
2313 | { | |
2314 | struct ftrace_event_field *field = NULL; | |
2315 | char *field_name, *modifier, *str; | |
2316 | struct trace_array *tr = file->tr; | |
2317 | ||
2318 | modifier = str = kstrdup(field_str, GFP_KERNEL); | |
2319 | if (!modifier) | |
2320 | return ERR_PTR(-ENOMEM); | |
2321 | ||
2322 | field_name = strsep(&modifier, "."); | |
2323 | if (modifier) { | |
2324 | if (strcmp(modifier, "hex") == 0) | |
2325 | *flags |= HIST_FIELD_FL_HEX; | |
2326 | else if (strcmp(modifier, "sym") == 0) | |
2327 | *flags |= HIST_FIELD_FL_SYM; | |
2328 | /* | |
2329 | * 'sym-offset' occurrences in the trigger string are modified | |
2330 | * to 'symXoffset' to simplify arithmetic expression parsing. | |
2331 | */ | |
2332 | else if (strcmp(modifier, "symXoffset") == 0) | |
2333 | *flags |= HIST_FIELD_FL_SYM_OFFSET; | |
2334 | else if ((strcmp(modifier, "execname") == 0) && | |
2335 | (strcmp(field_name, "common_pid") == 0)) | |
2336 | *flags |= HIST_FIELD_FL_EXECNAME; | |
2337 | else if (strcmp(modifier, "syscall") == 0) | |
2338 | *flags |= HIST_FIELD_FL_SYSCALL; | |
2339 | else if (strcmp(modifier, "stacktrace") == 0) | |
2340 | *flags |= HIST_FIELD_FL_STACKTRACE; | |
2341 | else if (strcmp(modifier, "log2") == 0) | |
2342 | *flags |= HIST_FIELD_FL_LOG2; | |
2343 | else if (strcmp(modifier, "usecs") == 0) | |
2344 | *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; | |
2345 | else if (strncmp(modifier, "bucket", 6) == 0) { | |
2346 | int ret; | |
2347 | ||
2348 | modifier += 6; | |
2349 | ||
2350 | if (*modifier == 's') | |
2351 | modifier++; | |
2352 | if (*modifier != '=') | |
2353 | goto error; | |
2354 | modifier++; | |
2355 | ret = kstrtoul(modifier, 0, buckets); | |
2356 | if (ret || !(*buckets)) | |
2357 | goto error; | |
2358 | *flags |= HIST_FIELD_FL_BUCKET; | |
2359 | } else if (strncmp(modifier, "percent", 7) == 0) { | |
2360 | if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY)) | |
2361 | goto error; | |
2362 | *flags |= HIST_FIELD_FL_PERCENT; | |
2363 | } else if (strncmp(modifier, "graph", 5) == 0) { | |
2364 | if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY)) | |
2365 | goto error; | |
2366 | *flags |= HIST_FIELD_FL_GRAPH; | |
2367 | } else { | |
2368 | error: | |
2369 | hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); | |
2370 | field = ERR_PTR(-EINVAL); | |
2371 | goto out; | |
2372 | } | |
2373 | } | |
2374 | ||
2375 | if (strcmp(field_name, "common_timestamp") == 0) { | |
2376 | *flags |= HIST_FIELD_FL_TIMESTAMP; | |
2377 | hist_data->enable_timestamps = true; | |
2378 | if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) | |
2379 | hist_data->attrs->ts_in_usecs = true; | |
2380 | } else if (strcmp(field_name, "common_stacktrace") == 0) { | |
2381 | *flags |= HIST_FIELD_FL_STACKTRACE; | |
2382 | } else if (strcmp(field_name, "common_cpu") == 0) { | |
2383 | *flags |= HIST_FIELD_FL_CPU; | |
2384 | } else if (strcmp(field_name, "common_comm") == 0) { | |
2385 | *flags |= HIST_FIELD_FL_COMM | HIST_FIELD_FL_STRING; | |
2386 | } else if (strcmp(field_name, "hitcount") == 0) | |
2387 | *flags |= HIST_FIELD_FL_HITCOUNT; | |
2388 | else { | |
2389 | field = trace_find_event_field(file->event_call, field_name); | |
2390 | if (!field || !field->size) { | |
2391 | /* | |
2392 | * For backward compatibility, if field_name | |
2393 | * was "cpu" or "stacktrace", then we treat this | |
2394 | * the same as common_cpu and common_stacktrace | |
2395 | * respectively. This also works for "CPU", and | |
2396 | * "STACKTRACE". | |
2397 | */ | |
2398 | if (field && field->filter_type == FILTER_CPU) { | |
2399 | *flags |= HIST_FIELD_FL_CPU; | |
2400 | } else if (field && field->filter_type == FILTER_STACKTRACE) { | |
2401 | *flags |= HIST_FIELD_FL_STACKTRACE; | |
2402 | } else if (field && field->filter_type == FILTER_COMM) { | |
2403 | *flags |= HIST_FIELD_FL_COMM | HIST_FIELD_FL_STRING; | |
2404 | } else { | |
2405 | hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, | |
2406 | errpos(field_name)); | |
2407 | field = ERR_PTR(-EINVAL); | |
2408 | goto out; | |
2409 | } | |
2410 | } | |
2411 | } | |
2412 | out: | |
2413 | kfree(str); | |
2414 | ||
2415 | return field; | |
2416 | } | |
2417 | ||
2418 | static struct hist_field *create_alias(struct hist_trigger_data *hist_data, | |
2419 | struct hist_field *var_ref, | |
2420 | char *var_name) | |
2421 | { | |
2422 | struct hist_field *alias = NULL; | |
2423 | unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR; | |
2424 | ||
2425 | alias = create_hist_field(hist_data, NULL, flags, var_name); | |
2426 | if (!alias) | |
2427 | return NULL; | |
2428 | ||
2429 | alias->fn_num = var_ref->fn_num; | |
2430 | alias->operands[0] = var_ref; | |
2431 | ||
2432 | if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) { | |
2433 | destroy_hist_field(alias, 0); | |
2434 | return NULL; | |
2435 | } | |
2436 | ||
2437 | alias->var_ref_idx = var_ref->var_ref_idx; | |
2438 | ||
2439 | return alias; | |
2440 | } | |
2441 | ||
2442 | static struct hist_field *parse_const(struct hist_trigger_data *hist_data, | |
2443 | char *str, char *var_name, | |
2444 | unsigned long *flags) | |
2445 | { | |
2446 | struct trace_array *tr = hist_data->event_file->tr; | |
2447 | struct hist_field *field = NULL; | |
2448 | u64 constant; | |
2449 | ||
2450 | if (kstrtoull(str, 0, &constant)) { | |
2451 | hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str)); | |
2452 | return NULL; | |
2453 | } | |
2454 | ||
2455 | *flags |= HIST_FIELD_FL_CONST; | |
2456 | field = create_hist_field(hist_data, NULL, *flags, var_name); | |
2457 | if (!field) | |
2458 | return NULL; | |
2459 | ||
2460 | field->constant = constant; | |
2461 | ||
2462 | return field; | |
2463 | } | |
2464 | ||
2465 | static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, | |
2466 | struct trace_event_file *file, char *str, | |
2467 | unsigned long *flags, char *var_name) | |
2468 | { | |
2469 | char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; | |
2470 | struct ftrace_event_field *field = NULL; | |
2471 | struct hist_field *hist_field = NULL; | |
2472 | unsigned long buckets = 0; | |
2473 | int ret = 0; | |
2474 | ||
2475 | if (isdigit(str[0])) { | |
2476 | hist_field = parse_const(hist_data, str, var_name, flags); | |
2477 | if (!hist_field) { | |
2478 | ret = -EINVAL; | |
2479 | goto out; | |
2480 | } | |
2481 | return hist_field; | |
2482 | } | |
2483 | ||
2484 | s = strchr(str, '.'); | |
2485 | if (s) { | |
2486 | s = strchr(++s, '.'); | |
2487 | if (s) { | |
2488 | ref_system = strsep(&str, "."); | |
2489 | if (!str) { | |
2490 | ret = -EINVAL; | |
2491 | goto out; | |
2492 | } | |
2493 | ref_event = strsep(&str, "."); | |
2494 | if (!str) { | |
2495 | ret = -EINVAL; | |
2496 | goto out; | |
2497 | } | |
2498 | ref_var = str; | |
2499 | } | |
2500 | } | |
2501 | ||
2502 | s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); | |
2503 | if (!s) { | |
2504 | hist_field = parse_var_ref(hist_data, ref_system, | |
2505 | ref_event, ref_var); | |
2506 | if (hist_field) { | |
2507 | if (var_name) { | |
2508 | hist_field = create_alias(hist_data, hist_field, var_name); | |
2509 | if (!hist_field) { | |
2510 | ret = -ENOMEM; | |
2511 | goto out; | |
2512 | } | |
2513 | } | |
2514 | return hist_field; | |
2515 | } | |
2516 | } else | |
2517 | str = s; | |
2518 | ||
2519 | field = parse_field(hist_data, file, str, flags, &buckets); | |
2520 | if (IS_ERR(field)) { | |
2521 | ret = PTR_ERR(field); | |
2522 | goto out; | |
2523 | } | |
2524 | ||
2525 | hist_field = create_hist_field(hist_data, field, *flags, var_name); | |
2526 | if (!hist_field) { | |
2527 | ret = -ENOMEM; | |
2528 | goto out; | |
2529 | } | |
2530 | hist_field->buckets = buckets; | |
2531 | ||
2532 | return hist_field; | |
2533 | out: | |
2534 | return ERR_PTR(ret); | |
2535 | } | |
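| ||
| /* | |
| * The atom forms accepted above, using made-up names for illustration: | |
| * | |
| * "100" - numeric constant (parse_const()) | |
| * "len.hex" - event field with an optional modifier | |
| * "$lat" - variable reference, local or resolved on another event | |
| * "sys.event.$ts0" - fully qualified variable reference | |
| */ | |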
2536 | ||
2537 | static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, | |
2538 | struct trace_event_file *file, | |
2539 | char *str, unsigned long flags, | |
2540 | char *var_name, unsigned int *n_subexprs); | |
2541 | ||
2542 | static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, | |
2543 | struct trace_event_file *file, | |
2544 | char *str, unsigned long flags, | |
2545 | char *var_name, unsigned int *n_subexprs) | |
2546 | { | |
2547 | struct hist_field *operand1, *expr = NULL; | |
2548 | unsigned long operand_flags; | |
2549 | int ret = 0; | |
2550 | char *s; | |
2551 | ||
2552 | /* Unary minus operator, increment n_subexprs */ | |
2553 | ++*n_subexprs; | |
2554 | ||
2555 | /* we support only -(xxx), i.e. explicit parens are required */ | |
2556 | ||
2557 | if (*n_subexprs > 3) { | |
2558 | hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); | |
2559 | ret = -EINVAL; | |
2560 | goto free; | |
2561 | } | |
2562 | ||
2563 | str++; /* skip leading '-' */ | |
2564 | ||
2565 | s = strchr(str, '('); | |
2566 | if (s) | |
2567 | str++; | |
2568 | else { | |
2569 | ret = -EINVAL; | |
2570 | goto free; | |
2571 | } | |
2572 | ||
2573 | s = strrchr(str, ')'); | |
2574 | if (s) { | |
2575 | /* unary minus not supported in sub-expressions */ | |
2576 | if (*(s+1) != '\0') { | |
2577 | hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR, | |
2578 | errpos(str)); | |
2579 | ret = -EINVAL; | |
2580 | goto free; | |
2581 | } | |
2582 | *s = '\0'; | |
2583 | } | |
2584 | else { | |
2585 | ret = -EINVAL; /* no closing ')' */ | |
2586 | goto free; | |
2587 | } | |
2588 | ||
2589 | flags |= HIST_FIELD_FL_EXPR; | |
2590 | expr = create_hist_field(hist_data, NULL, flags, var_name); | |
2591 | if (!expr) { | |
2592 | ret = -ENOMEM; | |
2593 | goto free; | |
2594 | } | |
2595 | ||
2596 | operand_flags = 0; | |
2597 | operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); | |
2598 | if (IS_ERR(operand1)) { | |
2599 | ret = PTR_ERR(operand1); | |
2600 | goto free; | |
2601 | } | |
2602 | if (operand1->flags & HIST_FIELD_FL_STRING) { | |
2603 | /* A string type cannot be the operand of a unary operator. */ | |
2604 | hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); | |
2605 | destroy_hist_field(operand1, 0); | |
2606 | ret = -EINVAL; | |
2607 | goto free; | |
2608 | } | |
2609 | ||
2610 | expr->flags |= operand1->flags & | |
2611 | (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); | |
2612 | expr->fn_num = HIST_FIELD_FN_UMINUS; | |
2613 | expr->operands[0] = operand1; | |
2614 | expr->size = operand1->size; | |
2615 | expr->is_signed = operand1->is_signed; | |
2616 | expr->operator = FIELD_OP_UNARY_MINUS; | |
2617 | expr->name = expr_str(expr, 0); | |
2618 | expr->type = kstrdup_const(operand1->type, GFP_KERNEL); | |
2619 | if (!expr->type) { | |
2620 | ret = -ENOMEM; | |
2621 | goto free; | |
2622 | } | |
2623 | ||
2624 | return expr; | |
2625 | free: | |
2626 | destroy_hist_field(expr, 0); | |
2627 | return ERR_PTR(ret); | |
2628 | } | |
2629 | ||
2630 | /* | |
2631 | * If the operands are var refs, return pointers to the | |
2632 | * variable(s) referenced in *var1 and *var2; otherwise leave them NULL. | |
2633 | */ | |
2634 | static int check_expr_operands(struct trace_array *tr, | |
2635 | struct hist_field *operand1, | |
2636 | struct hist_field *operand2, | |
2637 | struct hist_field **var1, | |
2638 | struct hist_field **var2) | |
2639 | { | |
2640 | unsigned long operand1_flags = operand1->flags; | |
2641 | unsigned long operand2_flags = operand2->flags; | |
2642 | ||
2643 | if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || | |
2644 | (operand1_flags & HIST_FIELD_FL_ALIAS)) { | |
2645 | struct hist_field *var; | |
2646 | ||
2647 | var = find_var_field(operand1->var.hist_data, operand1->name); | |
2648 | if (!var) | |
2649 | return -EINVAL; | |
2650 | operand1_flags = var->flags; | |
2651 | *var1 = var; | |
2652 | } | |
2653 | ||
2654 | if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || | |
2655 | (operand2_flags & HIST_FIELD_FL_ALIAS)) { | |
2656 | struct hist_field *var; | |
2657 | ||
2658 | var = find_var_field(operand2->var.hist_data, operand2->name); | |
2659 | if (!var) | |
2660 | return -EINVAL; | |
2661 | operand2_flags = var->flags; | |
2662 | *var2 = var; | |
2663 | } | |
2664 | ||
2665 | if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != | |
2666 | (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { | |
2667 | hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); | |
2668 | return -EINVAL; | |
2669 | } | |
2670 | ||
2671 | return 0; | |
2672 | } | |
2673 | ||
2674 | static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, | |
2675 | struct trace_event_file *file, | |
2676 | char *str, unsigned long flags, | |
2677 | char *var_name, unsigned int *n_subexprs) | |
2678 | { | |
2679 | struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; | |
2680 | struct hist_field *var1 = NULL, *var2 = NULL; | |
2681 | unsigned long operand_flags, operand2_flags; | |
2682 | int field_op, ret = -EINVAL; | |
2683 | char *sep, *operand1_str; | |
2684 | enum hist_field_fn op_fn; | |
2685 | bool combine_consts; | |
2686 | ||
2687 | if (*n_subexprs > 3) { | |
2688 | hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); | |
2689 | return ERR_PTR(-EINVAL); | |
2690 | } | |
2691 | ||
2692 | field_op = contains_operator(str, &sep); | |
2693 | ||
2694 | if (field_op == FIELD_OP_NONE) | |
2695 | return parse_atom(hist_data, file, str, &flags, var_name); | |
2696 | ||
2697 | if (field_op == FIELD_OP_UNARY_MINUS) | |
2698 | return parse_unary(hist_data, file, str, flags, var_name, n_subexprs); | |
2699 | ||
2700 | /* Binary operator found, increment n_subexprs */ | |
2701 | ++*n_subexprs; | |
2702 | ||
2703 | /* Split the expression string at the root operator */ | |
2704 | if (!sep) | |
2705 | return ERR_PTR(-EINVAL); | |
2706 | ||
2707 | *sep = '\0'; | |
2708 | operand1_str = str; | |
2709 | str = sep+1; | |
2710 | ||
2711 | /* Binary operator requires both operands */ | |
2712 | if (*operand1_str == '\0' || *str == '\0') | |
2713 | return ERR_PTR(-EINVAL); | |
2714 | ||
2715 | operand_flags = 0; | |
2716 | ||
2717 | /* LHS of string is an expression e.g. a+b in a+b+c */ | |
2718 | operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs); | |
2719 | if (IS_ERR(operand1)) | |
2720 | return ERR_CAST(operand1); | |
2721 | ||
2722 | if (operand1->flags & HIST_FIELD_FL_STRING) { | |
2723 | hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str)); | |
2724 | ret = -EINVAL; | |
2725 | goto free_op1; | |
2726 | } | |
2727 | ||
2728 | /* RHS of string is another expression e.g. c in a+b+c */ | |
2729 | operand_flags = 0; | |
2730 | operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); | |
2731 | if (IS_ERR(operand2)) { | |
2732 | ret = PTR_ERR(operand2); | |
2733 | goto free_op1; | |
2734 | } | |
2735 | if (operand2->flags & HIST_FIELD_FL_STRING) { | |
2736 | hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); | |
2737 | ret = -EINVAL; | |
2738 | goto free_operands; | |
2739 | } | |
2740 | ||
2741 | switch (field_op) { | |
2742 | case FIELD_OP_MINUS: | |
2743 | op_fn = HIST_FIELD_FN_MINUS; | |
2744 | break; | |
2745 | case FIELD_OP_PLUS: | |
2746 | op_fn = HIST_FIELD_FN_PLUS; | |
2747 | break; | |
2748 | case FIELD_OP_DIV: | |
2749 | op_fn = HIST_FIELD_FN_DIV; | |
2750 | break; | |
2751 | case FIELD_OP_MULT: | |
2752 | op_fn = HIST_FIELD_FN_MULT; | |
2753 | break; | |
2754 | default: | |
2755 | ret = -EINVAL; | |
2756 | goto free_operands; | |
2757 | } | |
2758 | ||
2759 | ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2); | |
2760 | if (ret) | |
2761 | goto free_operands; | |
2762 | ||
2763 | operand_flags = var1 ? var1->flags : operand1->flags; | |
2764 | operand2_flags = var2 ? var2->flags : operand2->flags; | |
2765 | ||
2766 | /* | |
2767 | * If both operands are constant, the expression can be | |
2768 | * collapsed to a single constant. | |
2769 | */ | |
2770 | combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST; | |
2771 | ||
2772 | flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR; | |
2773 | ||
2774 | flags |= operand1->flags & | |
2775 | (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); | |
2776 | ||
2777 | expr = create_hist_field(hist_data, NULL, flags, var_name); | |
2778 | if (!expr) { | |
2779 | ret = -ENOMEM; | |
2780 | goto free_operands; | |
2781 | } | |
2782 | ||
2783 | operand1->read_once = true; | |
2784 | operand2->read_once = true; | |
2785 | ||
2786 | /* The operands are now owned and free'd by 'expr' */ | |
2787 | expr->operands[0] = operand1; | |
2788 | expr->operands[1] = operand2; | |
2789 | ||
2790 | if (field_op == FIELD_OP_DIV && | |
2791 | operand2_flags & HIST_FIELD_FL_CONST) { | |
2792 | u64 divisor = var2 ? var2->constant : operand2->constant; | |
2793 | ||
2794 | if (!divisor) { | |
2795 | hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str)); | |
2796 | ret = -EDOM; | |
2797 | goto free_expr; | |
2798 | } | |
2799 | ||
2800 | /* | |
2801 | * Copy the divisor here so we don't have to look it up | |
2802 | * later if this is a var ref | |
2803 | */ | |
2804 | operand2->constant = divisor; | |
2805 | op_fn = hist_field_get_div_fn(operand2); | |
2806 | } | |
2807 | ||
2808 | expr->fn_num = op_fn; | |
2809 | ||
2810 | if (combine_consts) { | |
2811 | if (var1) | |
2812 | expr->operands[0] = var1; | |
2813 | if (var2) | |
2814 | expr->operands[1] = var2; | |
2815 | ||
2816 | expr->constant = hist_fn_call(expr, NULL, NULL, NULL, NULL); | |
2817 | expr->fn_num = HIST_FIELD_FN_CONST; | |
2818 | ||
2819 | expr->operands[0] = NULL; | |
2820 | expr->operands[1] = NULL; | |
2821 | ||
2822 | /* | |
2823 | * var refs won't be destroyed immediately | |
2824 | * See: destroy_hist_field() | |
2825 | */ | |
2826 | destroy_hist_field(operand2, 0); | |
2827 | destroy_hist_field(operand1, 0); | |
2828 | ||
2829 | expr->name = expr_str(expr, 0); | |
2830 | } else { | |
2831 | /* The operand sizes should be the same, so just pick one */ | |
2832 | expr->size = operand1->size; | |
2833 | expr->is_signed = operand1->is_signed; | |
2834 | ||
2835 | expr->operator = field_op; | |
2836 | expr->type = kstrdup_const(operand1->type, GFP_KERNEL); | |
2837 | if (!expr->type) { | |
2838 | ret = -ENOMEM; | |
2839 | goto free_expr; | |
2840 | } | |
2841 | ||
2842 | expr->name = expr_str(expr, 0); | |
2843 | } | |
2844 | ||
2845 | return expr; | |
2846 | ||
2847 | free_operands: | |
2848 | destroy_hist_field(operand2, 0); | |
2849 | free_op1: | |
2850 | destroy_hist_field(operand1, 0); | |
2851 | return ERR_PTR(ret); | |
2852 | ||
2853 | free_expr: | |
2854 | destroy_hist_field(expr, 0); | |
2855 | return ERR_PTR(ret); | |
2856 | } | |
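| ||
| /* | |
| * Two properties of the parser above, with placeholder operands: | |
| * | |
| * - "2*1000000" is folded at parse time into a single constant | |
| *   (combine_consts), so no per-event arithmetic is done. | |
| * - "$delta/1000": division by a nonzero constant is resolved to a | |
| *   specialized division function via hist_field_get_div_fn(); a zero | |
| *   constant divisor is rejected with -EDOM. | |
| */ | |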
2857 | ||
2858 | static char *find_trigger_filter(struct hist_trigger_data *hist_data, | |
2859 | struct trace_event_file *file) | |
2860 | { | |
2861 | struct event_trigger_data *test; | |
2862 | ||
2863 | lockdep_assert_held(&event_mutex); | |
2864 | ||
2865 | list_for_each_entry(test, &file->triggers, list) { | |
2866 | if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
2867 | if (test->private_data == hist_data) | |
2868 | return test->filter_str; | |
2869 | } | |
2870 | } | |
2871 | ||
2872 | return NULL; | |
2873 | } | |
2874 | ||
2875 | static struct event_command trigger_hist_cmd; | |
2876 | static int event_hist_trigger_parse(struct event_command *cmd_ops, | |
2877 | struct trace_event_file *file, | |
2878 | char *glob, char *cmd, | |
2879 | char *param_and_filter); | |
2880 | ||
2881 | static bool compatible_keys(struct hist_trigger_data *target_hist_data, | |
2882 | struct hist_trigger_data *hist_data, | |
2883 | unsigned int n_keys) | |
2884 | { | |
2885 | struct hist_field *target_hist_field, *hist_field; | |
2886 | unsigned int n, i, j; | |
2887 | ||
2888 | if (hist_data->n_fields - hist_data->n_vals != n_keys) | |
2889 | return false; | |
2890 | ||
2891 | i = hist_data->n_vals; | |
2892 | j = target_hist_data->n_vals; | |
2893 | ||
2894 | for (n = 0; n < n_keys; n++) { | |
2895 | hist_field = hist_data->fields[i + n]; | |
2896 | target_hist_field = target_hist_data->fields[j + n]; | |
2897 | ||
2898 | if (strcmp(hist_field->type, target_hist_field->type) != 0) | |
2899 | return false; | |
2900 | if (hist_field->size != target_hist_field->size) | |
2901 | return false; | |
2902 | if (hist_field->is_signed != target_hist_field->is_signed) | |
2903 | return false; | |
2904 | } | |
2905 | ||
2906 | return true; | |
2907 | } | |
2908 | ||
2909 | static struct hist_trigger_data * | |
2910 | find_compatible_hist(struct hist_trigger_data *target_hist_data, | |
2911 | struct trace_event_file *file) | |
2912 | { | |
2913 | struct hist_trigger_data *hist_data; | |
2914 | struct event_trigger_data *test; | |
2915 | unsigned int n_keys; | |
2916 | ||
2917 | lockdep_assert_held(&event_mutex); | |
2918 | ||
2919 | n_keys = target_hist_data->n_fields - target_hist_data->n_vals; | |
2920 | ||
2921 | list_for_each_entry(test, &file->triggers, list) { | |
2922 | if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
2923 | hist_data = test->private_data; | |
2924 | ||
2925 | if (compatible_keys(target_hist_data, hist_data, n_keys)) | |
2926 | return hist_data; | |
2927 | } | |
2928 | } | |
2929 | ||
2930 | return NULL; | |
2931 | } | |
2932 | ||
2933 | static struct trace_event_file *event_file(struct trace_array *tr, | |
2934 | char *system, char *event_name) | |
2935 | { | |
2936 | struct trace_event_file *file; | |
2937 | ||
2938 | file = __find_event_file(tr, system, event_name); | |
2939 | if (!file) | |
2940 | return ERR_PTR(-EINVAL); | |
2941 | ||
2942 | return file; | |
2943 | } | |
2944 | ||
2945 | static struct hist_field * | |
2946 | find_synthetic_field_var(struct hist_trigger_data *target_hist_data, | |
2947 | char *system, char *event_name, char *field_name) | |
2948 | { | |
2949 | struct hist_field *event_var; | |
2950 | char *synthetic_name; | |
2951 | ||
2952 | synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); | |
2953 | if (!synthetic_name) | |
2954 | return ERR_PTR(-ENOMEM); | |
2955 | ||
2956 | strcpy(synthetic_name, "synthetic_"); | |
2957 | strcat(synthetic_name, field_name); | |
2958 | ||
2959 | event_var = find_event_var(target_hist_data, system, event_name, synthetic_name); | |
2960 | ||
2961 | kfree(synthetic_name); | |
2962 | ||
2963 | return event_var; | |
2964 | } | |
2965 | ||
2966 | /** | |
2967 | * create_field_var_hist - Automatically create a histogram and var for a field | |
2968 | * @target_hist_data: The target hist trigger | |
2969 | * @subsys_name: Optional subsystem name | |
2970 | * @event_name: Optional event name | |
2971 | * @field_name: The name of the field (and the resulting variable) | |
2972 | * | |
2973 | * Hist trigger actions fetch data from variables, not directly from | |
2974 | * events. However, for convenience, users are allowed to directly | |
2975 | * specify an event field in an action, which will be automatically | |
2976 | * converted into a variable on their behalf. | |
2977 | * | |
2978 | * If a user specifies a field on an event that isn't the event the | |
2979 | * histogram currently being defined (the target event histogram), the | |
2980 | * only way that can be accomplished is if a new hist trigger is | |
2981 | * created and the field variable defined on that. | |
2982 | * | |
2983 | * This function creates a new histogram compatible with the target | |
2984 | * event (meaning a histogram with the same key as the target | |
2985 | * histogram), and creates a variable for the specified field, but | |
2986 | * with 'synthetic_' prepended to the variable name in order to avoid | |
2987 | * collision with normal field variables. | |
2988 | * | |
2989 | * Return: The variable created for the field. | |
2990 | */ | |
2991 | static struct hist_field * | |
2992 | create_field_var_hist(struct hist_trigger_data *target_hist_data, | |
2993 | char *subsys_name, char *event_name, char *field_name) | |
2994 | { | |
2995 | struct trace_array *tr = target_hist_data->event_file->tr; | |
2996 | struct hist_trigger_data *hist_data; | |
2997 | unsigned int i, n, first = true; | |
2998 | struct field_var_hist *var_hist; | |
2999 | struct trace_event_file *file; | |
3000 | struct hist_field *key_field; | |
3001 | struct hist_field *event_var; | |
3002 | char *saved_filter; | |
3003 | char *cmd; | |
3004 | int ret; | |
3005 | ||
3006 | if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { | |
3007 | hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); | |
3008 | return ERR_PTR(-EINVAL); | |
3009 | } | |
3010 | ||
3011 | file = event_file(tr, subsys_name, event_name); | |
3012 | ||
3013 | if (IS_ERR(file)) { | |
3014 | hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); | |
3015 | ret = PTR_ERR(file); | |
3016 | return ERR_PTR(ret); | |
3017 | } | |
3018 | ||
3019 | /* | |
3020 | * Look for a histogram compatible with target. We'll use the | |
3021 | * found histogram specification to create a new matching | |
3022 | * histogram with our variable on it. target_hist_data is not | |
3023 | * yet a registered histogram so we can't use that. | |
3024 | */ | |
3025 | hist_data = find_compatible_hist(target_hist_data, file); | |
3026 | if (!hist_data) { | |
3027 | hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); | |
3028 | return ERR_PTR(-EINVAL); | |
3029 | } | |
3030 | ||
3031 | /* See if a synthetic field variable has already been created */ | |
3032 | event_var = find_synthetic_field_var(target_hist_data, subsys_name, | |
3033 | event_name, field_name); | |
3034 | if (!IS_ERR_OR_NULL(event_var)) | |
3035 | return event_var; | |
3036 | ||
3037 | var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); | |
3038 | if (!var_hist) | |
3039 | return ERR_PTR(-ENOMEM); | |
3040 | ||
3041 | cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); | |
3042 | if (!cmd) { | |
3043 | kfree(var_hist); | |
3044 | return ERR_PTR(-ENOMEM); | |
3045 | } | |
3046 | ||
3047 | /* Use the same keys as the compatible histogram */ | |
3048 | strcat(cmd, "keys="); | |
3049 | ||
3050 | for_each_hist_key_field(i, hist_data) { | |
3051 | key_field = hist_data->fields[i]; | |
3052 | if (!first) | |
3053 | strcat(cmd, ","); | |
3054 | strcat(cmd, key_field->field->name); | |
3055 | first = false; | |
3056 | } | |
3057 | ||
3058 | /* Create the synthetic field variable specification */ | |
3059 | strcat(cmd, ":synthetic_"); | |
3060 | strcat(cmd, field_name); | |
3061 | strcat(cmd, "="); | |
3062 | strcat(cmd, field_name); | |
3063 | ||
3064 | /* Use the same filter as the compatible histogram */ | |
3065 | saved_filter = find_trigger_filter(hist_data, file); | |
3066 | if (saved_filter) { | |
3067 | strcat(cmd, " if "); | |
3068 | strcat(cmd, saved_filter); | |
3069 | } | |
3070 | ||
3071 | var_hist->cmd = kstrdup(cmd, GFP_KERNEL); | |
3072 | if (!var_hist->cmd) { | |
3073 | kfree(cmd); | |
3074 | kfree(var_hist); | |
3075 | return ERR_PTR(-ENOMEM); | |
3076 | } | |
3077 | ||
3078 | /* Save the compatible histogram information */ | |
3079 | var_hist->hist_data = hist_data; | |
3080 | ||
3081 | /* Create the new histogram with our variable */ | |
3082 | ret = event_hist_trigger_parse(&trigger_hist_cmd, file, | |
3083 | "", "hist", cmd); | |
3084 | if (ret) { | |
3085 | kfree(cmd); | |
3086 | kfree(var_hist->cmd); | |
3087 | kfree(var_hist); | |
3088 | hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); | |
3089 | return ERR_PTR(ret); | |
3090 | } | |
3091 | ||
3092 | kfree(cmd); | |
3093 | ||
3094 | /* If we can't find the variable, something went wrong */ | |
3095 | event_var = find_synthetic_field_var(target_hist_data, subsys_name, | |
3096 | event_name, field_name); | |
3097 | if (IS_ERR_OR_NULL(event_var)) { | |
3098 | kfree(var_hist->cmd); | |
3099 | kfree(var_hist); | |
3100 | hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); | |
3101 | return ERR_PTR(-EINVAL); | |
3102 | } | |
3103 | ||
3104 | n = target_hist_data->n_field_var_hists; | |
3105 | target_hist_data->field_var_hists[n] = var_hist; | |
3106 | target_hist_data->n_field_var_hists++; | |
3107 | ||
3108 | return event_var; | |
3109 | } | |
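| ||
| /* | |
| * As an illustration (event, field and filter names are hypothetical): | |
| * for a target referencing foo.bar.prio, with a compatible histogram | |
| * keyed on pid and filtered on prio < 100, the command assembled above | |
| * is roughly: | |
| * | |
| * keys=pid:synthetic_prio=prio if prio < 100 | |
| * | |
| * which is then registered via event_hist_trigger_parse(). | |
| */ | |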
3110 | ||
3111 | static struct hist_field * | |
3112 | find_target_event_var(struct hist_trigger_data *hist_data, | |
3113 | char *subsys_name, char *event_name, char *var_name) | |
3114 | { | |
3115 | struct trace_event_file *file = hist_data->event_file; | |
3116 | struct hist_field *hist_field = NULL; | |
3117 | ||
3118 | if (subsys_name) { | |
3119 | struct trace_event_call *call; | |
3120 | ||
3121 | if (!event_name) | |
3122 | return NULL; | |
3123 | ||
3124 | call = file->event_call; | |
3125 | ||
3126 | if (strcmp(subsys_name, call->class->system) != 0) | |
3127 | return NULL; | |
3128 | ||
3129 | if (strcmp(event_name, trace_event_name(call)) != 0) | |
3130 | return NULL; | |
3131 | } | |
3132 | ||
3133 | hist_field = find_var_field(hist_data, var_name); | |
3134 | ||
3135 | return hist_field; | |
3136 | } | |
3137 | ||
3138 | static inline void __update_field_vars(struct tracing_map_elt *elt, | |
3139 | struct trace_buffer *buffer, | |
3140 | struct ring_buffer_event *rbe, | |
3141 | void *rec, | |
3142 | struct field_var **field_vars, | |
3143 | unsigned int n_field_vars, | |
3144 | unsigned int field_var_str_start) | |
3145 | { | |
3146 | struct hist_elt_data *elt_data = elt->private_data; | |
3147 | unsigned int i, j, var_idx; | |
3148 | u64 var_val; | |
3149 | ||
3150 | /* Make sure stacktrace can fit in the string variable length */ | |
3151 | BUILD_BUG_ON((HIST_STACKTRACE_DEPTH + 1) * sizeof(long) >= STR_VAR_LEN_MAX); | |
3152 | ||
3153 | for (i = 0, j = field_var_str_start; i < n_field_vars; i++) { | |
3154 | struct field_var *field_var = field_vars[i]; | |
3155 | struct hist_field *var = field_var->var; | |
3156 | struct hist_field *val = field_var->val; | |
3157 | ||
3158 | var_val = hist_fn_call(val, elt, buffer, rbe, rec); | |
3159 | var_idx = var->var.idx; | |
3160 | ||
3161 | if (val->flags & (HIST_FIELD_FL_STRING | | |
3162 | HIST_FIELD_FL_STACKTRACE)) { | |
3163 | char *str = elt_data->field_var_str[j++]; | |
3164 | char *val_str = (char *)(uintptr_t)var_val; | |
3165 | unsigned int size; | |
3166 | ||
3167 | if (val->flags & HIST_FIELD_FL_STRING) { | |
3168 | size = min(val->size, STR_VAR_LEN_MAX); | |
3169 | strscpy(str, val_str, size); | |
3170 | } else { | |
3171 | char *stack_start = str + sizeof(unsigned long); | |
3172 | int e; | |
3173 | ||
3174 | e = stack_trace_save((void *)stack_start, | |
3175 | HIST_STACKTRACE_DEPTH, | |
3176 | HIST_STACKTRACE_SKIP); | |
3177 | if (e < HIST_STACKTRACE_DEPTH - 1) | |
3178 | ((unsigned long *)stack_start)[e] = 0; | |
3179 | *((unsigned long *)str) = e; | |
3180 | } | |
3181 | var_val = (u64)(uintptr_t)str; | |
3182 | } | |
3183 | tracing_map_set_var(elt, var_idx, var_val); | |
3184 | } | |
3185 | } | |
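| ||
| /* | |
| * For stacktrace variables, the string storage above is length-prefixed: | |
| * the first unsigned long holds the number of saved entries, followed by | |
| * the entries themselves (written starting at stack_start). | |
| */ | |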
3186 | ||
3187 | static void update_field_vars(struct hist_trigger_data *hist_data, | |
3188 | struct tracing_map_elt *elt, | |
3189 | struct trace_buffer *buffer, | |
3190 | struct ring_buffer_event *rbe, | |
3191 | void *rec) | |
3192 | { | |
3193 | __update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars, | |
3194 | hist_data->n_field_vars, 0); | |
3195 | } | |
3196 | ||
3197 | static void save_track_data_vars(struct hist_trigger_data *hist_data, | |
3198 | struct tracing_map_elt *elt, | |
3199 | struct trace_buffer *buffer, void *rec, | |
3200 | struct ring_buffer_event *rbe, void *key, | |
3201 | struct action_data *data, u64 *var_ref_vals) | |
3202 | { | |
3203 | __update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars, | |
3204 | hist_data->n_save_vars, hist_data->n_field_var_str); | |
3205 | } | |
3206 | ||
3207 | static struct hist_field *create_var(struct hist_trigger_data *hist_data, | |
3208 | struct trace_event_file *file, | |
3209 | char *name, int size, const char *type) | |
3210 | { | |
3211 | struct hist_field *var; | |
3212 | int idx; | |
3213 | ||
3214 | if (find_var(hist_data, file, name) && !hist_data->remove) { | |
3215 | var = ERR_PTR(-EINVAL); | |
3216 | goto out; | |
3217 | } | |
3218 | ||
3219 | var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); | |
3220 | if (!var) { | |
3221 | var = ERR_PTR(-ENOMEM); | |
3222 | goto out; | |
3223 | } | |
3224 | ||
3225 | idx = tracing_map_add_var(hist_data->map); | |
3226 | if (idx < 0) { | |
3227 | kfree(var); | |
3228 | var = ERR_PTR(-EINVAL); | |
3229 | goto out; | |
3230 | } | |
3231 | ||
3232 | var->ref = 1; | |
3233 | var->flags = HIST_FIELD_FL_VAR; | |
3234 | var->var.idx = idx; | |
3235 | var->var.hist_data = var->hist_data = hist_data; | |
3236 | var->size = size; | |
3237 | var->var.name = kstrdup(name, GFP_KERNEL); | |
3238 | var->type = kstrdup_const(type, GFP_KERNEL); | |
3239 | if (!var->var.name || !var->type) { | |
3240 | kfree_const(var->type); | |
3241 | kfree(var->var.name); | |
3242 | kfree(var); | |
3243 | var = ERR_PTR(-ENOMEM); | |
3244 | } | |
3245 | out: | |
3246 | return var; | |
3247 | } | |
3248 | ||
3249 | static struct field_var *create_field_var(struct hist_trigger_data *hist_data, | |
3250 | struct trace_event_file *file, | |
3251 | char *field_name) | |
3252 | { | |
3253 | struct hist_field *val = NULL, *var = NULL; | |
3254 | unsigned long flags = HIST_FIELD_FL_VAR; | |
3255 | struct trace_array *tr = file->tr; | |
3256 | struct field_var *field_var; | |
3257 | int ret = 0; | |
3258 | ||
3259 | if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) { | |
3260 | hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); | |
3261 | ret = -EINVAL; | |
3262 | goto err; | |
3263 | } | |
3264 | ||
3265 | val = parse_atom(hist_data, file, field_name, &flags, NULL); | |
3266 | if (IS_ERR(val)) { | |
3267 | hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name)); | |
3268 | ret = PTR_ERR(val); | |
3269 | goto err; | |
3270 | } | |
3271 | ||
3272 | var = create_var(hist_data, file, field_name, val->size, val->type); | |
3273 | if (IS_ERR(var)) { | |
3274 | hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); | |
3275 | kfree(val); | |
3276 | ret = PTR_ERR(var); | |
3277 | goto err; | |
3278 | } | |
3279 | ||
3280 | field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); | |
3281 | if (!field_var) { | |
3282 | kfree(val); | |
3283 | kfree(var); | |
3284 | ret = -ENOMEM; | |
3285 | goto err; | |
3286 | } | |
3287 | ||
3288 | field_var->var = var; | |
3289 | field_var->val = val; | |
3290 | out: | |
3291 | return field_var; | |
3292 | err: | |
3293 | field_var = ERR_PTR(ret); | |
3294 | goto out; | |
3295 | } | |
3296 | ||
3297 | /** | |
3298 | * create_target_field_var - Automatically create a variable for a field | |
3299 | * @target_hist_data: The target hist trigger | |
3300 | * @subsys_name: Optional subsystem name | |
3301 | * @event_name: Optional event name | |
3302 | * @var_name: The name of the field (and the resulting variable) | |
3303 | * | |
3304 | * Hist trigger actions fetch data from variables, not directly from | |
3305 | * events. However, for convenience, users are allowed to directly | |
3306 | * specify an event field in an action, which will be automatically | |
3307 | * converted into a variable on their behalf. | |
3308 | * | |
3309 | * This function creates a field variable with the name var_name on | |
3310 | * the hist trigger currently being defined on the target event. If | |
3311 | * subsys_name and event_name are specified, this function simply | |
3312 | * verifies that they do in fact match the target event subsystem and | |
3313 | * event name. | |
3314 | * | |
3315 | * Return: The variable created for the field. | |
3316 | */ | |
3317 | static struct field_var * | |
3318 | create_target_field_var(struct hist_trigger_data *target_hist_data, | |
3319 | char *subsys_name, char *event_name, char *var_name) | |
3320 | { | |
3321 | struct trace_event_file *file = target_hist_data->event_file; | |
3322 | ||
3323 | if (subsys_name) { | |
3324 | struct trace_event_call *call; | |
3325 | ||
3326 | if (!event_name) | |
3327 | return NULL; | |
3328 | ||
3329 | call = file->event_call; | |
3330 | ||
3331 | if (strcmp(subsys_name, call->class->system) != 0) | |
3332 | return NULL; | |
3333 | ||
3334 | if (strcmp(event_name, trace_event_name(call)) != 0) | |
3335 | return NULL; | |
3336 | } | |
3337 | ||
3338 | return create_field_var(target_hist_data, file, var_name); | |
3339 | } | |
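/*
 * Illustrative usage (not part of the original source): the auto-conversion
 * above is what lets a save() action name raw event fields.  Assuming $ts0
 * was defined by a matching sched_waking trigger, the documented example
 *
 *   echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
 *     onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' \
 *     >> events/sched/sched_switch/trigger
 *
 * passes plain fields (next_comm, prev_pid, ...) to save(); each one is
 * turned into a hidden field variable on sched_switch by
 * create_target_field_var()/create_field_var() so the handler can read its
 * value from the tracing map later.
 */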
3340 | ||
3341 | static bool check_track_val_max(u64 track_val, u64 var_val) | |
3342 | { | |
3343 | if (var_val <= track_val) | |
3344 | return false; | |
3345 | ||
3346 | return true; | |
3347 | } | |
3348 | ||
3349 | static bool check_track_val_changed(u64 track_val, u64 var_val) | |
3350 | { | |
3351 | if (var_val == track_val) | |
3352 | return false; | |
3353 | ||
3354 | return true; | |
3355 | } | |
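/*
 * Note (added for clarity): these two predicates are what distinguish the
 * onmax() and onchange() handlers.  Sketch, assuming a $lat variable:
 *
 *   onmax($lat).save(...)    - acts only when lat exceeds the saved maximum
 *                              (check_track_val_max)
 *   onchange($lat).save(...) - acts whenever lat differs from the last value
 *                              seen (check_track_val_changed)
 */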
3356 | ||
3357 | static u64 get_track_val(struct hist_trigger_data *hist_data, | |
3358 | struct tracing_map_elt *elt, | |
3359 | struct action_data *data) | |
3360 | { | |
3361 | unsigned int track_var_idx = data->track_data.track_var->var.idx; | |
3362 | u64 track_val; | |
3363 | ||
3364 | track_val = tracing_map_read_var(elt, track_var_idx); | |
3365 | ||
3366 | return track_val; | |
3367 | } | |
3368 | ||
3369 | static void save_track_val(struct hist_trigger_data *hist_data, | |
3370 | struct tracing_map_elt *elt, | |
3371 | struct action_data *data, u64 var_val) | |
3372 | { | |
3373 | unsigned int track_var_idx = data->track_data.track_var->var.idx; | |
3374 | ||
3375 | tracing_map_set_var(elt, track_var_idx, var_val); | |
3376 | } | |
3377 | ||
3378 | static void save_track_data(struct hist_trigger_data *hist_data, | |
3379 | struct tracing_map_elt *elt, | |
3380 | struct trace_buffer *buffer, void *rec, | |
3381 | struct ring_buffer_event *rbe, void *key, | |
3382 | struct action_data *data, u64 *var_ref_vals) | |
3383 | { | |
3384 | if (data->track_data.save_data) | |
3385 | data->track_data.save_data(hist_data, elt, buffer, rec, rbe, | |
3386 | key, data, var_ref_vals); | |
3387 | } | |
3388 | ||
3389 | static bool check_track_val(struct tracing_map_elt *elt, | |
3390 | struct action_data *data, | |
3391 | u64 var_val) | |
3392 | { | |
3393 | struct hist_trigger_data *hist_data; | |
3394 | u64 track_val; | |
3395 | ||
3396 | hist_data = data->track_data.track_var->hist_data; | |
3397 | track_val = get_track_val(hist_data, elt, data); | |
3398 | ||
3399 | return data->track_data.check_val(track_val, var_val); | |
3400 | } | |
3401 | ||
3402 | #ifdef CONFIG_TRACER_SNAPSHOT | |
3403 | static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) | |
3404 | { | |
3405 | /* called with tr->max_lock held */ | |
3406 | struct track_data *track_data = tr->cond_snapshot->cond_data; | |
3407 | struct hist_elt_data *elt_data, *track_elt_data; | |
3408 | struct snapshot_context *context = cond_data; | |
3409 | struct action_data *action; | |
3410 | u64 track_val; | |
3411 | ||
3412 | if (!track_data) | |
3413 | return false; | |
3414 | ||
3415 | action = track_data->action_data; | |
3416 | ||
3417 | track_val = get_track_val(track_data->hist_data, context->elt, | |
3418 | track_data->action_data); | |
3419 | ||
3420 | if (!action->track_data.check_val(track_data->track_val, track_val)) | |
3421 | return false; | |
3422 | ||
3423 | track_data->track_val = track_val; | |
3424 | memcpy(track_data->key, context->key, track_data->key_len); | |
3425 | ||
3426 | elt_data = context->elt->private_data; | |
3427 | track_elt_data = track_data->elt.private_data; | |
3428 | if (elt_data->comm) | |
3429 | strscpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN); | |
3430 | ||
3431 | track_data->updated = true; | |
3432 | ||
3433 | return true; | |
3434 | } | |
3435 | ||
3436 | static void save_track_data_snapshot(struct hist_trigger_data *hist_data, | |
3437 | struct tracing_map_elt *elt, | |
3438 | struct trace_buffer *buffer, void *rec, | |
3439 | struct ring_buffer_event *rbe, void *key, | |
3440 | struct action_data *data, | |
3441 | u64 *var_ref_vals) | |
3442 | { | |
3443 | struct trace_event_file *file = hist_data->event_file; | |
3444 | struct snapshot_context context; | |
3445 | ||
3446 | context.elt = elt; | |
3447 | context.key = key; | |
3448 | ||
3449 | tracing_snapshot_cond(file->tr, &context); | |
3450 | } | |
3451 | ||
3452 | static void hist_trigger_print_key(struct seq_file *m, | |
3453 | struct hist_trigger_data *hist_data, | |
3454 | void *key, | |
3455 | struct tracing_map_elt *elt); | |
3456 | ||
3457 | static struct action_data *snapshot_action(struct hist_trigger_data *hist_data) | |
3458 | { | |
3459 | unsigned int i; | |
3460 | ||
3461 | if (!hist_data->n_actions) | |
3462 | return NULL; | |
3463 | ||
3464 | for (i = 0; i < hist_data->n_actions; i++) { | |
3465 | struct action_data *data = hist_data->actions[i]; | |
3466 | ||
3467 | if (data->action == ACTION_SNAPSHOT) | |
3468 | return data; | |
3469 | } | |
3470 | ||
3471 | return NULL; | |
3472 | } | |
3473 | ||
3474 | static void track_data_snapshot_print(struct seq_file *m, | |
3475 | struct hist_trigger_data *hist_data) | |
3476 | { | |
3477 | struct trace_event_file *file = hist_data->event_file; | |
3478 | struct track_data *track_data; | |
3479 | struct action_data *action; | |
3480 | ||
3481 | track_data = tracing_cond_snapshot_data(file->tr); | |
3482 | if (!track_data) | |
3483 | return; | |
3484 | ||
3485 | if (!track_data->updated) | |
3486 | return; | |
3487 | ||
3488 | action = snapshot_action(hist_data); | |
3489 | if (!action) | |
3490 | return; | |
3491 | ||
3492 | seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n"); | |
3493 | seq_printf(m, "\ttriggering value { %s(%s) }: %10llu", | |
3494 | action->handler == HANDLER_ONMAX ? "onmax" : "onchange", | |
3495 | action->track_data.var_str, track_data->track_val); | |
3496 | ||
3497 | seq_puts(m, "\ttriggered by event with key: "); | |
3498 | hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt); | |
3499 | seq_putc(m, '\n'); | |
3500 | } | |
3501 | #else | |
3502 | static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) | |
3503 | { | |
3504 | return false; | |
3505 | } | |
3506 | static void save_track_data_snapshot(struct hist_trigger_data *hist_data, | |
3507 | struct tracing_map_elt *elt, | |
3508 | struct trace_buffer *buffer, void *rec, | |
3509 | struct ring_buffer_event *rbe, void *key, | |
3510 | struct action_data *data, | |
3511 | u64 *var_ref_vals) {} | |
3512 | static void track_data_snapshot_print(struct seq_file *m, | |
3513 | struct hist_trigger_data *hist_data) {} | |
3514 | #endif /* CONFIG_TRACER_SNAPSHOT */ | |
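/*
 * Illustrative snapshot action (sketch, requires CONFIG_TRACER_SNAPSHOT),
 * per the documented hist trigger syntax:
 *
 *   echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
 *     onmax($wakeup_lat).snapshot()' >> events/sched/sched_switch/trigger
 *
 * When the tracked maximum is exceeded, cond_snapshot_update() records the
 * triggering value and key, tracing_snapshot_cond() captures the snapshot
 * buffer, and track_data_snapshot_print() later annotates the hist output
 * with the "Snapshot taken ..." details.
 */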
3515 | ||
3516 | static void track_data_print(struct seq_file *m, | |
3517 | struct hist_trigger_data *hist_data, | |
3518 | struct tracing_map_elt *elt, | |
3519 | struct action_data *data) | |
3520 | { | |
3521 | u64 track_val = get_track_val(hist_data, elt, data); | |
3522 | unsigned int i, save_var_idx; | |
3523 | ||
3524 | if (data->handler == HANDLER_ONMAX) | |
3525 | seq_printf(m, "\n\tmax: %10llu", track_val); | |
3526 | else if (data->handler == HANDLER_ONCHANGE) | |
3527 | seq_printf(m, "\n\tchanged: %10llu", track_val); | |
3528 | ||
3529 | if (data->action == ACTION_SNAPSHOT) | |
3530 | return; | |
3531 | ||
3532 | for (i = 0; i < hist_data->n_save_vars; i++) { | |
3533 | struct hist_field *save_val = hist_data->save_vars[i]->val; | |
3534 | struct hist_field *save_var = hist_data->save_vars[i]->var; | |
3535 | u64 val; | |
3536 | ||
3537 | save_var_idx = save_var->var.idx; | |
3538 | ||
3539 | val = tracing_map_read_var(elt, save_var_idx); | |
3540 | ||
3541 | if (save_val->flags & HIST_FIELD_FL_STRING) { | |
3542 | seq_printf(m, " %s: %-32s", save_var->var.name, | |
3543 | (char *)(uintptr_t)(val)); | |
3544 | } else | |
3545 | seq_printf(m, " %s: %10llu", save_var->var.name, val); | |
3546 | } | |
3547 | } | |
3548 | ||
3549 | static void ontrack_action(struct hist_trigger_data *hist_data, | |
3550 | struct tracing_map_elt *elt, | |
3551 | struct trace_buffer *buffer, void *rec, | |
3552 | struct ring_buffer_event *rbe, void *key, | |
3553 | struct action_data *data, u64 *var_ref_vals) | |
3554 | { | |
3555 | u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx]; | |
3556 | ||
3557 | if (check_track_val(elt, data, var_val)) { | |
3558 | save_track_val(hist_data, elt, data, var_val); | |
3559 | save_track_data(hist_data, elt, buffer, rec, rbe, | |
3560 | key, data, var_ref_vals); | |
3561 | } | |
3562 | } | |
3563 | ||
3564 | static void action_data_destroy(struct action_data *data) | |
3565 | { | |
3566 | unsigned int i; | |
3567 | ||
3568 | lockdep_assert_held(&event_mutex); | |
3569 | ||
3570 | kfree(data->action_name); | |
3571 | ||
3572 | for (i = 0; i < data->n_params; i++) | |
3573 | kfree(data->params[i]); | |
3574 | ||
3575 | if (data->synth_event) | |
3576 | data->synth_event->ref--; | |
3577 | ||
3578 | kfree(data->synth_event_name); | |
3579 | ||
3580 | kfree(data); | |
3581 | } | |
3582 | ||
3583 | static void track_data_destroy(struct hist_trigger_data *hist_data, | |
3584 | struct action_data *data) | |
3585 | { | |
3586 | struct trace_event_file *file = hist_data->event_file; | |
3587 | ||
3588 | destroy_hist_field(data->track_data.track_var, 0); | |
3589 | ||
3590 | if (data->action == ACTION_SNAPSHOT) { | |
3591 | struct track_data *track_data; | |
3592 | ||
3593 | track_data = tracing_cond_snapshot_data(file->tr); | |
3594 | if (track_data && track_data->hist_data == hist_data) { | |
3595 | tracing_snapshot_cond_disable(file->tr); | |
3596 | track_data_free(track_data); | |
3597 | } | |
3598 | } | |
3599 | ||
3600 | kfree(data->track_data.var_str); | |
3601 | ||
3602 | action_data_destroy(data); | |
3603 | } | |
3604 | ||
3605 | static int action_create(struct hist_trigger_data *hist_data, | |
3606 | struct action_data *data); | |
3607 | ||
3608 | static int track_data_create(struct hist_trigger_data *hist_data, | |
3609 | struct action_data *data) | |
3610 | { | |
3611 | struct hist_field *var_field, *ref_field, *track_var = NULL; | |
3612 | struct trace_event_file *file = hist_data->event_file; | |
3613 | struct trace_array *tr = file->tr; | |
3614 | char *track_data_var_str; | |
3615 | int ret = 0; | |
3616 | ||
3617 | track_data_var_str = data->track_data.var_str; | |
3618 | if (track_data_var_str[0] != '$') { | |
3619 | hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); | |
3620 | return -EINVAL; | |
3621 | } | |
3622 | track_data_var_str++; | |
3623 | ||
3624 | var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); | |
3625 | if (!var_field) { | |
3626 | hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); | |
3627 | return -EINVAL; | |
3628 | } | |
3629 | ||
3630 | ref_field = create_var_ref(hist_data, var_field, NULL, NULL); | |
3631 | if (!ref_field) | |
3632 | return -ENOMEM; | |
3633 | ||
3634 | data->track_data.var_ref = ref_field; | |
3635 | ||
3636 | if (data->handler == HANDLER_ONMAX) | |
3637 | track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); | |
3638 | if (IS_ERR(track_var)) { | |
3639 | hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); | |
3640 | ret = PTR_ERR(track_var); | |
3641 | goto out; | |
3642 | } | |
3643 | ||
3644 | if (data->handler == HANDLER_ONCHANGE) | |
3645 | track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); | |
3646 | if (IS_ERR(track_var)) { | |
3647 | hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); | |
3648 | ret = PTR_ERR(track_var); | |
3649 | goto out; | |
3650 | } | |
3651 | data->track_data.track_var = track_var; | |
3652 | ||
3653 | ret = action_create(hist_data, data); | |
3654 | out: | |
3655 | return ret; | |
3656 | } | |
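/*
 * Sketch of what track_data_create() builds for an onmax()/onchange()
 * handler (internal names only, never visible in the trigger syntax):
 *
 *   onmax($wakeup_lat).xxx()   -> var ref to $wakeup_lat + "__max" track var
 *   onchange($state).xxx()     -> var ref to $state + "__change" track var
 *
 * The track variable lives in the tracing map element and holds the best or
 * last value seen, which ontrack_action() compares against on each hit.
 */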
3657 | ||
3658 | static int parse_action_params(struct trace_array *tr, char *params, | |
3659 | struct action_data *data) | |
3660 | { | |
3661 | char *param, *saved_param; | |
3662 | bool first_param = true; | |
3663 | int ret = 0; | |
3664 | ||
3665 | while (params) { | |
3666 | if (data->n_params >= SYNTH_FIELDS_MAX) { | |
3667 | hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); | |
3668 | ret = -EINVAL; | |
3669 | goto out; | |
3670 | } | |
3671 | ||
3672 | param = strsep(¶ms, ","); | |
3673 | if (!param) { | |
3674 | hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); | |
3675 | ret = -EINVAL; | |
3676 | goto out; | |
3677 | } | |
3678 | ||
3679 | param = strstrip(param); | |
3680 | if (strlen(param) < 2) { | |
3681 | hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); | |
3682 | ret = -EINVAL; | |
3683 | goto out; | |
3684 | } | |
3685 | ||
3686 | saved_param = kstrdup(param, GFP_KERNEL); | |
3687 | if (!saved_param) { | |
3688 | ret = -ENOMEM; | |
3689 | goto out; | |
3690 | } | |
3691 | ||
3692 | if (first_param && data->use_trace_keyword) { | |
3693 | data->synth_event_name = saved_param; | |
3694 | first_param = false; | |
3695 | continue; | |
3696 | } | |
3697 | first_param = false; | |
3698 | ||
3699 | data->params[data->n_params++] = saved_param; | |
3700 | } | |
3701 | out: | |
3702 | return ret; | |
3703 | } | |
3704 | ||
3705 | static int action_parse(struct trace_array *tr, char *str, struct action_data *data, | |
3706 | enum handler_id handler) | |
3707 | { | |
3708 | char *action_name; | |
3709 | int ret = 0; | |
3710 | ||
3711 | strsep(&str, "."); | |
3712 | if (!str) { | |
3713 | hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); | |
3714 | ret = -EINVAL; | |
3715 | goto out; | |
3716 | } | |
3717 | ||
3718 | action_name = strsep(&str, "("); | |
3719 | if (!action_name || !str) { | |
3720 | hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); | |
3721 | ret = -EINVAL; | |
3722 | goto out; | |
3723 | } | |
3724 | ||
3725 | if (str_has_prefix(action_name, "save")) { | |
3726 | char *params = strsep(&str, ")"); | |
3727 | ||
3728 | if (!params) { | |
3729 | hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); | |
3730 | ret = -EINVAL; | |
3731 | goto out; | |
3732 | } | |
3733 | ||
3734 | ret = parse_action_params(tr, params, data); | |
3735 | if (ret) | |
3736 | goto out; | |
3737 | ||
3738 | if (handler == HANDLER_ONMAX) | |
3739 | data->track_data.check_val = check_track_val_max; | |
3740 | else if (handler == HANDLER_ONCHANGE) | |
3741 | data->track_data.check_val = check_track_val_changed; | |
3742 | else { | |
3743 | hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); | |
3744 | ret = -EINVAL; | |
3745 | goto out; | |
3746 | } | |
3747 | ||
3748 | data->track_data.save_data = save_track_data_vars; | |
3749 | data->fn = ontrack_action; | |
3750 | data->action = ACTION_SAVE; | |
3751 | } else if (str_has_prefix(action_name, "snapshot")) { | |
3752 | char *params = strsep(&str, ")"); | |
3753 | ||
3754 | if (!str) { | |
3755 | hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); | |
3756 | ret = -EINVAL; | |
3757 | goto out; | |
3758 | } | |
3759 | ||
3760 | if (handler == HANDLER_ONMAX) | |
3761 | data->track_data.check_val = check_track_val_max; | |
3762 | else if (handler == HANDLER_ONCHANGE) | |
3763 | data->track_data.check_val = check_track_val_changed; | |
3764 | else { | |
3765 | hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); | |
3766 | ret = -EINVAL; | |
3767 | goto out; | |
3768 | } | |
3769 | ||
3770 | data->track_data.save_data = save_track_data_snapshot; | |
3771 | data->fn = ontrack_action; | |
3772 | data->action = ACTION_SNAPSHOT; | |
3773 | } else { | |
3774 | char *params = strsep(&str, ")"); | |
3775 | ||
3776 | if (str_has_prefix(action_name, "trace")) | |
3777 | data->use_trace_keyword = true; | |
3778 | ||
3779 | if (params) { | |
3780 | ret = parse_action_params(tr, params, data); | |
3781 | if (ret) | |
3782 | goto out; | |
3783 | } | |
3784 | ||
3785 | if (handler == HANDLER_ONMAX) | |
3786 | data->track_data.check_val = check_track_val_max; | |
3787 | else if (handler == HANDLER_ONCHANGE) | |
3788 | data->track_data.check_val = check_track_val_changed; | |
3789 | ||
3790 | if (handler != HANDLER_ONMATCH) { | |
3791 | data->track_data.save_data = action_trace; | |
3792 | data->fn = ontrack_action; | |
3793 | } else | |
3794 | data->fn = action_trace; | |
3795 | ||
3796 | data->action = ACTION_TRACE; | |
3797 | } | |
3798 | ||
3799 | data->action_name = kstrdup(action_name, GFP_KERNEL); | |
3800 | if (!data->action_name) { | |
3801 | ret = -ENOMEM; | |
3802 | goto out; | |
3803 | } | |
3804 | ||
3805 | data->handler = handler; | |
3806 | out: | |
3807 | return ret; | |
3808 | } | |
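/*
 * Illustrative action strings resolved by action_parse() (sketch; the
 * variables and synthetic event named here are assumed to exist):
 *
 *   onmax($wakeup_lat).save(next_comm,prev_pid)       -> ACTION_SAVE
 *   onmax($wakeup_lat).snapshot()                     -> ACTION_SNAPSHOT
 *   onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm)
 *                                                     -> ACTION_TRACE
 *   onmatch(sched.sched_waking).trace(wakeup_latency,$wakeup_lat,next_pid,next_comm)
 *                                                     -> ACTION_TRACE with
 *                                                        use_trace_keyword set
 */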
3809 | ||
3810 | static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, | |
3811 | char *str, enum handler_id handler) | |
3812 | { | |
3813 | struct action_data *data; | |
3814 | int ret = -EINVAL; | |
3815 | char *var_str; | |
3816 | ||
3817 | data = kzalloc(sizeof(*data), GFP_KERNEL); | |
3818 | if (!data) | |
3819 | return ERR_PTR(-ENOMEM); | |
3820 | ||
3821 | var_str = strsep(&str, ")"); | |
3822 | if (!var_str || !str) { | |
3823 | ret = -EINVAL; | |
3824 | goto free; | |
3825 | } | |
3826 | ||
3827 | data->track_data.var_str = kstrdup(var_str, GFP_KERNEL); | |
3828 | if (!data->track_data.var_str) { | |
3829 | ret = -ENOMEM; | |
3830 | goto free; | |
3831 | } | |
3832 | ||
3833 | ret = action_parse(hist_data->event_file->tr, str, data, handler); | |
3834 | if (ret) | |
3835 | goto free; | |
3836 | out: | |
3837 | return data; | |
3838 | free: | |
3839 | track_data_destroy(hist_data, data); | |
3840 | data = ERR_PTR(ret); | |
3841 | goto out; | |
3842 | } | |
3843 | ||
3844 | static void onmatch_destroy(struct action_data *data) | |
3845 | { | |
3846 | kfree(data->match_data.event); | |
3847 | kfree(data->match_data.event_system); | |
3848 | ||
3849 | action_data_destroy(data); | |
3850 | } | |
3851 | ||
3852 | static void destroy_field_var(struct field_var *field_var) | |
3853 | { | |
3854 | if (!field_var) | |
3855 | return; | |
3856 | ||
3857 | destroy_hist_field(field_var->var, 0); | |
3858 | destroy_hist_field(field_var->val, 0); | |
3859 | ||
3860 | kfree(field_var); | |
3861 | } | |
3862 | ||
3863 | static void destroy_field_vars(struct hist_trigger_data *hist_data) | |
3864 | { | |
3865 | unsigned int i; | |
3866 | ||
3867 | for (i = 0; i < hist_data->n_field_vars; i++) | |
3868 | destroy_field_var(hist_data->field_vars[i]); | |
3869 | ||
3870 | for (i = 0; i < hist_data->n_save_vars; i++) | |
3871 | destroy_field_var(hist_data->save_vars[i]); | |
3872 | } | |
3873 | ||
3874 | static void save_field_var(struct hist_trigger_data *hist_data, | |
3875 | struct field_var *field_var) | |
3876 | { | |
3877 | hist_data->field_vars[hist_data->n_field_vars++] = field_var; | |
3878 | ||
3879 | /* Stack traces are saved in the string storage too */ | |
3880 | if (field_var->val->flags & (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) | |
3881 | hist_data->n_field_var_str++; | |
3882 | } | |
3883 | ||
3884 | ||
3885 | static int check_synth_field(struct synth_event *event, | |
3886 | struct hist_field *hist_field, | |
3887 | unsigned int field_pos) | |
3888 | { | |
3889 | struct synth_field *field; | |
3890 | ||
3891 | if (field_pos >= event->n_fields) | |
3892 | return -EINVAL; | |
3893 | ||
3894 | field = event->fields[field_pos]; | |
3895 | ||
3896 | /* | |
3897 | * A dynamic string synth field can accept a static or | |
3898 | * dynamic string. A static string synth field can only accept a | |
3899 | * same-sized static string, which is checked for later. | |
3900 | */ | |
3901 | if (strstr(hist_field->type, "char[") && field->is_string | |
3902 | && field->is_dynamic) | |
3903 | return 0; | |
3904 | ||
3905 | if (strstr(hist_field->type, "long[") && field->is_stack) | |
3906 | return 0; | |
3907 | ||
3908 | if (strcmp(field->type, hist_field->type) != 0) { | |
3909 | if (field->size != hist_field->size || | |
3910 | (!field->is_string && field->is_signed != hist_field->is_signed)) | |
3911 | return -EINVAL; | |
3912 | } | |
3913 | ||
3914 | return 0; | |
3915 | } | |
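/*
 * Example of the matching rule above, for a hypothetical synthetic event
 * defined as "wakeup_latency u64 lat; pid_t pid; char comm[16]": a u64
 * param matches 'lat'; a pid_t/int param matches 'pid' since size and
 * signedness agree even though the type strings differ; and a char[16]
 * source matches 'comm', while a differently sized static string would be
 * rejected by the later size check mentioned in the comment above.
 */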
3916 | ||
3917 | static struct hist_field * | |
3918 | trace_action_find_var(struct hist_trigger_data *hist_data, | |
3919 | struct action_data *data, | |
3920 | char *system, char *event, char *var) | |
3921 | { | |
3922 | struct trace_array *tr = hist_data->event_file->tr; | |
3923 | struct hist_field *hist_field; | |
3924 | ||
3925 | var++; /* skip '$' */ | |
3926 | ||
3927 | hist_field = find_target_event_var(hist_data, system, event, var); | |
3928 | if (!hist_field) { | |
3929 | if (!system && data->handler == HANDLER_ONMATCH) { | |
3930 | system = data->match_data.event_system; | |
3931 | event = data->match_data.event; | |
3932 | } | |
3933 | ||
3934 | hist_field = find_event_var(hist_data, system, event, var); | |
3935 | } | |
3936 | ||
3937 | if (!hist_field) | |
3938 | hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); | |
3939 | ||
3940 | return hist_field; | |
3941 | } | |
3942 | ||
3943 | static struct hist_field * | |
3944 | trace_action_create_field_var(struct hist_trigger_data *hist_data, | |
3945 | struct action_data *data, char *system, | |
3946 | char *event, char *var) | |
3947 | { | |
3948 | struct hist_field *hist_field = NULL; | |
3949 | struct field_var *field_var; | |
3950 | ||
3951 | /* | |
3952 | * First try to create a field var on the target event (the | |
3953 | * one currently being defined). This will create a variable for | |
3954 | * unqualified fields on the target event, or if qualified, | |
3955 | * target fields that have qualified names matching the target. | |
3956 | */ | |
3957 | field_var = create_target_field_var(hist_data, system, event, var); | |
3958 | ||
3959 | if (field_var && !IS_ERR(field_var)) { | |
3960 | save_field_var(hist_data, field_var); | |
3961 | hist_field = field_var->var; | |
3962 | } else { | |
3963 | field_var = NULL; | |
3964 | /* | |
3965 | * If no explicit system.event is specified, default to | |
3966 | * looking for fields on the onmatch(system.event.xxx) | |
3967 | * event. | |
3968 | */ | |
3969 | if (!system && data->handler == HANDLER_ONMATCH) { | |
3970 | system = data->match_data.event_system; | |
3971 | event = data->match_data.event; | |
3972 | } | |
3973 | ||
3974 | if (!event) | |
3975 | goto free; | |
3976 | /* | |
3977 | * At this point, we're looking at a field on another | |
3978 | * event. Because we can't modify a hist trigger on | |
3979 | * another event to add a variable for a field, we need | |
3980 | * to create a new trigger on that event and create the | |
3981 | * variable at the same time. | |
3982 | */ | |
3983 | hist_field = create_field_var_hist(hist_data, system, event, var); | |
3984 | if (IS_ERR(hist_field)) | |
3985 | goto free; | |
3986 | } | |
3987 | out: | |
3988 | return hist_field; | |
3989 | free: | |
3990 | destroy_field_var(field_var); | |
3991 | hist_field = NULL; | |
3992 | goto out; | |
3993 | } | |
3994 | ||
3995 | static int trace_action_create(struct hist_trigger_data *hist_data, | |
3996 | struct action_data *data) | |
3997 | { | |
3998 | struct trace_array *tr = hist_data->event_file->tr; | |
3999 | char *event_name, *param, *system = NULL; | |
4000 | struct hist_field *hist_field, *var_ref; | |
4001 | unsigned int i; | |
4002 | unsigned int field_pos = 0; | |
4003 | struct synth_event *event; | |
4004 | char *synth_event_name; | |
4005 | int var_ref_idx, ret = 0; | |
4006 | ||
4007 | lockdep_assert_held(&event_mutex); | |
4008 | ||
4009 | /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */ | |
4010 | if (data->n_params > SYNTH_FIELDS_MAX) | |
4011 | return -EINVAL; | |
4012 | ||
4013 | if (data->use_trace_keyword) | |
4014 | synth_event_name = data->synth_event_name; | |
4015 | else | |
4016 | synth_event_name = data->action_name; | |
4017 | ||
4018 | event = find_synth_event(synth_event_name); | |
4019 | if (!event) { | |
4020 | hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); | |
4021 | return -EINVAL; | |
4022 | } | |
4023 | ||
4024 | event->ref++; | |
4025 | ||
4026 | for (i = 0; i < data->n_params; i++) { | |
4027 | char *p; | |
4028 | ||
4029 | p = param = kstrdup(data->params[i], GFP_KERNEL); | |
4030 | if (!param) { | |
4031 | ret = -ENOMEM; | |
4032 | goto err; | |
4033 | } | |
4034 | ||
4035 | system = strsep(¶m, "."); | |
4036 | if (!param) { | |
4037 | param = (char *)system; | |
4038 | system = event_name = NULL; | |
4039 | } else { | |
4040 | event_name = strsep(¶m, "."); | |
4041 | if (!param) { | |
4042 | kfree(p); | |
4043 | ret = -EINVAL; | |
4044 | goto err; | |
4045 | } | |
4046 | } | |
4047 | ||
4048 | if (param[0] == '$') | |
4049 | hist_field = trace_action_find_var(hist_data, data, | |
4050 | system, event_name, | |
4051 | param); | |
4052 | else | |
4053 | hist_field = trace_action_create_field_var(hist_data, | |
4054 | data, | |
4055 | system, | |
4056 | event_name, | |
4057 | param); | |
4058 | ||
4059 | if (!hist_field) { | |
4060 | kfree(p); | |
4061 | ret = -EINVAL; | |
4062 | goto err; | |
4063 | } | |
4064 | ||
4065 | if (check_synth_field(event, hist_field, field_pos) == 0) { | |
4066 | var_ref = create_var_ref(hist_data, hist_field, | |
4067 | system, event_name); | |
4068 | if (!var_ref) { | |
4069 | kfree(p); | |
4070 | ret = -ENOMEM; | |
4071 | goto err; | |
4072 | } | |
4073 | ||
4074 | var_ref_idx = find_var_ref_idx(hist_data, var_ref); | |
4075 | if (WARN_ON(var_ref_idx < 0)) { | |
4076 | kfree(p); | |
4077 | ret = var_ref_idx; | |
4078 | goto err; | |
4079 | } | |
4080 | ||
4081 | data->var_ref_idx[i] = var_ref_idx; | |
4082 | ||
4083 | field_pos++; | |
4084 | kfree(p); | |
4085 | continue; | |
4086 | } | |
4087 | ||
4088 | hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); | |
4089 | kfree(p); | |
4090 | ret = -EINVAL; | |
4091 | goto err; | |
4092 | } | |
4093 | ||
4094 | if (field_pos != event->n_fields) { | |
4095 | hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); | |
4096 | ret = -EINVAL; | |
4097 | goto err; | |
4098 | } | |
4099 | ||
4100 | data->synth_event = event; | |
4101 | out: | |
4102 | return ret; | |
4103 | err: | |
4104 | event->ref--; | |
4105 | ||
4106 | goto out; | |
4107 | } | |
4108 | ||
4109 | static int action_create(struct hist_trigger_data *hist_data, | |
4110 | struct action_data *data) | |
4111 | { | |
4112 | struct trace_event_file *file = hist_data->event_file; | |
4113 | struct trace_array *tr = file->tr; | |
4114 | struct track_data *track_data; | |
4115 | struct field_var *field_var; | |
4116 | unsigned int i; | |
4117 | char *param; | |
4118 | int ret = 0; | |
4119 | ||
4120 | if (data->action == ACTION_TRACE) | |
4121 | return trace_action_create(hist_data, data); | |
4122 | ||
4123 | if (data->action == ACTION_SNAPSHOT) { | |
4124 | track_data = track_data_alloc(hist_data->key_size, data, hist_data); | |
4125 | if (IS_ERR(track_data)) { | |
4126 | ret = PTR_ERR(track_data); | |
4127 | goto out; | |
4128 | } | |
4129 | ||
4130 | ret = tracing_snapshot_cond_enable(file->tr, track_data, | |
4131 | cond_snapshot_update); | |
4132 | if (ret) | |
4133 | track_data_free(track_data); | |
4134 | ||
4135 | goto out; | |
4136 | } | |
4137 | ||
4138 | if (data->action == ACTION_SAVE) { | |
4139 | if (hist_data->n_save_vars) { | |
4140 | ret = -EEXIST; | |
4141 | hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); | |
4142 | goto out; | |
4143 | } | |
4144 | ||
4145 | for (i = 0; i < data->n_params; i++) { | |
4146 | param = kstrdup(data->params[i], GFP_KERNEL); | |
4147 | if (!param) { | |
4148 | ret = -ENOMEM; | |
4149 | goto out; | |
4150 | } | |
4151 | ||
4152 | field_var = create_target_field_var(hist_data, NULL, NULL, param); | |
4153 | if (IS_ERR(field_var)) { | |
4154 | hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, | |
4155 | errpos(param)); | |
4156 | ret = PTR_ERR(field_var); | |
4157 | kfree(param); | |
4158 | goto out; | |
4159 | } | |
4160 | ||
4161 | hist_data->save_vars[hist_data->n_save_vars++] = field_var; | |
4162 | if (field_var->val->flags & | |
4163 | (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) | |
4164 | hist_data->n_save_var_str++; | |
4165 | kfree(param); | |
4166 | } | |
4167 | } | |
4168 | out: | |
4169 | return ret; | |
4170 | } | |
4171 | ||
4172 | static int onmatch_create(struct hist_trigger_data *hist_data, | |
4173 | struct action_data *data) | |
4174 | { | |
4175 | return action_create(hist_data, data); | |
4176 | } | |
4177 | ||
4178 | static struct action_data *onmatch_parse(struct trace_array *tr, char *str) | |
4179 | { | |
4180 | char *match_event, *match_event_system; | |
4181 | struct action_data *data; | |
4182 | int ret = -EINVAL; | |
4183 | ||
4184 | data = kzalloc(sizeof(*data), GFP_KERNEL); | |
4185 | if (!data) | |
4186 | return ERR_PTR(-ENOMEM); | |
4187 | ||
4188 | match_event = strsep(&str, ")"); | |
4189 | if (!match_event || !str) { | |
4190 | hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); | |
4191 | goto free; | |
4192 | } | |
4193 | ||
4194 | match_event_system = strsep(&match_event, "."); | |
4195 | if (!match_event) { | |
4196 | hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); | |
4197 | goto free; | |
4198 | } | |
4199 | ||
4200 | if (IS_ERR(event_file(tr, match_event_system, match_event))) { | |
4201 | hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); | |
4202 | goto free; | |
4203 | } | |
4204 | ||
4205 | data->match_data.event = kstrdup(match_event, GFP_KERNEL); | |
4206 | if (!data->match_data.event) { | |
4207 | ret = -ENOMEM; | |
4208 | goto free; | |
4209 | } | |
4210 | ||
4211 | data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL); | |
4212 | if (!data->match_data.event_system) { | |
4213 | ret = -ENOMEM; | |
4214 | goto free; | |
4215 | } | |
4216 | ||
4217 | ret = action_parse(tr, str, data, HANDLER_ONMATCH); | |
4218 | if (ret) | |
4219 | goto free; | |
4220 | out: | |
4221 | return data; | |
4222 | free: | |
4223 | onmatch_destroy(data); | |
4224 | data = ERR_PTR(ret); | |
4225 | goto out; | |
4226 | } | |
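/*
 * Illustrative onmatch() spec as parsed above (sketch; names assumed):
 *
 *   onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm)
 *
 * "sched" and "sched_waking" become match_data.event_system/event and must
 * name an existing event; everything after the closing paren is handed to
 * action_parse() to resolve the action and its params.
 */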
4227 | ||
4228 | static int create_hitcount_val(struct hist_trigger_data *hist_data) | |
4229 | { | |
4230 | hist_data->fields[HITCOUNT_IDX] = | |
4231 | create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL); | |
4232 | if (!hist_data->fields[HITCOUNT_IDX]) | |
4233 | return -ENOMEM; | |
4234 | ||
4235 | hist_data->n_vals++; | |
4236 | hist_data->n_fields++; | |
4237 | ||
4238 | if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX)) | |
4239 | return -EINVAL; | |
4240 | ||
4241 | return 0; | |
4242 | } | |
4243 | ||
4244 | static int __create_val_field(struct hist_trigger_data *hist_data, | |
4245 | unsigned int val_idx, | |
4246 | struct trace_event_file *file, | |
4247 | char *var_name, char *field_str, | |
4248 | unsigned long flags) | |
4249 | { | |
4250 | struct hist_field *hist_field; | |
4251 | int ret = 0, n_subexprs = 0; | |
4252 | ||
4253 | hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs); | |
4254 | if (IS_ERR(hist_field)) { | |
4255 | ret = PTR_ERR(hist_field); | |
4256 | goto out; | |
4257 | } | |
4258 | ||
4259 | /* certain field modifiers are not allowed on values and variables */ | |
4260 | if (hist_field->flags & HIST_FIELD_FL_VAR) { | |
4261 | /* Variable */ | |
4262 | if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | | |
4263 | HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2)) | |
4264 | goto err; | |
4265 | } else { | |
4266 | /* Value */ | |
4267 | if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | | |
4268 | HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 | | |
4269 | HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET | | |
4270 | HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) | |
4271 | goto err; | |
4272 | } | |
4273 | ||
4274 | hist_data->fields[val_idx] = hist_field; | |
4275 | ||
4276 | ++hist_data->n_vals; | |
4277 | ++hist_data->n_fields; | |
4278 | ||
4279 | if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) | |
4280 | ret = -EINVAL; | |
4281 | out: | |
4282 | return ret; | |
4283 | err: | |
4284 | hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str)); | |
4285 | return -EINVAL; | |
4286 | } | |
4287 | ||
4288 | static int create_val_field(struct hist_trigger_data *hist_data, | |
4289 | unsigned int val_idx, | |
4290 | struct trace_event_file *file, | |
4291 | char *field_str) | |
4292 | { | |
4293 | if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX)) | |
4294 | return -EINVAL; | |
4295 | ||
4296 | return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); | |
4297 | } | |
4298 | ||
4299 | static const char no_comm[] = "(no comm)"; | |
4300 | ||
4301 | static u64 hist_field_execname(struct hist_field *hist_field, | |
4302 | struct tracing_map_elt *elt, | |
4303 | struct trace_buffer *buffer, | |
4304 | struct ring_buffer_event *rbe, | |
4305 | void *event) | |
4306 | { | |
4307 | struct hist_elt_data *elt_data; | |
4308 | ||
4309 | if (WARN_ON_ONCE(!elt)) | |
4310 | return (u64)(unsigned long)no_comm; | |
4311 | ||
4312 | elt_data = elt->private_data; | |
4313 | ||
4314 | if (WARN_ON_ONCE(!elt_data->comm)) | |
4315 | return (u64)(unsigned long)no_comm; | |
4316 | ||
4317 | return (u64)(unsigned long)(elt_data->comm); | |
4318 | } | |
4319 | ||
4320 | static u64 hist_field_stack(struct hist_field *hist_field, | |
4321 | struct tracing_map_elt *elt, | |
4322 | struct trace_buffer *buffer, | |
4323 | struct ring_buffer_event *rbe, | |
4324 | void *event) | |
4325 | { | |
4326 | u32 str_item = *(u32 *)(event + hist_field->field->offset); | |
4327 | int str_loc = str_item & 0xffff; | |
4328 | char *addr = (char *)(event + str_loc); | |
4329 | ||
4330 | return (u64)(unsigned long)addr; | |
4331 | } | |
4332 | ||
4333 | static u64 hist_fn_call(struct hist_field *hist_field, | |
4334 | struct tracing_map_elt *elt, | |
4335 | struct trace_buffer *buffer, | |
4336 | struct ring_buffer_event *rbe, | |
4337 | void *event) | |
4338 | { | |
4339 | switch (hist_field->fn_num) { | |
4340 | case HIST_FIELD_FN_VAR_REF: | |
4341 | return hist_field_var_ref(hist_field, elt, buffer, rbe, event); | |
4342 | case HIST_FIELD_FN_COUNTER: | |
4343 | return hist_field_counter(hist_field, elt, buffer, rbe, event); | |
4344 | case HIST_FIELD_FN_CONST: | |
4345 | return hist_field_const(hist_field, elt, buffer, rbe, event); | |
4346 | case HIST_FIELD_FN_LOG2: | |
4347 | return hist_field_log2(hist_field, elt, buffer, rbe, event); | |
4348 | case HIST_FIELD_FN_BUCKET: | |
4349 | return hist_field_bucket(hist_field, elt, buffer, rbe, event); | |
4350 | case HIST_FIELD_FN_TIMESTAMP: | |
4351 | return hist_field_timestamp(hist_field, elt, buffer, rbe, event); | |
4352 | case HIST_FIELD_FN_CPU: | |
4353 | return hist_field_cpu(hist_field, elt, buffer, rbe, event); | |
4354 | case HIST_FIELD_FN_COMM: | |
4355 | return hist_field_comm(hist_field, elt, buffer, rbe, event); | |
4356 | case HIST_FIELD_FN_STRING: | |
4357 | return hist_field_string(hist_field, elt, buffer, rbe, event); | |
4358 | case HIST_FIELD_FN_DYNSTRING: | |
4359 | return hist_field_dynstring(hist_field, elt, buffer, rbe, event); | |
4360 | case HIST_FIELD_FN_RELDYNSTRING: | |
4361 | return hist_field_reldynstring(hist_field, elt, buffer, rbe, event); | |
4362 | case HIST_FIELD_FN_PSTRING: | |
4363 | return hist_field_pstring(hist_field, elt, buffer, rbe, event); | |
4364 | case HIST_FIELD_FN_S64: | |
4365 | return hist_field_s64(hist_field, elt, buffer, rbe, event); | |
4366 | case HIST_FIELD_FN_U64: | |
4367 | return hist_field_u64(hist_field, elt, buffer, rbe, event); | |
4368 | case HIST_FIELD_FN_S32: | |
4369 | return hist_field_s32(hist_field, elt, buffer, rbe, event); | |
4370 | case HIST_FIELD_FN_U32: | |
4371 | return hist_field_u32(hist_field, elt, buffer, rbe, event); | |
4372 | case HIST_FIELD_FN_S16: | |
4373 | return hist_field_s16(hist_field, elt, buffer, rbe, event); | |
4374 | case HIST_FIELD_FN_U16: | |
4375 | return hist_field_u16(hist_field, elt, buffer, rbe, event); | |
4376 | case HIST_FIELD_FN_S8: | |
4377 | return hist_field_s8(hist_field, elt, buffer, rbe, event); | |
4378 | case HIST_FIELD_FN_U8: | |
4379 | return hist_field_u8(hist_field, elt, buffer, rbe, event); | |
4380 | case HIST_FIELD_FN_UMINUS: | |
4381 | return hist_field_unary_minus(hist_field, elt, buffer, rbe, event); | |
4382 | case HIST_FIELD_FN_MINUS: | |
4383 | return hist_field_minus(hist_field, elt, buffer, rbe, event); | |
4384 | case HIST_FIELD_FN_PLUS: | |
4385 | return hist_field_plus(hist_field, elt, buffer, rbe, event); | |
4386 | case HIST_FIELD_FN_DIV: | |
4387 | return hist_field_div(hist_field, elt, buffer, rbe, event); | |
4388 | case HIST_FIELD_FN_MULT: | |
4389 | return hist_field_mult(hist_field, elt, buffer, rbe, event); | |
4390 | case HIST_FIELD_FN_DIV_POWER2: | |
4391 | return div_by_power_of_two(hist_field, elt, buffer, rbe, event); | |
4392 | case HIST_FIELD_FN_DIV_NOT_POWER2: | |
4393 | return div_by_not_power_of_two(hist_field, elt, buffer, rbe, event); | |
4394 | case HIST_FIELD_FN_DIV_MULT_SHIFT: | |
4395 | return div_by_mult_and_shift(hist_field, elt, buffer, rbe, event); | |
4396 | case HIST_FIELD_FN_EXECNAME: | |
4397 | return hist_field_execname(hist_field, elt, buffer, rbe, event); | |
4398 | case HIST_FIELD_FN_STACK: | |
4399 | return hist_field_stack(hist_field, elt, buffer, rbe, event); | |
4400 | default: | |
4401 | return 0; | |
4402 | } | |
4403 | } | |
4404 | ||
4405 | /* Convert a var that points to common_pid.execname to a string */ | |
4406 | static void update_var_execname(struct hist_field *hist_field) | |
4407 | { | |
4408 | hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR | | |
4409 | HIST_FIELD_FL_EXECNAME; | |
4410 | hist_field->size = MAX_FILTER_STR_VAL; | |
4411 | hist_field->is_signed = 0; | |
4412 | ||
4413 | kfree_const(hist_field->type); | |
4414 | hist_field->type = "char[]"; | |
4415 | ||
4416 | hist_field->fn_num = HIST_FIELD_FN_EXECNAME; | |
4417 | } | |
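/*
 * Illustrative variable using the conversion above (sketch): assigning
 * common_pid.execname to a variable stores the saved comm string rather
 * than the pid value, e.g.
 *
 *   echo 'hist:keys=pid:proc=common_pid.execname' \
 *     >> events/sched/sched_waking/trigger
 *
 * so later references to $proc resolve through hist_field_execname() and
 * read back a "char[]" value.
 */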
4418 | ||
4419 | static int create_var_field(struct hist_trigger_data *hist_data, | |
4420 | unsigned int val_idx, | |
4421 | struct trace_event_file *file, | |
4422 | char *var_name, char *expr_str) | |
4423 | { | |
4424 | struct trace_array *tr = hist_data->event_file->tr; | |
4425 | unsigned long flags = 0; | |
4426 | int ret; | |
4427 | ||
4428 | if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) | |
4429 | return -EINVAL; | |
4430 | ||
4431 | if (find_var(hist_data, file, var_name) && !hist_data->remove) { | |
4432 | hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); | |
4433 | return -EINVAL; | |
4434 | } | |
4435 | ||
4436 | flags |= HIST_FIELD_FL_VAR; | |
4437 | hist_data->n_vars++; | |
4438 | if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX)) | |
4439 | return -EINVAL; | |
4440 | ||
4441 | ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); | |
4442 | ||
4443 | if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME) | |
4444 | update_var_execname(hist_data->fields[val_idx]); | |
4445 | ||
4446 | if (!ret && hist_data->fields[val_idx]->flags & | |
4447 | (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) | |
4448 | hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++; | |
4449 | ||
4450 | return ret; | |
4451 | } | |
4452 | ||
4453 | static int create_val_fields(struct hist_trigger_data *hist_data, | |
4454 | struct trace_event_file *file) | |
4455 | { | |
4456 | unsigned int i, j = 1, n_hitcount = 0; | |
4457 | char *fields_str, *field_str; | |
4458 | int ret; | |
4459 | ||
4460 | ret = create_hitcount_val(hist_data); | |
4461 | if (ret) | |
4462 | goto out; | |
4463 | ||
4464 | fields_str = hist_data->attrs->vals_str; | |
4465 | if (!fields_str) | |
4466 | goto out; | |
4467 | ||
4468 | for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && | |
4469 | j < TRACING_MAP_VALS_MAX; i++) { | |
4470 | field_str = strsep(&fields_str, ","); | |
4471 | if (!field_str) | |
4472 | break; | |
4473 | ||
4474 | if (strcmp(field_str, "hitcount") == 0) { | |
4475 | if (!n_hitcount++) | |
4476 | continue; | |
4477 | } | |
4478 | ||
4479 | ret = create_val_field(hist_data, j++, file, field_str); | |
4480 | if (ret) | |
4481 | goto out; | |
4482 | } | |
4483 | ||
4484 | if (fields_str && (strcmp(fields_str, "hitcount") != 0)) | |
4485 | ret = -EINVAL; | |
4486 | out: | |
4487 | /* Only the raw hitcount exists, but nohitcount suppresses it. */ | |
4488 | if (j == 1 && hist_data->attrs->no_hitcount) { | |
4489 | hist_err(hist_data->event_file->tr, HIST_ERR_NEED_NOHC_VAL, 0); | |
4490 | ret = -ENOENT; | |
4491 | } | |
4492 | ||
4493 | return ret; | |
4494 | } | |
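/*
 * Illustrative vals= strings handled above (sketch, kmem/kmalloc fields):
 *
 *   hist:keys=call_site:vals=bytes_req,bytes_alloc  - two value fields plus
 *                                                     the implicit hitcount
 *   hist:keys=call_site:vals=hitcount,bytes_req     - explicit hitcount is
 *                                                     accepted but counted
 *                                                     only once
 *
 * Slot 0 is always the hitcount created by create_hitcount_val(); explicit
 * value fields are filled in starting at index 1.
 */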
4495 | ||
4496 | static int create_key_field(struct hist_trigger_data *hist_data, | |
4497 | unsigned int key_idx, | |
4498 | unsigned int key_offset, | |
4499 | struct trace_event_file *file, | |
4500 | char *field_str) | |
4501 | { | |
4502 | struct trace_array *tr = hist_data->event_file->tr; | |
4503 | struct hist_field *hist_field = NULL; | |
4504 | unsigned long flags = 0; | |
4505 | unsigned int key_size; | |
4506 | int ret = 0, n_subexprs = 0; | |
4507 | ||
4508 | if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) | |
4509 | return -EINVAL; | |
4510 | ||
4511 | flags |= HIST_FIELD_FL_KEY; | |
4512 | ||
4513 | if (strcmp(field_str, "stacktrace") == 0) { | |
4514 | flags |= HIST_FIELD_FL_STACKTRACE; | |
4515 | key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH; | |
4516 | hist_field = create_hist_field(hist_data, NULL, flags, NULL); | |
4517 | } else { | |
4518 | hist_field = parse_expr(hist_data, file, field_str, flags, | |
4519 | NULL, &n_subexprs); | |
4520 | if (IS_ERR(hist_field)) { | |
4521 | ret = PTR_ERR(hist_field); | |
4522 | goto out; | |
4523 | } | |
4524 | ||
4525 | if (field_has_hist_vars(hist_field, 0)) { | |
4526 | hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); | |
4527 | destroy_hist_field(hist_field, 0); | |
4528 | ret = -EINVAL; | |
4529 | goto out; | |
4530 | } | |
4531 | ||
4532 | key_size = hist_field->size; | |
4533 | } | |
4534 | ||
4535 | hist_data->fields[key_idx] = hist_field; | |
4536 | ||
4537 | key_size = ALIGN(key_size, sizeof(u64)); | |
4538 | hist_data->fields[key_idx]->size = key_size; | |
4539 | hist_data->fields[key_idx]->offset = key_offset; | |
4540 | ||
4541 | hist_data->key_size += key_size; | |
4542 | ||
4543 | if (hist_data->key_size > HIST_KEY_SIZE_MAX) { | |
4544 | ret = -EINVAL; | |
4545 | goto out; | |
4546 | } | |
4547 | ||
4548 | hist_data->n_keys++; | |
4549 | hist_data->n_fields++; | |
4550 | ||
4551 | if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX)) | |
4552 | return -EINVAL; | |
4553 | ||
4554 | ret = key_size; | |
4555 | out: | |
4556 | return ret; | |
4557 | } | |
4558 | ||
4559 | static int create_key_fields(struct hist_trigger_data *hist_data, | |
4560 | struct trace_event_file *file) | |
4561 | { | |
4562 | unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; | |
4563 | char *fields_str, *field_str; | |
4564 | int ret = -EINVAL; | |
4565 | ||
4566 | fields_str = hist_data->attrs->keys_str; | |
4567 | if (!fields_str) | |
4568 | goto out; | |
4569 | ||
4570 | for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { | |
4571 | field_str = strsep(&fields_str, ","); | |
4572 | if (!field_str) | |
4573 | break; | |
4574 | ret = create_key_field(hist_data, i, key_offset, | |
4575 | file, field_str); | |
4576 | if (ret < 0) | |
4577 | goto out; | |
4578 | key_offset += ret; | |
4579 | } | |
4580 | if (fields_str) { | |
4581 | ret = -EINVAL; | |
4582 | goto out; | |
4583 | } | |
4584 | ret = 0; | |
4585 | out: | |
4586 | return ret; | |
4587 | } | |
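/*
 * Illustrative keys= strings handled above (sketch):
 *
 *   hist:keys=common_pid                  - single numeric key
 *   hist:keys=call_site.sym,bytes_req     - compound key, .sym modifier
 *   hist:keys=stacktrace                  - kernel stack trace as the key
 *
 * Each key is laid out back to back in the map key, aligned to u64, and the
 * running total must stay within HIST_KEY_SIZE_MAX.
 */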
4588 | ||
4589 | static int create_var_fields(struct hist_trigger_data *hist_data, | |
4590 | struct trace_event_file *file) | |
4591 | { | |
4592 | unsigned int i, j = hist_data->n_vals; | |
4593 | int ret = 0; | |
4594 | ||
4595 | unsigned int n_vars = hist_data->attrs->var_defs.n_vars; | |
4596 | ||
4597 | for (i = 0; i < n_vars; i++) { | |
4598 | char *var_name = hist_data->attrs->var_defs.name[i]; | |
4599 | char *expr = hist_data->attrs->var_defs.expr[i]; | |
4600 | ||
4601 | ret = create_var_field(hist_data, j++, file, var_name, expr); | |
4602 | if (ret) | |
4603 | goto out; | |
4604 | } | |
4605 | out: | |
4606 | return ret; | |
4607 | } | |
4608 | ||
4609 | static void free_var_defs(struct hist_trigger_data *hist_data) | |
4610 | { | |
4611 | unsigned int i; | |
4612 | ||
4613 | for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { | |
4614 | kfree(hist_data->attrs->var_defs.name[i]); | |
4615 | kfree(hist_data->attrs->var_defs.expr[i]); | |
4616 | } | |
4617 | ||
4618 | hist_data->attrs->var_defs.n_vars = 0; | |
4619 | } | |
4620 | ||
4621 | static int parse_var_defs(struct hist_trigger_data *hist_data) | |
4622 | { | |
4623 | struct trace_array *tr = hist_data->event_file->tr; | |
4624 | char *s, *str, *var_name, *field_str; | |
4625 | unsigned int i, j, n_vars = 0; | |
4626 | int ret = 0; | |
4627 | ||
4628 | for (i = 0; i < hist_data->attrs->n_assignments; i++) { | |
4629 | str = hist_data->attrs->assignment_str[i]; | |
4630 | for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { | |
4631 | field_str = strsep(&str, ","); | |
4632 | if (!field_str) | |
4633 | break; | |
4634 | ||
4635 | var_name = strsep(&field_str, "="); | |
4636 | if (!var_name || !field_str) { | |
4637 | hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, | |
4638 | errpos(var_name)); | |
4639 | ret = -EINVAL; | |
4640 | goto free; | |
4641 | } | |
4642 | ||
4643 | if (n_vars == TRACING_MAP_VARS_MAX) { | |
4644 | hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); | |
4645 | ret = -EINVAL; | |
4646 | goto free; | |
4647 | } | |
4648 | ||
4649 | s = kstrdup(var_name, GFP_KERNEL); | |
4650 | if (!s) { | |
4651 | ret = -ENOMEM; | |
4652 | goto free; | |
4653 | } | |
4654 | hist_data->attrs->var_defs.name[n_vars] = s; | |
4655 | ||
4656 | s = kstrdup(field_str, GFP_KERNEL); | |
4657 | if (!s) { | |
4658 | kfree(hist_data->attrs->var_defs.name[n_vars]); | |
4659 | hist_data->attrs->var_defs.name[n_vars] = NULL; | |
4660 | ret = -ENOMEM; | |
4661 | goto free; | |
4662 | } | |
4663 | hist_data->attrs->var_defs.expr[n_vars++] = s; | |
4664 | ||
4665 | hist_data->attrs->var_defs.n_vars = n_vars; | |
4666 | } | |
4667 | } | |
4668 | ||
4669 | return ret; | |
4670 | free: | |
4671 | free_var_defs(hist_data); | |
4672 | ||
4673 | return ret; | |
4674 | } | |
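/*
 * Illustrative assignment strings parsed above (sketch): each name=expr
 * pair in the trigger becomes a variable definition, e.g.
 *
 *   hist:keys=pid:ts0=common_timestamp.usecs
 *   hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0
 *
 * The name and expression strings are duplicated here and later consumed by
 * create_var_fields()/create_var_field().
 */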
4675 | ||
4676 | static int create_hist_fields(struct hist_trigger_data *hist_data, | |
4677 | struct trace_event_file *file) | |
4678 | { | |
4679 | int ret; | |
4680 | ||
4681 | ret = parse_var_defs(hist_data); | |
4682 | if (ret) | |
4683 | return ret; | |
4684 | ||
4685 | ret = create_val_fields(hist_data, file); | |
4686 | if (ret) | |
4687 | goto out; | |
4688 | ||
4689 | ret = create_var_fields(hist_data, file); | |
4690 | if (ret) | |
4691 | goto out; | |
4692 | ||
4693 | ret = create_key_fields(hist_data, file); | |
4694 | ||
4695 | out: | |
4696 | free_var_defs(hist_data); | |
4697 | ||
4698 | return ret; | |
4699 | } | |
4700 | ||
4701 | static int is_descending(struct trace_array *tr, const char *str) | |
4702 | { | |
4703 | if (!str) | |
4704 | return 0; | |
4705 | ||
4706 | if (strcmp(str, "descending") == 0) | |
4707 | return 1; | |
4708 | ||
4709 | if (strcmp(str, "ascending") == 0) | |
4710 | return 0; | |
4711 | ||
4712 | hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str)); | |
4713 | ||
4714 | return -EINVAL; | |
4715 | } | |
4716 | ||
4717 | static int create_sort_keys(struct hist_trigger_data *hist_data) | |
4718 | { | |
4719 | struct trace_array *tr = hist_data->event_file->tr; | |
4720 | char *fields_str = hist_data->attrs->sort_key_str; | |
4721 | struct tracing_map_sort_key *sort_key; | |
4722 | int descending, ret = 0; | |
4723 | unsigned int i, j, k; | |
4724 | ||
4725 | hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */ | |
4726 | ||
4727 | if (!fields_str) | |
4728 | goto out; | |
4729 | ||
4730 | for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { | |
4731 | struct hist_field *hist_field; | |
4732 | char *field_str, *field_name; | |
4733 | const char *test_name; | |
4734 | ||
4735 | sort_key = &hist_data->sort_keys[i]; | |
4736 | ||
4737 | field_str = strsep(&fields_str, ","); | |
4738 | if (!field_str) | |
4739 | break; | |
4740 | ||
4741 | if (!*field_str) { | |
4742 | ret = -EINVAL; | |
4743 | hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); | |
4744 | break; | |
4745 | } | |
4746 | ||
4747 | if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { | |
4748 | hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort=")); | |
4749 | ret = -EINVAL; | |
4750 | break; | |
4751 | } | |
4752 | ||
4753 | field_name = strsep(&field_str, "."); | |
4754 | if (!field_name || !*field_name) { | |
4755 | ret = -EINVAL; | |
4756 | hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); | |
4757 | break; | |
4758 | } | |
4759 | ||
4760 | if (strcmp(field_name, "hitcount") == 0) { | |
4761 | descending = is_descending(tr, field_str); | |
4762 | if (descending < 0) { | |
4763 | ret = descending; | |
4764 | break; | |
4765 | } | |
4766 | sort_key->descending = descending; | |
4767 | continue; | |
4768 | } | |
4769 | ||
4770 | for (j = 1, k = 1; j < hist_data->n_fields; j++) { | |
4771 | unsigned int idx; | |
4772 | ||
4773 | hist_field = hist_data->fields[j]; | |
4774 | if (hist_field->flags & HIST_FIELD_FL_VAR) | |
4775 | continue; | |
4776 | ||
4777 | idx = k++; | |
4778 | ||
4779 | test_name = hist_field_name(hist_field, 0); | |
4780 | ||
4781 | if (strcmp(field_name, test_name) == 0) { | |
4782 | sort_key->field_idx = idx; | |
4783 | descending = is_descending(tr, field_str); | |
4784 | if (descending < 0) { | |
4785 | ret = descending; | |
4786 | goto out; | |
4787 | } | |
4788 | sort_key->descending = descending; | |
4789 | break; | |
4790 | } | |
4791 | } | |
4792 | if (j == hist_data->n_fields) { | |
4793 | ret = -EINVAL; | |
4794 | hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name)); | |
4795 | break; | |
4796 | } | |
4797 | } | |
4798 | ||
4799 | hist_data->n_sort_keys = i; | |
4800 | out: | |
4801 | return ret; | |
4802 | } | |
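/*
 * Illustrative sort= strings handled above (sketch):
 *
 *   hist:keys=call_site:vals=bytes_req:sort=bytes_req.descending
 *   hist:keys=common_pid:sort=hitcount
 *
 * "hitcount" selects sort key 0; other names are matched against the
 * non-variable fields, and a ".descending"/".ascending" suffix picks the
 * direction (ascending by default).
 */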
4803 | ||
4804 | static void destroy_actions(struct hist_trigger_data *hist_data) | |
4805 | { | |
4806 | unsigned int i; | |
4807 | ||
4808 | for (i = 0; i < hist_data->n_actions; i++) { | |
4809 | struct action_data *data = hist_data->actions[i]; | |
4810 | ||
4811 | if (data->handler == HANDLER_ONMATCH) | |
4812 | onmatch_destroy(data); | |
4813 | else if (data->handler == HANDLER_ONMAX || | |
4814 | data->handler == HANDLER_ONCHANGE) | |
4815 | track_data_destroy(hist_data, data); | |
4816 | else | |
4817 | kfree(data); | |
4818 | } | |
4819 | } | |
4820 | ||
4821 | static int parse_actions(struct hist_trigger_data *hist_data) | |
4822 | { | |
4823 | struct trace_array *tr = hist_data->event_file->tr; | |
4824 | struct action_data *data; | |
4825 | unsigned int i; | |
4826 | int ret = 0; | |
4827 | char *str; | |
4828 | int len; | |
4829 | ||
4830 | for (i = 0; i < hist_data->attrs->n_actions; i++) { | |
4831 | enum handler_id hid = 0; | |
4832 | char *action_str; | |
4833 | ||
4834 | str = hist_data->attrs->action_str[i]; | |
4835 | ||
4836 | if ((len = str_has_prefix(str, "onmatch("))) | |
4837 | hid = HANDLER_ONMATCH; | |
4838 | else if ((len = str_has_prefix(str, "onmax("))) | |
4839 | hid = HANDLER_ONMAX; | |
4840 | else if ((len = str_has_prefix(str, "onchange("))) | |
4841 | hid = HANDLER_ONCHANGE; | |
4842 | ||
4843 | action_str = str + len; | |
4844 | ||
4845 | switch (hid) { | |
4846 | case HANDLER_ONMATCH: | |
4847 | data = onmatch_parse(tr, action_str); | |
4848 | break; | |
4849 | case HANDLER_ONMAX: | |
4850 | case HANDLER_ONCHANGE: | |
4851 | data = track_data_parse(hist_data, action_str, hid); | |
4852 | break; | |
4853 | default: | |
4854 | data = ERR_PTR(-EINVAL); | |
4855 | break; | |
4856 | } | |
4857 | ||
4858 | if (IS_ERR(data)) { | |
4859 | ret = PTR_ERR(data); | |
4860 | break; | |
4861 | } | |
4862 | ||
4863 | hist_data->actions[hist_data->n_actions++] = data; | |
4864 | } | |
4865 | ||
4866 | return ret; | |
4867 | } | |
4868 | ||
4869 | static int create_actions(struct hist_trigger_data *hist_data) | |
4870 | { | |
4871 | struct action_data *data; | |
4872 | unsigned int i; | |
4873 | int ret = 0; | |
4874 | ||
4875 | for (i = 0; i < hist_data->attrs->n_actions; i++) { | |
4876 | data = hist_data->actions[i]; | |
4877 | ||
4878 | if (data->handler == HANDLER_ONMATCH) { | |
4879 | ret = onmatch_create(hist_data, data); | |
4880 | if (ret) | |
4881 | break; | |
4882 | } else if (data->handler == HANDLER_ONMAX || | |
4883 | data->handler == HANDLER_ONCHANGE) { | |
4884 | ret = track_data_create(hist_data, data); | |
4885 | if (ret) | |
4886 | break; | |
4887 | } else { | |
4888 | ret = -EINVAL; | |
4889 | break; | |
4890 | } | |
4891 | } | |
4892 | ||
4893 | return ret; | |
4894 | } | |
4895 | ||
4896 | static void print_actions(struct seq_file *m, | |
4897 | struct hist_trigger_data *hist_data, | |
4898 | struct tracing_map_elt *elt) | |
4899 | { | |
4900 | unsigned int i; | |
4901 | ||
4902 | for (i = 0; i < hist_data->n_actions; i++) { | |
4903 | struct action_data *data = hist_data->actions[i]; | |
4904 | ||
4905 | if (data->action == ACTION_SNAPSHOT) | |
4906 | continue; | |
4907 | ||
4908 | if (data->handler == HANDLER_ONMAX || | |
4909 | data->handler == HANDLER_ONCHANGE) | |
4910 | track_data_print(m, hist_data, elt, data); | |
4911 | } | |
4912 | } | |
4913 | ||
4914 | static void print_action_spec(struct seq_file *m, | |
4915 | struct hist_trigger_data *hist_data, | |
4916 | struct action_data *data) | |
4917 | { | |
4918 | unsigned int i; | |
4919 | ||
4920 | if (data->action == ACTION_SAVE) { | |
4921 | for (i = 0; i < hist_data->n_save_vars; i++) { | |
4922 | seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name); | |
4923 | if (i < hist_data->n_save_vars - 1) | |
4924 | seq_puts(m, ","); | |
4925 | } | |
4926 | } else if (data->action == ACTION_TRACE) { | |
4927 | if (data->use_trace_keyword) | |
4928 | seq_printf(m, "%s", data->synth_event_name); | |
4929 | for (i = 0; i < data->n_params; i++) { | |
4930 | if (i || data->use_trace_keyword) | |
4931 | seq_puts(m, ","); | |
4932 | seq_printf(m, "%s", data->params[i]); | |
4933 | } | |
4934 | } | |
4935 | } | |
4936 | ||
4937 | static void print_track_data_spec(struct seq_file *m, | |
4938 | struct hist_trigger_data *hist_data, | |
4939 | struct action_data *data) | |
4940 | { | |
4941 | if (data->handler == HANDLER_ONMAX) | |
4942 | seq_puts(m, ":onmax("); | |
4943 | else if (data->handler == HANDLER_ONCHANGE) | |
4944 | seq_puts(m, ":onchange("); | |
4945 | seq_printf(m, "%s", data->track_data.var_str); | |
4946 | seq_printf(m, ").%s(", data->action_name); | |
4947 | ||
4948 | print_action_spec(m, hist_data, data); | |
4949 | ||
4950 | seq_puts(m, ")"); | |
4951 | } | |
4952 | ||
4953 | static void print_onmatch_spec(struct seq_file *m, | |
4954 | struct hist_trigger_data *hist_data, | |
4955 | struct action_data *data) | |
4956 | { | |
4957 | seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system, | |
4958 | data->match_data.event); | |
4959 | ||
4960 | seq_printf(m, "%s(", data->action_name); | |
4961 | ||
4962 | print_action_spec(m, hist_data, data); | |
4963 | ||
4964 | seq_puts(m, ")"); | |
4965 | } | |
4966 | ||
4967 | static bool actions_match(struct hist_trigger_data *hist_data, | |
4968 | struct hist_trigger_data *hist_data_test) | |
4969 | { | |
4970 | unsigned int i, j; | |
4971 | ||
4972 | if (hist_data->n_actions != hist_data_test->n_actions) | |
4973 | return false; | |
4974 | ||
4975 | for (i = 0; i < hist_data->n_actions; i++) { | |
4976 | struct action_data *data = hist_data->actions[i]; | |
4977 | struct action_data *data_test = hist_data_test->actions[i]; | |
4978 | char *action_name, *action_name_test; | |
4979 | ||
4980 | if (data->handler != data_test->handler) | |
4981 | return false; | |
4982 | if (data->action != data_test->action) | |
4983 | return false; | |
4984 | ||
4985 | if (data->n_params != data_test->n_params) | |
4986 | return false; | |
4987 | ||
4988 | for (j = 0; j < data->n_params; j++) { | |
4989 | if (strcmp(data->params[j], data_test->params[j]) != 0) | |
4990 | return false; | |
4991 | } | |
4992 | ||
4993 | if (data->use_trace_keyword) | |
4994 | action_name = data->synth_event_name; | |
4995 | else | |
4996 | action_name = data->action_name; | |
4997 | ||
4998 | if (data_test->use_trace_keyword) | |
4999 | action_name_test = data_test->synth_event_name; | |
5000 | else | |
5001 | action_name_test = data_test->action_name; | |
5002 | ||
5003 | if (strcmp(action_name, action_name_test) != 0) | |
5004 | return false; | |
5005 | ||
5006 | if (data->handler == HANDLER_ONMATCH) { | |
5007 | if (strcmp(data->match_data.event_system, | |
5008 | data_test->match_data.event_system) != 0) | |
5009 | return false; | |
5010 | if (strcmp(data->match_data.event, | |
5011 | data_test->match_data.event) != 0) | |
5012 | return false; | |
5013 | } else if (data->handler == HANDLER_ONMAX || | |
5014 | data->handler == HANDLER_ONCHANGE) { | |
5015 | if (strcmp(data->track_data.var_str, | |
5016 | data_test->track_data.var_str) != 0) | |
5017 | return false; | |
5018 | } | |
5019 | } | |
5020 | ||
5021 | return true; | |
5022 | } | |
5023 | ||
5024 | ||
5025 | static void print_actions_spec(struct seq_file *m, | |
5026 | struct hist_trigger_data *hist_data) | |
5027 | { | |
5028 | unsigned int i; | |
5029 | ||
5030 | for (i = 0; i < hist_data->n_actions; i++) { | |
5031 | struct action_data *data = hist_data->actions[i]; | |
5032 | ||
5033 | if (data->handler == HANDLER_ONMATCH) | |
5034 | print_onmatch_spec(m, hist_data, data); | |
5035 | else if (data->handler == HANDLER_ONMAX || | |
5036 | data->handler == HANDLER_ONCHANGE) | |
5037 | print_track_data_spec(m, hist_data, data); | |
5038 | } | |
5039 | } | |
5040 | ||
5041 | static void destroy_field_var_hists(struct hist_trigger_data *hist_data) | |
5042 | { | |
5043 | unsigned int i; | |
5044 | ||
5045 | for (i = 0; i < hist_data->n_field_var_hists; i++) { | |
5046 | kfree(hist_data->field_var_hists[i]->cmd); | |
5047 | kfree(hist_data->field_var_hists[i]); | |
5048 | } | |
5049 | } | |
5050 | ||
5051 | static void destroy_hist_data(struct hist_trigger_data *hist_data) | |
5052 | { | |
5053 | if (!hist_data) | |
5054 | return; | |
5055 | ||
5056 | destroy_hist_trigger_attrs(hist_data->attrs); | |
5057 | destroy_hist_fields(hist_data); | |
5058 | tracing_map_destroy(hist_data->map); | |
5059 | ||
5060 | destroy_actions(hist_data); | |
5061 | destroy_field_vars(hist_data); | |
5062 | destroy_field_var_hists(hist_data); | |
5063 | ||
5064 | kfree(hist_data); | |
5065 | } | |
5066 | ||
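| /* | |
|  * Register each hist field with the underlying tracing_map: key fields | |
|  * get a compare function, plain value fields become sum fields, and | |
|  * variables get a per-element variable slot. | |
|  */ | |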
5067 | static int create_tracing_map_fields(struct hist_trigger_data *hist_data) | |
5068 | { | |
5069 | struct tracing_map *map = hist_data->map; | |
5070 | struct ftrace_event_field *field; | |
5071 | struct hist_field *hist_field; | |
5072 | int i, idx = 0; | |
5073 | ||
5074 | for_each_hist_field(i, hist_data) { | |
5075 | hist_field = hist_data->fields[i]; | |
5076 | if (hist_field->flags & HIST_FIELD_FL_KEY) { | |
5077 | tracing_map_cmp_fn_t cmp_fn; | |
5078 | ||
5079 | field = hist_field->field; | |
5080 | ||
5081 | if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) | |
5082 | cmp_fn = tracing_map_cmp_none; | |
5083 | else if (!field || hist_field->flags & HIST_FIELD_FL_CPU) | |
5084 | cmp_fn = tracing_map_cmp_num(hist_field->size, | |
5085 | hist_field->is_signed); | |
5086 | else if (is_string_field(field)) | |
5087 | cmp_fn = tracing_map_cmp_string; | |
5088 | else | |
5089 | cmp_fn = tracing_map_cmp_num(field->size, | |
5090 | field->is_signed); | |
5091 | idx = tracing_map_add_key_field(map, | |
5092 | hist_field->offset, | |
5093 | cmp_fn); | |
5094 | } else if (!(hist_field->flags & HIST_FIELD_FL_VAR)) | |
5095 | idx = tracing_map_add_sum_field(map); | |
5096 | ||
5097 | if (idx < 0) | |
5098 | return idx; | |
5099 | ||
5100 | if (hist_field->flags & HIST_FIELD_FL_VAR) { | |
5101 | idx = tracing_map_add_var(map); | |
5102 | if (idx < 0) | |
5103 | return idx; | |
5104 | hist_field->var.idx = idx; | |
5105 | hist_field->var.hist_data = hist_data; | |
5106 | } | |
5107 | } | |
5108 | ||
5109 | return 0; | |
5110 | } | |
5111 | ||
5112 | static struct hist_trigger_data * | |
5113 | create_hist_data(unsigned int map_bits, | |
5114 | struct hist_trigger_attrs *attrs, | |
5115 | struct trace_event_file *file, | |
5116 | bool remove) | |
5117 | { | |
5118 | const struct tracing_map_ops *map_ops = NULL; | |
5119 | struct hist_trigger_data *hist_data; | |
5120 | int ret = 0; | |
5121 | ||
5122 | hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); | |
5123 | if (!hist_data) | |
5124 | return ERR_PTR(-ENOMEM); | |
5125 | ||
5126 | hist_data->attrs = attrs; | |
5127 | hist_data->remove = remove; | |
5128 | hist_data->event_file = file; | |
5129 | ||
5130 | ret = parse_actions(hist_data); | |
5131 | if (ret) | |
5132 | goto free; | |
5133 | ||
5134 | ret = create_hist_fields(hist_data, file); | |
5135 | if (ret) | |
5136 | goto free; | |
5137 | ||
5138 | ret = create_sort_keys(hist_data); | |
5139 | if (ret) | |
5140 | goto free; | |
5141 | ||
5142 | map_ops = &hist_trigger_elt_data_ops; | |
5143 | ||
5144 | hist_data->map = tracing_map_create(map_bits, hist_data->key_size, | |
5145 | map_ops, hist_data); | |
5146 | if (IS_ERR(hist_data->map)) { | |
5147 | ret = PTR_ERR(hist_data->map); | |
5148 | hist_data->map = NULL; | |
5149 | goto free; | |
5150 | } | |
5151 | ||
5152 | ret = create_tracing_map_fields(hist_data); | |
5153 | if (ret) | |
5154 | goto free; | |
5155 | out: | |
5156 | return hist_data; | |
5157 | free: | |
5158 | hist_data->attrs = NULL; | |
5159 | ||
5160 | destroy_hist_data(hist_data); | |
5161 | ||
5162 | hist_data = ERR_PTR(ret); | |
5163 | ||
5164 | goto out; | |
5165 | } | |
5166 | ||
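| /* | |
|  * Update a map element for one event hit: accumulate the value (sum) | |
|  * fields and set any variables, copying string and stacktrace values | |
|  * into per-element storage, then update the field variables. | |
|  */ | |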
5167 | static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, | |
5168 | struct tracing_map_elt *elt, | |
5169 | struct trace_buffer *buffer, void *rec, | |
5170 | struct ring_buffer_event *rbe, | |
5171 | u64 *var_ref_vals) | |
5172 | { | |
5173 | struct hist_elt_data *elt_data; | |
5174 | struct hist_field *hist_field; | |
5175 | unsigned int i, var_idx; | |
5176 | u64 hist_val; | |
5177 | ||
5178 | elt_data = elt->private_data; | |
5179 | elt_data->var_ref_vals = var_ref_vals; | |
5180 | ||
5181 | for_each_hist_val_field(i, hist_data) { | |
5182 | hist_field = hist_data->fields[i]; | |
5183 | hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec); | |
5184 | if (hist_field->flags & HIST_FIELD_FL_VAR) { | |
5185 | var_idx = hist_field->var.idx; | |
5186 | ||
5187 | if (hist_field->flags & | |
5188 | (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) { | |
5189 | unsigned int str_start, var_str_idx, idx; | |
5190 | char *str, *val_str; | |
5191 | unsigned int size; | |
5192 | ||
5193 | str_start = hist_data->n_field_var_str + | |
5194 | hist_data->n_save_var_str; | |
5195 | var_str_idx = hist_field->var_str_idx; | |
5196 | idx = str_start + var_str_idx; | |
5197 | ||
5198 | str = elt_data->field_var_str[idx]; | |
5199 | val_str = (char *)(uintptr_t)hist_val; | |
5200 | ||
5201 | if (hist_field->flags & HIST_FIELD_FL_STRING) { | |
5202 | size = min(hist_field->size, STR_VAR_LEN_MAX); | |
5203 | strscpy(str, val_str, size); | |
5204 | } else { | |
5205 | char *stack_start = str + sizeof(unsigned long); | |
5206 | int e; | |
5207 | ||
5208 | e = stack_trace_save((void *)stack_start, | |
5209 | HIST_STACKTRACE_DEPTH, | |
5210 | HIST_STACKTRACE_SKIP); | |
5211 | if (e < HIST_STACKTRACE_DEPTH - 1) | |
5212 | ((unsigned long *)stack_start)[e] = 0; | |
5213 | *((unsigned long *)str) = e; | |
5214 | } | |
5215 | hist_val = (u64)(uintptr_t)str; | |
5216 | } | |
5217 | tracing_map_set_var(elt, var_idx, hist_val); | |
5218 | continue; | |
5219 | } | |
5220 | tracing_map_update_sum(elt, i, hist_val); | |
5221 | } | |
5222 | ||
5223 | for_each_hist_key_field(i, hist_data) { | |
5224 | hist_field = hist_data->fields[i]; | |
5225 | if (hist_field->flags & HIST_FIELD_FL_VAR) { | |
5226 | hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec); | |
5227 | var_idx = hist_field->var.idx; | |
5228 | tracing_map_set_var(elt, var_idx, hist_val); | |
5229 | } | |
5230 | } | |
5231 | ||
5232 | update_field_vars(hist_data, elt, buffer, rbe, rec); | |
5233 | } | |
5234 | ||
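| /* | |
|  * Copy one key field's value into the compound key at the field's | |
|  * offset, clamping string keys so they stay NUL-terminated. | |
|  */ | |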
5235 | static inline void add_to_key(char *compound_key, void *key, | |
5236 | struct hist_field *key_field, void *rec) | |
5237 | { | |
5238 | size_t size = key_field->size; | |
5239 | ||
5240 | if (key_field->flags & HIST_FIELD_FL_STRING) { | |
5241 | ||
5242 | if (key_field->flags & HIST_FIELD_FL_COMM) { | |
5243 | size = strlen((char *)key); | |
5244 | } else { | |
5245 | struct ftrace_event_field *field; | |
5246 | ||
5247 | field = key_field->field; | |
5248 | if (field->filter_type == FILTER_DYN_STRING || | |
5249 | field->filter_type == FILTER_RDYN_STRING) | |
5250 | size = *(u32 *)(rec + field->offset) >> 16; | |
5251 | else if (field->filter_type == FILTER_STATIC_STRING) | |
5252 | size = field->size; | |
5253 | } | |
5254 | ||
5255 | /* ensure NULL-termination */ | |
5256 | if (size > key_field->size - 1) | |
5257 | size = key_field->size - 1; | |
5258 | } | |
5259 | memcpy(compound_key + key_field->offset, key, size); | |
5260 | } | |
5261 | ||
5262 | static void | |
5263 | hist_trigger_actions(struct hist_trigger_data *hist_data, | |
5264 | struct tracing_map_elt *elt, | |
5265 | struct trace_buffer *buffer, void *rec, | |
5266 | struct ring_buffer_event *rbe, void *key, | |
5267 | u64 *var_ref_vals) | |
5268 | { | |
5269 | struct action_data *data; | |
5270 | unsigned int i; | |
5271 | ||
5272 | for (i = 0; i < hist_data->n_actions; i++) { | |
5273 | data = hist_data->actions[i]; | |
5274 | data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals); | |
5275 | } | |
5276 | } | |
5277 | ||
5278 | /* | |
5279 | * The hist_pad structure is used to save information to create | |
5280 | * a histogram from the histogram trigger. It's too big to store | |
5281 | * on the stack, so when the histogram trigger is initialized | |
5282 | * a percpu array of 4 hist_pad structures is allocated. | |
5283 | * This will cover every context from normal, softirq, irq and NMI | |
5284 | * in the very unlikely event that a tigger happens at each of | |
5285 | * these contexts and interrupts a currently active trigger. | |
5286 | */ | |
5287 | struct hist_pad { | |
5288 | unsigned long entries[HIST_STACKTRACE_DEPTH]; | |
5289 | u64 var_ref_vals[TRACING_MAP_VARS_MAX]; | |
5290 | char compound_key[HIST_KEY_SIZE_MAX]; | |
5291 | }; | |
5292 | ||
5293 | static struct hist_pad __percpu *hist_pads; | |
5294 | static DEFINE_PER_CPU(int, hist_pad_cnt); | |
5295 | static refcount_t hist_pad_ref; | |
5296 | ||
5297 | /* One hist_pad for every context (normal, softirq, irq, NMI) */ | |
5298 | #define MAX_HIST_CNT 4 | |
5299 | ||
5300 | static int alloc_hist_pad(void) | |
5301 | { | |
5302 | lockdep_assert_held(&event_mutex); | |
5303 | ||
5304 | if (refcount_read(&hist_pad_ref)) { | |
5305 | refcount_inc(&hist_pad_ref); | |
5306 | return 0; | |
5307 | } | |
5308 | ||
5309 | hist_pads = __alloc_percpu(sizeof(struct hist_pad) * MAX_HIST_CNT, | |
5310 | __alignof__(struct hist_pad)); | |
5311 | if (!hist_pads) | |
5312 | return -ENOMEM; | |
5313 | ||
5314 | refcount_set(&hist_pad_ref, 1); | |
5315 | return 0; | |
5316 | } | |
5317 | ||
5318 | static void free_hist_pad(void) | |
5319 | { | |
5320 | lockdep_assert_held(&event_mutex); | |
5321 | ||
5322 | if (!refcount_dec_and_test(&hist_pad_ref)) | |
5323 | return; | |
5324 | ||
5325 | free_percpu(hist_pads); | |
5326 | hist_pads = NULL; | |
5327 | } | |
5328 | ||
5329 | static struct hist_pad *get_hist_pad(void) | |
5330 | { | |
5331 | struct hist_pad *hist_pad; | |
5332 | int cnt; | |
5333 | ||
5334 | if (WARN_ON_ONCE(!hist_pads)) | |
5335 | return NULL; | |
5336 | ||
5337 | preempt_disable(); | |
5338 | ||
5339 | hist_pad = per_cpu_ptr(hist_pads, smp_processor_id()); | |
5340 | ||
5341 | if (this_cpu_read(hist_pad_cnt) == MAX_HIST_CNT) { | |
5342 | preempt_enable(); | |
5343 | return NULL; | |
5344 | } | |
5345 | ||
5346 | cnt = this_cpu_inc_return(hist_pad_cnt) - 1; | |
5347 | ||
5348 | return &hist_pad[cnt]; | |
5349 | } | |
5350 | ||
5351 | static void put_hist_pad(void) | |
5352 | { | |
5353 | this_cpu_dec(hist_pad_cnt); | |
5354 | preempt_enable(); | |
5355 | } | |
5356 | ||
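| /* | |
|  * Per-event callback for a hist trigger: build the (possibly compound) | |
|  * key from the key fields, insert or look up the map element, update | |
|  * its values and variables, and run any actions whose variable | |
|  * references resolve. | |
|  */ | |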
5357 | static void event_hist_trigger(struct event_trigger_data *data, | |
5358 | struct trace_buffer *buffer, void *rec, | |
5359 | struct ring_buffer_event *rbe) | |
5360 | { | |
5361 | struct hist_trigger_data *hist_data = data->private_data; | |
5362 | bool use_compound_key = (hist_data->n_keys > 1); | |
5363 | struct tracing_map_elt *elt = NULL; | |
5364 | struct hist_field *key_field; | |
5365 | struct hist_pad *hist_pad; | |
5366 | u64 field_contents; | |
5367 | void *key = NULL; | |
5368 | unsigned int i; | |
5369 | ||
5370 | if (unlikely(!rbe)) | |
5371 | return; | |
5372 | ||
5373 | hist_pad = get_hist_pad(); | |
5374 | if (!hist_pad) | |
5375 | return; | |
5376 | ||
5377 | memset(hist_pad->compound_key, 0, hist_data->key_size); | |
5378 | ||
5379 | for_each_hist_key_field(i, hist_data) { | |
5380 | key_field = hist_data->fields[i]; | |
5381 | ||
5382 | if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { | |
5383 | unsigned long *entries = hist_pad->entries; | |
5384 | ||
5385 | memset(entries, 0, HIST_STACKTRACE_SIZE); | |
5386 | if (key_field->field) { | |
5387 | unsigned long *stack, n_entries; | |
5388 | ||
5389 | field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec); | |
5390 | stack = (unsigned long *)(long)field_contents; | |
5391 | n_entries = *stack; | |
5392 | memcpy(entries, ++stack, n_entries * sizeof(unsigned long)); | |
5393 | } else { | |
5394 | stack_trace_save(entries, HIST_STACKTRACE_DEPTH, | |
5395 | HIST_STACKTRACE_SKIP); | |
5396 | } | |
5397 | key = entries; | |
5398 | } else { | |
5399 | field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec); | |
5400 | if (key_field->flags & HIST_FIELD_FL_STRING) { | |
5401 | key = (void *)(unsigned long)field_contents; | |
5402 | use_compound_key = true; | |
5403 | } else | |
5404 | key = (void *)&field_contents; | |
5405 | } | |
5406 | ||
5407 | if (use_compound_key) | |
5408 | add_to_key(hist_pad->compound_key, key, key_field, rec); | |
5409 | } | |
5410 | ||
5411 | if (use_compound_key) | |
5412 | key = hist_pad->compound_key; | |
5413 | ||
5414 | if (hist_data->n_var_refs && | |
5415 | !resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, false)) | |
5416 | goto out; | |
5417 | ||
5418 | elt = tracing_map_insert(hist_data->map, key); | |
5419 | if (!elt) | |
5420 | goto out; | |
5421 | ||
5422 | hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, hist_pad->var_ref_vals); | |
5423 | ||
5424 | if (resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, true)) { | |
5425 | hist_trigger_actions(hist_data, elt, buffer, rec, rbe, | |
5426 | key, hist_pad->var_ref_vals); | |
5427 | } | |
5428 | ||
5429 | hist_poll_wakeup(); | |
5430 | ||
5431 | out: | |
5432 | put_hist_pad(); | |
5433 | } | |
5434 | ||
5435 | static void hist_trigger_stacktrace_print(struct seq_file *m, | |
5436 | unsigned long *stacktrace_entries, | |
5437 | unsigned int max_entries) | |
5438 | { | |
5439 | unsigned int spaces = 8; | |
5440 | unsigned int i; | |
5441 | ||
5442 | for (i = 0; i < max_entries; i++) { | |
5443 | if (!stacktrace_entries[i]) | |
5444 | return; | |
5445 | ||
5446 | seq_printf(m, "%*c", 1 + spaces, ' '); | |
5447 | seq_printf(m, "%pS\n", (void *)stacktrace_entries[i]); | |
5448 | } | |
5449 | } | |
5450 | ||
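| /* | |
|  * Print one map key, honoring key field modifiers such as .hex, .sym, | |
|  * .sym-offset, .execname, .syscall, .log2 and .buckets. | |
|  */ | |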
5451 | static void hist_trigger_print_key(struct seq_file *m, | |
5452 | struct hist_trigger_data *hist_data, | |
5453 | void *key, | |
5454 | struct tracing_map_elt *elt) | |
5455 | { | |
5456 | struct hist_field *key_field; | |
5457 | bool multiline = false; | |
5458 | const char *field_name; | |
5459 | unsigned int i; | |
5460 | u64 uval; | |
5461 | ||
5462 | seq_puts(m, "{ "); | |
5463 | ||
5464 | for_each_hist_key_field(i, hist_data) { | |
5465 | key_field = hist_data->fields[i]; | |
5466 | ||
5467 | if (i > hist_data->n_vals) | |
5468 | seq_puts(m, ", "); | |
5469 | ||
5470 | field_name = hist_field_name(key_field, 0); | |
5471 | ||
5472 | if (key_field->flags & HIST_FIELD_FL_HEX) { | |
5473 | uval = *(u64 *)(key + key_field->offset); | |
5474 | seq_printf(m, "%s: %llx", field_name, uval); | |
5475 | } else if (key_field->flags & HIST_FIELD_FL_SYM) { | |
5476 | uval = *(u64 *)(key + key_field->offset); | |
5477 | seq_printf(m, "%s: [%llx] %-45ps", field_name, | |
5478 | uval, (void *)(uintptr_t)uval); | |
5479 | } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { | |
5480 | uval = *(u64 *)(key + key_field->offset); | |
5481 | seq_printf(m, "%s: [%llx] %-55pS", field_name, | |
5482 | uval, (void *)(uintptr_t)uval); | |
5483 | } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { | |
5484 | struct hist_elt_data *elt_data = elt->private_data; | |
5485 | char *comm; | |
5486 | ||
5487 | if (WARN_ON_ONCE(!elt_data)) | |
5488 | return; | |
5489 | ||
5490 | comm = elt_data->comm; | |
5491 | ||
5492 | uval = *(u64 *)(key + key_field->offset); | |
5493 | seq_printf(m, "%s: %-16s[%10llu]", field_name, | |
5494 | comm, uval); | |
5495 | } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { | |
5496 | const char *syscall_name; | |
5497 | ||
5498 | uval = *(u64 *)(key + key_field->offset); | |
5499 | syscall_name = get_syscall_name(uval); | |
5500 | if (!syscall_name) | |
5501 | syscall_name = "unknown_syscall"; | |
5502 | ||
5503 | seq_printf(m, "%s: %-30s[%3llu]", field_name, | |
5504 | syscall_name, uval); | |
5505 | } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { | |
5506 | if (key_field->field) | |
5507 | seq_printf(m, "%s.stacktrace", key_field->field->name); | |
5508 | else | |
5509 | seq_puts(m, "common_stacktrace:\n"); | |
5510 | hist_trigger_stacktrace_print(m, | |
5511 | key + key_field->offset, | |
5512 | HIST_STACKTRACE_DEPTH); | |
5513 | multiline = true; | |
5514 | } else if (key_field->flags & HIST_FIELD_FL_LOG2) { | |
5515 | seq_printf(m, "%s: ~ 2^%-2llu", field_name, | |
5516 | *(u64 *)(key + key_field->offset)); | |
5517 | } else if (key_field->flags & HIST_FIELD_FL_BUCKET) { | |
5518 | unsigned long buckets = key_field->buckets; | |
5519 | uval = *(u64 *)(key + key_field->offset); | |
5520 | seq_printf(m, "%s: ~ %llu-%llu", field_name, | |
5521 | uval, uval + buckets - 1); | |
5522 | } else if (key_field->flags & HIST_FIELD_FL_STRING) { | |
5523 | seq_printf(m, "%s: %-50s", field_name, | |
5524 | (char *)(key + key_field->offset)); | |
5525 | } else { | |
5526 | uval = *(u64 *)(key + key_field->offset); | |
5527 | seq_printf(m, "%s: %10llu", field_name, uval); | |
5528 | } | |
5529 | } | |
5530 | ||
5531 | if (!multiline) | |
5532 | seq_puts(m, " "); | |
5533 | ||
5534 | seq_puts(m, "}"); | |
5535 | } | |
5536 | ||
5537 | /* Return the percentage of @val in @total, scaled by 100 (e.g. 1250 means 12.50%) */ | |
5538 | static inline unsigned int __get_percentage(u64 val, u64 total) | |
5539 | { | |
5540 | if (!total) | |
5541 | goto div0; | |
5542 | ||
5543 | if (val < (U64_MAX / 10000)) | |
5544 | return (unsigned int)div64_ul(val * 10000, total); | |
5545 | ||
5546 | total = div64_u64(total, 10000); | |
5547 | if (!total) | |
5548 | goto div0; | |
5549 | ||
5550 | return (unsigned int)div64_ul(val, total); | |
5551 | div0: | |
5552 | return val ? UINT_MAX : 0; | |
5553 | } | |
5554 | ||
5555 | #define BAR_CHAR '#' | |
5556 | ||
5557 | static inline const char *__fill_bar_str(char *buf, int size, u64 val, u64 max) | |
5558 | { | |
5559 | unsigned int len = __get_percentage(val, max); | |
5560 | int i; | |
5561 | ||
5562 | if (len == UINT_MAX) { | |
5563 | snprintf(buf, size, "[ERROR]"); | |
5564 | return buf; | |
5565 | } | |
5566 | ||
5567 | len = len * size / 10000; | |
5568 | for (i = 0; i < len && i < size; i++) | |
5569 | buf[i] = BAR_CHAR; | |
5570 | while (i < size) | |
5571 | buf[i++] = ' '; | |
5572 | buf[size] = '\0'; | |
5573 | ||
5574 | return buf; | |
5575 | } | |
5576 | ||
5577 | struct hist_val_stat { | |
5578 | u64 max; | |
5579 | u64 total; | |
5580 | }; | |
5581 | ||
5582 | static void hist_trigger_print_val(struct seq_file *m, unsigned int idx, | |
5583 | const char *field_name, unsigned long flags, | |
5584 | struct hist_val_stat *stats, | |
5585 | struct tracing_map_elt *elt) | |
5586 | { | |
5587 | u64 val = tracing_map_read_sum(elt, idx); | |
5588 | unsigned int pc; | |
5589 | char bar[21]; | |
5590 | ||
5591 | if (flags & HIST_FIELD_FL_PERCENT) { | |
5592 | pc = __get_percentage(val, stats[idx].total); | |
5593 | if (pc == UINT_MAX) | |
5594 | seq_printf(m, " %s (%%):[ERROR]", field_name); | |
5595 | else | |
5596 | seq_printf(m, " %s (%%): %3u.%02u", field_name, | |
5597 | pc / 100, pc % 100); | |
5598 | } else if (flags & HIST_FIELD_FL_GRAPH) { | |
5599 | seq_printf(m, " %s: %20s", field_name, | |
5600 | __fill_bar_str(bar, 20, val, stats[idx].max)); | |
5601 | } else if (flags & HIST_FIELD_FL_HEX) { | |
5602 | seq_printf(m, " %s: %10llx", field_name, val); | |
5603 | } else { | |
5604 | seq_printf(m, " %s: %10llu", field_name, val); | |
5605 | } | |
5606 | } | |
5607 | ||
5608 | static void hist_trigger_entry_print(struct seq_file *m, | |
5609 | struct hist_trigger_data *hist_data, | |
5610 | struct hist_val_stat *stats, | |
5611 | void *key, | |
5612 | struct tracing_map_elt *elt) | |
5613 | { | |
5614 | const char *field_name; | |
5615 | unsigned int i = HITCOUNT_IDX; | |
5616 | unsigned long flags; | |
5617 | ||
5618 | hist_trigger_print_key(m, hist_data, key, elt); | |
5619 | ||
5620 | /* First, show the raw hitcount, unless nohitcount was specified */ | |
5621 | if (!hist_data->attrs->no_hitcount) | |
5622 | hist_trigger_print_val(m, i, "hitcount", 0, stats, elt); | |
5623 | ||
5624 | for (i = 1; i < hist_data->n_vals; i++) { | |
5625 | field_name = hist_field_name(hist_data->fields[i], 0); | |
5626 | flags = hist_data->fields[i]->flags; | |
5627 | if (flags & HIST_FIELD_FL_VAR || flags & HIST_FIELD_FL_EXPR) | |
5628 | continue; | |
5629 | ||
5630 | seq_puts(m, " "); | |
5631 | hist_trigger_print_val(m, i, field_name, flags, stats, elt); | |
5632 | } | |
5633 | ||
5634 | print_actions(m, hist_data, elt); | |
5635 | ||
5636 | seq_puts(m, "\n"); | |
5637 | } | |
5638 | ||
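| /* | |
|  * Sort the map entries using the trigger's sort keys and print each | |
|  * one; totals and maximums are precomputed for any .percent or .graph | |
|  * value fields. | |
|  */ | |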
5639 | static int print_entries(struct seq_file *m, | |
5640 | struct hist_trigger_data *hist_data) | |
5641 | { | |
5642 | struct tracing_map_sort_entry **sort_entries = NULL; | |
5643 | struct tracing_map *map = hist_data->map; | |
5644 | int i, j, n_entries; | |
5645 | struct hist_val_stat *stats = NULL; | |
5646 | u64 val; | |
5647 | ||
5648 | n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, | |
5649 | hist_data->n_sort_keys, | |
5650 | &sort_entries); | |
5651 | if (n_entries < 0) | |
5652 | return n_entries; | |
5653 | ||
5654 | /* Calculate the max and the total for each field if needed. */ | |
5655 | for (j = 0; j < hist_data->n_vals; j++) { | |
5656 | if (!(hist_data->fields[j]->flags & | |
5657 | (HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH))) | |
5658 | continue; | |
5659 | if (!stats) { | |
5660 | stats = kcalloc(hist_data->n_vals, sizeof(*stats), | |
5661 | GFP_KERNEL); | |
5662 | if (!stats) { | |
5663 | n_entries = -ENOMEM; | |
5664 | goto out; | |
5665 | } | |
5666 | } | |
5667 | for (i = 0; i < n_entries; i++) { | |
5668 | val = tracing_map_read_sum(sort_entries[i]->elt, j); | |
5669 | stats[j].total += val; | |
5670 | if (stats[j].max < val) | |
5671 | stats[j].max = val; | |
5672 | } | |
5673 | } | |
5674 | ||
5675 | for (i = 0; i < n_entries; i++) | |
5676 | hist_trigger_entry_print(m, hist_data, stats, | |
5677 | sort_entries[i]->key, | |
5678 | sort_entries[i]->elt); | |
5679 | ||
5680 | kfree(stats); | |
5681 | out: | |
5682 | tracing_map_destroy_sort_entries(sort_entries, n_entries); | |
5683 | ||
5684 | return n_entries; | |
5685 | } | |
5686 | ||
5687 | static void hist_trigger_show(struct seq_file *m, | |
5688 | struct event_trigger_data *data, int n) | |
5689 | { | |
5690 | struct hist_trigger_data *hist_data; | |
5691 | int n_entries; | |
5692 | ||
5693 | if (n > 0) | |
5694 | seq_puts(m, "\n\n"); | |
5695 | ||
5696 | seq_puts(m, "# event histogram\n#\n# trigger info: "); | |
5697 | data->ops->print(m, data); | |
5698 | seq_puts(m, "#\n\n"); | |
5699 | ||
5700 | hist_data = data->private_data; | |
5701 | n_entries = print_entries(m, hist_data); | |
5702 | if (n_entries < 0) | |
5703 | n_entries = 0; | |
5704 | ||
5705 | track_data_snapshot_print(m, hist_data); | |
5706 | ||
5707 | seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", | |
5708 | (u64)atomic64_read(&hist_data->map->hits), | |
5709 | n_entries, (u64)atomic64_read(&hist_data->map->drops)); | |
5710 | } | |
5711 | ||
5712 | struct hist_file_data { | |
5713 | struct file *file; | |
5714 | u64 last_read; | |
5715 | u64 last_act; | |
5716 | }; | |
5717 | ||
5718 | static u64 get_hist_hit_count(struct trace_event_file *event_file) | |
5719 | { | |
5720 | struct hist_trigger_data *hist_data; | |
5721 | struct event_trigger_data *data; | |
5722 | u64 ret = 0; | |
5723 | ||
5724 | list_for_each_entry(data, &event_file->triggers, list) { | |
5725 | if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
5726 | hist_data = data->private_data; | |
5727 | ret += atomic64_read(&hist_data->map->hits); | |
5728 | } | |
5729 | } | |
5730 | return ret; | |
5731 | } | |
5732 | ||
5733 | static int hist_show(struct seq_file *m, void *v) | |
5734 | { | |
5735 | struct hist_file_data *hist_file = m->private; | |
5736 | struct event_trigger_data *data; | |
5737 | struct trace_event_file *event_file; | |
5738 | int n = 0; | |
5739 | ||
5740 | guard(mutex)(&event_mutex); | |
5741 | ||
5742 | event_file = event_file_file(hist_file->file); | |
5743 | if (unlikely(!event_file)) | |
5744 | return -ENODEV; | |
5745 | ||
5746 | list_for_each_entry(data, &event_file->triggers, list) { | |
5747 | if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) | |
5748 | hist_trigger_show(m, data, n++); | |
5749 | } | |
5750 | hist_file->last_read = get_hist_hit_count(event_file); | |
5751 | /* | |
5752 | * Update last_act too so that poll()/POLLPRI can wait for the next | |
5753 | * event after any syscall on the hist file. | |
5754 | */ | |
5755 | hist_file->last_act = hist_file->last_read; | |
5756 | ||
5757 | return 0; | |
5758 | } | |
5759 | ||
5760 | static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait) | |
5761 | { | |
5762 | struct trace_event_file *event_file; | |
5763 | struct seq_file *m = file->private_data; | |
5764 | struct hist_file_data *hist_file = m->private; | |
5765 | __poll_t ret = 0; | |
5766 | u64 cnt; | |
5767 | ||
5768 | guard(mutex)(&event_mutex); | |
5769 | ||
5770 | event_file = event_file_data(file); | |
5771 | if (!event_file) | |
5772 | return EPOLLERR; | |
5773 | ||
5774 | hist_poll_wait(file, wait); | |
5775 | ||
5776 | cnt = get_hist_hit_count(event_file); | |
5777 | if (hist_file->last_read != cnt) | |
5778 | ret |= EPOLLIN | EPOLLRDNORM; | |
5779 | if (hist_file->last_act != cnt) { | |
5780 | hist_file->last_act = cnt; | |
5781 | ret |= EPOLLPRI; | |
5782 | } | |
5783 | ||
5784 | return ret; | |
5785 | } | |
5786 | ||
5787 | static int event_hist_release(struct inode *inode, struct file *file) | |
5788 | { | |
5789 | struct seq_file *m = file->private_data; | |
5790 | struct hist_file_data *hist_file = m->private; | |
5791 | ||
5792 | kfree(hist_file); | |
5793 | return tracing_single_release_file_tr(inode, file); | |
5794 | } | |
5795 | ||
5796 | static int event_hist_open(struct inode *inode, struct file *file) | |
5797 | { | |
5798 | struct trace_event_file *event_file; | |
5799 | struct hist_file_data *hist_file; | |
5800 | int ret; | |
5801 | ||
5802 | ret = tracing_open_file_tr(inode, file); | |
5803 | if (ret) | |
5804 | return ret; | |
5805 | ||
5806 | guard(mutex)(&event_mutex); | |
5807 | ||
5808 | event_file = event_file_data(file); | |
5809 | if (!event_file) { | |
5810 | ret = -ENODEV; | |
5811 | goto err; | |
5812 | } | |
5813 | ||
5814 | hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL); | |
5815 | if (!hist_file) { | |
5816 | ret = -ENOMEM; | |
5817 | goto err; | |
5818 | } | |
5819 | ||
5820 | hist_file->file = file; | |
5821 | hist_file->last_act = get_hist_hit_count(event_file); | |
5822 | ||
5823 | /* Clear private_data to avoid warning in single_open() */ | |
5824 | file->private_data = NULL; | |
5825 | ret = single_open(file, hist_show, hist_file); | |
5826 | if (ret) { | |
5827 | kfree(hist_file); | |
5828 | goto err; | |
5829 | } | |
5830 | ||
5831 | return 0; | |
5832 | err: | |
5833 | tracing_release_file_tr(inode, file); | |
5834 | return ret; | |
5835 | } | |
5836 | ||
5837 | const struct file_operations event_hist_fops = { | |
5838 | .open = event_hist_open, | |
5839 | .read = seq_read, | |
5840 | .llseek = seq_lseek, | |
5841 | .release = event_hist_release, | |
5842 | .poll = event_hist_poll, | |
5843 | }; | |
5844 | ||
5845 | #ifdef CONFIG_HIST_TRIGGERS_DEBUG | |
5846 | static void hist_field_debug_show_flags(struct seq_file *m, | |
5847 | unsigned long flags) | |
5848 | { | |
5849 | seq_puts(m, " flags:\n"); | |
5850 | ||
5851 | if (flags & HIST_FIELD_FL_KEY) | |
5852 | seq_puts(m, " HIST_FIELD_FL_KEY\n"); | |
5853 | else if (flags & HIST_FIELD_FL_HITCOUNT) | |
5854 | seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n"); | |
5855 | else if (flags & HIST_FIELD_FL_VAR) | |
5856 | seq_puts(m, " HIST_FIELD_FL_VAR\n"); | |
5857 | else if (flags & HIST_FIELD_FL_VAR_REF) | |
5858 | seq_puts(m, " HIST_FIELD_FL_VAR_REF\n"); | |
5859 | else | |
5860 | seq_puts(m, " VAL: normal u64 value\n"); | |
5861 | ||
5862 | if (flags & HIST_FIELD_FL_ALIAS) | |
5863 | seq_puts(m, " HIST_FIELD_FL_ALIAS\n"); | |
5864 | else if (flags & HIST_FIELD_FL_CONST) | |
5865 | seq_puts(m, " HIST_FIELD_FL_CONST\n"); | |
5866 | } | |
5867 | ||
5868 | static int hist_field_debug_show(struct seq_file *m, | |
5869 | struct hist_field *field, unsigned long flags) | |
5870 | { | |
5871 | if ((field->flags & flags) != flags) { | |
5872 | seq_printf(m, "ERROR: bad flags - %lx\n", flags); | |
5873 | return -EINVAL; | |
5874 | } | |
5875 | ||
5876 | hist_field_debug_show_flags(m, field->flags); | |
5877 | if (field->field) | |
5878 | seq_printf(m, " ftrace_event_field name: %s\n", | |
5879 | field->field->name); | |
5880 | ||
5881 | if (field->flags & HIST_FIELD_FL_VAR) { | |
5882 | seq_printf(m, " var.name: %s\n", field->var.name); | |
5883 | seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", | |
5884 | field->var.idx); | |
5885 | } | |
5886 | ||
5887 | if (field->flags & HIST_FIELD_FL_CONST) | |
5888 | seq_printf(m, " constant: %llu\n", field->constant); | |
5889 | ||
5890 | if (field->flags & HIST_FIELD_FL_ALIAS) | |
5891 | seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", | |
5892 | field->var_ref_idx); | |
5893 | ||
5894 | if (field->flags & HIST_FIELD_FL_VAR_REF) { | |
5895 | seq_printf(m, " name: %s\n", field->name); | |
5896 | seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", | |
5897 | field->var.idx); | |
5898 | seq_printf(m, " var.hist_data: %p\n", field->var.hist_data); | |
5899 | seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", | |
5900 | field->var_ref_idx); | |
5901 | if (field->system) | |
5902 | seq_printf(m, " system: %s\n", field->system); | |
5903 | if (field->event_name) | |
5904 | seq_printf(m, " event_name: %s\n", field->event_name); | |
5905 | } | |
5906 | ||
5907 | seq_printf(m, " type: %s\n", field->type); | |
5908 | seq_printf(m, " size: %u\n", field->size); | |
5909 | seq_printf(m, " is_signed: %u\n", field->is_signed); | |
5910 | ||
5911 | return 0; | |
5912 | } | |
5913 | ||
5914 | static int field_var_debug_show(struct seq_file *m, | |
5915 | struct field_var *field_var, unsigned int i, | |
5916 | bool save_vars) | |
5917 | { | |
5918 | const char *vars_name = save_vars ? "save_vars" : "field_vars"; | |
5919 | struct hist_field *field; | |
5920 | int ret = 0; | |
5921 | ||
5922 | seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i); | |
5923 | ||
5924 | field = field_var->var; | |
5925 | ||
5926 | seq_printf(m, "\n %s[%d].var:\n", vars_name, i); | |
5927 | ||
5928 | hist_field_debug_show_flags(m, field->flags); | |
5929 | seq_printf(m, " var.name: %s\n", field->var.name); | |
5930 | seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", | |
5931 | field->var.idx); | |
5932 | ||
5933 | field = field_var->val; | |
5934 | ||
5935 | seq_printf(m, "\n %s[%d].val:\n", vars_name, i); | |
5936 | if (field->field) | |
5937 | seq_printf(m, " ftrace_event_field name: %s\n", | |
5938 | field->field->name); | |
5939 | else { | |
5940 | ret = -EINVAL; | |
5941 | goto out; | |
5942 | } | |
5943 | ||
5944 | seq_printf(m, " type: %s\n", field->type); | |
5945 | seq_printf(m, " size: %u\n", field->size); | |
5946 | seq_printf(m, " is_signed: %u\n", field->is_signed); | |
5947 | out: | |
5948 | return ret; | |
5949 | } | |
5950 | ||
5951 | static int hist_action_debug_show(struct seq_file *m, | |
5952 | struct action_data *data, int i) | |
5953 | { | |
5954 | int ret = 0; | |
5955 | ||
5956 | if (data->handler == HANDLER_ONMAX || | |
5957 | data->handler == HANDLER_ONCHANGE) { | |
5958 | seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i); | |
5959 | ret = hist_field_debug_show(m, data->track_data.var_ref, | |
5960 | HIST_FIELD_FL_VAR_REF); | |
5961 | if (ret) | |
5962 | goto out; | |
5963 | ||
5964 | seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i); | |
5965 | ret = hist_field_debug_show(m, data->track_data.track_var, | |
5966 | HIST_FIELD_FL_VAR); | |
5967 | if (ret) | |
5968 | goto out; | |
5969 | } | |
5970 | ||
5971 | if (data->handler == HANDLER_ONMATCH) { | |
5972 | seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n", | |
5973 | i, data->match_data.event_system); | |
5974 | seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n", | |
5975 | i, data->match_data.event); | |
5976 | } | |
5977 | out: | |
5978 | return ret; | |
5979 | } | |
5980 | ||
5981 | static int hist_actions_debug_show(struct seq_file *m, | |
5982 | struct hist_trigger_data *hist_data) | |
5983 | { | |
5984 | int i, ret = 0; | |
5985 | ||
5986 | if (hist_data->n_actions) | |
5987 | seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n"); | |
5988 | ||
5989 | for (i = 0; i < hist_data->n_actions; i++) { | |
5990 | struct action_data *action = hist_data->actions[i]; | |
5991 | ||
5992 | ret = hist_action_debug_show(m, action, i); | |
5993 | if (ret) | |
5994 | goto out; | |
5995 | } | |
5996 | ||
5997 | if (hist_data->n_save_vars) | |
5998 | seq_puts(m, "\n save action variables (save() params):\n"); | |
5999 | ||
6000 | for (i = 0; i < hist_data->n_save_vars; i++) { | |
6001 | ret = field_var_debug_show(m, hist_data->save_vars[i], i, true); | |
6002 | if (ret) | |
6003 | goto out; | |
6004 | } | |
6005 | out: | |
6006 | return ret; | |
6007 | } | |
6008 | ||
6009 | static void hist_trigger_debug_show(struct seq_file *m, | |
6010 | struct event_trigger_data *data, int n) | |
6011 | { | |
6012 | struct hist_trigger_data *hist_data; | |
6013 | int i, ret; | |
6014 | ||
6015 | if (n > 0) | |
6016 | seq_puts(m, "\n\n"); | |
6017 | ||
6018 | seq_puts(m, "# event histogram\n#\n# trigger info: "); | |
6019 | data->ops->print(m, data); | |
6020 | seq_puts(m, "#\n\n"); | |
6021 | ||
6022 | hist_data = data->private_data; | |
6023 | ||
6024 | seq_printf(m, "hist_data: %p\n\n", hist_data); | |
6025 | seq_printf(m, " n_vals: %u\n", hist_data->n_vals); | |
6026 | seq_printf(m, " n_keys: %u\n", hist_data->n_keys); | |
6027 | seq_printf(m, " n_fields: %u\n", hist_data->n_fields); | |
6028 | ||
6029 | seq_puts(m, "\n val fields:\n\n"); | |
6030 | ||
6031 | seq_puts(m, " hist_data->fields[0]:\n"); | |
6032 | ret = hist_field_debug_show(m, hist_data->fields[0], | |
6033 | HIST_FIELD_FL_HITCOUNT); | |
6034 | if (ret) | |
6035 | return; | |
6036 | ||
6037 | for (i = 1; i < hist_data->n_vals; i++) { | |
6038 | seq_printf(m, "\n hist_data->fields[%d]:\n", i); | |
6039 | ret = hist_field_debug_show(m, hist_data->fields[i], 0); | |
6040 | if (ret) | |
6041 | return; | |
6042 | } | |
6043 | ||
6044 | seq_puts(m, "\n key fields:\n"); | |
6045 | ||
6046 | for (i = hist_data->n_vals; i < hist_data->n_fields; i++) { | |
6047 | seq_printf(m, "\n hist_data->fields[%d]:\n", i); | |
6048 | ret = hist_field_debug_show(m, hist_data->fields[i], | |
6049 | HIST_FIELD_FL_KEY); | |
6050 | if (ret) | |
6051 | return; | |
6052 | } | |
6053 | ||
6054 | if (hist_data->n_var_refs) | |
6055 | seq_puts(m, "\n variable reference fields:\n"); | |
6056 | ||
6057 | for (i = 0; i < hist_data->n_var_refs; i++) { | |
6058 | seq_printf(m, "\n hist_data->var_refs[%d]:\n", i); | |
6059 | ret = hist_field_debug_show(m, hist_data->var_refs[i], | |
6060 | HIST_FIELD_FL_VAR_REF); | |
6061 | if (ret) | |
6062 | return; | |
6063 | } | |
6064 | ||
6065 | if (hist_data->n_field_vars) | |
6066 | seq_puts(m, "\n field variables:\n"); | |
6067 | ||
6068 | for (i = 0; i < hist_data->n_field_vars; i++) { | |
6069 | ret = field_var_debug_show(m, hist_data->field_vars[i], i, false); | |
6070 | if (ret) | |
6071 | return; | |
6072 | } | |
6073 | ||
6074 | ret = hist_actions_debug_show(m, hist_data); | |
6075 | if (ret) | |
6076 | return; | |
6077 | } | |
6078 | ||
6079 | static int hist_debug_show(struct seq_file *m, void *v) | |
6080 | { | |
6081 | struct event_trigger_data *data; | |
6082 | struct trace_event_file *event_file; | |
6083 | int n = 0; | |
6084 | ||
6085 | guard(mutex)(&event_mutex); | |
6086 | ||
6087 | event_file = event_file_file(m->private); | |
6088 | if (unlikely(!event_file)) | |
6089 | return -ENODEV; | |
6090 | ||
6091 | list_for_each_entry(data, &event_file->triggers, list) { | |
6092 | if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) | |
6093 | hist_trigger_debug_show(m, data, n++); | |
6094 | } | |
6095 | return 0; | |
6096 | } | |
6097 | ||
6098 | static int event_hist_debug_open(struct inode *inode, struct file *file) | |
6099 | { | |
6100 | int ret; | |
6101 | ||
6102 | ret = tracing_open_file_tr(inode, file); | |
6103 | if (ret) | |
6104 | return ret; | |
6105 | ||
6106 | /* Clear private_data to avoid warning in single_open() */ | |
6107 | file->private_data = NULL; | |
6108 | ret = single_open(file, hist_debug_show, file); | |
6109 | if (ret) | |
6110 | tracing_release_file_tr(inode, file); | |
6111 | return ret; | |
6112 | } | |
6113 | ||
6114 | const struct file_operations event_hist_debug_fops = { | |
6115 | .open = event_hist_debug_open, | |
6116 | .read = seq_read, | |
6117 | .llseek = seq_lseek, | |
6118 | .release = tracing_single_release_file_tr, | |
6119 | }; | |
6120 | #endif | |
6121 | ||
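| /* | |
|  * Print a single field as it appears in the trigger spec: optional | |
|  * variable name, field or constant, modifiers and bucket size. | |
|  */ | |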
6122 | static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) | |
6123 | { | |
6124 | const char *field_name = hist_field_name(hist_field, 0); | |
6125 | ||
6126 | if (hist_field->var.name) | |
6127 | seq_printf(m, "%s=", hist_field->var.name); | |
6128 | ||
6129 | if (hist_field->flags & HIST_FIELD_FL_CPU) | |
6130 | seq_puts(m, "common_cpu"); | |
6131 | else if (hist_field->flags & HIST_FIELD_FL_COMM) | |
6132 | seq_puts(m, "common_comm"); | |
6133 | else if (hist_field->flags & HIST_FIELD_FL_CONST) | |
6134 | seq_printf(m, "%llu", hist_field->constant); | |
6135 | else if (field_name) { | |
6136 | if (hist_field->flags & HIST_FIELD_FL_VAR_REF || | |
6137 | hist_field->flags & HIST_FIELD_FL_ALIAS) | |
6138 | seq_putc(m, '$'); | |
6139 | seq_printf(m, "%s", field_name); | |
6140 | } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) | |
6141 | seq_puts(m, "common_timestamp"); | |
6142 | ||
6143 | if (hist_field->flags) { | |
6144 | if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && | |
6145 | !(hist_field->flags & HIST_FIELD_FL_EXPR) && | |
6146 | !(hist_field->flags & HIST_FIELD_FL_STACKTRACE)) { | |
6147 | const char *flags = get_hist_field_flags(hist_field); | |
6148 | ||
6149 | if (flags) | |
6150 | seq_printf(m, ".%s", flags); | |
6151 | } | |
6152 | } | |
6153 | if (hist_field->buckets) | |
6154 | seq_printf(m, "=%ld", hist_field->buckets); | |
6155 | } | |
6156 | ||
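| /* | |
|  * Print the full trigger spec as shown in the trigger file: | |
|  * hist:keys=...:vals=...:sort=...:size=..., plus any actions, filter | |
|  * and [active]/[paused] state. | |
|  */ | |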
6157 | static int event_hist_trigger_print(struct seq_file *m, | |
6158 | struct event_trigger_data *data) | |
6159 | { | |
6160 | struct hist_trigger_data *hist_data = data->private_data; | |
6161 | struct hist_field *field; | |
6162 | bool have_var = false; | |
6163 | bool show_val = false; | |
6164 | unsigned int i; | |
6165 | ||
6166 | seq_puts(m, HIST_PREFIX); | |
6167 | ||
6168 | if (data->name) | |
6169 | seq_printf(m, "%s:", data->name); | |
6170 | ||
6171 | seq_puts(m, "keys="); | |
6172 | ||
6173 | for_each_hist_key_field(i, hist_data) { | |
6174 | field = hist_data->fields[i]; | |
6175 | ||
6176 | if (i > hist_data->n_vals) | |
6177 | seq_puts(m, ","); | |
6178 | ||
6179 | if (field->flags & HIST_FIELD_FL_STACKTRACE) { | |
6180 | if (field->field) | |
6181 | seq_printf(m, "%s.stacktrace", field->field->name); | |
6182 | else | |
6183 | seq_puts(m, "common_stacktrace"); | |
6184 | } else | |
6185 | hist_field_print(m, field); | |
6186 | } | |
6187 | ||
6188 | seq_puts(m, ":vals="); | |
6189 | ||
6190 | for_each_hist_val_field(i, hist_data) { | |
6191 | field = hist_data->fields[i]; | |
6192 | if (field->flags & HIST_FIELD_FL_VAR) { | |
6193 | have_var = true; | |
6194 | continue; | |
6195 | } | |
6196 | ||
6197 | if (i == HITCOUNT_IDX) { | |
6198 | if (hist_data->attrs->no_hitcount) | |
6199 | continue; | |
6200 | seq_puts(m, "hitcount"); | |
6201 | } else { | |
6202 | if (show_val) | |
6203 | seq_puts(m, ","); | |
6204 | hist_field_print(m, field); | |
6205 | } | |
6206 | show_val = true; | |
6207 | } | |
6208 | ||
6209 | if (have_var) { | |
6210 | unsigned int n = 0; | |
6211 | ||
6212 | seq_puts(m, ":"); | |
6213 | ||
6214 | for_each_hist_val_field(i, hist_data) { | |
6215 | field = hist_data->fields[i]; | |
6216 | ||
6217 | if (field->flags & HIST_FIELD_FL_VAR) { | |
6218 | if (n++) | |
6219 | seq_puts(m, ","); | |
6220 | hist_field_print(m, field); | |
6221 | } | |
6222 | } | |
6223 | } | |
6224 | ||
6225 | seq_puts(m, ":sort="); | |
6226 | ||
6227 | for (i = 0; i < hist_data->n_sort_keys; i++) { | |
6228 | struct tracing_map_sort_key *sort_key; | |
6229 | unsigned int idx, first_key_idx; | |
6230 | ||
6231 | /* skip VAR vals */ | |
6232 | first_key_idx = hist_data->n_vals - hist_data->n_vars; | |
6233 | ||
6234 | sort_key = &hist_data->sort_keys[i]; | |
6235 | idx = sort_key->field_idx; | |
6236 | ||
6237 | if (WARN_ON(idx >= HIST_FIELDS_MAX)) | |
6238 | return -EINVAL; | |
6239 | ||
6240 | if (i > 0) | |
6241 | seq_puts(m, ","); | |
6242 | ||
6243 | if (idx == HITCOUNT_IDX) | |
6244 | seq_puts(m, "hitcount"); | |
6245 | else { | |
6246 | if (idx >= first_key_idx) | |
6247 | idx += hist_data->n_vars; | |
6248 | hist_field_print(m, hist_data->fields[idx]); | |
6249 | } | |
6250 | ||
6251 | if (sort_key->descending) | |
6252 | seq_puts(m, ".descending"); | |
6253 | } | |
6254 | seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); | |
6255 | if (hist_data->enable_timestamps) | |
6256 | seq_printf(m, ":clock=%s", hist_data->attrs->clock); | |
6257 | if (hist_data->attrs->no_hitcount) | |
6258 | seq_puts(m, ":nohitcount"); | |
6259 | ||
6260 | print_actions_spec(m, hist_data); | |
6261 | ||
6262 | if (data->filter_str) | |
6263 | seq_printf(m, " if %s", data->filter_str); | |
6264 | ||
6265 | if (data->paused) | |
6266 | seq_puts(m, " [paused]"); | |
6267 | else | |
6268 | seq_puts(m, " [active]"); | |
6269 | ||
6270 | seq_putc(m, '\n'); | |
6271 | ||
6272 | return 0; | |
6273 | } | |
6274 | ||
6275 | static int event_hist_trigger_init(struct event_trigger_data *data) | |
6276 | { | |
6277 | struct hist_trigger_data *hist_data = data->private_data; | |
6278 | ||
6279 | if (alloc_hist_pad() < 0) | |
6280 | return -ENOMEM; | |
6281 | ||
6282 | if (!data->ref && hist_data->attrs->name) | |
6283 | save_named_trigger(hist_data->attrs->name, data); | |
6284 | ||
6285 | data->ref++; | |
6286 | ||
6287 | return 0; | |
6288 | } | |
6289 | ||
6290 | static void unregister_field_var_hists(struct hist_trigger_data *hist_data) | |
6291 | { | |
6292 | struct trace_event_file *file; | |
6293 | unsigned int i; | |
6294 | char *cmd; | |
6295 | int ret; | |
6296 | ||
6297 | for (i = 0; i < hist_data->n_field_var_hists; i++) { | |
6298 | file = hist_data->field_var_hists[i]->hist_data->event_file; | |
6299 | cmd = hist_data->field_var_hists[i]->cmd; | |
6300 | ret = event_hist_trigger_parse(&trigger_hist_cmd, file, | |
6301 | "!hist", "hist", cmd); | |
6302 | WARN_ON_ONCE(ret < 0); | |
6303 | } | |
6304 | } | |
6305 | ||
6306 | static void event_hist_trigger_free(struct event_trigger_data *data) | |
6307 | { | |
6308 | struct hist_trigger_data *hist_data = data->private_data; | |
6309 | ||
6310 | if (WARN_ON_ONCE(data->ref <= 0)) | |
6311 | return; | |
6312 | ||
6313 | data->ref--; | |
6314 | if (!data->ref) { | |
6315 | if (data->name) | |
6316 | del_named_trigger(data); | |
6317 | ||
6318 | trigger_data_free(data); | |
6319 | ||
6320 | remove_hist_vars(hist_data); | |
6321 | ||
6322 | unregister_field_var_hists(hist_data); | |
6323 | ||
6324 | destroy_hist_data(hist_data); | |
6325 | } | |
6326 | free_hist_pad(); | |
6327 | } | |
6328 | ||
6329 | static const struct event_trigger_ops event_hist_trigger_ops = { | |
6330 | .trigger = event_hist_trigger, | |
6331 | .print = event_hist_trigger_print, | |
6332 | .init = event_hist_trigger_init, | |
6333 | .free = event_hist_trigger_free, | |
6334 | }; | |
6335 | ||
6336 | static int event_hist_trigger_named_init(struct event_trigger_data *data) | |
6337 | { | |
6338 | data->ref++; | |
6339 | ||
6340 | save_named_trigger(data->named_data->name, data); | |
6341 | ||
6342 | return event_hist_trigger_init(data->named_data); | |
6343 | } | |
6344 | ||
6345 | static void event_hist_trigger_named_free(struct event_trigger_data *data) | |
6346 | { | |
6347 | if (WARN_ON_ONCE(data->ref <= 0)) | |
6348 | return; | |
6349 | ||
6350 | event_hist_trigger_free(data->named_data); | |
6351 | ||
6352 | data->ref--; | |
6353 | if (!data->ref) { | |
6354 | del_named_trigger(data); | |
6355 | trigger_data_free(data); | |
6356 | } | |
6357 | } | |
6358 | ||
6359 | static const struct event_trigger_ops event_hist_trigger_named_ops = { | |
6360 | .trigger = event_hist_trigger, | |
6361 | .print = event_hist_trigger_print, | |
6362 | .init = event_hist_trigger_named_init, | |
6363 | .free = event_hist_trigger_named_free, | |
6364 | }; | |
6365 | ||
6366 | static const struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, | |
6367 | char *param) | |
6368 | { | |
6369 | return &event_hist_trigger_ops; | |
6370 | } | |
6371 | ||
6372 | static void hist_clear(struct event_trigger_data *data) | |
6373 | { | |
6374 | struct hist_trigger_data *hist_data = data->private_data; | |
6375 | ||
6376 | if (data->name) | |
6377 | pause_named_trigger(data); | |
6378 | ||
6379 | tracepoint_synchronize_unregister(); | |
6380 | ||
6381 | tracing_map_clear(hist_data->map); | |
6382 | ||
6383 | if (data->name) | |
6384 | unpause_named_trigger(data); | |
6385 | } | |
6386 | ||
6387 | static bool compatible_field(struct ftrace_event_field *field, | |
6388 | struct ftrace_event_field *test_field) | |
6389 | { | |
6390 | if (field == test_field) | |
6391 | return true; | |
6392 | if (field == NULL || test_field == NULL) | |
6393 | return false; | |
6394 | if (strcmp(field->name, test_field->name) != 0) | |
6395 | return false; | |
6396 | if (strcmp(field->type, test_field->type) != 0) | |
6397 | return false; | |
6398 | if (field->size != test_field->size) | |
6399 | return false; | |
6400 | if (field->is_signed != test_field->is_signed) | |
6401 | return false; | |
6402 | ||
6403 | return true; | |
6404 | } | |
6405 | ||
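| /* | |
|  * Return true if two hist triggers are equivalent: same key and value | |
|  * fields, same sort keys, same filter (unless ignored) and same | |
|  * actions. | |
|  */ | |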
6406 | static bool hist_trigger_match(struct event_trigger_data *data, | |
6407 | struct event_trigger_data *data_test, | |
6408 | struct event_trigger_data *named_data, | |
6409 | bool ignore_filter) | |
6410 | { | |
6411 | struct tracing_map_sort_key *sort_key, *sort_key_test; | |
6412 | struct hist_trigger_data *hist_data, *hist_data_test; | |
6413 | struct hist_field *key_field, *key_field_test; | |
6414 | unsigned int i; | |
6415 | ||
6416 | if (named_data && (named_data != data_test) && | |
6417 | (named_data != data_test->named_data)) | |
6418 | return false; | |
6419 | ||
6420 | if (!named_data && is_named_trigger(data_test)) | |
6421 | return false; | |
6422 | ||
6423 | hist_data = data->private_data; | |
6424 | hist_data_test = data_test->private_data; | |
6425 | ||
6426 | if (hist_data->n_vals != hist_data_test->n_vals || | |
6427 | hist_data->n_fields != hist_data_test->n_fields || | |
6428 | hist_data->n_sort_keys != hist_data_test->n_sort_keys) | |
6429 | return false; | |
6430 | ||
6431 | if (!ignore_filter) { | |
6432 | if ((data->filter_str && !data_test->filter_str) || | |
6433 | (!data->filter_str && data_test->filter_str)) | |
6434 | return false; | |
6435 | } | |
6436 | ||
6437 | for_each_hist_field(i, hist_data) { | |
6438 | key_field = hist_data->fields[i]; | |
6439 | key_field_test = hist_data_test->fields[i]; | |
6440 | ||
6441 | if (key_field->flags != key_field_test->flags) | |
6442 | return false; | |
6443 | if (!compatible_field(key_field->field, key_field_test->field)) | |
6444 | return false; | |
6445 | if (key_field->offset != key_field_test->offset) | |
6446 | return false; | |
6447 | if (key_field->size != key_field_test->size) | |
6448 | return false; | |
6449 | if (key_field->is_signed != key_field_test->is_signed) | |
6450 | return false; | |
6451 | if (!!key_field->var.name != !!key_field_test->var.name) | |
6452 | return false; | |
6453 | if (key_field->var.name && | |
6454 | strcmp(key_field->var.name, key_field_test->var.name) != 0) | |
6455 | return false; | |
6456 | } | |
6457 | ||
6458 | for (i = 0; i < hist_data->n_sort_keys; i++) { | |
6459 | sort_key = &hist_data->sort_keys[i]; | |
6460 | sort_key_test = &hist_data_test->sort_keys[i]; | |
6461 | ||
6462 | if (sort_key->field_idx != sort_key_test->field_idx || | |
6463 | sort_key->descending != sort_key_test->descending) | |
6464 | return false; | |
6465 | } | |
6466 | ||
6467 | if (!ignore_filter && data->filter_str && | |
6468 | (strcmp(data->filter_str, data_test->filter_str) != 0)) | |
6469 | return false; | |
6470 | ||
6471 | if (!actions_match(hist_data, hist_data_test)) | |
6472 | return false; | |
6473 | ||
6474 | return true; | |
6475 | } | |
6476 | ||
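| /* | |
|  * If the command only pauses, continues or clears an existing matching | |
|  * trigger, apply it here and return true so that no new trigger is | |
|  * registered. | |
|  */ | |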
6477 | static bool existing_hist_update_only(char *glob, | |
6478 | struct event_trigger_data *data, | |
6479 | struct trace_event_file *file) | |
6480 | { | |
6481 | struct hist_trigger_data *hist_data = data->private_data; | |
6482 | struct event_trigger_data *test, *named_data = NULL; | |
6483 | bool updated = false; | |
6484 | ||
6485 | if (!hist_data->attrs->pause && !hist_data->attrs->cont && | |
6486 | !hist_data->attrs->clear) | |
6487 | goto out; | |
6488 | ||
6489 | if (hist_data->attrs->name) { | |
6490 | named_data = find_named_trigger(hist_data->attrs->name); | |
6491 | if (named_data) { | |
6492 | if (!hist_trigger_match(data, named_data, named_data, | |
6493 | true)) | |
6494 | goto out; | |
6495 | } | |
6496 | } | |
6497 | ||
6498 | if (hist_data->attrs->name && !named_data) | |
6499 | goto out; | |
6500 | ||
6501 | list_for_each_entry(test, &file->triggers, list) { | |
6502 | if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
6503 | if (!hist_trigger_match(data, test, named_data, false)) | |
6504 | continue; | |
6505 | if (hist_data->attrs->pause) | |
6506 | test->paused = true; | |
6507 | else if (hist_data->attrs->cont) | |
6508 | test->paused = false; | |
6509 | else if (hist_data->attrs->clear) | |
6510 | hist_clear(test); | |
6511 | updated = true; | |
6512 | goto out; | |
6513 | } | |
6514 | } | |
6515 | out: | |
6516 | return updated; | |
6517 | } | |
6518 | ||
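| /* | |
|  * Register a hist trigger on @file, rejecting duplicates and named | |
|  * trigger mismatches; a trigger attached to an existing named trigger | |
|  * shares that trigger's hist data. | |
|  */ | |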
6519 | static int hist_register_trigger(char *glob, | |
6520 | struct event_trigger_data *data, | |
6521 | struct trace_event_file *file) | |
6522 | { | |
6523 | struct hist_trigger_data *hist_data = data->private_data; | |
6524 | struct event_trigger_data *test, *named_data = NULL; | |
6525 | struct trace_array *tr = file->tr; | |
6526 | int ret = 0; | |
6527 | ||
6528 | if (hist_data->attrs->name) { | |
6529 | named_data = find_named_trigger(hist_data->attrs->name); | |
6530 | if (named_data) { | |
6531 | if (!hist_trigger_match(data, named_data, named_data, | |
6532 | true)) { | |
6533 | hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); | |
6534 | ret = -EINVAL; | |
6535 | goto out; | |
6536 | } | |
6537 | } | |
6538 | } | |
6539 | ||
6540 | if (hist_data->attrs->name && !named_data) | |
6541 | goto new; | |
6542 | ||
6543 | lockdep_assert_held(&event_mutex); | |
6544 | ||
6545 | list_for_each_entry(test, &file->triggers, list) { | |
6546 | if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
6547 | if (hist_trigger_match(data, test, named_data, false)) { | |
6548 | hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); | |
6549 | ret = -EEXIST; | |
6550 | goto out; | |
6551 | } | |
6552 | } | |
6553 | } | |
6554 | new: | |
6555 | if (hist_data->attrs->cont || hist_data->attrs->clear) { | |
6556 | hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); | |
6557 | ret = -ENOENT; | |
6558 | goto out; | |
6559 | } | |
6560 | ||
6561 | if (hist_data->attrs->pause) | |
6562 | data->paused = true; | |
6563 | ||
6564 | if (named_data) { | |
6565 | data->private_data = named_data->private_data; | |
6566 | set_named_trigger_data(data, named_data); | |
6567 | data->ops = &event_hist_trigger_named_ops; | |
6568 | } | |
6569 | ||
6570 | if (data->ops->init) { | |
6571 | ret = data->ops->init(data); | |
6572 | if (ret < 0) | |
6573 | goto out; | |
6574 | } | |
6575 | ||
6576 | if (hist_data->enable_timestamps) { | |
6577 | char *clock = hist_data->attrs->clock; | |
6578 | ||
6579 | ret = tracing_set_clock(file->tr, hist_data->attrs->clock); | |
6580 | if (ret) { | |
6581 | hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); | |
6582 | goto out; | |
6583 | } | |
6584 | ||
6585 | tracing_set_filter_buffering(file->tr, true); | |
6586 | } | |
6587 | ||
6588 | if (named_data) | |
6589 | destroy_hist_data(hist_data); | |
6590 | out: | |
6591 | return ret; | |
6592 | } | |
6593 | ||
6594 | static int hist_trigger_enable(struct event_trigger_data *data, | |
6595 | struct trace_event_file *file) | |
6596 | { | |
6597 | int ret = 0; | |
6598 | ||
6599 | list_add_tail_rcu(&data->list, &file->triggers); | |
6600 | ||
6601 | update_cond_flag(file); | |
6602 | ||
6603 | if (trace_event_trigger_enable_disable(file, 1) < 0) { | |
6604 | list_del_rcu(&data->list); | |
6605 | update_cond_flag(file); | |
6606 | ret--; | |
6607 | } | |
6608 | ||
6609 | return ret; | |
6610 | } | |
6611 | ||
6612 | static bool have_hist_trigger_match(struct event_trigger_data *data, | |
6613 | struct trace_event_file *file) | |
6614 | { | |
6615 | struct hist_trigger_data *hist_data = data->private_data; | |
6616 | struct event_trigger_data *test, *named_data = NULL; | |
6617 | bool match = false; | |
6618 | ||
6619 | lockdep_assert_held(&event_mutex); | |
6620 | ||
6621 | if (hist_data->attrs->name) | |
6622 | named_data = find_named_trigger(hist_data->attrs->name); | |
6623 | ||
6624 | list_for_each_entry(test, &file->triggers, list) { | |
6625 | if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
6626 | if (hist_trigger_match(data, test, named_data, false)) { | |
6627 | match = true; | |
6628 | break; | |
6629 | } | |
6630 | } | |
6631 | } | |
6632 | ||
6633 | return match; | |
6634 | } | |
6635 | ||
6636 | static bool hist_trigger_check_refs(struct event_trigger_data *data, | |
6637 | struct trace_event_file *file) | |
6638 | { | |
6639 | struct hist_trigger_data *hist_data = data->private_data; | |
6640 | struct event_trigger_data *test, *named_data = NULL; | |
6641 | ||
6642 | lockdep_assert_held(&event_mutex); | |
6643 | ||
6644 | if (hist_data->attrs->name) | |
6645 | named_data = find_named_trigger(hist_data->attrs->name); | |
6646 | ||
6647 | list_for_each_entry(test, &file->triggers, list) { | |
6648 | if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
6649 | if (!hist_trigger_match(data, test, named_data, false)) | |
6650 | continue; | |
6651 | hist_data = test->private_data; | |
6652 | if (check_var_refs(hist_data)) | |
6653 | return true; | |
6654 | break; | |
6655 | } | |
6656 | } | |
6657 | ||
6658 | return false; | |
6659 | } | |
6660 | ||
6661 | static void hist_unregister_trigger(char *glob, | |
6662 | struct event_trigger_data *data, | |
6663 | struct trace_event_file *file) | |
6664 | { | |
6665 | struct event_trigger_data *test = NULL, *iter, *named_data = NULL; | |
6666 | struct hist_trigger_data *hist_data = data->private_data; | |
6667 | ||
6668 | lockdep_assert_held(&event_mutex); | |
6669 | ||
6670 | if (hist_data->attrs->name) | |
6671 | named_data = find_named_trigger(hist_data->attrs->name); | |
6672 | ||
6673 | list_for_each_entry(iter, &file->triggers, list) { | |
6674 | if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) { | |
6675 | if (!hist_trigger_match(data, iter, named_data, false)) | |
6676 | continue; | |
6677 | test = iter; | |
6678 | list_del_rcu(&test->list); | |
6679 | trace_event_trigger_enable_disable(file, 0); | |
6680 | update_cond_flag(file); | |
6681 | break; | |
6682 | } | |
6683 | } | |
6684 | ||
6685 | if (test && test->ops->free) | |
6686 | test->ops->free(test); | |
6687 | ||
6688 | if (hist_data->enable_timestamps) { | |
6689 | if (!hist_data->remove || test) | |
6690 | tracing_set_filter_buffering(file->tr, false); | |
6691 | } | |
6692 | } | |
6693 | ||
static bool hist_file_check_refs(struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			if (check_var_refs(hist_data))
				return true;
		}
	}

	return false;
}

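/*
 * Remove all hist triggers from @file, unless one of them still has
 * referenced variables.  Also drops the reference this event held on a
 * synthetic event of the same name, if there is one.
 */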
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);

			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;

			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_filter_buffering(file->tr, false);
			if (test->ops->free)
				test->ops->free(test);
		}
	}
}

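/*
 * Parse and install (or remove) a hist trigger written to an event's
 * "trigger" file.  A leading '!' in @glob selects removal of an
 * existing trigger.  The user-visible command has the general form
 * "hist:keys=...[:vals=...][:sort=...][...] [if filter]", e.g.
 * (illustrative examples, following Documentation/trace/histogram.rst):
 *
 *   echo 'hist:keys=next_pid:vals=hitcount' > events/sched/sched_switch/trigger
 *   echo '!hist:keys=next_pid:vals=hitcount' > events/sched/sched_switch/trigger
 */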
static int event_hist_trigger_parse(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *glob, char *cmd,
				    char *param_and_filter)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct hist_trigger_data *hist_data;
	char *param, *filter, *p, *start;
	struct synth_event *se;
	const char *se_name;
	bool remove;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (WARN_ON(!glob))
		return -EINVAL;

	if (glob[0]) {
		hist_err_clear();
		last_cmd_set(file, param_and_filter);
	}

	remove = event_trigger_check_remove(glob);

	if (event_trigger_empty_param(param_and_filter))
		return -EINVAL;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = param = param_and_filter;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param_and_filter)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (1);

	if (!p)
		filter = NULL;
	else {
		*(p - 1) = '\0';
		filter = strstrip(p);
		param = strstrip(param);
	}

	/*
	 * To simplify arithmetic expression parsing, replace occurrences of
	 * '.sym-offset' modifier with '.symXoffset'
	 */
	start = strstr(param, ".sym-offset");
	while (start) {
		*(start + 4) = 'X';
		start = strstr(start + 11, ".sym-offset");
	}

	attrs = parse_hist_trigger_attrs(file->tr, param);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, hist_data);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
	if (ret < 0)
		goto out_free;

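	/*
	 * Removal: only proceed if a matching trigger exists and none of
	 * its variables are still referenced by other triggers.
	 */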
	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

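	/*
	 * If the command only pauses, continues or clears an existing
	 * histogram, or attaches to an existing named trigger, skip
	 * creating the actions, variables and map for a new one.
	 */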
	if (existing_hist_update_only(glob, trigger_data, file))
		goto out_free;

	if (!get_named_trigger_data(trigger_data)) {

		ret = create_actions(hist_data);
		if (ret)
			goto out_free;

		if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
			ret = save_hist_vars(hist_data);
			if (ret)
				goto out_free;
		}

		ret = tracing_map_init(hist_data->map);
		if (ret)
			goto out_free;
	}

	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
	if (ret < 0)
		goto out_free;

	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
 out:
	if (ret == 0 && glob[0])
		hist_err_clear();

	return ret;
 out_unreg:
	event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
 out_free:
	event_trigger_reset_filter(cmd_ops, trigger_data);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

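/*
 * The event_command for "hist" triggers.  EVENT_CMD_FL_NEEDS_REC is set
 * because the trigger needs the trace record itself so key and value
 * fields can be read from each event.
 */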
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.parse			= event_hist_trigger_parse,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

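/*
 * Trigger function for enable_hist/disable_hist: pause or unpause all
 * hist triggers attached to the target event file.
 */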
static void
hist_enable_trigger(struct event_trigger_data *data,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

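/*
 * Same as hist_enable_trigger(), but honours an optional count: the
 * trigger stops firing once the count reaches zero (-1 means no limit).
 */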
static void
hist_enable_count_trigger(struct event_trigger_data *data,
			  struct trace_buffer *buffer, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, buffer, rec, event);
}

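/*
 * The enable and disable variants share the same trigger functions; the
 * actual direction comes from the enable_trigger_data set up at parse
 * time.
 */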
static const struct event_trigger_ops hist_enable_trigger_ops = {
	.trigger		= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static const struct event_trigger_ops hist_enable_count_trigger_ops = {
	.trigger		= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static const struct event_trigger_ops hist_disable_trigger_ops = {
	.trigger		= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static const struct event_trigger_ops hist_disable_count_trigger_ops = {
	.trigger		= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

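/*
 * Select the ops for an enable_hist/disable_hist command; the counted
 * variants are chosen when a count parameter is supplied.
 */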
static const struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	const struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

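/*
 * Remove all enable_hist/disable_hist triggers from @file.
 */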
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test);
		}
	}
}

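/*
 * Command definitions for "enable_hist" and "disable_hist".  These are
 * attached to one event and pause/unpause the hist triggers of another,
 * e.g. (illustrative, per Documentation/trace/histogram.rst):
 *
 *   echo 'enable_hist:sched:sched_switch' > events/sched/sched_process_fork/trigger
 */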
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

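/*
 * Register the enable_hist/disable_hist commands at boot; if the second
 * registration fails, the first is rolled back.
 */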
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;