/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on right CPU with IRQs disabled, we can test
		 * if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	the CPU on which to run @func
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

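/*
 * Illustrative sketch (added comment, not part of the original file): the two
 * helpers above are the building blocks for running a callback in a precisely
 * defined execution context.  A caller that wants to poke at per-task state
 * would, roughly, do something like (do_something() is hypothetical):
 *
 *	static int do_something(void *info) { ... runs on the task's CPU ... }
 *
 *	ret = task_function_call(task, do_something, info);
 *
 * task_function_call() keeps retrying while the task migrates (-EAGAIN),
 * whereas cpu_function_call() simply fails with -ENXIO if the CPU is offline.
 */
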
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively
 *    straightforward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	WARN_ON_ONCE(!irqs_disabled());

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

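/*
 * Usage sketch (added comment, illustrative only): most remote modifications
 * of an event follow this pattern, with a hypothetical callback matching the
 * event_f signature:
 *
 *	static void __perf_event_do_thing(struct perf_event *event,
 *					  struct perf_cpu_context *cpuctx,
 *					  struct perf_event_context *ctx,
 *					  void *info) { ... }
 *
 *	event_function_call(event, __perf_event_do_thing, info);
 *
 * The call either runs the callback via IPI on the CPU that owns the context
 * (when the context is active) or directly under ctx->lock (when it is not).
 */
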
/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	WARN_ON_ONCE(!irqs_disabled());

	if (task) {
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;

			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

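/*
 * Example (added comment, not in the original source): the level is normally
 * adjusted via sysctl, e.g. "sysctl kernel.perf_event_paranoid=1" or by
 * writing to /proc/sys/kernel/perf_event_paranoid.  The individual permission
 * checks against this value live in the perf_paranoid_*() helpers in
 * <linux/perf_event.h>.
 */
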
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

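/*
 * Worked example (added comment, illustrative only): with the defaults above,
 * perf_sample_period_ns = 10,000ns (100,000 samples/sec) and
 * sysctl_perf_cpu_time_max_percent = 25, so update_perf_cpu_limits() sets
 * perf_sample_allowed_ns = 10,000 * 25 / 100 = 2,500ns, i.e. a sample may
 * spend at most 25% of its nominal period in the sampling path.
 */
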
static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0)
		return -EINVAL;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_INFO
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}

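/*
 * Worked example (added comment, illustrative only): assume HZ = 1000
 * (TICK_NSEC = 1,000,000) and the default 25% CPU limit, so max starts at
 * 250,000.  If the decayed average sample cost grows to 10,000ns, avg_len
 * becomes 12,500 after the +25% margin; that is below max, so
 * max_samples_per_tick = 250,000 / 12,500 = 20 and the sample rate is
 * lowered to 20 * HZ = 20,000 samples/sec.
 */
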
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current, event->ctx);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task, ctx);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 * we pass the cpuctx->ctx to perf_cgroup_from_task()
				 * because cgroup events are only per-cpu
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(prev, NULL);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out of ctxsw out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);

	rcu_read_unlock();
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

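/*
 * Userspace-side sketch (added comment, illustrative only): a cgroup event is
 * requested by passing an open fd of the cgroup directory as the "pid"
 * argument together with PERF_FLAG_PID_CGROUP, e.g. (path is hypothetical):
 *
 *	int cfd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	fd = syscall(__NR_perf_event_open, &attr, cfd, cpu, -1,
 *		     PERF_FLAG_PID_CGROUP);
 *
 * perf_cgroup_connect() above is what resolves that fd into event->cgrp.
 */
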
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}

/*
 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
 * cleared when last cgroup event is removed.
 */
static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
	struct perf_cpu_context *cpuctx;

	if (!is_cgroup_event(event))
		return;

	if (add && ctx->nr_cgroups++)
		return;
	else if (!add && --ctx->nr_cgroups)
		return;
	/*
	 * Because cgroup events are always per-cpu events,
	 * this will always be called from the right CPU.
	 */
	cpuctx = __get_cpu_context(ctx);

	/*
	 * cpuctx->cgrp is NULL until a cgroup event is sched in or
	 * ctx->nr_cgroup == 0 .
	 */
	if (add && perf_cgroup_from_task(current, ctx) == event->cgrp)
		cpuctx->cgrp = event->cgrp;
	else if (!add)
		cpuctx->cgrp = NULL;
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}

static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
}

#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

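/*
 * Note (added comment, not in the original file): the multiplexing interval
 * programmed above defaults to one tick and can typically be tuned per PMU
 * through its perf_event_mux_interval_ms sysfs attribute, e.g.:
 *
 *	echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 *
 * which updates pmu->hrtimer_interval_ms for subsequent timer programming.
 */
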
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

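/*
 * Usage sketch (added comment, illustrative only): the disable count nests,
 * so sections that reprogram several events batch the hardware writes like:
 *
 *	perf_pmu_disable(pmu);
 *	... add/remove/modify events ...
 *	perf_pmu_enable(pmu);
 *
 * Only the outermost perf_pmu_enable() call re-enables the PMU.
 */
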
static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task && ctx->task != TASK_TOMBSTONE)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    cred_guard_mutex
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	        perf_event::mmap_mutex
 *	          mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

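/*
 * Usage sketch (added comment, illustrative only): code that may race with a
 * concurrent move_group uses the pair above rather than taking
 * event->ctx->mutex directly:
 *
 *	ctx = perf_event_ctx_lock(event);
 *	... event->ctx is now stable ...
 *	perf_event_ctx_unlock(event, ctx);
 */
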
/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (ctx->task == TASK_TOMBSTONE ||
		    !atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		} else {
			WARN_ON_ONCE(ctx->task != task);
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	lockdep_assert_held(&ctx->lock);

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	lockdep_assert_held(&ctx->lock);

	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		event->group_caps = event->event_caps;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_update_cgroup_event(event, ctx, true);

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

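/*
 * Worked example (added comment, illustrative only): for a group leader with
 * two siblings and read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID, each
 * entry is 16 bytes (value + id), nr = 3, and one extra u64 holds the nr
 * field itself, so read_size = 8 + 3 * 16 = 56 bytes.
 */
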
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
	struct perf_sample_data *data;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__header_size(struct perf_event *event)
{
	__perf_event_read_size(event,
			       event->group_leader->nr_siblings);
	__perf_event_header_size(event, event->attr.sample_type);
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static bool perf_event_validate_size(struct perf_event *event)
{
	/*
	 * The values computed here will be over-written when we actually
	 * attach the event.
	 */
	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
	perf_event__id_header_size(event);

	/*
	 * Sum the lot; should not exceed the 64k limit we have on records.
	 * Conservative limit to allow for callchains and other variable fields.
	 */
	if (event->read_size + event->header_size +
	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
		return false;

	return true;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	lockdep_assert_held(&event->ctx->lock);

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	group_leader->group_caps &= event->event_caps;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	list_update_cgroup_event(event, ctx, false);

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

8a49542c 1696static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1697{
1698 struct perf_event *sibling, *tmp;
8a49542c
PZ
1699 struct list_head *list = NULL;
1700
a76a82a3
PZ
1701 lockdep_assert_held(&event->ctx->lock);
1702
8a49542c
PZ
1703 /*
1704 * We can have double detach due to exit/hot-unplug + close.
1705 */
1706 if (!(event->attach_state & PERF_ATTACH_GROUP))
1707 return;
1708
1709 event->attach_state &= ~PERF_ATTACH_GROUP;
1710
1711 /*
1712 * If this is a sibling, remove it from its group.
1713 */
1714 if (event->group_leader != event) {
1715 list_del_init(&event->group_entry);
1716 event->group_leader->nr_siblings--;
c320c7b7 1717 goto out;
8a49542c
PZ
1718 }
1719
1720 if (!list_empty(&event->group_entry))
1721 list = &event->group_entry;
2e2af50b 1722
04289bb9 1723 /*
cdd6c482
IM
1724 * If this was a group event with sibling events then
1725 * upgrade the siblings to singleton events by adding them
8a49542c 1726 * to whatever list we are on.
04289bb9 1727 */
cdd6c482 1728 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1729 if (list)
1730 list_move_tail(&sibling->group_entry, list);
04289bb9 1731 sibling->group_leader = sibling;
d6f962b5
FW
1732
1733 /* Inherit group flags from the previous leader */
4ff6a8de 1734 sibling->group_caps = event->group_caps;
652884fe
PZ
1735
1736 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 1737 }
c320c7b7
ACM
1738
1739out:
1740 perf_event__header_size(event->group_leader);
1741
1742 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1743 perf_event__header_size(tmp);
04289bb9
IM
1744}
1745
fadfe7be
JO
1746static bool is_orphaned_event(struct perf_event *event)
1747{
a69b0ca4 1748 return event->state == PERF_EVENT_STATE_DEAD;
fadfe7be
JO
1749}
1750
2c81a647 1751static inline int __pmu_filter_match(struct perf_event *event)
66eb579e
MR
1752{
1753 struct pmu *pmu = event->pmu;
1754 return pmu->filter_match ? pmu->filter_match(event) : 1;
1755}
1756
2c81a647
MR
1757/*
1758 * Check whether we should attempt to schedule an event group based on
1759 * PMU-specific filtering. An event group can consist of HW and SW events,
1760 * potentially with a SW leader, so we must check all the filters, to
1761 * determine whether a group is schedulable:
1762 */
1763static inline int pmu_filter_match(struct perf_event *event)
1764{
1765 struct perf_event *child;
1766
1767 if (!__pmu_filter_match(event))
1768 return 0;
1769
1770 list_for_each_entry(child, &event->sibling_list, group_entry) {
1771 if (!__pmu_filter_match(child))
1772 return 0;
1773 }
1774
1775 return 1;
1776}
1777
fa66f07a
SE
1778static inline int
1779event_filter_match(struct perf_event *event)
1780{
0b8f1e2e
PZ
1781 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1782 perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
1783}
1784
static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE &&
	    !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;
	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	perf_pmu_enable(event->pmu);
}

d859e29f 1835static void
cdd6c482 1836group_sched_out(struct perf_event *group_event,
d859e29f 1837 struct perf_cpu_context *cpuctx,
cdd6c482 1838 struct perf_event_context *ctx)
d859e29f 1839{
cdd6c482 1840 struct perf_event *event;
fa66f07a 1841 int state = group_event->state;
d859e29f 1842
3f005e7d
MR
1843 perf_pmu_disable(ctx->pmu);
1844
cdd6c482 1845 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1846
1847 /*
1848 * Schedule out siblings (if any):
1849 */
cdd6c482
IM
1850 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1851 event_sched_out(event, cpuctx, ctx);
d859e29f 1852
3f005e7d
MR
1853 perf_pmu_enable(ctx->pmu);
1854
fa66f07a 1855 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1856 cpuctx->exclusive = 0;
1857}
1858
45a0e07a 1859#define DETACH_GROUP 0x01UL
0017960f 1860
0793a61d 1861/*
cdd6c482 1862 * Cross CPU call to remove a performance event
0793a61d 1863 *
cdd6c482 1864 * We disable the event on the hardware level first. After that we
0793a61d
TG
1865 * remove it from the context list.
1866 */
fae3fde6
PZ
1867static void
1868__perf_remove_from_context(struct perf_event *event,
1869 struct perf_cpu_context *cpuctx,
1870 struct perf_event_context *ctx,
1871 void *info)
0793a61d 1872{
45a0e07a 1873 unsigned long flags = (unsigned long)info;
0793a61d 1874
cdd6c482 1875 event_sched_out(event, cpuctx, ctx);
45a0e07a 1876 if (flags & DETACH_GROUP)
46ce0fe9 1877 perf_group_detach(event);
cdd6c482 1878 list_del_event(event, ctx);
39a43640
PZ
1879
1880 if (!ctx->nr_events && ctx->is_active) {
64ce3126 1881 ctx->is_active = 0;
39a43640
PZ
1882 if (ctx->task) {
1883 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1884 cpuctx->task_ctx = NULL;
1885 }
64ce3126 1886 }
0793a61d
TG
1887}
1888
0793a61d 1889/*
cdd6c482 1890 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1891 *
cdd6c482
IM
1892 * If event->ctx is a cloned context, callers must make sure that
1893 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1894 * remains valid. This is OK when called from perf_release since
1895 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1896 * When called from perf_event_exit_task, it's OK because the
c93f7669 1897 * context has been detached from its task.
0793a61d 1898 */
45a0e07a 1899static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
0793a61d 1900{
a76a82a3
PZ
1901 struct perf_event_context *ctx = event->ctx;
1902
1903 lockdep_assert_held(&ctx->mutex);
0793a61d 1904
45a0e07a 1905 event_function_call(event, __perf_remove_from_context, (void *)flags);
a76a82a3
PZ
1906
1907 /*
1908 * The above event_function_call() can NO-OP when it hits
1909 * TASK_TOMBSTONE. In that case we must already have been detached
1910 * from the context (by perf_event_exit_event()) but the grouping
1911 * might still be intact.
1912 */
1913 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1914 if ((flags & DETACH_GROUP) &&
1915 (event->attach_state & PERF_ATTACH_GROUP)) {
1916 /*
1917 * Since in that case we cannot possibly be scheduled, simply
1918 * detach now.
1919 */
1920 raw_spin_lock_irq(&ctx->lock);
1921 perf_group_detach(event);
1922 raw_spin_unlock_irq(&ctx->lock);
1923 }
0793a61d
TG
1924}
1925
d859e29f 1926/*
cdd6c482 1927 * Cross CPU call to disable a performance event
d859e29f 1928 */
fae3fde6
PZ
1929static void __perf_event_disable(struct perf_event *event,
1930 struct perf_cpu_context *cpuctx,
1931 struct perf_event_context *ctx,
1932 void *info)
7b648018 1933{
fae3fde6
PZ
1934 if (event->state < PERF_EVENT_STATE_INACTIVE)
1935 return;
7b648018 1936
fae3fde6
PZ
1937 update_context_time(ctx);
1938 update_cgrp_time_from_event(event);
1939 update_group_times(event);
1940 if (event == event->group_leader)
1941 group_sched_out(event, cpuctx, ctx);
1942 else
1943 event_sched_out(event, cpuctx, ctx);
1944 event->state = PERF_EVENT_STATE_OFF;
7b648018
PZ
1945}
1946
d859e29f 1947/*
cdd6c482 1948 * Disable an event.
c93f7669 1949 *
cdd6c482
IM
1950 * If event->ctx is a cloned context, callers must make sure that
1951 * every task struct that event->ctx->task could possibly point to
c93f7669 1952 * remains valid. This condition is satisfied when called through
cdd6c482
IM
1953 * perf_event_for_each_child or perf_event_for_each because they
1954 * hold the top-level event's child_mutex, so any descendant that
8ba289b8
PZ
1955 * goes to exit will block in perf_event_exit_event().
1956 *
cdd6c482 1957 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1958 * is the current context on this CPU and preemption is disabled,
cdd6c482 1959 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1960 */
f63a8daa 1961static void _perf_event_disable(struct perf_event *event)
d859e29f 1962{
cdd6c482 1963 struct perf_event_context *ctx = event->ctx;
d859e29f 1964
e625cce1 1965 raw_spin_lock_irq(&ctx->lock);
7b648018 1966 if (event->state <= PERF_EVENT_STATE_OFF) {
e625cce1 1967 raw_spin_unlock_irq(&ctx->lock);
7b648018 1968 return;
53cfbf59 1969 }
e625cce1 1970 raw_spin_unlock_irq(&ctx->lock);
7b648018 1971
fae3fde6
PZ
1972 event_function_call(event, __perf_event_disable, NULL);
1973}
1974
1975void perf_event_disable_local(struct perf_event *event)
1976{
1977 event_function_local(event, __perf_event_disable, NULL);
d859e29f 1978}
f63a8daa
PZ
1979
1980/*
1981 * Strictly speaking kernel users cannot create groups and therefore this
1982 * interface does not need the perf_event_ctx_lock() magic.
1983 */
1984void perf_event_disable(struct perf_event *event)
1985{
1986 struct perf_event_context *ctx;
1987
1988 ctx = perf_event_ctx_lock(event);
1989 _perf_event_disable(event);
1990 perf_event_ctx_unlock(event, ctx);
1991}
dcfce4a0 1992EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1993
5aab90ce
JO
1994void perf_event_disable_inatomic(struct perf_event *event)
1995{
1996 event->pending_disable = 1;
1997 irq_work_queue(&event->pending);
1998}
1999
e5d1367f
SE
2000static void perf_set_shadow_time(struct perf_event *event,
2001 struct perf_event_context *ctx,
2002 u64 tstamp)
2003{
2004 /*
2005 * use the correct time source for the time snapshot
2006 *
2007 * We could get by without this by leveraging the
2008 * fact that to get to this function, the caller
2009 * has most likely already called update_context_time()
2010 * and update_cgrp_time_xx() and thus both timestamps
2011 * are identical (or very close). Given that tstamp is
2012 * already adjusted for cgroup, we could say that:
2013 * tstamp - ctx->timestamp
2014 * is equivalent to
2015 * tstamp - cgrp->timestamp.
2016 *
2017 * Then, in perf_output_read(), the calculation would
2018 * work with no changes because:
2019 * - event is guaranteed scheduled in
2020 * - no scheduled out in between
2021 * - thus the timestamp would be the same
2022 *
2023 * But this is a bit hairy.
2024 *
2025 * So instead, we have an explicit cgroup call to remain
2026 * within the same time source all along. We believe it
2027 * is cleaner and simpler to understand.
2028 */
2029 if (is_cgroup_event(event))
2030 perf_cgroup_set_shadow_time(event, tstamp);
2031 else
2032 event->shadow_ctx_time = tstamp - ctx->timestamp;
2033}
2034
4fe757dd
PZ
2035#define MAX_INTERRUPTS (~0ULL)
2036
2037static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 2038static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 2039
235c7fc7 2040static int
9ffcfa6f 2041event_sched_in(struct perf_event *event,
235c7fc7 2042 struct perf_cpu_context *cpuctx,
6e37738a 2043 struct perf_event_context *ctx)
235c7fc7 2044{
4158755d 2045 u64 tstamp = perf_event_time(event);
44377277 2046 int ret = 0;
4158755d 2047
63342411
PZ
2048 lockdep_assert_held(&ctx->lock);
2049
cdd6c482 2050 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
2051 return 0;
2052
95ff4ca2
AS
2053 WRITE_ONCE(event->oncpu, smp_processor_id());
2054 /*
2055 * Order event::oncpu write to happen before the ACTIVE state
2056 * is visible.
2057 */
2058 smp_wmb();
2059 WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
4fe757dd
PZ
2060
2061 /*
2062 * Unthrottle events; since we just got scheduled in we might have missed
2063 * several ticks already, and for a heavily scheduling task there is little
2064 * guarantee it'll get a tick in a timely manner.
2065 */
2066 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2067 perf_log_throttle(event, 1);
2068 event->hw.interrupts = 0;
2069 }
2070
235c7fc7
IM
2071 /*
2072 * The new state must be visible before we turn it on in the hardware:
2073 */
2074 smp_wmb();
2075
44377277
AS
2076 perf_pmu_disable(event->pmu);
2077
72f669c0
SL
2078 perf_set_shadow_time(event, ctx, tstamp);
2079
ec0d7729
AS
2080 perf_log_itrace_start(event);
2081
a4eaf7f1 2082 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
2083 event->state = PERF_EVENT_STATE_INACTIVE;
2084 event->oncpu = -1;
44377277
AS
2085 ret = -EAGAIN;
2086 goto out;
235c7fc7
IM
2087 }
2088
00a2916f
PZ
2089 event->tstamp_running += tstamp - event->tstamp_stopped;
2090
cdd6c482 2091 if (!is_software_event(event))
3b6f9e5c 2092 cpuctx->active_oncpu++;
2fde4f94
MR
2093 if (!ctx->nr_active++)
2094 perf_event_ctx_activate(ctx);
0f5a2601
PZ
2095 if (event->attr.freq && event->attr.sample_freq)
2096 ctx->nr_freq++;
235c7fc7 2097
cdd6c482 2098 if (event->attr.exclusive)
3b6f9e5c
PM
2099 cpuctx->exclusive = 1;
2100
44377277
AS
2101out:
2102 perf_pmu_enable(event->pmu);
2103
2104 return ret;
235c7fc7
IM
2105}
2106
6751b71e 2107static int
cdd6c482 2108group_sched_in(struct perf_event *group_event,
6751b71e 2109 struct perf_cpu_context *cpuctx,
6e37738a 2110 struct perf_event_context *ctx)
6751b71e 2111{
6bde9b6c 2112 struct perf_event *event, *partial_group = NULL;
4a234593 2113 struct pmu *pmu = ctx->pmu;
d7842da4
SE
2114 u64 now = ctx->time;
2115 bool simulate = false;
6751b71e 2116
cdd6c482 2117 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
2118 return 0;
2119
fbbe0701 2120 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 2121
9ffcfa6f 2122 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 2123 pmu->cancel_txn(pmu);
272325c4 2124 perf_mux_hrtimer_restart(cpuctx);
6751b71e 2125 return -EAGAIN;
90151c35 2126 }
6751b71e
PM
2127
2128 /*
2129 * Schedule in siblings as one group (if any):
2130 */
cdd6c482 2131 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 2132 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 2133 partial_group = event;
6751b71e
PM
2134 goto group_error;
2135 }
2136 }
2137
9ffcfa6f 2138 if (!pmu->commit_txn(pmu))
6e85158c 2139 return 0;
9ffcfa6f 2140
6751b71e
PM
2141group_error:
2142 /*
2143 * Groups can be scheduled in as one unit only, so undo any
2144 * partial group before returning:
d7842da4
SE
2145 * The events up to the failed event are scheduled out normally;
2146 * tstamp_stopped will be updated.
2147 *
2148 * The failed events and the remaining siblings need to have
2149 * their timings updated as if they had gone through event_sched_in()
2150 * and event_sched_out(). This is required to get consistent timings
2151 * across the group. This also takes care of the case where the group
2152 * could never be scheduled by ensuring tstamp_stopped is set to mark
2153 * the time the event was actually stopped, such that time delta
2154 * calculation in update_event_times() is correct.
6751b71e 2155 */
cdd6c482
IM
2156 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2157 if (event == partial_group)
d7842da4
SE
2158 simulate = true;
2159
2160 if (simulate) {
2161 event->tstamp_running += now - event->tstamp_stopped;
2162 event->tstamp_stopped = now;
2163 } else {
2164 event_sched_out(event, cpuctx, ctx);
2165 }
6751b71e 2166 }
9ffcfa6f 2167 event_sched_out(group_event, cpuctx, ctx);
6751b71e 2168
ad5133b7 2169 pmu->cancel_txn(pmu);
90151c35 2170
272325c4 2171 perf_mux_hrtimer_restart(cpuctx);
9e630205 2172
6751b71e
PM
2173 return -EAGAIN;
2174}
2175
3b6f9e5c 2176/*
cdd6c482 2177 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 2178 */
cdd6c482 2179static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
2180 struct perf_cpu_context *cpuctx,
2181 int can_add_hw)
2182{
2183 /*
cdd6c482 2184 * Groups consisting entirely of software events can always go on.
3b6f9e5c 2185 */
4ff6a8de 2186 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
3b6f9e5c
PM
2187 return 1;
2188 /*
2189 * If an exclusive group is already on, no other hardware
cdd6c482 2190 * events can go on.
3b6f9e5c
PM
2191 */
2192 if (cpuctx->exclusive)
2193 return 0;
2194 /*
2195 * If this group is exclusive and there are already
cdd6c482 2196 * events on the CPU, it can't go on.
3b6f9e5c 2197 */
cdd6c482 2198 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
2199 return 0;
2200 /*
2201 * Otherwise, try to add it if all previous groups were able
2202 * to go on.
2203 */
2204 return can_add_hw;
2205}
2206
cdd6c482
IM
2207static void add_event_to_ctx(struct perf_event *event,
2208 struct perf_event_context *ctx)
53cfbf59 2209{
4158755d
SE
2210 u64 tstamp = perf_event_time(event);
2211
cdd6c482 2212 list_add_event(event, ctx);
8a49542c 2213 perf_group_attach(event);
4158755d
SE
2214 event->tstamp_enabled = tstamp;
2215 event->tstamp_running = tstamp;
2216 event->tstamp_stopped = tstamp;
53cfbf59
PM
2217}
2218
bd2afa49
PZ
2219static void ctx_sched_out(struct perf_event_context *ctx,
2220 struct perf_cpu_context *cpuctx,
2221 enum event_type_t event_type);
2c29ef0f
PZ
2222static void
2223ctx_sched_in(struct perf_event_context *ctx,
2224 struct perf_cpu_context *cpuctx,
2225 enum event_type_t event_type,
2226 struct task_struct *task);
fe4b04fa 2227
bd2afa49
PZ
2228static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2229 struct perf_event_context *ctx)
2230{
2231 if (!cpuctx->task_ctx)
2232 return;
2233
2234 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2235 return;
2236
2237 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2238}
2239
dce5855b
PZ
2240static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2241 struct perf_event_context *ctx,
2242 struct task_struct *task)
2243{
2244 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2245 if (ctx)
2246 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2247 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2248 if (ctx)
2249 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2250}
2251
3e349507
PZ
2252static void ctx_resched(struct perf_cpu_context *cpuctx,
2253 struct perf_event_context *task_ctx)
0017960f 2254{
3e349507
PZ
2255 perf_pmu_disable(cpuctx->ctx.pmu);
2256 if (task_ctx)
2257 task_ctx_sched_out(cpuctx, task_ctx);
2258 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2259 perf_event_sched_in(cpuctx, task_ctx, current);
2260 perf_pmu_enable(cpuctx->ctx.pmu);
0017960f
PZ
2261}
2262
0793a61d 2263/*
cdd6c482 2264 * Cross CPU call to install and enable a performance event
682076ae 2265 *
a096309b
PZ
2266 * Very similar to remote_function() + event_function() but cannot assume that
2267 * things like ctx->is_active and cpuctx->task_ctx are set.
0793a61d 2268 */
fe4b04fa 2269static int __perf_install_in_context(void *info)
0793a61d 2270{
a096309b
PZ
2271 struct perf_event *event = info;
2272 struct perf_event_context *ctx = event->ctx;
108b02cf 2273 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f 2274 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63cae12b 2275 bool reprogram = true;
a096309b 2276 int ret = 0;
0793a61d 2277
63b6da39 2278 raw_spin_lock(&cpuctx->ctx.lock);
39a43640 2279 if (ctx->task) {
b58f6b0d
PZ
2280 raw_spin_lock(&ctx->lock);
2281 task_ctx = ctx;
a096309b 2282
63cae12b 2283 reprogram = (ctx->task == current);
b58f6b0d 2284
39a43640 2285 /*
63cae12b
PZ
2286 * If the task is running, it must be running on this CPU,
2287 * otherwise we cannot reprogram things.
2288 *
2289 * If it's not running, we don't care; ctx->lock will
2290 * serialize against it becoming runnable.
39a43640 2291 */
63cae12b
PZ
2292 if (task_curr(ctx->task) && !reprogram) {
2293 ret = -ESRCH;
2294 goto unlock;
2295 }
a096309b 2296
63cae12b 2297 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
63b6da39
PZ
2298 } else if (task_ctx) {
2299 raw_spin_lock(&task_ctx->lock);
2c29ef0f 2300 }
b58f6b0d 2301
63cae12b 2302 if (reprogram) {
a096309b
PZ
2303 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2304 add_event_to_ctx(event, ctx);
2305 ctx_resched(cpuctx, task_ctx);
2306 } else {
2307 add_event_to_ctx(event, ctx);
2308 }
2309
63b6da39 2310unlock:
2c29ef0f 2311 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa 2312
a096309b 2313 return ret;
0793a61d
TG
2314}
2315
2316/*
a096309b
PZ
2317 * Attach a performance event to a context.
2318 *
2319 * Very similar to event_function_call, see comment there.
0793a61d
TG
2320 */
2321static void
cdd6c482
IM
2322perf_install_in_context(struct perf_event_context *ctx,
2323 struct perf_event *event,
0793a61d
TG
2324 int cpu)
2325{
a096309b 2326 struct task_struct *task = READ_ONCE(ctx->task);
39a43640 2327
fe4b04fa
PZ
2328 lockdep_assert_held(&ctx->mutex);
2329
0cda4c02
YZ
2330 if (event->cpu != -1)
2331 event->cpu = cpu;
c3f00c70 2332
0b8f1e2e
PZ
2333 /*
2334 * Ensures that if we can observe event->ctx, both the event and ctx
2335 * will be 'complete'. See perf_iterate_sb_cpu().
2336 */
2337 smp_store_release(&event->ctx, ctx);
2338
a096309b
PZ
2339 if (!task) {
2340 cpu_function_call(cpu, __perf_install_in_context, event);
2341 return;
2342 }
2343
2344 /*
2345 * Should not happen, we validate the ctx is still alive before calling.
2346 */
2347 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2348 return;
2349
39a43640
PZ
2350 /*
2351 * Installing events is tricky because we cannot rely on ctx->is_active
2352 * to be set in case this is the nr_events 0 -> 1 transition.
63cae12b
PZ
2353 *
2354 * Instead we use task_curr(), which tells us if the task is running.
2355 * However, since we use task_curr() outside of rq::lock, we can race
2356 * against the actual state. This means the result can be wrong.
2357 *
2358 * If we get a false positive, we retry, this is harmless.
2359 *
2360 * If we get a false negative, things are complicated. If we are after
2361 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2362 * value must be correct. If we're before, it doesn't matter since
2363 * perf_event_context_sched_in() will program the counter.
2364 *
2365 * However, this hinges on the remote context switch having observed
2366 * our task->perf_event_ctxp[] store, such that it will in fact take
2367 * ctx::lock in perf_event_context_sched_in().
2368 *
2369 * We do this by task_function_call(); if the IPI fails to hit the task,
2370 * we know any future context switch of the task must see the
2371 * perf_event_ctxp[] store.
39a43640 2372 */
63cae12b 2373
63b6da39 2374 /*
63cae12b
PZ
2375 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2376 * task_cpu() load, such that if the IPI then does not find the task
2377 * running, a future context switch of that task must observe the
2378 * store.
63b6da39 2379 */
63cae12b
PZ
2380 smp_mb();
2381again:
2382 if (!task_function_call(task, __perf_install_in_context, event))
a096309b
PZ
2383 return;
2384
2385 raw_spin_lock_irq(&ctx->lock);
2386 task = ctx->task;
84c4e620 2387 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
a096309b
PZ
2388 /*
2389 * Cannot happen because we already checked above (which also
2390 * cannot happen), and we hold ctx->mutex, which serializes us
2391 * against perf_event_exit_task_context().
2392 */
63b6da39
PZ
2393 raw_spin_unlock_irq(&ctx->lock);
2394 return;
2395 }
39a43640 2396 /*
63cae12b
PZ
2397 * If the task is not running, ctx->lock will avoid it becoming so,
2398 * thus we can safely install the event.
39a43640 2399 */
63cae12b
PZ
2400 if (task_curr(task)) {
2401 raw_spin_unlock_irq(&ctx->lock);
2402 goto again;
2403 }
2404 add_event_to_ctx(event, ctx);
2405 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
2406}
2407
fa289bec 2408/*
cdd6c482 2409 * Put an event into inactive state and update time fields.
fa289bec
PM
2410 * Enabling the leader of a group effectively enables all
2411 * the group members that aren't explicitly disabled, so we
2412 * have to update their ->tstamp_enabled also.
2413 * Note: this works for group members as well as group leaders
2414 * since the non-leader members' sibling_lists will be empty.
2415 */
1d9b482e 2416static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 2417{
cdd6c482 2418 struct perf_event *sub;
4158755d 2419 u64 tstamp = perf_event_time(event);
fa289bec 2420
cdd6c482 2421 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 2422 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 2423 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
2424 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2425 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 2426 }
fa289bec
PM
2427}
2428
d859e29f 2429/*
cdd6c482 2430 * Cross CPU call to enable a performance event
d859e29f 2431 */
fae3fde6
PZ
2432static void __perf_event_enable(struct perf_event *event,
2433 struct perf_cpu_context *cpuctx,
2434 struct perf_event_context *ctx,
2435 void *info)
04289bb9 2436{
cdd6c482 2437 struct perf_event *leader = event->group_leader;
fae3fde6 2438 struct perf_event_context *task_ctx;
04289bb9 2439
6e801e01
PZ
2440 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2441 event->state <= PERF_EVENT_STATE_ERROR)
fae3fde6 2442 return;
3cbed429 2443
bd2afa49
PZ
2444 if (ctx->is_active)
2445 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2446
1d9b482e 2447 __perf_event_mark_enabled(event);
04289bb9 2448
fae3fde6
PZ
2449 if (!ctx->is_active)
2450 return;
2451
e5d1367f 2452 if (!event_filter_match(event)) {
bd2afa49 2453 if (is_cgroup_event(event))
e5d1367f 2454 perf_cgroup_defer_enabled(event);
bd2afa49 2455 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2456 return;
e5d1367f 2457 }
f4c4176f 2458
04289bb9 2459 /*
cdd6c482 2460 * If the event is in a group and isn't the group leader,
d859e29f 2461 * then don't put it on unless the group is on.
04289bb9 2462 */
bd2afa49
PZ
2463 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2464 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2465 return;
bd2afa49 2466 }
fe4b04fa 2467
fae3fde6
PZ
2468 task_ctx = cpuctx->task_ctx;
2469 if (ctx->task)
2470 WARN_ON_ONCE(task_ctx != ctx);
d859e29f 2471
fae3fde6 2472 ctx_resched(cpuctx, task_ctx);
7b648018
PZ
2473}
2474
d859e29f 2475/*
cdd6c482 2476 * Enable an event.
c93f7669 2477 *
cdd6c482
IM
2478 * If event->ctx is a cloned context, callers must make sure that
2479 * every task struct that event->ctx->task could possibly point to
c93f7669 2480 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2481 * perf_event_for_each_child or perf_event_for_each as described
2482 * for perf_event_disable.
d859e29f 2483 */
f63a8daa 2484static void _perf_event_enable(struct perf_event *event)
d859e29f 2485{
cdd6c482 2486 struct perf_event_context *ctx = event->ctx;
d859e29f 2487
7b648018 2488 raw_spin_lock_irq(&ctx->lock);
6e801e01
PZ
2489 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2490 event->state < PERF_EVENT_STATE_ERROR) {
7b648018 2491 raw_spin_unlock_irq(&ctx->lock);
d859e29f
PM
2492 return;
2493 }
2494
d859e29f 2495 /*
cdd6c482 2496 * If the event is in error state, clear that first.
7b648018
PZ
2497 *
2498 * That way, if we see the event in error state below, we know that it
2499 * has gone back into error state, as distinct from the task having
2500 * been scheduled away before the cross-call arrived.
d859e29f 2501 */
cdd6c482
IM
2502 if (event->state == PERF_EVENT_STATE_ERROR)
2503 event->state = PERF_EVENT_STATE_OFF;
e625cce1 2504 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa 2505
fae3fde6 2506 event_function_call(event, __perf_event_enable, NULL);
d859e29f 2507}
f63a8daa
PZ
2508
2509/*
2510 * See perf_event_disable();
2511 */
2512void perf_event_enable(struct perf_event *event)
2513{
2514 struct perf_event_context *ctx;
2515
2516 ctx = perf_event_ctx_lock(event);
2517 _perf_event_enable(event);
2518 perf_event_ctx_unlock(event, ctx);
2519}
dcfce4a0 2520EXPORT_SYMBOL_GPL(perf_event_enable);
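/*
 * Hedged usage sketch (not part of the original file): roughly how an
 * in-kernel user might pair perf_event_create_kernel_counter() with the
 * exported perf_event_enable()/perf_event_disable() helpers. The names
 * prefixed "example_" are hypothetical; error handling is minimal.
 */
#if 0	/* example only */
static struct perf_event *example_cycles_event;

static int example_start_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.disabled	= 1,		/* start stopped, enable below */
	};
	struct perf_event *event;

	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);

	example_cycles_event = event;
	perf_event_enable(example_cycles_event);
	return 0;
}

static void example_stop_cycle_counter(void)
{
	perf_event_disable(example_cycles_event);
	perf_event_release_kernel(example_cycles_event);
}
#endif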
d859e29f 2521
375637bc
AS
2522struct stop_event_data {
2523 struct perf_event *event;
2524 unsigned int restart;
2525};
2526
95ff4ca2
AS
2527static int __perf_event_stop(void *info)
2528{
375637bc
AS
2529 struct stop_event_data *sd = info;
2530 struct perf_event *event = sd->event;
95ff4ca2 2531
375637bc 2532 /* if it's already INACTIVE, do nothing */
95ff4ca2
AS
2533 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2534 return 0;
2535
2536 /* matches smp_wmb() in event_sched_in() */
2537 smp_rmb();
2538
2539 /*
2540 * There is a window with interrupts enabled before we get here,
2541 * so we need to check again lest we try to stop another CPU's event.
2542 */
2543 if (READ_ONCE(event->oncpu) != smp_processor_id())
2544 return -EAGAIN;
2545
2546 event->pmu->stop(event, PERF_EF_UPDATE);
2547
375637bc
AS
2548 /*
2549 * May race with the actual stop (through perf_pmu_output_stop()),
2550 * but it is only used for events with AUX ring buffer, and such
2551 * events will refuse to restart because of rb::aux_mmap_count==0,
2552 * see comments in perf_aux_output_begin().
2553 *
2554 * Since this is happening on an event-local CPU, no trace is lost
2555 * while restarting.
2556 */
2557 if (sd->restart)
c9bbdd48 2558 event->pmu->start(event, 0);
375637bc 2559
95ff4ca2
AS
2560 return 0;
2561}
2562
767ae086 2563static int perf_event_stop(struct perf_event *event, int restart)
375637bc
AS
2564{
2565 struct stop_event_data sd = {
2566 .event = event,
767ae086 2567 .restart = restart,
375637bc
AS
2568 };
2569 int ret = 0;
2570
2571 do {
2572 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2573 return 0;
2574
2575 /* matches smp_wmb() in event_sched_in() */
2576 smp_rmb();
2577
2578 /*
2579 * We only want to restart ACTIVE events, so if the event goes
2580 * inactive here (event->oncpu==-1), there's nothing more to do;
2581 * fall through with ret==-ENXIO.
2582 */
2583 ret = cpu_function_call(READ_ONCE(event->oncpu),
2584 __perf_event_stop, &sd);
2585 } while (ret == -EAGAIN);
2586
2587 return ret;
2588}
2589
2590/*
2591 * In order to contain the racy and tricky bits of address filter
2592 * configuration management, it is a two-part process:
2593 *
2594 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2595 * we update the addresses of corresponding vmas in
2596 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2597 * (p2) when an event is scheduled in (pmu::add), it calls
2598 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2599 * if the generation has changed since the previous call.
2600 *
2601 * If (p1) happens while the event is active, we restart it to force (p2).
2602 *
2603 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2604 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2605 * ioctl;
2606 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2607 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2608 * for reading;
2609 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2610 * of exec.
2611 */
2612void perf_event_addr_filters_sync(struct perf_event *event)
2613{
2614 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2615
2616 if (!has_addr_filter(event))
2617 return;
2618
2619 raw_spin_lock(&ifh->lock);
2620 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2621 event->pmu->addr_filters_sync(event);
2622 event->hw.addr_filters_gen = event->addr_filters_gen;
2623 }
2624 raw_spin_unlock(&ifh->lock);
2625}
2626EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
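/*
 * Illustrative sequence for the (p1)/(p2) protocol described above
 * (added for clarity, not part of the original source):
 *
 *   exec()                -> (3) clears the filters' offsets and bumps
 *                            event->addr_filters_gen
 *   pmu::add on sched-in  -> perf_event_addr_filters_sync() sees
 *                            hw.addr_filters_gen lagging behind and
 *                            calls pmu::addr_filters_sync() to
 *                            reprogram the hardware filters
 */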
2627
f63a8daa 2628static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2629{
2023b359 2630 /*
cdd6c482 2631 * not supported on inherited events
2023b359 2632 */
2e939d1d 2633 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2634 return -EINVAL;
2635
cdd6c482 2636 atomic_add(refresh, &event->event_limit);
f63a8daa 2637 _perf_event_enable(event);
2023b359
PZ
2638
2639 return 0;
79f14641 2640}
f63a8daa
PZ
2641
2642/*
2643 * See perf_event_disable()
2644 */
2645int perf_event_refresh(struct perf_event *event, int refresh)
2646{
2647 struct perf_event_context *ctx;
2648 int ret;
2649
2650 ctx = perf_event_ctx_lock(event);
2651 ret = _perf_event_refresh(event, refresh);
2652 perf_event_ctx_unlock(event, ctx);
2653
2654 return ret;
2655}
26ca5c11 2656EXPORT_SYMBOL_GPL(perf_event_refresh);
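/*
 * Usage note (illustrative, not part of the original source):
 * perf_event_refresh(event, 1) adds one to event->event_limit and
 * enables the event, re-arming a sampling event that auto-disables on
 * overflow for one more overflow. Per the checks in
 * _perf_event_refresh(), this is only valid for sampling events that
 * are not inherited.
 */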
79f14641 2657
5b0311e1
FW
2658static void ctx_sched_out(struct perf_event_context *ctx,
2659 struct perf_cpu_context *cpuctx,
2660 enum event_type_t event_type)
235c7fc7 2661{
db24d33e 2662 int is_active = ctx->is_active;
c994d613 2663 struct perf_event *event;
235c7fc7 2664
c994d613 2665 lockdep_assert_held(&ctx->lock);
235c7fc7 2666
39a43640
PZ
2667 if (likely(!ctx->nr_events)) {
2668 /*
2669 * See __perf_remove_from_context().
2670 */
2671 WARN_ON_ONCE(ctx->is_active);
2672 if (ctx->task)
2673 WARN_ON_ONCE(cpuctx->task_ctx);
facc4307 2674 return;
39a43640
PZ
2675 }
2676
db24d33e 2677 ctx->is_active &= ~event_type;
3cbaa590
PZ
2678 if (!(ctx->is_active & EVENT_ALL))
2679 ctx->is_active = 0;
2680
63e30d3e
PZ
2681 if (ctx->task) {
2682 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2683 if (!ctx->is_active)
2684 cpuctx->task_ctx = NULL;
2685 }
facc4307 2686
8fdc6539
PZ
2687 /*
2688 * Always update time if it was set; not only when it changes.
2689 * Otherwise we can 'forget' to update time for any but the last
2690 * context we sched out. For example:
2691 *
2692 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2693 * ctx_sched_out(.event_type = EVENT_PINNED)
2694 *
2695 * would only update time for the pinned events.
2696 */
3cbaa590
PZ
2697 if (is_active & EVENT_TIME) {
2698 /* update (and stop) ctx time */
2699 update_context_time(ctx);
2700 update_cgrp_time_from_cpuctx(cpuctx);
2701 }
2702
8fdc6539
PZ
2703 is_active ^= ctx->is_active; /* changed bits */
2704
3cbaa590 2705 if (!ctx->nr_active || !(is_active & EVENT_ALL))
facc4307 2706 return;
5b0311e1 2707
075e0b00 2708 perf_pmu_disable(ctx->pmu);
3cbaa590 2709 if (is_active & EVENT_PINNED) {
889ff015
FW
2710 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2711 group_sched_out(event, cpuctx, ctx);
9ed6060d 2712 }
889ff015 2713
3cbaa590 2714 if (is_active & EVENT_FLEXIBLE) {
889ff015 2715 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2716 group_sched_out(event, cpuctx, ctx);
9ed6060d 2717 }
1b9a644f 2718 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2719}
2720
564c2b21 2721/*
5a3126d4
PZ
2722 * Test whether two contexts are equivalent, i.e. whether they have both been
2723 * cloned from the same version of the same context.
2724 *
2725 * Equivalence is measured using a generation number in the context that is
2726 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2727 * and list_del_event().
564c2b21 2728 */
cdd6c482
IM
2729static int context_equiv(struct perf_event_context *ctx1,
2730 struct perf_event_context *ctx2)
564c2b21 2731{
211de6eb
PZ
2732 lockdep_assert_held(&ctx1->lock);
2733 lockdep_assert_held(&ctx2->lock);
2734
5a3126d4
PZ
2735 /* Pinning disables the swap optimization */
2736 if (ctx1->pin_count || ctx2->pin_count)
2737 return 0;
2738
2739 /* If ctx1 is the parent of ctx2 */
2740 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2741 return 1;
2742
2743 /* If ctx2 is the parent of ctx1 */
2744 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2745 return 1;
2746
2747 /*
2748 * If ctx1 and ctx2 have the same parent; we flatten the parent
2749 * hierarchy, see perf_event_init_context().
2750 */
2751 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2752 ctx1->parent_gen == ctx2->parent_gen)
2753 return 1;
2754
2755 /* Unmatched */
2756 return 0;
564c2b21
PM
2757}
2758
cdd6c482
IM
2759static void __perf_event_sync_stat(struct perf_event *event,
2760 struct perf_event *next_event)
bfbd3381
PZ
2761{
2762 u64 value;
2763
cdd6c482 2764 if (!event->attr.inherit_stat)
bfbd3381
PZ
2765 return;
2766
2767 /*
cdd6c482 2768 * Update the event value; we cannot use perf_event_read()
bfbd3381
PZ
2769 * because we're in the middle of a context switch and have IRQs
2770 * disabled, which upsets smp_call_function_single(). However,
cdd6c482 2771 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2772 * don't need to use it.
2773 */
cdd6c482
IM
2774 switch (event->state) {
2775 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2776 event->pmu->read(event);
2777 /* fall-through */
bfbd3381 2778
cdd6c482
IM
2779 case PERF_EVENT_STATE_INACTIVE:
2780 update_event_times(event);
bfbd3381
PZ
2781 break;
2782
2783 default:
2784 break;
2785 }
2786
2787 /*
cdd6c482 2788 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2789 * values when we flip the contexts.
2790 */
e7850595
PZ
2791 value = local64_read(&next_event->count);
2792 value = local64_xchg(&event->count, value);
2793 local64_set(&next_event->count, value);
bfbd3381 2794
cdd6c482
IM
2795 swap(event->total_time_enabled, next_event->total_time_enabled);
2796 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2797
bfbd3381 2798 /*
19d2e755 2799 * Since we swizzled the values, update the user visible data too.
bfbd3381 2800 */
cdd6c482
IM
2801 perf_event_update_userpage(event);
2802 perf_event_update_userpage(next_event);
bfbd3381
PZ
2803}
2804
cdd6c482
IM
2805static void perf_event_sync_stat(struct perf_event_context *ctx,
2806 struct perf_event_context *next_ctx)
bfbd3381 2807{
cdd6c482 2808 struct perf_event *event, *next_event;
bfbd3381
PZ
2809
2810 if (!ctx->nr_stat)
2811 return;
2812
02ffdbc8
PZ
2813 update_context_time(ctx);
2814
cdd6c482
IM
2815 event = list_first_entry(&ctx->event_list,
2816 struct perf_event, event_entry);
bfbd3381 2817
cdd6c482
IM
2818 next_event = list_first_entry(&next_ctx->event_list,
2819 struct perf_event, event_entry);
bfbd3381 2820
cdd6c482
IM
2821 while (&event->event_entry != &ctx->event_list &&
2822 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2823
cdd6c482 2824 __perf_event_sync_stat(event, next_event);
bfbd3381 2825
cdd6c482
IM
2826 event = list_next_entry(event, event_entry);
2827 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2828 }
2829}
2830
fe4b04fa
PZ
2831static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2832 struct task_struct *next)
0793a61d 2833{
8dc85d54 2834 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2835 struct perf_event_context *next_ctx;
5a3126d4 2836 struct perf_event_context *parent, *next_parent;
108b02cf 2837 struct perf_cpu_context *cpuctx;
c93f7669 2838 int do_switch = 1;
0793a61d 2839
108b02cf
PZ
2840 if (likely(!ctx))
2841 return;
10989fb2 2842
108b02cf
PZ
2843 cpuctx = __get_cpu_context(ctx);
2844 if (!cpuctx->task_ctx)
0793a61d
TG
2845 return;
2846
c93f7669 2847 rcu_read_lock();
8dc85d54 2848 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2849 if (!next_ctx)
2850 goto unlock;
2851
2852 parent = rcu_dereference(ctx->parent_ctx);
2853 next_parent = rcu_dereference(next_ctx->parent_ctx);
2854
2855 /* If neither context have a parent context; they cannot be clones. */
802c8a61 2856 if (!parent && !next_parent)
5a3126d4
PZ
2857 goto unlock;
2858
2859 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2860 /*
2861 * Looks like the two contexts are clones, so we might be
2862 * able to optimize the context switch. We lock both
2863 * contexts and check that they are clones under the
2864 * lock (including re-checking that neither has been
2865 * uncloned in the meantime). It doesn't matter which
2866 * order we take the locks because no other cpu could
2867 * be trying to lock both of these tasks.
2868 */
e625cce1
TG
2869 raw_spin_lock(&ctx->lock);
2870 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2871 if (context_equiv(ctx, next_ctx)) {
63b6da39
PZ
2872 WRITE_ONCE(ctx->task, next);
2873 WRITE_ONCE(next_ctx->task, task);
5a158c3c
YZ
2874
2875 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2876
63b6da39
PZ
2877 /*
2878 * RCU_INIT_POINTER here is safe because we've not
2879 * modified the ctx and the above modification of
2880 * ctx->task and ctx->task_ctx_data are immaterial
2881 * since those values are always verified under
2882 * ctx->lock which we're now holding.
2883 */
2884 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2885 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2886
c93f7669 2887 do_switch = 0;
bfbd3381 2888
cdd6c482 2889 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2890 }
e625cce1
TG
2891 raw_spin_unlock(&next_ctx->lock);
2892 raw_spin_unlock(&ctx->lock);
564c2b21 2893 }
5a3126d4 2894unlock:
c93f7669 2895 rcu_read_unlock();
564c2b21 2896
c93f7669 2897 if (do_switch) {
facc4307 2898 raw_spin_lock(&ctx->lock);
8833d0e2 2899 task_ctx_sched_out(cpuctx, ctx);
facc4307 2900 raw_spin_unlock(&ctx->lock);
c93f7669 2901 }
0793a61d
TG
2902}
2903
e48c1788
PZ
2904static DEFINE_PER_CPU(struct list_head, sched_cb_list);
2905
ba532500
YZ
2906void perf_sched_cb_dec(struct pmu *pmu)
2907{
e48c1788
PZ
2908 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2909
ba532500 2910 this_cpu_dec(perf_sched_cb_usages);
e48c1788
PZ
2911
2912 if (!--cpuctx->sched_cb_usage)
2913 list_del(&cpuctx->sched_cb_entry);
ba532500
YZ
2914}
2915
e48c1788 2916
ba532500
YZ
2917void perf_sched_cb_inc(struct pmu *pmu)
2918{
e48c1788
PZ
2919 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2920
2921 if (!cpuctx->sched_cb_usage++)
2922 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
2923
ba532500
YZ
2924 this_cpu_inc(perf_sched_cb_usages);
2925}
2926
2927/*
2928 * This function provides the context switch callback to the lower code
2929 * layer. It is invoked ONLY when the context switch callback is enabled.
09e61b4f
PZ
2930 *
2931 * This callback is relevant even to per-cpu events; for example multi event
2932 * PEBS requires this to provide PID/TID information. This requires we flush
2933 * all queued PEBS records before we context switch to a new task.
ba532500
YZ
2934 */
2935static void perf_pmu_sched_task(struct task_struct *prev,
2936 struct task_struct *next,
2937 bool sched_in)
2938{
2939 struct perf_cpu_context *cpuctx;
2940 struct pmu *pmu;
ba532500
YZ
2941
2942 if (prev == next)
2943 return;
2944
e48c1788
PZ
2945 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
2946 pmu = cpuctx->unique_pmu; /* software PMUs will not have sched_task */
ba532500 2947
e48c1788
PZ
2948 if (WARN_ON_ONCE(!pmu->sched_task))
2949 continue;
ba532500 2950
e48c1788
PZ
2951 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2952 perf_pmu_disable(pmu);
ba532500 2953
e48c1788 2954 pmu->sched_task(cpuctx->task_ctx, sched_in);
ba532500 2955
e48c1788
PZ
2956 perf_pmu_enable(pmu);
2957 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
ba532500 2958 }
ba532500
YZ
2959}
2960
45ac1403
AH
2961static void perf_event_switch(struct task_struct *task,
2962 struct task_struct *next_prev, bool sched_in);
2963
8dc85d54
PZ
2964#define for_each_task_context_nr(ctxn) \
2965 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2966
2967/*
2968 * Called from scheduler to remove the events of the current task,
2969 * with interrupts disabled.
2970 *
2971 * We stop each event and update the event value in event->count.
2972 *
2973 * This does not protect us against NMI, but disable()
2974 * sets the disabled bit in the control field of event _before_
2975 * accessing the event control register. If an NMI hits, then it will
2976 * not restart the event.
2977 */
ab0cce56
JO
2978void __perf_event_task_sched_out(struct task_struct *task,
2979 struct task_struct *next)
8dc85d54
PZ
2980{
2981 int ctxn;
2982
ba532500
YZ
2983 if (__this_cpu_read(perf_sched_cb_usages))
2984 perf_pmu_sched_task(task, next, false);
2985
45ac1403
AH
2986 if (atomic_read(&nr_switch_events))
2987 perf_event_switch(task, next, false);
2988
8dc85d54
PZ
2989 for_each_task_context_nr(ctxn)
2990 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2991
2992 /*
2993 * if cgroup events exist on this CPU, then we need
2994 * to check if we have to switch out PMU state.
2995 * cgroup events are system-wide mode only
2996 */
4a32fea9 2997 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 2998 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2999}
3000
5b0311e1
FW
3001/*
3002 * Called with IRQs disabled
3003 */
3004static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
3005 enum event_type_t event_type)
3006{
3007 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
3008}
3009
235c7fc7 3010static void
5b0311e1 3011ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 3012 struct perf_cpu_context *cpuctx)
0793a61d 3013{
cdd6c482 3014 struct perf_event *event;
0793a61d 3015
889ff015
FW
3016 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
3017 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 3018 continue;
5632ab12 3019 if (!event_filter_match(event))
3b6f9e5c
PM
3020 continue;
3021
e5d1367f
SE
3022 /* may need to reset tstamp_enabled */
3023 if (is_cgroup_event(event))
3024 perf_cgroup_mark_enabled(event, ctx);
3025
8c9ed8e1 3026 if (group_can_go_on(event, cpuctx, 1))
6e37738a 3027 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
3028
3029 /*
3030 * If this pinned group hasn't been scheduled,
3031 * put it in error state.
3032 */
cdd6c482
IM
3033 if (event->state == PERF_EVENT_STATE_INACTIVE) {
3034 update_group_times(event);
3035 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 3036 }
3b6f9e5c 3037 }
5b0311e1
FW
3038}
3039
3040static void
3041ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 3042 struct perf_cpu_context *cpuctx)
5b0311e1
FW
3043{
3044 struct perf_event *event;
3045 int can_add_hw = 1;
3b6f9e5c 3046
889ff015
FW
3047 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
3048 /* Ignore events in OFF or ERROR state */
3049 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 3050 continue;
04289bb9
IM
3051 /*
3052 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 3053 * of events:
04289bb9 3054 */
5632ab12 3055 if (!event_filter_match(event))
0793a61d
TG
3056 continue;
3057
e5d1367f
SE
3058 /* may need to reset tstamp_enabled */
3059 if (is_cgroup_event(event))
3060 perf_cgroup_mark_enabled(event, ctx);
3061
9ed6060d 3062 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 3063 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 3064 can_add_hw = 0;
9ed6060d 3065 }
0793a61d 3066 }
5b0311e1
FW
3067}
3068
3069static void
3070ctx_sched_in(struct perf_event_context *ctx,
3071 struct perf_cpu_context *cpuctx,
e5d1367f
SE
3072 enum event_type_t event_type,
3073 struct task_struct *task)
5b0311e1 3074{
db24d33e 3075 int is_active = ctx->is_active;
c994d613
PZ
3076 u64 now;
3077
3078 lockdep_assert_held(&ctx->lock);
e5d1367f 3079
5b0311e1 3080 if (likely(!ctx->nr_events))
facc4307 3081 return;
5b0311e1 3082
3cbaa590 3083 ctx->is_active |= (event_type | EVENT_TIME);
63e30d3e
PZ
3084 if (ctx->task) {
3085 if (!is_active)
3086 cpuctx->task_ctx = ctx;
3087 else
3088 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3089 }
3090
3cbaa590
PZ
3091 is_active ^= ctx->is_active; /* changed bits */
3092
3093 if (is_active & EVENT_TIME) {
3094 /* start ctx time */
3095 now = perf_clock();
3096 ctx->timestamp = now;
3097 perf_cgroup_set_timestamp(task, ctx);
3098 }
3099
5b0311e1
FW
3100 /*
3101 * First go through the list and put on any pinned groups
3102 * in order to give them the best chance of going on.
3103 */
3cbaa590 3104 if (is_active & EVENT_PINNED)
6e37738a 3105 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
3106
3107 /* Then walk through the lower prio flexible groups */
3cbaa590 3108 if (is_active & EVENT_FLEXIBLE)
6e37738a 3109 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
3110}
3111
329c0e01 3112static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
3113 enum event_type_t event_type,
3114 struct task_struct *task)
329c0e01
FW
3115{
3116 struct perf_event_context *ctx = &cpuctx->ctx;
3117
e5d1367f 3118 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
3119}
3120
e5d1367f
SE
3121static void perf_event_context_sched_in(struct perf_event_context *ctx,
3122 struct task_struct *task)
235c7fc7 3123{
108b02cf 3124 struct perf_cpu_context *cpuctx;
235c7fc7 3125
108b02cf 3126 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
3127 if (cpuctx->task_ctx == ctx)
3128 return;
3129
facc4307 3130 perf_ctx_lock(cpuctx, ctx);
1b9a644f 3131 perf_pmu_disable(ctx->pmu);
329c0e01
FW
3132 /*
3133 * We want to keep the following priority order:
3134 * cpu pinned (that don't need to move), task pinned,
3135 * cpu flexible, task flexible.
3136 */
3137 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
63e30d3e 3138 perf_event_sched_in(cpuctx, ctx, task);
facc4307
PZ
3139 perf_pmu_enable(ctx->pmu);
3140 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
3141}
3142
8dc85d54
PZ
3143/*
3144 * Called from scheduler to add the events of the current task
3145 * with interrupts disabled.
3146 *
3147 * We restore the event value and then enable it.
3148 *
3149 * This does not protect us against NMI, but enable()
3150 * sets the enabled bit in the control field of event _before_
3151 * accessing the event control register. If an NMI hits, then it will
3152 * keep the event running.
3153 */
ab0cce56
JO
3154void __perf_event_task_sched_in(struct task_struct *prev,
3155 struct task_struct *task)
8dc85d54
PZ
3156{
3157 struct perf_event_context *ctx;
3158 int ctxn;
3159
7e41d177
PZ
3160 /*
3161 * If cgroup events exist on this CPU, then we need to check if we have
3162 * to switch in PMU state; cgroup events are system-wide mode only.
3163 *
3164 * Since cgroup events are CPU events, we must schedule these in before
3165 * we schedule in the task events.
3166 */
3167 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3168 perf_cgroup_sched_in(prev, task);
3169
8dc85d54
PZ
3170 for_each_task_context_nr(ctxn) {
3171 ctx = task->perf_event_ctxp[ctxn];
3172 if (likely(!ctx))
3173 continue;
3174
e5d1367f 3175 perf_event_context_sched_in(ctx, task);
8dc85d54 3176 }
d010b332 3177
45ac1403
AH
3178 if (atomic_read(&nr_switch_events))
3179 perf_event_switch(task, prev, true);
3180
ba532500
YZ
3181 if (__this_cpu_read(perf_sched_cb_usages))
3182 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
3183}
3184
abd50713
PZ
3185static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3186{
3187 u64 frequency = event->attr.sample_freq;
3188 u64 sec = NSEC_PER_SEC;
3189 u64 divisor, dividend;
3190
3191 int count_fls, nsec_fls, frequency_fls, sec_fls;
3192
3193 count_fls = fls64(count);
3194 nsec_fls = fls64(nsec);
3195 frequency_fls = fls64(frequency);
3196 sec_fls = 30;
3197
3198 /*
3199 * We got @count in @nsec; with a target of sample_freq HZ,
3200 * the target period becomes:
3201 *
3202 * @count * 10^9
3203 * period = -------------------
3204 * @nsec * sample_freq
3205 *
3206 */
3207
3208 /*
3209 * Reduce accuracy by one bit such that @a and @b converge
3210 * to a similar magnitude.
3211 */
fe4b04fa 3212#define REDUCE_FLS(a, b) \
abd50713
PZ
3213do { \
3214 if (a##_fls > b##_fls) { \
3215 a >>= 1; \
3216 a##_fls--; \
3217 } else { \
3218 b >>= 1; \
3219 b##_fls--; \
3220 } \
3221} while (0)
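/*
 * Example (illustrative, not part of the original source): with
 * nsec_fls = 60 and frequency_fls = 20, REDUCE_FLS(nsec, frequency)
 * halves nsec and drops nsec_fls to 59, since nsec currently uses more
 * bits; repeated applications in the loops below keep the eventual
 * 64-bit products from overflowing, at the cost of low-order bits.
 */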
3222
3223 /*
3224 * Reduce accuracy until either term fits in a u64, then proceed with
3225 * the other, so that finally we can do a u64/u64 division.
3226 */
3227 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3228 REDUCE_FLS(nsec, frequency);
3229 REDUCE_FLS(sec, count);
3230 }
3231
3232 if (count_fls + sec_fls > 64) {
3233 divisor = nsec * frequency;
3234
3235 while (count_fls + sec_fls > 64) {
3236 REDUCE_FLS(count, sec);
3237 divisor >>= 1;
3238 }
3239
3240 dividend = count * sec;
3241 } else {
3242 dividend = count * sec;
3243
3244 while (nsec_fls + frequency_fls > 64) {
3245 REDUCE_FLS(nsec, frequency);
3246 dividend >>= 1;
3247 }
3248
3249 divisor = nsec * frequency;
3250 }
3251
f6ab91ad
PZ
3252 if (!divisor)
3253 return dividend;
3254
abd50713
PZ
3255 return div64_u64(dividend, divisor);
3256}
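/*
 * Worked example (illustrative, not part of the original source): with
 * @count = 4,000,000 events counted over @nsec = 1,000,000,000 ns and
 * attr.sample_freq = 1000 Hz, the formula above gives
 *
 *	period = (4,000,000 * 10^9) / (10^9 * 1000) = 4000
 *
 * i.e. a sample roughly every 4000 events yields the requested
 * 1000 samples per second.
 */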
3257
e050e3f0
SE
3258static DEFINE_PER_CPU(int, perf_throttled_count);
3259static DEFINE_PER_CPU(u64, perf_throttled_seq);
3260
f39d47ff 3261static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 3262{
cdd6c482 3263 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 3264 s64 period, sample_period;
bd2b5b12
PZ
3265 s64 delta;
3266
abd50713 3267 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
3268
3269 delta = (s64)(period - hwc->sample_period);
3270 delta = (delta + 7) / 8; /* low pass filter */
3271
3272 sample_period = hwc->sample_period + delta;
3273
3274 if (!sample_period)
3275 sample_period = 1;
3276
bd2b5b12 3277 hwc->sample_period = sample_period;
abd50713 3278
e7850595 3279 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
3280 if (disable)
3281 event->pmu->stop(event, PERF_EF_UPDATE);
3282
e7850595 3283 local64_set(&hwc->period_left, 0);
f39d47ff
SE
3284
3285 if (disable)
3286 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 3287 }
bd2b5b12
PZ
3288}
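/*
 * Illustrative numbers for the low pass filter above (not part of the
 * original source): if hwc->sample_period is 8000 and the newly
 * computed period is 16000, then delta = 8000 and (delta + 7) / 8 =
 * 1000, so the sample period only moves to 9000 on this adjustment;
 * successive ticks converge on the target instead of reacting to one
 * noisy interval.
 */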
3289
e050e3f0
SE
3290/*
3291 * combine freq adjustment with unthrottling to avoid two passes over the
3292 * events. At the same time, make sure that having freq events does not change
3293 * the rate of unthrottling, as that would introduce bias.
3294 */
3295static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3296 int needs_unthr)
60db5e09 3297{
cdd6c482
IM
3298 struct perf_event *event;
3299 struct hw_perf_event *hwc;
e050e3f0 3300 u64 now, period = TICK_NSEC;
abd50713 3301 s64 delta;
60db5e09 3302
e050e3f0
SE
3303 /*
3304 * only need to iterate over all events iff:
3305 * - context has events in frequency mode (needs freq adjust)
3306 * - there are events to unthrottle on this cpu
3307 */
3308 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
3309 return;
3310
e050e3f0 3311 raw_spin_lock(&ctx->lock);
f39d47ff 3312 perf_pmu_disable(ctx->pmu);
e050e3f0 3313
03541f8b 3314 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 3315 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
3316 continue;
3317
5632ab12 3318 if (!event_filter_match(event))
5d27c23d
PZ
3319 continue;
3320
44377277
AS
3321 perf_pmu_disable(event->pmu);
3322
cdd6c482 3323 hwc = &event->hw;
6a24ed6c 3324
ae23bff1 3325 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 3326 hwc->interrupts = 0;
cdd6c482 3327 perf_log_throttle(event, 1);
a4eaf7f1 3328 event->pmu->start(event, 0);
a78ac325
PZ
3329 }
3330
cdd6c482 3331 if (!event->attr.freq || !event->attr.sample_freq)
44377277 3332 goto next;
60db5e09 3333
e050e3f0
SE
3334 /*
3335 * stop the event and update event->count
3336 */
3337 event->pmu->stop(event, PERF_EF_UPDATE);
3338
e7850595 3339 now = local64_read(&event->count);
abd50713
PZ
3340 delta = now - hwc->freq_count_stamp;
3341 hwc->freq_count_stamp = now;
60db5e09 3342
e050e3f0
SE
3343 /*
3344 * restart the event
3345 * reload only if value has changed
f39d47ff
SE
3346 * we have stopped the event so tell that
3347 * to perf_adjust_period() to avoid stopping it
3348 * twice.
e050e3f0 3349 */
abd50713 3350 if (delta > 0)
f39d47ff 3351 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3352
3353 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3354 next:
3355 perf_pmu_enable(event->pmu);
60db5e09 3356 }
e050e3f0 3357
f39d47ff 3358 perf_pmu_enable(ctx->pmu);
e050e3f0 3359 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3360}
3361
235c7fc7 3362/*
cdd6c482 3363 * Round-robin a context's events:
235c7fc7 3364 */
cdd6c482 3365static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 3366{
dddd3379
TG
3367 /*
3368 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
3369 * disabled by the inheritance code.
3370 */
3371 if (!ctx->rotate_disable)
3372 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
3373}
3374
9e630205 3375static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 3376{
8dc85d54 3377 struct perf_event_context *ctx = NULL;
2fde4f94 3378 int rotate = 0;
7fc23a53 3379
b5ab4cd5 3380 if (cpuctx->ctx.nr_events) {
b5ab4cd5
PZ
3381 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3382 rotate = 1;
3383 }
235c7fc7 3384
8dc85d54 3385 ctx = cpuctx->task_ctx;
b5ab4cd5 3386 if (ctx && ctx->nr_events) {
b5ab4cd5
PZ
3387 if (ctx->nr_events != ctx->nr_active)
3388 rotate = 1;
3389 }
9717e6cd 3390
e050e3f0 3391 if (!rotate)
0f5a2601
PZ
3392 goto done;
3393
facc4307 3394 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 3395 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 3396
e050e3f0
SE
3397 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3398 if (ctx)
3399 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 3400
e050e3f0
SE
3401 rotate_ctx(&cpuctx->ctx);
3402 if (ctx)
3403 rotate_ctx(ctx);
235c7fc7 3404
e050e3f0 3405 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 3406
0f5a2601
PZ
3407 perf_pmu_enable(cpuctx->ctx.pmu);
3408 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 3409done:
9e630205
SE
3410
3411 return rotate;
e9d2b064
PZ
3412}
3413
3414void perf_event_task_tick(void)
3415{
2fde4f94
MR
3416 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3417 struct perf_event_context *ctx, *tmp;
e050e3f0 3418 int throttled;
b5ab4cd5 3419
e9d2b064
PZ
3420 WARN_ON(!irqs_disabled());
3421
e050e3f0
SE
3422 __this_cpu_inc(perf_throttled_seq);
3423 throttled = __this_cpu_xchg(perf_throttled_count, 0);
555e0c1e 3424 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
e050e3f0 3425
2fde4f94 3426 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 3427 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
3428}
3429
889ff015
FW
3430static int event_enable_on_exec(struct perf_event *event,
3431 struct perf_event_context *ctx)
3432{
3433 if (!event->attr.enable_on_exec)
3434 return 0;
3435
3436 event->attr.enable_on_exec = 0;
3437 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3438 return 0;
3439
1d9b482e 3440 __perf_event_mark_enabled(event);
889ff015
FW
3441
3442 return 1;
3443}
3444
57e7986e 3445/*
cdd6c482 3446 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
3447 * This expects task == current.
3448 */
c1274499 3449static void perf_event_enable_on_exec(int ctxn)
57e7986e 3450{
c1274499 3451 struct perf_event_context *ctx, *clone_ctx = NULL;
3e349507 3452 struct perf_cpu_context *cpuctx;
cdd6c482 3453 struct perf_event *event;
57e7986e
PM
3454 unsigned long flags;
3455 int enabled = 0;
3456
3457 local_irq_save(flags);
c1274499 3458 ctx = current->perf_event_ctxp[ctxn];
cdd6c482 3459 if (!ctx || !ctx->nr_events)
57e7986e
PM
3460 goto out;
3461
3e349507
PZ
3462 cpuctx = __get_cpu_context(ctx);
3463 perf_ctx_lock(cpuctx, ctx);
7fce2509 3464 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3e349507
PZ
3465 list_for_each_entry(event, &ctx->event_list, event_entry)
3466 enabled |= event_enable_on_exec(event, ctx);
57e7986e
PM
3467
3468 /*
3e349507 3469 * Unclone and reschedule this context if we enabled any event.
57e7986e 3470 */
3e349507 3471 if (enabled) {
211de6eb 3472 clone_ctx = unclone_ctx(ctx);
3e349507
PZ
3473 ctx_resched(cpuctx, ctx);
3474 }
3475 perf_ctx_unlock(cpuctx, ctx);
57e7986e 3476
9ed6060d 3477out:
57e7986e 3478 local_irq_restore(flags);
211de6eb
PZ
3479
3480 if (clone_ctx)
3481 put_ctx(clone_ctx);
57e7986e
PM
3482}
3483
0492d4c5
PZ
3484struct perf_read_data {
3485 struct perf_event *event;
3486 bool group;
7d88962e 3487 int ret;
0492d4c5
PZ
3488};
3489
d6a2f903
DCC
3490static int find_cpu_to_read(struct perf_event *event, int local_cpu)
3491{
3492 int event_cpu = event->oncpu;
3493 u16 local_pkg, event_pkg;
3494
3495 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
3496 event_pkg = topology_physical_package_id(event_cpu);
3497 local_pkg = topology_physical_package_id(local_cpu);
3498
3499 if (event_pkg == local_pkg)
3500 return local_cpu;
3501 }
3502
3503 return event_cpu;
3504}
3505
0793a61d 3506/*
cdd6c482 3507 * Cross CPU call to read the hardware event
0793a61d 3508 */
cdd6c482 3509static void __perf_event_read(void *info)
0793a61d 3510{
0492d4c5
PZ
3511 struct perf_read_data *data = info;
3512 struct perf_event *sub, *event = data->event;
cdd6c482 3513 struct perf_event_context *ctx = event->ctx;
108b02cf 3514 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4a00c16e 3515 struct pmu *pmu = event->pmu;
621a01ea 3516
e1ac3614
PM
3517 /*
3518 * If this is a task context, we need to check whether it is
3519 * the current task context of this CPU. If not, it has been
3520 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3521 * event->count would have been updated to a recent sample
3522 * when the event was scheduled out.
e1ac3614
PM
3523 */
3524 if (ctx->task && cpuctx->task_ctx != ctx)
3525 return;
3526
e625cce1 3527 raw_spin_lock(&ctx->lock);
e5d1367f 3528 if (ctx->is_active) {
542e72fc 3529 update_context_time(ctx);
e5d1367f
SE
3530 update_cgrp_time_from_event(event);
3531 }
0492d4c5 3532
cdd6c482 3533 update_event_times(event);
4a00c16e
SB
3534 if (event->state != PERF_EVENT_STATE_ACTIVE)
3535 goto unlock;
0492d4c5 3536
4a00c16e
SB
3537 if (!data->group) {
3538 pmu->read(event);
3539 data->ret = 0;
0492d4c5 3540 goto unlock;
4a00c16e
SB
3541 }
3542
3543 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3544
3545 pmu->read(event);
0492d4c5
PZ
3546
3547 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3548 update_event_times(sub);
4a00c16e
SB
3549 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3550 /*
3551 * Use sibling's PMU rather than @event's since
3552 * the sibling could be on a different (e.g. software) PMU.
3553 */
0492d4c5 3554 sub->pmu->read(sub);
4a00c16e 3555 }
0492d4c5 3556 }
4a00c16e
SB
3557
3558 data->ret = pmu->commit_txn(pmu);
0492d4c5
PZ
3559
3560unlock:
e625cce1 3561 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3562}
3563
b5e58793
PZ
3564static inline u64 perf_event_count(struct perf_event *event)
3565{
eacd3ecc
MF
3566 if (event->pmu->count)
3567 return event->pmu->count(event);
3568
3569 return __perf_event_count(event);
b5e58793
PZ
3570}
3571
ffe8690c
KX
3572/*
3573 * NMI-safe method to read a local event, that is an event that
3574 * is:
3575 * - either for the current task, or for this CPU
3576 * - does not have inherit set, because inherited task events
3577 * will not be local and we cannot read them atomically
3578 * - must not have a pmu::count method
3579 */
3580u64 perf_event_read_local(struct perf_event *event)
3581{
3582 unsigned long flags;
3583 u64 val;
3584
3585 /*
3586 * Disabling interrupts avoids all counter scheduling (context
3587 * switches, timer based rotation and IPIs).
3588 */
3589 local_irq_save(flags);
3590
3591 /* If this is a per-task event, it must be for current */
3592 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3593 event->hw.target != current);
3594
3595 /* If this is a per-CPU event, it must be for this CPU */
3596 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3597 event->cpu != smp_processor_id());
3598
3599 /*
3600 * It must not be an event with inherit set, we cannot read
3601 * all child counters from atomic context.
3602 */
3603 WARN_ON_ONCE(event->attr.inherit);
3604
3605 /*
3606 * It must not have a pmu::count method, those are not
3607 * NMI safe.
3608 */
3609 WARN_ON_ONCE(event->pmu->count);
3610
3611 /*
3612 * If the event is currently on this CPU, it's either a per-task event,
3613 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
3614 * oncpu == -1).
3615 */
3616 if (event->oncpu == smp_processor_id())
3617 event->pmu->read(event);
3618
3619 val = local64_read(&event->count);
3620 local_irq_restore(flags);
3621
3622 return val;
3623}
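/*
 * Editor's illustrative sketch (not part of this file): a caller that
 * satisfies the constraints documented above -- e.g. reading its own
 * per-task, non-inherited event from hard-IRQ or NMI context. The
 * 'my_local_event' pointer and the function name are hypothetical.
 */
static u64 example_read_local_counter(struct perf_event *my_local_event)
{
	/* Safe even from NMI context per the rules above; no locks are taken. */
	return perf_event_read_local(my_local_event);
}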
3624
7d88962e 3625static int perf_event_read(struct perf_event *event, bool group)
0793a61d 3626{
d6a2f903 3627 int ret = 0, cpu_to_read, local_cpu;
7d88962e 3628
0793a61d 3629 /*
cdd6c482
IM
3630 * If event is enabled and currently active on a CPU, update the
3631 * value in the event structure:
0793a61d 3632 */
cdd6c482 3633 if (event->state == PERF_EVENT_STATE_ACTIVE) {
0492d4c5
PZ
3634 struct perf_read_data data = {
3635 .event = event,
3636 .group = group,
7d88962e 3637 .ret = 0,
0492d4c5 3638 };
d6a2f903
DCC
3639
3640 local_cpu = get_cpu();
3641 cpu_to_read = find_cpu_to_read(event, local_cpu);
3642 put_cpu();
3643
58763148
PZ
3644 /*
3645 * Purposely ignore the smp_call_function_single() return
3646 * value.
3647 *
3648 * If event->oncpu isn't a valid CPU it means the event got
3649 * scheduled out and that will have updated the event count.
3650 *
3651 * Therefore, either way, we'll have an up-to-date event count
3652 * after this.
3653 */
2cc53841 3654 (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
58763148 3655 ret = data.ret;
cdd6c482 3656 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3657 struct perf_event_context *ctx = event->ctx;
3658 unsigned long flags;
3659
e625cce1 3660 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3661 /*
3662 * We may read while the context is not active
3663 * (e.g., the thread is blocked); in that case
3664 * we cannot update the context time.
3665 */
e5d1367f 3666 if (ctx->is_active) {
c530ccd9 3667 update_context_time(ctx);
e5d1367f
SE
3668 update_cgrp_time_from_event(event);
3669 }
0492d4c5
PZ
3670 if (group)
3671 update_group_times(event);
3672 else
3673 update_event_times(event);
e625cce1 3674 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 3675 }
7d88962e
SB
3676
3677 return ret;
0793a61d
TG
3678}
3679
a63eaf34 3680/*
cdd6c482 3681 * Initialize the perf_event context in a task_struct:
a63eaf34 3682 */
eb184479 3683static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3684{
e625cce1 3685 raw_spin_lock_init(&ctx->lock);
a63eaf34 3686 mutex_init(&ctx->mutex);
2fde4f94 3687 INIT_LIST_HEAD(&ctx->active_ctx_list);
889ff015
FW
3688 INIT_LIST_HEAD(&ctx->pinned_groups);
3689 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3690 INIT_LIST_HEAD(&ctx->event_list);
3691 atomic_set(&ctx->refcount, 1);
eb184479
PZ
3692}
3693
3694static struct perf_event_context *
3695alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3696{
3697 struct perf_event_context *ctx;
3698
3699 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3700 if (!ctx)
3701 return NULL;
3702
3703 __perf_event_init_context(ctx);
3704 if (task) {
3705 ctx->task = task;
3706 get_task_struct(task);
0793a61d 3707 }
eb184479
PZ
3708 ctx->pmu = pmu;
3709
3710 return ctx;
a63eaf34
PM
3711}
3712
2ebd4ffb
MH
3713static struct task_struct *
3714find_lively_task_by_vpid(pid_t vpid)
3715{
3716 struct task_struct *task;
0793a61d
TG
3717
3718 rcu_read_lock();
2ebd4ffb 3719 if (!vpid)
0793a61d
TG
3720 task = current;
3721 else
2ebd4ffb 3722 task = find_task_by_vpid(vpid);
0793a61d
TG
3723 if (task)
3724 get_task_struct(task);
3725 rcu_read_unlock();
3726
3727 if (!task)
3728 return ERR_PTR(-ESRCH);
3729
2ebd4ffb 3730 return task;
2ebd4ffb
MH
3731}
3732
fe4b04fa
PZ
3733/*
3734 * Returns a matching context with refcount and pincount.
3735 */
108b02cf 3736static struct perf_event_context *
4af57ef2
YZ
3737find_get_context(struct pmu *pmu, struct task_struct *task,
3738 struct perf_event *event)
0793a61d 3739{
211de6eb 3740 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 3741 struct perf_cpu_context *cpuctx;
4af57ef2 3742 void *task_ctx_data = NULL;
25346b93 3743 unsigned long flags;
8dc85d54 3744 int ctxn, err;
4af57ef2 3745 int cpu = event->cpu;
0793a61d 3746
22a4ec72 3747 if (!task) {
cdd6c482 3748 /* Must be root to operate on a CPU event: */
0764771d 3749 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3750 return ERR_PTR(-EACCES);
3751
0793a61d 3752 /*
cdd6c482 3753 * We could be clever and allow attaching an event to an
0793a61d
TG
3754 * offline CPU and activate it when the CPU comes up, but
3755 * that's for later.
3756 */
f6325e30 3757 if (!cpu_online(cpu))
0793a61d
TG
3758 return ERR_PTR(-ENODEV);
3759
108b02cf 3760 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3761 ctx = &cpuctx->ctx;
c93f7669 3762 get_ctx(ctx);
fe4b04fa 3763 ++ctx->pin_count;
0793a61d 3764
0793a61d
TG
3765 return ctx;
3766 }
3767
8dc85d54
PZ
3768 err = -EINVAL;
3769 ctxn = pmu->task_ctx_nr;
3770 if (ctxn < 0)
3771 goto errout;
3772
4af57ef2
YZ
3773 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3774 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3775 if (!task_ctx_data) {
3776 err = -ENOMEM;
3777 goto errout;
3778 }
3779 }
3780
9ed6060d 3781retry:
8dc85d54 3782 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3783 if (ctx) {
211de6eb 3784 clone_ctx = unclone_ctx(ctx);
fe4b04fa 3785 ++ctx->pin_count;
4af57ef2
YZ
3786
3787 if (task_ctx_data && !ctx->task_ctx_data) {
3788 ctx->task_ctx_data = task_ctx_data;
3789 task_ctx_data = NULL;
3790 }
e625cce1 3791 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
3792
3793 if (clone_ctx)
3794 put_ctx(clone_ctx);
9137fb28 3795 } else {
eb184479 3796 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3797 err = -ENOMEM;
3798 if (!ctx)
3799 goto errout;
eb184479 3800
4af57ef2
YZ
3801 if (task_ctx_data) {
3802 ctx->task_ctx_data = task_ctx_data;
3803 task_ctx_data = NULL;
3804 }
3805
dbe08d82
ON
3806 err = 0;
3807 mutex_lock(&task->perf_event_mutex);
3808 /*
3809 * If it has already passed perf_event_exit_task(),
3810 * we must see PF_EXITING, it takes this mutex too.
3811 */
3812 if (task->flags & PF_EXITING)
3813 err = -ESRCH;
3814 else if (task->perf_event_ctxp[ctxn])
3815 err = -EAGAIN;
fe4b04fa 3816 else {
9137fb28 3817 get_ctx(ctx);
fe4b04fa 3818 ++ctx->pin_count;
dbe08d82 3819 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3820 }
dbe08d82
ON
3821 mutex_unlock(&task->perf_event_mutex);
3822
3823 if (unlikely(err)) {
9137fb28 3824 put_ctx(ctx);
dbe08d82
ON
3825
3826 if (err == -EAGAIN)
3827 goto retry;
3828 goto errout;
a63eaf34
PM
3829 }
3830 }
3831
4af57ef2 3832 kfree(task_ctx_data);
0793a61d 3833 return ctx;
c93f7669 3834
9ed6060d 3835errout:
4af57ef2 3836 kfree(task_ctx_data);
c93f7669 3837 return ERR_PTR(err);
0793a61d
TG
3838}
3839
6fb2915d 3840static void perf_event_free_filter(struct perf_event *event);
2541517c 3841static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 3842
cdd6c482 3843static void free_event_rcu(struct rcu_head *head)
592903cd 3844{
cdd6c482 3845 struct perf_event *event;
592903cd 3846
cdd6c482
IM
3847 event = container_of(head, struct perf_event, rcu_head);
3848 if (event->ns)
3849 put_pid_ns(event->ns);
6fb2915d 3850 perf_event_free_filter(event);
cdd6c482 3851 kfree(event);
592903cd
PZ
3852}
3853
b69cf536
PZ
3854static void ring_buffer_attach(struct perf_event *event,
3855 struct ring_buffer *rb);
925d519a 3856
f2fb6bef
KL
3857static void detach_sb_event(struct perf_event *event)
3858{
3859 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3860
3861 raw_spin_lock(&pel->lock);
3862 list_del_rcu(&event->sb_list);
3863 raw_spin_unlock(&pel->lock);
3864}
3865
a4f144eb 3866static bool is_sb_event(struct perf_event *event)
f2fb6bef 3867{
a4f144eb
DCC
3868 struct perf_event_attr *attr = &event->attr;
3869
f2fb6bef 3870 if (event->parent)
a4f144eb 3871 return false;
f2fb6bef
KL
3872
3873 if (event->attach_state & PERF_ATTACH_TASK)
a4f144eb 3874 return false;
f2fb6bef 3875
a4f144eb
DCC
3876 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3877 attr->comm || attr->comm_exec ||
3878 attr->task ||
3879 attr->context_switch)
3880 return true;
3881 return false;
3882}
3883
3884static void unaccount_pmu_sb_event(struct perf_event *event)
3885{
3886 if (is_sb_event(event))
3887 detach_sb_event(event);
f2fb6bef
KL
3888}
3889
4beb31f3 3890static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3891{
4beb31f3
FW
3892 if (event->parent)
3893 return;
3894
4beb31f3
FW
3895 if (is_cgroup_event(event))
3896 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3897}
925d519a 3898
555e0c1e
FW
3899#ifdef CONFIG_NO_HZ_FULL
3900static DEFINE_SPINLOCK(nr_freq_lock);
3901#endif
3902
3903static void unaccount_freq_event_nohz(void)
3904{
3905#ifdef CONFIG_NO_HZ_FULL
3906 spin_lock(&nr_freq_lock);
3907 if (atomic_dec_and_test(&nr_freq_events))
3908 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3909 spin_unlock(&nr_freq_lock);
3910#endif
3911}
3912
3913static void unaccount_freq_event(void)
3914{
3915 if (tick_nohz_full_enabled())
3916 unaccount_freq_event_nohz();
3917 else
3918 atomic_dec(&nr_freq_events);
3919}
3920
4beb31f3
FW
3921static void unaccount_event(struct perf_event *event)
3922{
25432ae9
PZ
3923 bool dec = false;
3924
4beb31f3
FW
3925 if (event->parent)
3926 return;
3927
3928 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 3929 dec = true;
4beb31f3
FW
3930 if (event->attr.mmap || event->attr.mmap_data)
3931 atomic_dec(&nr_mmap_events);
3932 if (event->attr.comm)
3933 atomic_dec(&nr_comm_events);
3934 if (event->attr.task)
3935 atomic_dec(&nr_task_events);
948b26b6 3936 if (event->attr.freq)
555e0c1e 3937 unaccount_freq_event();
45ac1403 3938 if (event->attr.context_switch) {
25432ae9 3939 dec = true;
45ac1403
AH
3940 atomic_dec(&nr_switch_events);
3941 }
4beb31f3 3942 if (is_cgroup_event(event))
25432ae9 3943 dec = true;
4beb31f3 3944 if (has_branch_stack(event))
25432ae9
PZ
3945 dec = true;
3946
9107c89e
PZ
3947 if (dec) {
3948 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3949 schedule_delayed_work(&perf_sched_work, HZ);
3950 }
4beb31f3
FW
3951
3952 unaccount_event_cpu(event, event->cpu);
f2fb6bef
KL
3953
3954 unaccount_pmu_sb_event(event);
4beb31f3 3955}
925d519a 3956
9107c89e
PZ
3957static void perf_sched_delayed(struct work_struct *work)
3958{
3959 mutex_lock(&perf_sched_mutex);
3960 if (atomic_dec_and_test(&perf_sched_count))
3961 static_branch_disable(&perf_sched_events);
3962 mutex_unlock(&perf_sched_mutex);
3963}
3964
bed5b25a
AS
3965/*
3966 * The following implement mutual exclusion of events on "exclusive" pmus
3967 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3968 * at a time, so we disallow creating events that might conflict, namely:
3969 *
3970 * 1) cpu-wide events in the presence of per-task events,
3971 * 2) per-task events in the presence of cpu-wide events,
3972 * 3) two matching events on the same context.
3973 *
3974 * The former two cases are handled in the allocation path (perf_event_alloc(),
a0733e69 3975 * _free_event()), the latter -- before the first perf_install_in_context().
bed5b25a
AS
3976 */
3977static int exclusive_event_init(struct perf_event *event)
3978{
3979 struct pmu *pmu = event->pmu;
3980
3981 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3982 return 0;
3983
3984 /*
3985 * Prevent co-existence of per-task and cpu-wide events on the
3986 * same exclusive pmu.
3987 *
3988 * Negative pmu::exclusive_cnt means there are cpu-wide
3989 * events on this "exclusive" pmu, positive means there are
3990 * per-task events.
3991 *
3992 * Since this is called in perf_event_alloc() path, event::ctx
3993 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3994 * to mean "per-task event", because unlike other attach states it
3995 * never gets cleared.
3996 */
3997 if (event->attach_state & PERF_ATTACH_TASK) {
3998 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3999 return -EBUSY;
4000 } else {
4001 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
4002 return -EBUSY;
4003 }
4004
4005 return 0;
4006}
4007
4008static void exclusive_event_destroy(struct perf_event *event)
4009{
4010 struct pmu *pmu = event->pmu;
4011
4012 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4013 return;
4014
4015 /* see comment in exclusive_event_init() */
4016 if (event->attach_state & PERF_ATTACH_TASK)
4017 atomic_dec(&pmu->exclusive_cnt);
4018 else
4019 atomic_inc(&pmu->exclusive_cnt);
4020}
4021
4022static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
4023{
3bf6215a 4024 if ((e1->pmu == e2->pmu) &&
bed5b25a
AS
4025 (e1->cpu == e2->cpu ||
4026 e1->cpu == -1 ||
4027 e2->cpu == -1))
4028 return true;
4029 return false;
4030}
4031
4032/* Called under the same ctx::mutex as perf_install_in_context() */
4033static bool exclusive_event_installable(struct perf_event *event,
4034 struct perf_event_context *ctx)
4035{
4036 struct perf_event *iter_event;
4037 struct pmu *pmu = event->pmu;
4038
4039 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4040 return true;
4041
4042 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
4043 if (exclusive_event_match(iter_event, event))
4044 return false;
4045 }
4046
4047 return true;
4048}
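/*
 * Editor's illustrative sketch of the sign convention used by
 * pmu::exclusive_cnt above: per-task events drive the counter positive,
 * cpu-wide events drive it negative, and each side backs off if the
 * other already holds the pmu. Function and parameter names are
 * hypothetical.
 */
static int example_take_exclusive_slot(atomic_t *exclusive_cnt, bool per_task)
{
	if (per_task) {
		/* Fails if cpu-wide events already made the count negative. */
		if (!atomic_inc_unless_negative(exclusive_cnt))
			return -EBUSY;
	} else {
		/* Fails if per-task events already made the count positive. */
		if (!atomic_dec_unless_positive(exclusive_cnt))
			return -EBUSY;
	}
	return 0;
}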
4049
375637bc
AS
4050static void perf_addr_filters_splice(struct perf_event *event,
4051 struct list_head *head);
4052
683ede43 4053static void _free_event(struct perf_event *event)
f1600952 4054{
e360adbe 4055 irq_work_sync(&event->pending);
925d519a 4056
4beb31f3 4057 unaccount_event(event);
9ee318a7 4058
76369139 4059 if (event->rb) {
9bb5d40c
PZ
4060 /*
4061 * Can happen when we close an event with re-directed output.
4062 *
4063 * Since we have a 0 refcount, perf_mmap_close() will skip
4064 * over us; possibly making our ring_buffer_put() the last.
4065 */
4066 mutex_lock(&event->mmap_mutex);
b69cf536 4067 ring_buffer_attach(event, NULL);
9bb5d40c 4068 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
4069 }
4070
e5d1367f
SE
4071 if (is_cgroup_event(event))
4072 perf_detach_cgroup(event);
4073
a0733e69
PZ
4074 if (!event->parent) {
4075 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4076 put_callchain_buffers();
4077 }
4078
4079 perf_event_free_bpf_prog(event);
375637bc
AS
4080 perf_addr_filters_splice(event, NULL);
4081 kfree(event->addr_filters_offs);
a0733e69
PZ
4082
4083 if (event->destroy)
4084 event->destroy(event);
4085
4086 if (event->ctx)
4087 put_ctx(event->ctx);
4088
62a92c8f
AS
4089 exclusive_event_destroy(event);
4090 module_put(event->pmu->module);
a0733e69
PZ
4091
4092 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
4093}
4094
683ede43
PZ
4095/*
4096 * Used to free events which have a known refcount of 1, such as in error paths
4097 * where the event isn't exposed yet, and for inherited events.
4098 */
4099static void free_event(struct perf_event *event)
0793a61d 4100{
683ede43
PZ
4101 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4102 "unexpected event refcount: %ld; ptr=%p\n",
4103 atomic_long_read(&event->refcount), event)) {
4104 /* leak to avoid use-after-free */
4105 return;
4106 }
0793a61d 4107
683ede43 4108 _free_event(event);
0793a61d
TG
4109}
4110
a66a3052 4111/*
f8697762 4112 * Remove user event from the owner task.
a66a3052 4113 */
f8697762 4114static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 4115{
8882135b 4116 struct task_struct *owner;
fb0459d7 4117
8882135b 4118 rcu_read_lock();
8882135b 4119 /*
f47c02c0
PZ
4120 * Matches the smp_store_release() in perf_event_exit_task(). If we
4121 * observe !owner it means the list deletion is complete and we can
4122 * indeed free this event, otherwise we need to serialize on
8882135b
PZ
4123 * owner->perf_event_mutex.
4124 */
f47c02c0 4125 owner = lockless_dereference(event->owner);
8882135b
PZ
4126 if (owner) {
4127 /*
4128 * Since delayed_put_task_struct() also drops the last
4129 * task reference we can safely take a new reference
4130 * while holding the rcu_read_lock().
4131 */
4132 get_task_struct(owner);
4133 }
4134 rcu_read_unlock();
4135
4136 if (owner) {
f63a8daa
PZ
4137 /*
4138 * If we're here through perf_event_exit_task() we're already
4139 * holding ctx->mutex which would be an inversion wrt. the
4140 * normal lock order.
4141 *
4142 * However we can safely take this lock because it's the child
4143 * ctx->mutex.
4144 */
4145 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4146
8882135b
PZ
4147 /*
4148 * We have to re-check the event->owner field, if it is cleared
4149 * we raced with perf_event_exit_task(), acquiring the mutex
4150 * ensured they're done, and we can proceed with freeing the
4151 * event.
4152 */
f47c02c0 4153 if (event->owner) {
8882135b 4154 list_del_init(&event->owner_entry);
f47c02c0
PZ
4155 smp_store_release(&event->owner, NULL);
4156 }
8882135b
PZ
4157 mutex_unlock(&owner->perf_event_mutex);
4158 put_task_struct(owner);
4159 }
f8697762
JO
4160}
4161
f8697762
JO
4162static void put_event(struct perf_event *event)
4163{
f8697762
JO
4164 if (!atomic_long_dec_and_test(&event->refcount))
4165 return;
4166
c6e5b732
PZ
4167 _free_event(event);
4168}
4169
4170/*
4171 * Kill an event dead; while event::refcount will preserve the event
4172 * object, it will not preserve its functionality. Once the last 'user'
4173 * gives up the object, we'll destroy the thing.
4174 */
4175int perf_event_release_kernel(struct perf_event *event)
4176{
a4f4bb6d 4177 struct perf_event_context *ctx = event->ctx;
c6e5b732
PZ
4178 struct perf_event *child, *tmp;
4179
a4f4bb6d
PZ
4180 /*
4181 * If we got here through err_file: fput(event_file); we will not have
4182 * attached to a context yet.
4183 */
4184 if (!ctx) {
4185 WARN_ON_ONCE(event->attach_state &
4186 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4187 goto no_ctx;
4188 }
4189
f8697762
JO
4190 if (!is_kernel_event(event))
4191 perf_remove_from_owner(event);
8882135b 4192
5fa7c8ec 4193 ctx = perf_event_ctx_lock(event);
a83fe28e 4194 WARN_ON_ONCE(ctx->parent_ctx);
a69b0ca4 4195 perf_remove_from_context(event, DETACH_GROUP);
683ede43 4196
a69b0ca4 4197 raw_spin_lock_irq(&ctx->lock);
683ede43 4198 /*
a69b0ca4
PZ
4199 * Mark this event as STATE_DEAD; there is no external reference to it
4200 * anymore.
683ede43 4201 *
a69b0ca4
PZ
4202 * Anybody acquiring event->child_mutex after the below loop _must_
4203 * also see this, most importantly inherit_event() which will avoid
4204 * placing more children on the list.
683ede43 4205 *
c6e5b732
PZ
4206 * Thus this guarantees that we will in fact observe and kill _ALL_
4207 * child events.
683ede43 4208 */
a69b0ca4
PZ
4209 event->state = PERF_EVENT_STATE_DEAD;
4210 raw_spin_unlock_irq(&ctx->lock);
4211
4212 perf_event_ctx_unlock(event, ctx);
683ede43 4213
c6e5b732
PZ
4214again:
4215 mutex_lock(&event->child_mutex);
4216 list_for_each_entry(child, &event->child_list, child_list) {
a6fa941d 4217
c6e5b732
PZ
4218 /*
4219 * Cannot change, child events are not migrated, see the
4220 * comment with perf_event_ctx_lock_nested().
4221 */
4222 ctx = lockless_dereference(child->ctx);
4223 /*
4224 * Since child_mutex nests inside ctx::mutex, we must jump
4225 * through hoops. We start by grabbing a reference on the ctx.
4226 *
4227 * Since the event cannot get freed while we hold the
4228 * child_mutex, the context must also exist and have a !0
4229 * reference count.
4230 */
4231 get_ctx(ctx);
4232
4233 /*
4234 * Now that we have a ctx ref, we can drop child_mutex, and
4235 * acquire ctx::mutex without fear of it going away. Then we
4236 * can re-acquire child_mutex.
4237 */
4238 mutex_unlock(&event->child_mutex);
4239 mutex_lock(&ctx->mutex);
4240 mutex_lock(&event->child_mutex);
4241
4242 /*
4243 * Now that we hold ctx::mutex and child_mutex, revalidate our
4244 * state, if child is still the first entry, it didn't get freed
4245 * and we can continue doing so.
4246 */
4247 tmp = list_first_entry_or_null(&event->child_list,
4248 struct perf_event, child_list);
4249 if (tmp == child) {
4250 perf_remove_from_context(child, DETACH_GROUP);
4251 list_del(&child->child_list);
4252 free_event(child);
4253 /*
4254 * This matches the refcount bump in inherit_event();
4255 * this can't be the last reference.
4256 */
4257 put_event(event);
4258 }
4259
4260 mutex_unlock(&event->child_mutex);
4261 mutex_unlock(&ctx->mutex);
4262 put_ctx(ctx);
4263 goto again;
4264 }
4265 mutex_unlock(&event->child_mutex);
4266
a4f4bb6d
PZ
4267no_ctx:
4268 put_event(event); /* Must be the 'last' reference */
683ede43
PZ
4269 return 0;
4270}
4271EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4272
8b10c5e2
PZ
4273/*
4274 * Called when the last reference to the file is gone.
4275 */
a6fa941d
AV
4276static int perf_release(struct inode *inode, struct file *file)
4277{
c6e5b732 4278 perf_event_release_kernel(file->private_data);
a6fa941d 4279 return 0;
fb0459d7 4280}
fb0459d7 4281
59ed446f 4282u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 4283{
cdd6c482 4284 struct perf_event *child;
e53c0994
PZ
4285 u64 total = 0;
4286
59ed446f
PZ
4287 *enabled = 0;
4288 *running = 0;
4289
6f10581a 4290 mutex_lock(&event->child_mutex);
01add3ea 4291
7d88962e 4292 (void)perf_event_read(event, false);
01add3ea
SB
4293 total += perf_event_count(event);
4294
59ed446f
PZ
4295 *enabled += event->total_time_enabled +
4296 atomic64_read(&event->child_total_time_enabled);
4297 *running += event->total_time_running +
4298 atomic64_read(&event->child_total_time_running);
4299
4300 list_for_each_entry(child, &event->child_list, child_list) {
7d88962e 4301 (void)perf_event_read(child, false);
01add3ea 4302 total += perf_event_count(child);
59ed446f
PZ
4303 *enabled += child->total_time_enabled;
4304 *running += child->total_time_running;
4305 }
6f10581a 4306 mutex_unlock(&event->child_mutex);
e53c0994
PZ
4307
4308 return total;
4309}
fb0459d7 4310EXPORT_SYMBOL_GPL(perf_event_read_value);
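/*
 * Editor's illustrative sketch (not part of this file): a typical
 * in-kernel user of perf_event_read_value(), for instance on an event
 * obtained earlier via perf_event_create_kernel_counter(). The 'ev'
 * argument and the function name are hypothetical.
 */
static void example_report_counter(struct perf_event *ev)
{
	u64 enabled, running, count;

	count = perf_event_read_value(ev, &enabled, &running);
	pr_info("count=%llu enabled=%llu running=%llu\n",
		(unsigned long long)count,
		(unsigned long long)enabled,
		(unsigned long long)running);
}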
e53c0994 4311
7d88962e 4312static int __perf_read_group_add(struct perf_event *leader,
fa8c2693 4313 u64 read_format, u64 *values)
3dab77fb 4314{
fa8c2693
PZ
4315 struct perf_event *sub;
4316 int n = 1; /* skip @nr */
7d88962e 4317 int ret;
f63a8daa 4318
7d88962e
SB
4319 ret = perf_event_read(leader, true);
4320 if (ret)
4321 return ret;
abf4868b 4322
fa8c2693
PZ
4323 /*
4324 * Since we co-schedule groups, {enabled,running} times of siblings
4325 * will be identical to those of the leader, so we only publish one
4326 * set.
4327 */
4328 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4329 values[n++] += leader->total_time_enabled +
4330 atomic64_read(&leader->child_total_time_enabled);
4331 }
3dab77fb 4332
fa8c2693
PZ
4333 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4334 values[n++] += leader->total_time_running +
4335 atomic64_read(&leader->child_total_time_running);
4336 }
4337
4338 /*
4339 * Write {count,id} tuples for every sibling.
4340 */
4341 values[n++] += perf_event_count(leader);
abf4868b
PZ
4342 if (read_format & PERF_FORMAT_ID)
4343 values[n++] = primary_event_id(leader);
3dab77fb 4344
fa8c2693
PZ
4345 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4346 values[n++] += perf_event_count(sub);
4347 if (read_format & PERF_FORMAT_ID)
4348 values[n++] = primary_event_id(sub);
4349 }
7d88962e
SB
4350
4351 return 0;
fa8c2693 4352}
3dab77fb 4353
fa8c2693
PZ
4354static int perf_read_group(struct perf_event *event,
4355 u64 read_format, char __user *buf)
4356{
4357 struct perf_event *leader = event->group_leader, *child;
4358 struct perf_event_context *ctx = leader->ctx;
7d88962e 4359 int ret;
fa8c2693 4360 u64 *values;
3dab77fb 4361
fa8c2693 4362 lockdep_assert_held(&ctx->mutex);
3dab77fb 4363
fa8c2693
PZ
4364 values = kzalloc(event->read_size, GFP_KERNEL);
4365 if (!values)
4366 return -ENOMEM;
3dab77fb 4367
fa8c2693
PZ
4368 values[0] = 1 + leader->nr_siblings;
4369
4370 /*
4371 * By locking the child_mutex of the leader we effectively
4372 * lock the child list of all siblings. XXX explain how.
4373 */
4374 mutex_lock(&leader->child_mutex);
abf4868b 4375
7d88962e
SB
4376 ret = __perf_read_group_add(leader, read_format, values);
4377 if (ret)
4378 goto unlock;
4379
4380 list_for_each_entry(child, &leader->child_list, child_list) {
4381 ret = __perf_read_group_add(child, read_format, values);
4382 if (ret)
4383 goto unlock;
4384 }
abf4868b 4385
fa8c2693 4386 mutex_unlock(&leader->child_mutex);
abf4868b 4387
7d88962e 4388 ret = event->read_size;
fa8c2693
PZ
4389 if (copy_to_user(buf, values, event->read_size))
4390 ret = -EFAULT;
7d88962e 4391 goto out;
fa8c2693 4392
7d88962e
SB
4393unlock:
4394 mutex_unlock(&leader->child_mutex);
4395out:
fa8c2693 4396 kfree(values);
abf4868b 4397 return ret;
3dab77fb
PZ
4398}
4399
b15f495b 4400static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
4401 u64 read_format, char __user *buf)
4402{
59ed446f 4403 u64 enabled, running;
3dab77fb
PZ
4404 u64 values[4];
4405 int n = 0;
4406
59ed446f
PZ
4407 values[n++] = perf_event_read_value(event, &enabled, &running);
4408 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4409 values[n++] = enabled;
4410 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4411 values[n++] = running;
3dab77fb 4412 if (read_format & PERF_FORMAT_ID)
cdd6c482 4413 values[n++] = primary_event_id(event);
3dab77fb
PZ
4414
4415 if (copy_to_user(buf, values, n * sizeof(u64)))
4416 return -EFAULT;
4417
4418 return n * sizeof(u64);
4419}
4420
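/*
 * Editor's illustrative sketch (userspace, not part of this file): consuming
 * the buffer layout produced by perf_read_group() above for a leader opened
 * with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID. Assumes the usual
 * userspace headers (<stdio.h>, <unistd.h>, <linux/perf_event.h>); names are
 * hypothetical and error handling is elided.
 */
struct example_group_read {
	__u64 nr;
	struct {
		__u64 value;
		__u64 id;
	} cnt[16];			/* large enough for this example */
};

static void example_print_group(int group_fd)
{
	struct example_group_read buf;
	__u64 i;

	if (read(group_fd, &buf, sizeof(buf)) <= 0)
		return;

	for (i = 0; i < buf.nr; i++)
		printf("id %llu: %llu\n",
		       (unsigned long long)buf.cnt[i].id,
		       (unsigned long long)buf.cnt[i].value);
}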
dc633982
JO
4421static bool is_event_hup(struct perf_event *event)
4422{
4423 bool no_children;
4424
a69b0ca4 4425 if (event->state > PERF_EVENT_STATE_EXIT)
dc633982
JO
4426 return false;
4427
4428 mutex_lock(&event->child_mutex);
4429 no_children = list_empty(&event->child_list);
4430 mutex_unlock(&event->child_mutex);
4431 return no_children;
4432}
4433
0793a61d 4434/*
cdd6c482 4435 * Read the performance event - simple non blocking version for now
0793a61d
TG
4436 */
4437static ssize_t
b15f495b 4438__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 4439{
cdd6c482 4440 u64 read_format = event->attr.read_format;
3dab77fb 4441 int ret;
0793a61d 4442
3b6f9e5c 4443 /*
cdd6c482 4444 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
4445 * error state (i.e. because it was pinned but it couldn't be
4446 * scheduled on to the CPU at some point).
4447 */
cdd6c482 4448 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
4449 return 0;
4450
c320c7b7 4451 if (count < event->read_size)
3dab77fb
PZ
4452 return -ENOSPC;
4453
cdd6c482 4454 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 4455 if (read_format & PERF_FORMAT_GROUP)
b15f495b 4456 ret = perf_read_group(event, read_format, buf);
3dab77fb 4457 else
b15f495b 4458 ret = perf_read_one(event, read_format, buf);
0793a61d 4459
3dab77fb 4460 return ret;
0793a61d
TG
4461}
4462
0793a61d
TG
4463static ssize_t
4464perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4465{
cdd6c482 4466 struct perf_event *event = file->private_data;
f63a8daa
PZ
4467 struct perf_event_context *ctx;
4468 int ret;
0793a61d 4469
f63a8daa 4470 ctx = perf_event_ctx_lock(event);
b15f495b 4471 ret = __perf_read(event, buf, count);
f63a8daa
PZ
4472 perf_event_ctx_unlock(event, ctx);
4473
4474 return ret;
0793a61d
TG
4475}
4476
4477static unsigned int perf_poll(struct file *file, poll_table *wait)
4478{
cdd6c482 4479 struct perf_event *event = file->private_data;
76369139 4480 struct ring_buffer *rb;
61b67684 4481 unsigned int events = POLLHUP;
c7138f37 4482
e708d7ad 4483 poll_wait(file, &event->waitq, wait);
179033b3 4484
dc633982 4485 if (is_event_hup(event))
179033b3 4486 return events;
c7138f37 4487
10c6db11 4488 /*
9bb5d40c
PZ
4489 * Pin the event->rb by taking event->mmap_mutex; otherwise
4490 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
4491 */
4492 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
4493 rb = event->rb;
4494 if (rb)
76369139 4495 events = atomic_xchg(&rb->poll, 0);
10c6db11 4496 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
4497 return events;
4498}
4499
f63a8daa 4500static void _perf_event_reset(struct perf_event *event)
6de6a7b9 4501{
7d88962e 4502 (void)perf_event_read(event, false);
e7850595 4503 local64_set(&event->count, 0);
cdd6c482 4504 perf_event_update_userpage(event);
3df5edad
PZ
4505}
4506
c93f7669 4507/*
cdd6c482
IM
4508 * Holding the top-level event's child_mutex means that any
4509 * descendant process that has inherited this event will block
8ba289b8 4510 * in perf_event_exit_event() if it goes to exit, thus satisfying the
cdd6c482 4511 * task existence requirements of perf_event_enable/disable.
c93f7669 4512 */
cdd6c482
IM
4513static void perf_event_for_each_child(struct perf_event *event,
4514 void (*func)(struct perf_event *))
3df5edad 4515{
cdd6c482 4516 struct perf_event *child;
3df5edad 4517
cdd6c482 4518 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 4519
cdd6c482
IM
4520 mutex_lock(&event->child_mutex);
4521 func(event);
4522 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 4523 func(child);
cdd6c482 4524 mutex_unlock(&event->child_mutex);
3df5edad
PZ
4525}
4526
cdd6c482
IM
4527static void perf_event_for_each(struct perf_event *event,
4528 void (*func)(struct perf_event *))
3df5edad 4529{
cdd6c482
IM
4530 struct perf_event_context *ctx = event->ctx;
4531 struct perf_event *sibling;
3df5edad 4532
f63a8daa
PZ
4533 lockdep_assert_held(&ctx->mutex);
4534
cdd6c482 4535 event = event->group_leader;
75f937f2 4536
cdd6c482 4537 perf_event_for_each_child(event, func);
cdd6c482 4538 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 4539 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
4540}
4541
fae3fde6
PZ
4542static void __perf_event_period(struct perf_event *event,
4543 struct perf_cpu_context *cpuctx,
4544 struct perf_event_context *ctx,
4545 void *info)
c7999c6f 4546{
fae3fde6 4547 u64 value = *((u64 *)info);
c7999c6f 4548 bool active;
08247e31 4549
cdd6c482 4550 if (event->attr.freq) {
cdd6c482 4551 event->attr.sample_freq = value;
08247e31 4552 } else {
cdd6c482
IM
4553 event->attr.sample_period = value;
4554 event->hw.sample_period = value;
08247e31 4555 }
bad7192b
PZ
4556
4557 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4558 if (active) {
4559 perf_pmu_disable(ctx->pmu);
1e02cd40
PZ
4560 /*
4561 * We could be throttled; unthrottle now to avoid the tick
4562 * trying to unthrottle while we already re-started the event.
4563 */
4564 if (event->hw.interrupts == MAX_INTERRUPTS) {
4565 event->hw.interrupts = 0;
4566 perf_log_throttle(event, 1);
4567 }
bad7192b
PZ
4568 event->pmu->stop(event, PERF_EF_UPDATE);
4569 }
4570
4571 local64_set(&event->hw.period_left, 0);
4572
4573 if (active) {
4574 event->pmu->start(event, PERF_EF_RELOAD);
4575 perf_pmu_enable(ctx->pmu);
4576 }
c7999c6f
PZ
4577}
4578
4579static int perf_event_period(struct perf_event *event, u64 __user *arg)
4580{
c7999c6f
PZ
4581 u64 value;
4582
4583 if (!is_sampling_event(event))
4584 return -EINVAL;
4585
4586 if (copy_from_user(&value, arg, sizeof(value)))
4587 return -EFAULT;
4588
4589 if (!value)
4590 return -EINVAL;
4591
4592 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4593 return -EINVAL;
4594
fae3fde6 4595 event_function_call(event, __perf_event_period, &value);
08247e31 4596
c7999c6f 4597 return 0;
08247e31
PZ
4598}
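/*
 * Editor's illustrative sketch (userspace, not part of this file): changing
 * the sample period of an open sampling event via the ioctl handled above.
 * 'fd' is a perf_event_open() file descriptor; the function name is
 * hypothetical.
 */
static int example_set_sample_period(int fd, unsigned long long period)
{
	/* The argument is a pointer to the new period, as copied in above. */
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}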
4599
ac9721f3
PZ
4600static const struct file_operations perf_fops;
4601
2903ff01 4602static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 4603{
2903ff01
AV
4604 struct fd f = fdget(fd);
4605 if (!f.file)
4606 return -EBADF;
ac9721f3 4607
2903ff01
AV
4608 if (f.file->f_op != &perf_fops) {
4609 fdput(f);
4610 return -EBADF;
ac9721f3 4611 }
2903ff01
AV
4612 *p = f;
4613 return 0;
ac9721f3
PZ
4614}
4615
4616static int perf_event_set_output(struct perf_event *event,
4617 struct perf_event *output_event);
6fb2915d 4618static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 4619static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
a4be7c27 4620
f63a8daa 4621static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 4622{
cdd6c482 4623 void (*func)(struct perf_event *);
3df5edad 4624 u32 flags = arg;
d859e29f
PM
4625
4626 switch (cmd) {
cdd6c482 4627 case PERF_EVENT_IOC_ENABLE:
f63a8daa 4628 func = _perf_event_enable;
d859e29f 4629 break;
cdd6c482 4630 case PERF_EVENT_IOC_DISABLE:
f63a8daa 4631 func = _perf_event_disable;
79f14641 4632 break;
cdd6c482 4633 case PERF_EVENT_IOC_RESET:
f63a8daa 4634 func = _perf_event_reset;
6de6a7b9 4635 break;
3df5edad 4636
cdd6c482 4637 case PERF_EVENT_IOC_REFRESH:
f63a8daa 4638 return _perf_event_refresh(event, arg);
08247e31 4639
cdd6c482
IM
4640 case PERF_EVENT_IOC_PERIOD:
4641 return perf_event_period(event, (u64 __user *)arg);
08247e31 4642
cf4957f1
JO
4643 case PERF_EVENT_IOC_ID:
4644 {
4645 u64 id = primary_event_id(event);
4646
4647 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4648 return -EFAULT;
4649 return 0;
4650 }
4651
cdd6c482 4652 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 4653 {
ac9721f3 4654 int ret;
ac9721f3 4655 if (arg != -1) {
2903ff01
AV
4656 struct perf_event *output_event;
4657 struct fd output;
4658 ret = perf_fget_light(arg, &output);
4659 if (ret)
4660 return ret;
4661 output_event = output.file->private_data;
4662 ret = perf_event_set_output(event, output_event);
4663 fdput(output);
4664 } else {
4665 ret = perf_event_set_output(event, NULL);
ac9721f3 4666 }
ac9721f3
PZ
4667 return ret;
4668 }
a4be7c27 4669
6fb2915d
LZ
4670 case PERF_EVENT_IOC_SET_FILTER:
4671 return perf_event_set_filter(event, (void __user *)arg);
4672
2541517c
AS
4673 case PERF_EVENT_IOC_SET_BPF:
4674 return perf_event_set_bpf_prog(event, arg);
4675
86e7972f
WN
4676 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4677 struct ring_buffer *rb;
4678
4679 rcu_read_lock();
4680 rb = rcu_dereference(event->rb);
4681 if (!rb || !rb->nr_pages) {
4682 rcu_read_unlock();
4683 return -EINVAL;
4684 }
4685 rb_toggle_paused(rb, !!arg);
4686 rcu_read_unlock();
4687 return 0;
4688 }
d859e29f 4689 default:
3df5edad 4690 return -ENOTTY;
d859e29f 4691 }
3df5edad
PZ
4692
4693 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 4694 perf_event_for_each(event, func);
3df5edad 4695 else
cdd6c482 4696 perf_event_for_each_child(event, func);
3df5edad
PZ
4697
4698 return 0;
d859e29f
PM
4699}
4700
f63a8daa
PZ
4701static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4702{
4703 struct perf_event *event = file->private_data;
4704 struct perf_event_context *ctx;
4705 long ret;
4706
4707 ctx = perf_event_ctx_lock(event);
4708 ret = _perf_ioctl(event, cmd, arg);
4709 perf_event_ctx_unlock(event, ctx);
4710
4711 return ret;
4712}
4713
b3f20785
PM
4714#ifdef CONFIG_COMPAT
4715static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4716 unsigned long arg)
4717{
4718 switch (_IOC_NR(cmd)) {
4719 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4720 case _IOC_NR(PERF_EVENT_IOC_ID):
4721 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4722 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4723 cmd &= ~IOCSIZE_MASK;
4724 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4725 }
4726 break;
4727 }
4728 return perf_ioctl(file, cmd, arg);
4729}
4730#else
4731# define perf_compat_ioctl NULL
4732#endif
4733
cdd6c482 4734int perf_event_task_enable(void)
771d7cde 4735{
f63a8daa 4736 struct perf_event_context *ctx;
cdd6c482 4737 struct perf_event *event;
771d7cde 4738
cdd6c482 4739 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4740 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4741 ctx = perf_event_ctx_lock(event);
4742 perf_event_for_each_child(event, _perf_event_enable);
4743 perf_event_ctx_unlock(event, ctx);
4744 }
cdd6c482 4745 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4746
4747 return 0;
4748}
4749
cdd6c482 4750int perf_event_task_disable(void)
771d7cde 4751{
f63a8daa 4752 struct perf_event_context *ctx;
cdd6c482 4753 struct perf_event *event;
771d7cde 4754
cdd6c482 4755 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4756 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4757 ctx = perf_event_ctx_lock(event);
4758 perf_event_for_each_child(event, _perf_event_disable);
4759 perf_event_ctx_unlock(event, ctx);
4760 }
cdd6c482 4761 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4762
4763 return 0;
4764}
4765
cdd6c482 4766static int perf_event_index(struct perf_event *event)
194002b2 4767{
a4eaf7f1
PZ
4768 if (event->hw.state & PERF_HES_STOPPED)
4769 return 0;
4770
cdd6c482 4771 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
4772 return 0;
4773
35edc2a5 4774 return event->pmu->event_idx(event);
194002b2
PZ
4775}
4776
c4794295 4777static void calc_timer_values(struct perf_event *event,
e3f3541c 4778 u64 *now,
7f310a5d
EM
4779 u64 *enabled,
4780 u64 *running)
c4794295 4781{
e3f3541c 4782 u64 ctx_time;
c4794295 4783
e3f3541c
PZ
4784 *now = perf_clock();
4785 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
4786 *enabled = ctx_time - event->tstamp_enabled;
4787 *running = ctx_time - event->tstamp_running;
4788}
4789
fa731587
PZ
4790static void perf_event_init_userpage(struct perf_event *event)
4791{
4792 struct perf_event_mmap_page *userpg;
4793 struct ring_buffer *rb;
4794
4795 rcu_read_lock();
4796 rb = rcu_dereference(event->rb);
4797 if (!rb)
4798 goto unlock;
4799
4800 userpg = rb->user_page;
4801
4802 /* Allow new userspace to detect that bit 0 is deprecated */
4803 userpg->cap_bit0_is_deprecated = 1;
4804 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
4805 userpg->data_offset = PAGE_SIZE;
4806 userpg->data_size = perf_data_size(rb);
fa731587
PZ
4807
4808unlock:
4809 rcu_read_unlock();
4810}
4811
c1317ec2
AL
4812void __weak arch_perf_update_userpage(
4813 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
4814{
4815}
4816
38ff667b
PZ
4817/*
4818 * Callers need to ensure there can be no nesting of this function, otherwise
4819 * the seqlock logic goes bad. We cannot serialize this because the arch
4820 * code calls this from NMI context.
4821 */
cdd6c482 4822void perf_event_update_userpage(struct perf_event *event)
37d81828 4823{
cdd6c482 4824 struct perf_event_mmap_page *userpg;
76369139 4825 struct ring_buffer *rb;
e3f3541c 4826 u64 enabled, running, now;
38ff667b
PZ
4827
4828 rcu_read_lock();
5ec4c599
PZ
4829 rb = rcu_dereference(event->rb);
4830 if (!rb)
4831 goto unlock;
4832
0d641208
EM
4833 /*
4834 * compute total_time_enabled, total_time_running
4835 * based on snapshot values taken when the event
4836 * was last scheduled in.
4837 *
4838 * we cannot simply call update_context_time()
4839 * because of locking issues, as we can be called in
4840 * NMI context
4841 */
e3f3541c 4842 calc_timer_values(event, &now, &enabled, &running);
38ff667b 4843
76369139 4844 userpg = rb->user_page;
7b732a75
PZ
4845 /*
4846 * Disable preemption so as to not let the corresponding user-space
4847 * spin too long if we get preempted.
4848 */
4849 preempt_disable();
37d81828 4850 ++userpg->lock;
92f22a38 4851 barrier();
cdd6c482 4852 userpg->index = perf_event_index(event);
b5e58793 4853 userpg->offset = perf_event_count(event);
365a4038 4854 if (userpg->index)
e7850595 4855 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 4856
0d641208 4857 userpg->time_enabled = enabled +
cdd6c482 4858 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 4859
0d641208 4860 userpg->time_running = running +
cdd6c482 4861 atomic64_read(&event->child_total_time_running);
7f8b4e4e 4862
c1317ec2 4863 arch_perf_update_userpage(event, userpg, now);
e3f3541c 4864
92f22a38 4865 barrier();
37d81828 4866 ++userpg->lock;
7b732a75 4867 preempt_enable();
38ff667b 4868unlock:
7b732a75 4869 rcu_read_unlock();
37d81828
PM
4870}
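/*
 * Editor's illustrative sketch (userspace, not part of this file): the
 * lockless read side that pairs with the ->lock increments above, following
 * the retry pattern documented for struct perf_event_mmap_page. 'pc' points
 * at the mmap()ed control page; the function name is hypothetical.
 */
static void example_read_userpage(volatile struct perf_event_mmap_page *pc,
				  unsigned long long *enabled,
				  unsigned long long *running)
{
	unsigned int seq;

	do {
		seq = pc->lock;
		__sync_synchronize();		/* compiler/CPU barrier */
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		__sync_synchronize();
	} while (pc->lock != seq);
}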
4871
906010b2
PZ
4872static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4873{
4874 struct perf_event *event = vma->vm_file->private_data;
76369139 4875 struct ring_buffer *rb;
906010b2
PZ
4876 int ret = VM_FAULT_SIGBUS;
4877
4878 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4879 if (vmf->pgoff == 0)
4880 ret = 0;
4881 return ret;
4882 }
4883
4884 rcu_read_lock();
76369139
FW
4885 rb = rcu_dereference(event->rb);
4886 if (!rb)
906010b2
PZ
4887 goto unlock;
4888
4889 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4890 goto unlock;
4891
76369139 4892 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
4893 if (!vmf->page)
4894 goto unlock;
4895
4896 get_page(vmf->page);
4897 vmf->page->mapping = vma->vm_file->f_mapping;
4898 vmf->page->index = vmf->pgoff;
4899
4900 ret = 0;
4901unlock:
4902 rcu_read_unlock();
4903
4904 return ret;
4905}
4906
10c6db11
PZ
4907static void ring_buffer_attach(struct perf_event *event,
4908 struct ring_buffer *rb)
4909{
b69cf536 4910 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
4911 unsigned long flags;
4912
b69cf536
PZ
4913 if (event->rb) {
4914 /*
4915 * Should be impossible, we set this when removing
4916 * event->rb_entry and wait/clear when adding event->rb_entry.
4917 */
4918 WARN_ON_ONCE(event->rcu_pending);
10c6db11 4919
b69cf536 4920 old_rb = event->rb;
b69cf536
PZ
4921 spin_lock_irqsave(&old_rb->event_lock, flags);
4922 list_del_rcu(&event->rb_entry);
4923 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 4924
2f993cf0
ON
4925 event->rcu_batches = get_state_synchronize_rcu();
4926 event->rcu_pending = 1;
b69cf536 4927 }
10c6db11 4928
b69cf536 4929 if (rb) {
2f993cf0
ON
4930 if (event->rcu_pending) {
4931 cond_synchronize_rcu(event->rcu_batches);
4932 event->rcu_pending = 0;
4933 }
4934
b69cf536
PZ
4935 spin_lock_irqsave(&rb->event_lock, flags);
4936 list_add_rcu(&event->rb_entry, &rb->event_list);
4937 spin_unlock_irqrestore(&rb->event_lock, flags);
4938 }
4939
767ae086
AS
4940 /*
4941 * Avoid racing with perf_mmap_close(AUX): stop the event
4942 * before swizzling the event::rb pointer; if it's getting
4943 * unmapped, its aux_mmap_count will be 0 and it won't
4944 * restart. See the comment in __perf_pmu_output_stop().
4945 *
4946 * Data will inevitably be lost when set_output is done in
4947 * mid-air, but then again, whoever does it like this is
4948 * not in for the data anyway.
4949 */
4950 if (has_aux(event))
4951 perf_event_stop(event, 0);
4952
b69cf536
PZ
4953 rcu_assign_pointer(event->rb, rb);
4954
4955 if (old_rb) {
4956 ring_buffer_put(old_rb);
4957 /*
4958 * Since we detached before setting the new rb (so that we
4959 * could attach the new rb), we could have missed a wakeup.
4960 * Provide it now.
4961 */
4962 wake_up_all(&event->waitq);
4963 }
10c6db11
PZ
4964}
4965
4966static void ring_buffer_wakeup(struct perf_event *event)
4967{
4968 struct ring_buffer *rb;
4969
4970 rcu_read_lock();
4971 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
4972 if (rb) {
4973 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4974 wake_up_all(&event->waitq);
4975 }
10c6db11
PZ
4976 rcu_read_unlock();
4977}
4978
fdc26706 4979struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 4980{
76369139 4981 struct ring_buffer *rb;
7b732a75 4982
ac9721f3 4983 rcu_read_lock();
76369139
FW
4984 rb = rcu_dereference(event->rb);
4985 if (rb) {
4986 if (!atomic_inc_not_zero(&rb->refcount))
4987 rb = NULL;
ac9721f3
PZ
4988 }
4989 rcu_read_unlock();
4990
76369139 4991 return rb;
ac9721f3
PZ
4992}
4993
fdc26706 4994void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 4995{
76369139 4996 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 4997 return;
7b732a75 4998
9bb5d40c 4999 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 5000
76369139 5001 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
5002}
5003
5004static void perf_mmap_open(struct vm_area_struct *vma)
5005{
cdd6c482 5006 struct perf_event *event = vma->vm_file->private_data;
7b732a75 5007
cdd6c482 5008 atomic_inc(&event->mmap_count);
9bb5d40c 5009 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 5010
45bfb2e5
PZ
5011 if (vma->vm_pgoff)
5012 atomic_inc(&event->rb->aux_mmap_count);
5013
1e0fb9ec
AL
5014 if (event->pmu->event_mapped)
5015 event->pmu->event_mapped(event);
7b732a75
PZ
5016}
5017
95ff4ca2
AS
5018static void perf_pmu_output_stop(struct perf_event *event);
5019
9bb5d40c
PZ
5020/*
5021 * A buffer can be mmap()ed multiple times; either directly through the same
5022 * event, or through other events by use of perf_event_set_output().
5023 *
5024 * In order to undo the VM accounting done by perf_mmap() we need to destroy
5025 * the buffer here, where we still have a VM context. This means we need
5026 * to detach all events redirecting to us.
5027 */
7b732a75
PZ
5028static void perf_mmap_close(struct vm_area_struct *vma)
5029{
cdd6c482 5030 struct perf_event *event = vma->vm_file->private_data;
7b732a75 5031
b69cf536 5032 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
5033 struct user_struct *mmap_user = rb->mmap_user;
5034 int mmap_locked = rb->mmap_locked;
5035 unsigned long size = perf_data_size(rb);
789f90fc 5036
1e0fb9ec
AL
5037 if (event->pmu->event_unmapped)
5038 event->pmu->event_unmapped(event);
5039
45bfb2e5
PZ
5040 /*
5041 * rb->aux_mmap_count will always drop before rb->mmap_count and
5042 * event->mmap_count, so it is ok to use event->mmap_mutex to
5043 * serialize with perf_mmap here.
5044 */
5045 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5046 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
95ff4ca2
AS
5047 /*
5048 * Stop all AUX events that are writing to this buffer,
5049 * so that we can free its AUX pages and corresponding PMU
5050 * data. Note that after rb::aux_mmap_count dropped to zero,
5051 * they won't start any more (see perf_aux_output_begin()).
5052 */
5053 perf_pmu_output_stop(event);
5054
5055 /* now it's safe to free the pages */
45bfb2e5
PZ
5056 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
5057 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
5058
95ff4ca2 5059 /* this has to be the last one */
45bfb2e5 5060 rb_free_aux(rb);
95ff4ca2
AS
5061 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
5062
45bfb2e5
PZ
5063 mutex_unlock(&event->mmap_mutex);
5064 }
5065
9bb5d40c
PZ
5066 atomic_dec(&rb->mmap_count);
5067
5068 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 5069 goto out_put;
9bb5d40c 5070
b69cf536 5071 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
5072 mutex_unlock(&event->mmap_mutex);
5073
5074 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
5075 if (atomic_read(&rb->mmap_count))
5076 goto out_put;
ac9721f3 5077
9bb5d40c
PZ
5078 /*
5079 * No other mmap()s, detach from all other events that might redirect
5080 * into the now unreachable buffer. Somewhat complicated by the
5081 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5082 */
5083again:
5084 rcu_read_lock();
5085 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5086 if (!atomic_long_inc_not_zero(&event->refcount)) {
5087 /*
5088 * This event is en-route to free_event() which will
5089 * detach it and remove it from the list.
5090 */
5091 continue;
5092 }
5093 rcu_read_unlock();
789f90fc 5094
9bb5d40c
PZ
5095 mutex_lock(&event->mmap_mutex);
5096 /*
5097 * Check we didn't race with perf_event_set_output() which can
5098 * swizzle the rb from under us while we were waiting to
5099 * acquire mmap_mutex.
5100 *
5101 * If we find a different rb, ignore this event; the next
5102 * iteration will no longer find it on the list. We have to
5103 * still restart the iteration to make sure we're not now
5104 * iterating the wrong list.
5105 */
b69cf536
PZ
5106 if (event->rb == rb)
5107 ring_buffer_attach(event, NULL);
5108
cdd6c482 5109 mutex_unlock(&event->mmap_mutex);
9bb5d40c 5110 put_event(event);
ac9721f3 5111
9bb5d40c
PZ
5112 /*
5113 * Restart the iteration; either we're on the wrong list or
5114 * destroyed its integrity by doing a deletion.
5115 */
5116 goto again;
7b732a75 5117 }
9bb5d40c
PZ
5118 rcu_read_unlock();
5119
5120 /*
5121 * There could still be a few 0-ref events on the list; they'll
5122 * get cleaned up by free_event() -- they'll also still have their
5123 * ref on the rb and will free it whenever they are done with it.
5124 *
5125 * Aside from that, this buffer is 'fully' detached and unmapped,
5126 * undo the VM accounting.
5127 */
5128
5129 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
5130 vma->vm_mm->pinned_vm -= mmap_locked;
5131 free_uid(mmap_user);
5132
b69cf536 5133out_put:
9bb5d40c 5134 ring_buffer_put(rb); /* could be last */
37d81828
PM
5135}
5136
f0f37e2f 5137static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 5138 .open = perf_mmap_open,
45bfb2e5 5139 .close = perf_mmap_close, /* non-mergeable */
43a21ea8
PZ
5140 .fault = perf_mmap_fault,
5141 .page_mkwrite = perf_mmap_fault,
37d81828
PM
5142};
5143
5144static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5145{
cdd6c482 5146 struct perf_event *event = file->private_data;
22a4f650 5147 unsigned long user_locked, user_lock_limit;
789f90fc 5148 struct user_struct *user = current_user();
22a4f650 5149 unsigned long locked, lock_limit;
45bfb2e5 5150 struct ring_buffer *rb = NULL;
7b732a75
PZ
5151 unsigned long vma_size;
5152 unsigned long nr_pages;
45bfb2e5 5153 long user_extra = 0, extra = 0;
d57e34fd 5154 int ret = 0, flags = 0;
37d81828 5155
c7920614
PZ
5156 /*
5157 * Don't allow mmap() of inherited per-task counters. This would
5158 * create a performance issue due to all children writing to the
76369139 5159 * same rb.
c7920614
PZ
5160 */
5161 if (event->cpu == -1 && event->attr.inherit)
5162 return -EINVAL;
5163
43a21ea8 5164 if (!(vma->vm_flags & VM_SHARED))
37d81828 5165 return -EINVAL;
7b732a75
PZ
5166
5167 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
5168
5169 if (vma->vm_pgoff == 0) {
5170 nr_pages = (vma_size / PAGE_SIZE) - 1;
5171 } else {
5172 /*
5173 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5174 * mapped, all subsequent mappings should have the same size
5175 * and offset. Must be above the normal perf buffer.
5176 */
5177 u64 aux_offset, aux_size;
5178
5179 if (!event->rb)
5180 return -EINVAL;
5181
5182 nr_pages = vma_size / PAGE_SIZE;
5183
5184 mutex_lock(&event->mmap_mutex);
5185 ret = -EINVAL;
5186
5187 rb = event->rb;
5188 if (!rb)
5189 goto aux_unlock;
5190
5191 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5192 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5193
5194 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5195 goto aux_unlock;
5196
5197 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5198 goto aux_unlock;
5199
5200 /* already mapped with a different offset */
5201 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5202 goto aux_unlock;
5203
5204 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5205 goto aux_unlock;
5206
5207 /* already mapped with a different size */
5208 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5209 goto aux_unlock;
5210
5211 if (!is_power_of_2(nr_pages))
5212 goto aux_unlock;
5213
5214 if (!atomic_inc_not_zero(&rb->mmap_count))
5215 goto aux_unlock;
5216
5217 if (rb_has_aux(rb)) {
5218 atomic_inc(&rb->aux_mmap_count);
5219 ret = 0;
5220 goto unlock;
5221 }
5222
5223 atomic_set(&rb->aux_mmap_count, 1);
5224 user_extra = nr_pages;
5225
5226 goto accounting;
5227 }
7b732a75 5228
7730d865 5229 /*
76369139 5230 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
5231 * can do bitmasks instead of modulo.
5232 */
2ed11312 5233 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
5234 return -EINVAL;
5235
7b732a75 5236 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
5237 return -EINVAL;
5238
cdd6c482 5239 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 5240again:
cdd6c482 5241 mutex_lock(&event->mmap_mutex);
76369139 5242 if (event->rb) {
9bb5d40c 5243 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 5244 ret = -EINVAL;
9bb5d40c
PZ
5245 goto unlock;
5246 }
5247
5248 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5249 /*
5250 * Raced against perf_mmap_close() through
5251 * perf_event_set_output(). Try again, hope for better
5252 * luck.
5253 */
5254 mutex_unlock(&event->mmap_mutex);
5255 goto again;
5256 }
5257
ebb3c4c4
PZ
5258 goto unlock;
5259 }
5260
789f90fc 5261 user_extra = nr_pages + 1;
45bfb2e5
PZ
5262
5263accounting:
cdd6c482 5264 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
5265
5266 /*
5267 * Increase the limit linearly with more CPUs:
5268 */
5269 user_lock_limit *= num_online_cpus();
5270
789f90fc 5271 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 5272
789f90fc
PZ
5273 if (user_locked > user_lock_limit)
5274 extra = user_locked - user_lock_limit;
7b732a75 5275
78d7d407 5276 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 5277 lock_limit >>= PAGE_SHIFT;
bc3e53f6 5278 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 5279
459ec28a
IM
5280 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5281 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
5282 ret = -EPERM;
5283 goto unlock;
5284 }
7b732a75 5285
45bfb2e5 5286 WARN_ON(!rb && event->rb);
906010b2 5287
d57e34fd 5288 if (vma->vm_flags & VM_WRITE)
76369139 5289 flags |= RING_BUFFER_WRITABLE;
d57e34fd 5290
76369139 5291 if (!rb) {
45bfb2e5
PZ
5292 rb = rb_alloc(nr_pages,
5293 event->attr.watermark ? event->attr.wakeup_watermark : 0,
5294 event->cpu, flags);
26cb63ad 5295
45bfb2e5
PZ
5296 if (!rb) {
5297 ret = -ENOMEM;
5298 goto unlock;
5299 }
43a21ea8 5300
45bfb2e5
PZ
5301 atomic_set(&rb->mmap_count, 1);
5302 rb->mmap_user = get_current_user();
5303 rb->mmap_locked = extra;
26cb63ad 5304
45bfb2e5 5305 ring_buffer_attach(event, rb);
ac9721f3 5306
45bfb2e5
PZ
5307 perf_event_init_userpage(event);
5308 perf_event_update_userpage(event);
5309 } else {
1a594131
AS
5310 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5311 event->attr.aux_watermark, flags);
45bfb2e5
PZ
5312 if (!ret)
5313 rb->aux_mmap_locked = extra;
5314 }
9a0f05cb 5315
ebb3c4c4 5316unlock:
45bfb2e5
PZ
5317 if (!ret) {
5318 atomic_long_add(user_extra, &user->locked_vm);
5319 vma->vm_mm->pinned_vm += extra;
5320
ac9721f3 5321 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
5322 } else if (rb) {
5323 atomic_dec(&rb->mmap_count);
5324 }
5325aux_unlock:
cdd6c482 5326 mutex_unlock(&event->mmap_mutex);
37d81828 5327
9bb5d40c
PZ
5328 /*
5329 * Since pinned accounting is per-vm, we cannot allow fork() to copy our
5330 * vma.
5331 */
26cb63ad 5332 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 5333 vma->vm_ops = &perf_mmap_vmops;
7b732a75 5334
1e0fb9ec
AL
5335 if (event->pmu->event_mapped)
5336 event->pmu->event_mapped(event);
5337
7b732a75 5338 return ret;
37d81828
PM
5339}
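/*
 * Illustrative sketch, not part of core.c: a perf ring buffer is mapped
 * as one metadata page followed by 2^n data pages. User space typically
 * does something like the following, where fd comes from perf_event_open()
 * and pages must be a power of two (error handling omitted):
 *
 *	size_t pages = 8;
 *	size_t len   = (1 + pages) * sysconf(_SC_PAGESIZE);
 *	void *base   = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *	struct perf_event_mmap_page *meta = base;
 *
 * The first page is the control/metadata page. Where AUX data is
 * supported, user space fills in meta->aux_offset and meta->aux_size and
 * then issues a second mmap() at that file offset, which is what the
 * aux_offset/aux_size checks above validate.
 */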
5340
3c446b3d
PZ
5341static int perf_fasync(int fd, struct file *filp, int on)
5342{
496ad9aa 5343 struct inode *inode = file_inode(filp);
cdd6c482 5344 struct perf_event *event = filp->private_data;
3c446b3d
PZ
5345 int retval;
5346
5955102c 5347 inode_lock(inode);
cdd6c482 5348 retval = fasync_helper(fd, filp, on, &event->fasync);
5955102c 5349 inode_unlock(inode);
3c446b3d
PZ
5350
5351 if (retval < 0)
5352 return retval;
5353
5354 return 0;
5355}
5356
0793a61d 5357static const struct file_operations perf_fops = {
3326c1ce 5358 .llseek = no_llseek,
0793a61d
TG
5359 .release = perf_release,
5360 .read = perf_read,
5361 .poll = perf_poll,
d859e29f 5362 .unlocked_ioctl = perf_ioctl,
b3f20785 5363 .compat_ioctl = perf_compat_ioctl,
37d81828 5364 .mmap = perf_mmap,
3c446b3d 5365 .fasync = perf_fasync,
0793a61d
TG
5366};
5367
925d519a 5368/*
cdd6c482 5369 * Perf event wakeup
925d519a
PZ
5370 *
5371 * If there's data, ensure we set the poll() state and publish everything
5372 * to user-space before waking everybody up.
5373 */
5374
fed66e2c
PZ
5375static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5376{
5377 /* only the parent has fasync state */
5378 if (event->parent)
5379 event = event->parent;
5380 return &event->fasync;
5381}
5382
cdd6c482 5383void perf_event_wakeup(struct perf_event *event)
925d519a 5384{
10c6db11 5385 ring_buffer_wakeup(event);
4c9e2542 5386
cdd6c482 5387 if (event->pending_kill) {
fed66e2c 5388 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 5389 event->pending_kill = 0;
4c9e2542 5390 }
925d519a
PZ
5391}
5392
e360adbe 5393static void perf_pending_event(struct irq_work *entry)
79f14641 5394{
cdd6c482
IM
5395 struct perf_event *event = container_of(entry,
5396 struct perf_event, pending);
d525211f
PZ
5397 int rctx;
5398
5399 rctx = perf_swevent_get_recursion_context();
5400 /*
5401 * If we 'fail' here, that's OK, it means recursion is already disabled
5402 * and we won't recurse 'further'.
5403 */
79f14641 5404
cdd6c482
IM
5405 if (event->pending_disable) {
5406 event->pending_disable = 0;
fae3fde6 5407 perf_event_disable_local(event);
79f14641
PZ
5408 }
5409
cdd6c482
IM
5410 if (event->pending_wakeup) {
5411 event->pending_wakeup = 0;
5412 perf_event_wakeup(event);
79f14641 5413 }
d525211f
PZ
5414
5415 if (rctx >= 0)
5416 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
5417}
5418
39447b38
ZY
5419/*
5420 * We assume there is only KVM supporting the callbacks.
5421 * Later on, we might change it to a list if there is
5422 * another virtualization implementation supporting the callbacks.
5423 */
5424struct perf_guest_info_callbacks *perf_guest_cbs;
5425
5426int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5427{
5428 perf_guest_cbs = cbs;
5429 return 0;
5430}
5431EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5432
5433int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5434{
5435 perf_guest_cbs = NULL;
5436 return 0;
5437}
5438EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
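/*
 * Illustrative sketch, not part of core.c: a hypervisor registers itself
 * by filling in a callback structure and handing it to the helpers above,
 * roughly as below. The member names follow struct perf_guest_info_callbacks
 * in this kernel; the kvm_* functions are stand-ins for whatever the
 * hypervisor actually implements.
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 */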
5439
4018994f
JO
5440static void
5441perf_output_sample_regs(struct perf_output_handle *handle,
5442 struct pt_regs *regs, u64 mask)
5443{
5444 int bit;
29dd3288 5445 DECLARE_BITMAP(_mask, 64);
4018994f 5446
29dd3288
MS
5447 bitmap_from_u64(_mask, mask);
5448 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
4018994f
JO
5449 u64 val;
5450
5451 val = perf_reg_value(regs, bit);
5452 perf_output_put(handle, val);
5453 }
5454}
5455
60e2364e 5456static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
5457 struct pt_regs *regs,
5458 struct pt_regs *regs_user_copy)
4018994f 5459{
88a7c26a
AL
5460 if (user_mode(regs)) {
5461 regs_user->abi = perf_reg_abi(current);
2565711f 5462 regs_user->regs = regs;
88a7c26a
AL
5463 } else if (current->mm) {
5464 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
5465 } else {
5466 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5467 regs_user->regs = NULL;
4018994f
JO
5468 }
5469}
5470
60e2364e
SE
5471static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5472 struct pt_regs *regs)
5473{
5474 regs_intr->regs = regs;
5475 regs_intr->abi = perf_reg_abi(current);
5476}
5477
5478
c5ebcedb
JO
5479/*
5480 * Get remaining task size from user stack pointer.
5481 *
5482 * It'd be better to take the stack vma into account and limit this more
5483 * precisely, but there's no way to get it safely under interrupt,
5484 * so use TASK_SIZE as the limit.
5485 */
5486static u64 perf_ustack_task_size(struct pt_regs *regs)
5487{
5488 unsigned long addr = perf_user_stack_pointer(regs);
5489
5490 if (!addr || addr >= TASK_SIZE)
5491 return 0;
5492
5493 return TASK_SIZE - addr;
5494}
5495
5496static u16
5497perf_sample_ustack_size(u16 stack_size, u16 header_size,
5498 struct pt_regs *regs)
5499{
5500 u64 task_size;
5501
5502 /* No regs, no stack pointer, no dump. */
5503 if (!regs)
5504 return 0;
5505
5506 /*
5507 * Check whether the requested stack size fits into:
5508 * - TASK_SIZE
5509 * If it doesn't, we limit the size to TASK_SIZE.
5510 *
5511 * - the remaining sample size
5512 * If it doesn't, we trim the stack size to fit
5513 * into the remaining sample size.
5514 */
5515
5516 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5517 stack_size = min(stack_size, (u16) task_size);
5518
5519 /* Current header size plus static size and dynamic size. */
5520 header_size += 2 * sizeof(u64);
5521
5522 /* Does header size plus stack dump size overflow the u16 sample size? */
5523 if ((u16) (header_size + stack_size) < header_size) {
5524 /*
5525 * If we overflow the maximum size for the sample,
5526 * we customize the stack dump size to fit in.
5527 */
5528 stack_size = USHRT_MAX - header_size - sizeof(u64);
5529 stack_size = round_up(stack_size, sizeof(u64));
5530 }
5531
5532 return stack_size;
5533}
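/*
 * Worked example, not part of core.c: with attr.sample_stack_user = 8192
 * but only 3000 bytes between the user stack pointer and TASK_SIZE,
 * perf_ustack_task_size() returns 3000 and the dump is clamped to 3000
 * bytes. The overflow branch above only triggers when header_size plus
 * the requested stack size would wrap the u16 sample size, in which case
 * the dump shrinks to USHRT_MAX - header_size - sizeof(u64), rounded to
 * an 8-byte multiple.
 */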
5534
5535static void
5536perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5537 struct pt_regs *regs)
5538{
5539 /* Case of a kernel thread, nothing to dump */
5540 if (!regs) {
5541 u64 size = 0;
5542 perf_output_put(handle, size);
5543 } else {
5544 unsigned long sp;
5545 unsigned int rem;
5546 u64 dyn_size;
5547
5548 /*
5549 * We dump:
5550 * static size
5551 * - the size requested by the user, or the best one we can fit
5552 * into the sample max size
5553 * data
5554 * - user stack dump data
5555 * dynamic size
5556 * - the actual dumped size
5557 */
5558
5559 /* Static size. */
5560 perf_output_put(handle, dump_size);
5561
5562 /* Data. */
5563 sp = perf_user_stack_pointer(regs);
5564 rem = __output_copy_user(handle, (void *) sp, dump_size);
5565 dyn_size = dump_size - rem;
5566
5567 perf_output_skip(handle, rem);
5568
5569 /* Dynamic size. */
5570 perf_output_put(handle, dyn_size);
5571 }
5572}
5573
c980d109
ACM
5574static void __perf_event_header__init_id(struct perf_event_header *header,
5575 struct perf_sample_data *data,
5576 struct perf_event *event)
6844c09d
ACM
5577{
5578 u64 sample_type = event->attr.sample_type;
5579
5580 data->type = sample_type;
5581 header->size += event->id_header_size;
5582
5583 if (sample_type & PERF_SAMPLE_TID) {
5584 /* namespace issues */
5585 data->tid_entry.pid = perf_event_pid(event, current);
5586 data->tid_entry.tid = perf_event_tid(event, current);
5587 }
5588
5589 if (sample_type & PERF_SAMPLE_TIME)
34f43927 5590 data->time = perf_event_clock(event);
6844c09d 5591
ff3d527c 5592 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
5593 data->id = primary_event_id(event);
5594
5595 if (sample_type & PERF_SAMPLE_STREAM_ID)
5596 data->stream_id = event->id;
5597
5598 if (sample_type & PERF_SAMPLE_CPU) {
5599 data->cpu_entry.cpu = raw_smp_processor_id();
5600 data->cpu_entry.reserved = 0;
5601 }
5602}
5603
76369139
FW
5604void perf_event_header__init_id(struct perf_event_header *header,
5605 struct perf_sample_data *data,
5606 struct perf_event *event)
c980d109
ACM
5607{
5608 if (event->attr.sample_id_all)
5609 __perf_event_header__init_id(header, data, event);
5610}
5611
5612static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5613 struct perf_sample_data *data)
5614{
5615 u64 sample_type = data->type;
5616
5617 if (sample_type & PERF_SAMPLE_TID)
5618 perf_output_put(handle, data->tid_entry);
5619
5620 if (sample_type & PERF_SAMPLE_TIME)
5621 perf_output_put(handle, data->time);
5622
5623 if (sample_type & PERF_SAMPLE_ID)
5624 perf_output_put(handle, data->id);
5625
5626 if (sample_type & PERF_SAMPLE_STREAM_ID)
5627 perf_output_put(handle, data->stream_id);
5628
5629 if (sample_type & PERF_SAMPLE_CPU)
5630 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
5631
5632 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5633 perf_output_put(handle, data->id);
c980d109
ACM
5634}
5635
76369139
FW
5636void perf_event__output_id_sample(struct perf_event *event,
5637 struct perf_output_handle *handle,
5638 struct perf_sample_data *sample)
c980d109
ACM
5639{
5640 if (event->attr.sample_id_all)
5641 __perf_event__output_id_sample(handle, sample);
5642}
5643
3dab77fb 5644static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
5645 struct perf_event *event,
5646 u64 enabled, u64 running)
3dab77fb 5647{
cdd6c482 5648 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5649 u64 values[4];
5650 int n = 0;
5651
b5e58793 5652 values[n++] = perf_event_count(event);
3dab77fb 5653 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 5654 values[n++] = enabled +
cdd6c482 5655 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
5656 }
5657 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 5658 values[n++] = running +
cdd6c482 5659 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
5660 }
5661 if (read_format & PERF_FORMAT_ID)
cdd6c482 5662 values[n++] = primary_event_id(event);
3dab77fb 5663
76369139 5664 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5665}
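/*
 * Illustrative layout, not part of core.c: for a non-group event with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID, the
 * function above emits three u64 values in order:
 *
 *	values[0] = count          (perf_event_count())
 *	values[1] = time_enabled   (including children's enabled time)
 *	values[2] = id             (primary_event_id())
 *
 * This matches the read_format layout user space sees when read()ing
 * the event fd with the same read_format flags.
 */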
5666
5667/*
cdd6c482 5668 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
5669 */
5670static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
5671 struct perf_event *event,
5672 u64 enabled, u64 running)
3dab77fb 5673{
cdd6c482
IM
5674 struct perf_event *leader = event->group_leader, *sub;
5675 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5676 u64 values[5];
5677 int n = 0;
5678
5679 values[n++] = 1 + leader->nr_siblings;
5680
5681 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 5682 values[n++] = enabled;
3dab77fb
PZ
5683
5684 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 5685 values[n++] = running;
3dab77fb 5686
cdd6c482 5687 if (leader != event)
3dab77fb
PZ
5688 leader->pmu->read(leader);
5689
b5e58793 5690 values[n++] = perf_event_count(leader);
3dab77fb 5691 if (read_format & PERF_FORMAT_ID)
cdd6c482 5692 values[n++] = primary_event_id(leader);
3dab77fb 5693
76369139 5694 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 5695
65abc865 5696 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
5697 n = 0;
5698
6f5ab001
JO
5699 if ((sub != event) &&
5700 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
5701 sub->pmu->read(sub);
5702
b5e58793 5703 values[n++] = perf_event_count(sub);
3dab77fb 5704 if (read_format & PERF_FORMAT_ID)
cdd6c482 5705 values[n++] = primary_event_id(sub);
3dab77fb 5706
76369139 5707 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5708 }
5709}
5710
eed01528
SE
5711#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5712 PERF_FORMAT_TOTAL_TIME_RUNNING)
5713
3dab77fb 5714static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 5715 struct perf_event *event)
3dab77fb 5716{
e3f3541c 5717 u64 enabled = 0, running = 0, now;
eed01528
SE
5718 u64 read_format = event->attr.read_format;
5719
5720 /*
5721 * compute total_time_enabled, total_time_running
5722 * based on snapshot values taken when the event
5723 * was last scheduled in.
5724 *
5725 * we cannot simply call update_context_time()
5726 * because of locking issues, as we are called in
5727 * NMI context
5728 */
c4794295 5729 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 5730 calc_timer_values(event, &now, &enabled, &running);
eed01528 5731
cdd6c482 5732 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 5733 perf_output_read_group(handle, event, enabled, running);
3dab77fb 5734 else
eed01528 5735 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
5736}
5737
5622f295
MM
5738void perf_output_sample(struct perf_output_handle *handle,
5739 struct perf_event_header *header,
5740 struct perf_sample_data *data,
cdd6c482 5741 struct perf_event *event)
5622f295
MM
5742{
5743 u64 sample_type = data->type;
5744
5745 perf_output_put(handle, *header);
5746
ff3d527c
AH
5747 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5748 perf_output_put(handle, data->id);
5749
5622f295
MM
5750 if (sample_type & PERF_SAMPLE_IP)
5751 perf_output_put(handle, data->ip);
5752
5753 if (sample_type & PERF_SAMPLE_TID)
5754 perf_output_put(handle, data->tid_entry);
5755
5756 if (sample_type & PERF_SAMPLE_TIME)
5757 perf_output_put(handle, data->time);
5758
5759 if (sample_type & PERF_SAMPLE_ADDR)
5760 perf_output_put(handle, data->addr);
5761
5762 if (sample_type & PERF_SAMPLE_ID)
5763 perf_output_put(handle, data->id);
5764
5765 if (sample_type & PERF_SAMPLE_STREAM_ID)
5766 perf_output_put(handle, data->stream_id);
5767
5768 if (sample_type & PERF_SAMPLE_CPU)
5769 perf_output_put(handle, data->cpu_entry);
5770
5771 if (sample_type & PERF_SAMPLE_PERIOD)
5772 perf_output_put(handle, data->period);
5773
5774 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 5775 perf_output_read(handle, event);
5622f295
MM
5776
5777 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5778 if (data->callchain) {
5779 int size = 1;
5780
5781 if (data->callchain)
5782 size += data->callchain->nr;
5783
5784 size *= sizeof(u64);
5785
76369139 5786 __output_copy(handle, data->callchain, size);
5622f295
MM
5787 } else {
5788 u64 nr = 0;
5789 perf_output_put(handle, nr);
5790 }
5791 }
5792
5793 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
5794 struct perf_raw_record *raw = data->raw;
5795
5796 if (raw) {
5797 struct perf_raw_frag *frag = &raw->frag;
5798
5799 perf_output_put(handle, raw->size);
5800 do {
5801 if (frag->copy) {
5802 __output_custom(handle, frag->copy,
5803 frag->data, frag->size);
5804 } else {
5805 __output_copy(handle, frag->data,
5806 frag->size);
5807 }
5808 if (perf_raw_frag_last(frag))
5809 break;
5810 frag = frag->next;
5811 } while (1);
5812 if (frag->pad)
5813 __output_skip(handle, NULL, frag->pad);
5622f295
MM
5814 } else {
5815 struct {
5816 u32 size;
5817 u32 data;
5818 } raw = {
5819 .size = sizeof(u32),
5820 .data = 0,
5821 };
5822 perf_output_put(handle, raw);
5823 }
5824 }
a7ac67ea 5825
bce38cd5
SE
5826 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5827 if (data->br_stack) {
5828 size_t size;
5829
5830 size = data->br_stack->nr
5831 * sizeof(struct perf_branch_entry);
5832
5833 perf_output_put(handle, data->br_stack->nr);
5834 perf_output_copy(handle, data->br_stack->entries, size);
5835 } else {
5836 /*
5837 * we always store at least the value of nr
5838 */
5839 u64 nr = 0;
5840 perf_output_put(handle, nr);
5841 }
5842 }
4018994f
JO
5843
5844 if (sample_type & PERF_SAMPLE_REGS_USER) {
5845 u64 abi = data->regs_user.abi;
5846
5847 /*
5848 * If there are no regs to dump, notice it through
5849 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5850 */
5851 perf_output_put(handle, abi);
5852
5853 if (abi) {
5854 u64 mask = event->attr.sample_regs_user;
5855 perf_output_sample_regs(handle,
5856 data->regs_user.regs,
5857 mask);
5858 }
5859 }
c5ebcedb 5860
a5cdd40c 5861 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
5862 perf_output_sample_ustack(handle,
5863 data->stack_user_size,
5864 data->regs_user.regs);
a5cdd40c 5865 }
c3feedf2
AK
5866
5867 if (sample_type & PERF_SAMPLE_WEIGHT)
5868 perf_output_put(handle, data->weight);
d6be9ad6
SE
5869
5870 if (sample_type & PERF_SAMPLE_DATA_SRC)
5871 perf_output_put(handle, data->data_src.val);
a5cdd40c 5872
fdfbbd07
AK
5873 if (sample_type & PERF_SAMPLE_TRANSACTION)
5874 perf_output_put(handle, data->txn);
5875
60e2364e
SE
5876 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5877 u64 abi = data->regs_intr.abi;
5878 /*
5879 * If there are no regs to dump, notice it through
5880 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5881 */
5882 perf_output_put(handle, abi);
5883
5884 if (abi) {
5885 u64 mask = event->attr.sample_regs_intr;
5886
5887 perf_output_sample_regs(handle,
5888 data->regs_intr.regs,
5889 mask);
5890 }
5891 }
5892
a5cdd40c
PZ
5893 if (!event->attr.watermark) {
5894 int wakeup_events = event->attr.wakeup_events;
5895
5896 if (wakeup_events) {
5897 struct ring_buffer *rb = handle->rb;
5898 int events = local_inc_return(&rb->events);
5899
5900 if (events >= wakeup_events) {
5901 local_sub(wakeup_events, &rb->events);
5902 local_inc(&rb->wakeup);
5903 }
5904 }
5905 }
5622f295
MM
5906}
5907
5908void perf_prepare_sample(struct perf_event_header *header,
5909 struct perf_sample_data *data,
cdd6c482 5910 struct perf_event *event,
5622f295 5911 struct pt_regs *regs)
7b732a75 5912{
cdd6c482 5913 u64 sample_type = event->attr.sample_type;
7b732a75 5914
cdd6c482 5915 header->type = PERF_RECORD_SAMPLE;
c320c7b7 5916 header->size = sizeof(*header) + event->header_size;
5622f295
MM
5917
5918 header->misc = 0;
5919 header->misc |= perf_misc_flags(regs);
6fab0192 5920
c980d109 5921 __perf_event_header__init_id(header, data, event);
6844c09d 5922
c320c7b7 5923 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
5924 data->ip = perf_instruction_pointer(regs);
5925
b23f3325 5926 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 5927 int size = 1;
394ee076 5928
e6dab5ff 5929 data->callchain = perf_callchain(event, regs);
5622f295
MM
5930
5931 if (data->callchain)
5932 size += data->callchain->nr;
5933
5934 header->size += size * sizeof(u64);
394ee076
PZ
5935 }
5936
3a43ce68 5937 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
5938 struct perf_raw_record *raw = data->raw;
5939 int size;
5940
5941 if (raw) {
5942 struct perf_raw_frag *frag = &raw->frag;
5943 u32 sum = 0;
5944
5945 do {
5946 sum += frag->size;
5947 if (perf_raw_frag_last(frag))
5948 break;
5949 frag = frag->next;
5950 } while (1);
5951
5952 size = round_up(sum + sizeof(u32), sizeof(u64));
5953 raw->size = size - sizeof(u32);
5954 frag->pad = raw->size - sum;
5955 } else {
5956 size = sizeof(u64);
5957 }
a044560c 5958
7e3f977e 5959 header->size += size;
7f453c24 5960 }
bce38cd5
SE
5961
5962 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5963 int size = sizeof(u64); /* nr */
5964 if (data->br_stack) {
5965 size += data->br_stack->nr
5966 * sizeof(struct perf_branch_entry);
5967 }
5968 header->size += size;
5969 }
4018994f 5970
2565711f 5971 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
5972 perf_sample_regs_user(&data->regs_user, regs,
5973 &data->regs_user_copy);
2565711f 5974
4018994f
JO
5975 if (sample_type & PERF_SAMPLE_REGS_USER) {
5976 /* regs dump ABI info */
5977 int size = sizeof(u64);
5978
4018994f
JO
5979 if (data->regs_user.regs) {
5980 u64 mask = event->attr.sample_regs_user;
5981 size += hweight64(mask) * sizeof(u64);
5982 }
5983
5984 header->size += size;
5985 }
c5ebcedb
JO
5986
5987 if (sample_type & PERF_SAMPLE_STACK_USER) {
5988 /*
5989 * The PERF_SAMPLE_STACK_USER bit either needs to always be
5990 * processed as the last one, or an additional check must be added
5991 * whenever a new sample type is introduced, because we could eat
5992 * up the rest of the sample size.
5993 */
c5ebcedb
JO
5994 u16 stack_size = event->attr.sample_stack_user;
5995 u16 size = sizeof(u64);
5996
c5ebcedb 5997 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 5998 data->regs_user.regs);
c5ebcedb
JO
5999
6000 /*
6001 * If there is something to dump, add space for the dump
6002 * itself and for the field that tells the dynamic size,
6003 * which is how many have been actually dumped.
6004 */
6005 if (stack_size)
6006 size += sizeof(u64) + stack_size;
6007
6008 data->stack_user_size = stack_size;
6009 header->size += size;
6010 }
60e2364e
SE
6011
6012 if (sample_type & PERF_SAMPLE_REGS_INTR) {
6013 /* regs dump ABI info */
6014 int size = sizeof(u64);
6015
6016 perf_sample_regs_intr(&data->regs_intr, regs);
6017
6018 if (data->regs_intr.regs) {
6019 u64 mask = event->attr.sample_regs_intr;
6020
6021 size += hweight64(mask) * sizeof(u64);
6022 }
6023
6024 header->size += size;
6025 }
5622f295 6026}
7f453c24 6027
9ecda41a
WN
6028static void __always_inline
6029__perf_event_output(struct perf_event *event,
6030 struct perf_sample_data *data,
6031 struct pt_regs *regs,
6032 int (*output_begin)(struct perf_output_handle *,
6033 struct perf_event *,
6034 unsigned int))
5622f295
MM
6035{
6036 struct perf_output_handle handle;
6037 struct perf_event_header header;
689802b2 6038
927c7a9e
FW
6039 /* protect the callchain buffers */
6040 rcu_read_lock();
6041
cdd6c482 6042 perf_prepare_sample(&header, data, event, regs);
5c148194 6043
9ecda41a 6044 if (output_begin(&handle, event, header.size))
927c7a9e 6045 goto exit;
0322cd6e 6046
cdd6c482 6047 perf_output_sample(&handle, &header, data, event);
f413cdb8 6048
8a057d84 6049 perf_output_end(&handle);
927c7a9e
FW
6050
6051exit:
6052 rcu_read_unlock();
0322cd6e
PZ
6053}
6054
9ecda41a
WN
6055void
6056perf_event_output_forward(struct perf_event *event,
6057 struct perf_sample_data *data,
6058 struct pt_regs *regs)
6059{
6060 __perf_event_output(event, data, regs, perf_output_begin_forward);
6061}
6062
6063void
6064perf_event_output_backward(struct perf_event *event,
6065 struct perf_sample_data *data,
6066 struct pt_regs *regs)
6067{
6068 __perf_event_output(event, data, regs, perf_output_begin_backward);
6069}
6070
6071void
6072perf_event_output(struct perf_event *event,
6073 struct perf_sample_data *data,
6074 struct pt_regs *regs)
6075{
6076 __perf_event_output(event, data, regs, perf_output_begin);
6077}
6078
38b200d6 6079/*
cdd6c482 6080 * read event_id
38b200d6
PZ
6081 */
6082
6083struct perf_read_event {
6084 struct perf_event_header header;
6085
6086 u32 pid;
6087 u32 tid;
38b200d6
PZ
6088};
6089
6090static void
cdd6c482 6091perf_event_read_event(struct perf_event *event,
38b200d6
PZ
6092 struct task_struct *task)
6093{
6094 struct perf_output_handle handle;
c980d109 6095 struct perf_sample_data sample;
dfc65094 6096 struct perf_read_event read_event = {
38b200d6 6097 .header = {
cdd6c482 6098 .type = PERF_RECORD_READ,
38b200d6 6099 .misc = 0,
c320c7b7 6100 .size = sizeof(read_event) + event->read_size,
38b200d6 6101 },
cdd6c482
IM
6102 .pid = perf_event_pid(event, task),
6103 .tid = perf_event_tid(event, task),
38b200d6 6104 };
3dab77fb 6105 int ret;
38b200d6 6106
c980d109 6107 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 6108 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
6109 if (ret)
6110 return;
6111
dfc65094 6112 perf_output_put(&handle, read_event);
cdd6c482 6113 perf_output_read(&handle, event);
c980d109 6114 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 6115
38b200d6
PZ
6116 perf_output_end(&handle);
6117}
6118
aab5b71e 6119typedef void (perf_iterate_f)(struct perf_event *event, void *data);
52d857a8
JO
6120
6121static void
aab5b71e
PZ
6122perf_iterate_ctx(struct perf_event_context *ctx,
6123 perf_iterate_f output,
b73e4fef 6124 void *data, bool all)
52d857a8
JO
6125{
6126 struct perf_event *event;
6127
6128 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
b73e4fef
AS
6129 if (!all) {
6130 if (event->state < PERF_EVENT_STATE_INACTIVE)
6131 continue;
6132 if (!event_filter_match(event))
6133 continue;
6134 }
6135
67516844 6136 output(event, data);
52d857a8
JO
6137 }
6138}
6139
aab5b71e 6140static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
f2fb6bef
KL
6141{
6142 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
6143 struct perf_event *event;
6144
6145 list_for_each_entry_rcu(event, &pel->list, sb_list) {
0b8f1e2e
PZ
6146 /*
6147 * Skip events that are not fully formed yet; ensure that
6148 * if we observe event->ctx, both event and ctx will be
6149 * complete enough. See perf_install_in_context().
6150 */
6151 if (!smp_load_acquire(&event->ctx))
6152 continue;
6153
f2fb6bef
KL
6154 if (event->state < PERF_EVENT_STATE_INACTIVE)
6155 continue;
6156 if (!event_filter_match(event))
6157 continue;
6158 output(event, data);
6159 }
6160}
6161
aab5b71e
PZ
6162/*
6163 * Iterate all events that need to receive side-band events.
6164 *
6165 * For new callers; ensure that account_pmu_sb_event() includes
6166 * your event, otherwise it might not get delivered.
6167 */
52d857a8 6168static void
aab5b71e 6169perf_iterate_sb(perf_iterate_f output, void *data,
52d857a8
JO
6170 struct perf_event_context *task_ctx)
6171{
52d857a8 6172 struct perf_event_context *ctx;
52d857a8
JO
6173 int ctxn;
6174
aab5b71e
PZ
6175 rcu_read_lock();
6176 preempt_disable();
6177
4e93ad60 6178 /*
aab5b71e
PZ
6179 * If we have task_ctx != NULL we only notify the task context itself.
6180 * The task_ctx is set only for EXIT events before releasing task
4e93ad60
JO
6181 * context.
6182 */
6183 if (task_ctx) {
aab5b71e
PZ
6184 perf_iterate_ctx(task_ctx, output, data, false);
6185 goto done;
4e93ad60
JO
6186 }
6187
aab5b71e 6188 perf_iterate_sb_cpu(output, data);
f2fb6bef
KL
6189
6190 for_each_task_context_nr(ctxn) {
52d857a8
JO
6191 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6192 if (ctx)
aab5b71e 6193 perf_iterate_ctx(ctx, output, data, false);
52d857a8 6194 }
aab5b71e 6195done:
f2fb6bef 6196 preempt_enable();
52d857a8 6197 rcu_read_unlock();
95ff4ca2
AS
6198}
6199
375637bc
AS
6200/*
6201 * Clear all file-based filters at exec; they'll have to be
6202 * reinstated when/if these objects are mmapped again.
6203 */
6204static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6205{
6206 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6207 struct perf_addr_filter *filter;
6208 unsigned int restart = 0, count = 0;
6209 unsigned long flags;
6210
6211 if (!has_addr_filter(event))
6212 return;
6213
6214 raw_spin_lock_irqsave(&ifh->lock, flags);
6215 list_for_each_entry(filter, &ifh->list, entry) {
6216 if (filter->inode) {
6217 event->addr_filters_offs[count] = 0;
6218 restart++;
6219 }
6220
6221 count++;
6222 }
6223
6224 if (restart)
6225 event->addr_filters_gen++;
6226 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6227
6228 if (restart)
767ae086 6229 perf_event_stop(event, 1);
375637bc
AS
6230}
6231
6232void perf_event_exec(void)
6233{
6234 struct perf_event_context *ctx;
6235 int ctxn;
6236
6237 rcu_read_lock();
6238 for_each_task_context_nr(ctxn) {
6239 ctx = current->perf_event_ctxp[ctxn];
6240 if (!ctx)
6241 continue;
6242
6243 perf_event_enable_on_exec(ctxn);
6244
aab5b71e 6245 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
375637bc
AS
6246 true);
6247 }
6248 rcu_read_unlock();
6249}
6250
95ff4ca2
AS
6251struct remote_output {
6252 struct ring_buffer *rb;
6253 int err;
6254};
6255
6256static void __perf_event_output_stop(struct perf_event *event, void *data)
6257{
6258 struct perf_event *parent = event->parent;
6259 struct remote_output *ro = data;
6260 struct ring_buffer *rb = ro->rb;
375637bc
AS
6261 struct stop_event_data sd = {
6262 .event = event,
6263 };
95ff4ca2
AS
6264
6265 if (!has_aux(event))
6266 return;
6267
6268 if (!parent)
6269 parent = event;
6270
6271 /*
6272 * In case of inheritance, it will be the parent that links to the
767ae086
AS
6273 * ring-buffer, but it will be the child that's actually using it.
6274 *
6275 * We are using event::rb to determine if the event should be stopped,
6276 * however this may race with ring_buffer_attach() (through set_output),
6277 * which will make us skip the event that actually needs to be stopped.
6278 * So ring_buffer_attach() has to stop an aux event before re-assigning
6279 * its rb pointer.
95ff4ca2
AS
6280 */
6281 if (rcu_dereference(parent->rb) == rb)
375637bc 6282 ro->err = __perf_event_stop(&sd);
95ff4ca2
AS
6283}
6284
6285static int __perf_pmu_output_stop(void *info)
6286{
6287 struct perf_event *event = info;
6288 struct pmu *pmu = event->pmu;
8b6a3fe8 6289 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95ff4ca2
AS
6290 struct remote_output ro = {
6291 .rb = event->rb,
6292 };
6293
6294 rcu_read_lock();
aab5b71e 6295 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
95ff4ca2 6296 if (cpuctx->task_ctx)
aab5b71e 6297 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
b73e4fef 6298 &ro, false);
95ff4ca2
AS
6299 rcu_read_unlock();
6300
6301 return ro.err;
6302}
6303
6304static void perf_pmu_output_stop(struct perf_event *event)
6305{
6306 struct perf_event *iter;
6307 int err, cpu;
6308
6309restart:
6310 rcu_read_lock();
6311 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6312 /*
6313 * For per-CPU events, we need to make sure that neither they
6314 * nor their children are running; for cpu==-1 events it's
6315 * sufficient to stop the event itself if it's active, since
6316 * it can't have children.
6317 */
6318 cpu = iter->cpu;
6319 if (cpu == -1)
6320 cpu = READ_ONCE(iter->oncpu);
6321
6322 if (cpu == -1)
6323 continue;
6324
6325 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6326 if (err == -EAGAIN) {
6327 rcu_read_unlock();
6328 goto restart;
6329 }
6330 }
6331 rcu_read_unlock();
52d857a8
JO
6332}
6333
60313ebe 6334/*
9f498cc5
PZ
6335 * task tracking -- fork/exit
6336 *
13d7a241 6337 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
6338 */
6339
9f498cc5 6340struct perf_task_event {
3a80b4a3 6341 struct task_struct *task;
cdd6c482 6342 struct perf_event_context *task_ctx;
60313ebe
PZ
6343
6344 struct {
6345 struct perf_event_header header;
6346
6347 u32 pid;
6348 u32 ppid;
9f498cc5
PZ
6349 u32 tid;
6350 u32 ptid;
393b2ad8 6351 u64 time;
cdd6c482 6352 } event_id;
60313ebe
PZ
6353};
6354
67516844
JO
6355static int perf_event_task_match(struct perf_event *event)
6356{
13d7a241
SE
6357 return event->attr.comm || event->attr.mmap ||
6358 event->attr.mmap2 || event->attr.mmap_data ||
6359 event->attr.task;
67516844
JO
6360}
6361
cdd6c482 6362static void perf_event_task_output(struct perf_event *event,
52d857a8 6363 void *data)
60313ebe 6364{
52d857a8 6365 struct perf_task_event *task_event = data;
60313ebe 6366 struct perf_output_handle handle;
c980d109 6367 struct perf_sample_data sample;
9f498cc5 6368 struct task_struct *task = task_event->task;
c980d109 6369 int ret, size = task_event->event_id.header.size;
8bb39f9a 6370
67516844
JO
6371 if (!perf_event_task_match(event))
6372 return;
6373
c980d109 6374 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 6375
c980d109 6376 ret = perf_output_begin(&handle, event,
a7ac67ea 6377 task_event->event_id.header.size);
ef60777c 6378 if (ret)
c980d109 6379 goto out;
60313ebe 6380
cdd6c482
IM
6381 task_event->event_id.pid = perf_event_pid(event, task);
6382 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 6383
cdd6c482
IM
6384 task_event->event_id.tid = perf_event_tid(event, task);
6385 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 6386
34f43927
PZ
6387 task_event->event_id.time = perf_event_clock(event);
6388
cdd6c482 6389 perf_output_put(&handle, task_event->event_id);
393b2ad8 6390
c980d109
ACM
6391 perf_event__output_id_sample(event, &handle, &sample);
6392
60313ebe 6393 perf_output_end(&handle);
c980d109
ACM
6394out:
6395 task_event->event_id.header.size = size;
60313ebe
PZ
6396}
6397
cdd6c482
IM
6398static void perf_event_task(struct task_struct *task,
6399 struct perf_event_context *task_ctx,
3a80b4a3 6400 int new)
60313ebe 6401{
9f498cc5 6402 struct perf_task_event task_event;
60313ebe 6403
cdd6c482
IM
6404 if (!atomic_read(&nr_comm_events) &&
6405 !atomic_read(&nr_mmap_events) &&
6406 !atomic_read(&nr_task_events))
60313ebe
PZ
6407 return;
6408
9f498cc5 6409 task_event = (struct perf_task_event){
3a80b4a3
PZ
6410 .task = task,
6411 .task_ctx = task_ctx,
cdd6c482 6412 .event_id = {
60313ebe 6413 .header = {
cdd6c482 6414 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 6415 .misc = 0,
cdd6c482 6416 .size = sizeof(task_event.event_id),
60313ebe 6417 },
573402db
PZ
6418 /* .pid */
6419 /* .ppid */
9f498cc5
PZ
6420 /* .tid */
6421 /* .ptid */
34f43927 6422 /* .time */
60313ebe
PZ
6423 },
6424 };
6425
aab5b71e 6426 perf_iterate_sb(perf_event_task_output,
52d857a8
JO
6427 &task_event,
6428 task_ctx);
9f498cc5
PZ
6429}
6430
cdd6c482 6431void perf_event_fork(struct task_struct *task)
9f498cc5 6432{
cdd6c482 6433 perf_event_task(task, NULL, 1);
60313ebe
PZ
6434}
6435
8d1b2d93
PZ
6436/*
6437 * comm tracking
6438 */
6439
6440struct perf_comm_event {
22a4f650
IM
6441 struct task_struct *task;
6442 char *comm;
8d1b2d93
PZ
6443 int comm_size;
6444
6445 struct {
6446 struct perf_event_header header;
6447
6448 u32 pid;
6449 u32 tid;
cdd6c482 6450 } event_id;
8d1b2d93
PZ
6451};
6452
67516844
JO
6453static int perf_event_comm_match(struct perf_event *event)
6454{
6455 return event->attr.comm;
6456}
6457
cdd6c482 6458static void perf_event_comm_output(struct perf_event *event,
52d857a8 6459 void *data)
8d1b2d93 6460{
52d857a8 6461 struct perf_comm_event *comm_event = data;
8d1b2d93 6462 struct perf_output_handle handle;
c980d109 6463 struct perf_sample_data sample;
cdd6c482 6464 int size = comm_event->event_id.header.size;
c980d109
ACM
6465 int ret;
6466
67516844
JO
6467 if (!perf_event_comm_match(event))
6468 return;
6469
c980d109
ACM
6470 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6471 ret = perf_output_begin(&handle, event,
a7ac67ea 6472 comm_event->event_id.header.size);
8d1b2d93
PZ
6473
6474 if (ret)
c980d109 6475 goto out;
8d1b2d93 6476
cdd6c482
IM
6477 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6478 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 6479
cdd6c482 6480 perf_output_put(&handle, comm_event->event_id);
76369139 6481 __output_copy(&handle, comm_event->comm,
8d1b2d93 6482 comm_event->comm_size);
c980d109
ACM
6483
6484 perf_event__output_id_sample(event, &handle, &sample);
6485
8d1b2d93 6486 perf_output_end(&handle);
c980d109
ACM
6487out:
6488 comm_event->event_id.header.size = size;
8d1b2d93
PZ
6489}
6490
cdd6c482 6491static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 6492{
413ee3b4 6493 char comm[TASK_COMM_LEN];
8d1b2d93 6494 unsigned int size;
8d1b2d93 6495
413ee3b4 6496 memset(comm, 0, sizeof(comm));
96b02d78 6497 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 6498 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
6499
6500 comm_event->comm = comm;
6501 comm_event->comm_size = size;
6502
cdd6c482 6503 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 6504
aab5b71e 6505 perf_iterate_sb(perf_event_comm_output,
52d857a8
JO
6506 comm_event,
6507 NULL);
8d1b2d93
PZ
6508}
6509
82b89778 6510void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 6511{
9ee318a7
PZ
6512 struct perf_comm_event comm_event;
6513
cdd6c482 6514 if (!atomic_read(&nr_comm_events))
9ee318a7 6515 return;
a63eaf34 6516
9ee318a7 6517 comm_event = (struct perf_comm_event){
8d1b2d93 6518 .task = task,
573402db
PZ
6519 /* .comm */
6520 /* .comm_size */
cdd6c482 6521 .event_id = {
573402db 6522 .header = {
cdd6c482 6523 .type = PERF_RECORD_COMM,
82b89778 6524 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
6525 /* .size */
6526 },
6527 /* .pid */
6528 /* .tid */
8d1b2d93
PZ
6529 },
6530 };
6531
cdd6c482 6532 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
6533}
6534
0a4a9391
PZ
6535/*
6536 * mmap tracking
6537 */
6538
6539struct perf_mmap_event {
089dd79d
PZ
6540 struct vm_area_struct *vma;
6541
6542 const char *file_name;
6543 int file_size;
13d7a241
SE
6544 int maj, min;
6545 u64 ino;
6546 u64 ino_generation;
f972eb63 6547 u32 prot, flags;
0a4a9391
PZ
6548
6549 struct {
6550 struct perf_event_header header;
6551
6552 u32 pid;
6553 u32 tid;
6554 u64 start;
6555 u64 len;
6556 u64 pgoff;
cdd6c482 6557 } event_id;
0a4a9391
PZ
6558};
6559
67516844
JO
6560static int perf_event_mmap_match(struct perf_event *event,
6561 void *data)
6562{
6563 struct perf_mmap_event *mmap_event = data;
6564 struct vm_area_struct *vma = mmap_event->vma;
6565 int executable = vma->vm_flags & VM_EXEC;
6566
6567 return (!executable && event->attr.mmap_data) ||
13d7a241 6568 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
6569}
6570
cdd6c482 6571static void perf_event_mmap_output(struct perf_event *event,
52d857a8 6572 void *data)
0a4a9391 6573{
52d857a8 6574 struct perf_mmap_event *mmap_event = data;
0a4a9391 6575 struct perf_output_handle handle;
c980d109 6576 struct perf_sample_data sample;
cdd6c482 6577 int size = mmap_event->event_id.header.size;
c980d109 6578 int ret;
0a4a9391 6579
67516844
JO
6580 if (!perf_event_mmap_match(event, data))
6581 return;
6582
13d7a241
SE
6583 if (event->attr.mmap2) {
6584 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6585 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6586 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6587 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 6588 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
6589 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6590 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
6591 }
6592
c980d109
ACM
6593 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6594 ret = perf_output_begin(&handle, event,
a7ac67ea 6595 mmap_event->event_id.header.size);
0a4a9391 6596 if (ret)
c980d109 6597 goto out;
0a4a9391 6598
cdd6c482
IM
6599 mmap_event->event_id.pid = perf_event_pid(event, current);
6600 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 6601
cdd6c482 6602 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
6603
6604 if (event->attr.mmap2) {
6605 perf_output_put(&handle, mmap_event->maj);
6606 perf_output_put(&handle, mmap_event->min);
6607 perf_output_put(&handle, mmap_event->ino);
6608 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
6609 perf_output_put(&handle, mmap_event->prot);
6610 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
6611 }
6612
76369139 6613 __output_copy(&handle, mmap_event->file_name,
0a4a9391 6614 mmap_event->file_size);
c980d109
ACM
6615
6616 perf_event__output_id_sample(event, &handle, &sample);
6617
78d613eb 6618 perf_output_end(&handle);
c980d109
ACM
6619out:
6620 mmap_event->event_id.header.size = size;
0a4a9391
PZ
6621}
6622
cdd6c482 6623static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 6624{
089dd79d
PZ
6625 struct vm_area_struct *vma = mmap_event->vma;
6626 struct file *file = vma->vm_file;
13d7a241
SE
6627 int maj = 0, min = 0;
6628 u64 ino = 0, gen = 0;
f972eb63 6629 u32 prot = 0, flags = 0;
0a4a9391
PZ
6630 unsigned int size;
6631 char tmp[16];
6632 char *buf = NULL;
2c42cfbf 6633 char *name;
413ee3b4 6634
0b3589be
PZ
6635 if (vma->vm_flags & VM_READ)
6636 prot |= PROT_READ;
6637 if (vma->vm_flags & VM_WRITE)
6638 prot |= PROT_WRITE;
6639 if (vma->vm_flags & VM_EXEC)
6640 prot |= PROT_EXEC;
6641
6642 if (vma->vm_flags & VM_MAYSHARE)
6643 flags = MAP_SHARED;
6644 else
6645 flags = MAP_PRIVATE;
6646
6647 if (vma->vm_flags & VM_DENYWRITE)
6648 flags |= MAP_DENYWRITE;
6649 if (vma->vm_flags & VM_MAYEXEC)
6650 flags |= MAP_EXECUTABLE;
6651 if (vma->vm_flags & VM_LOCKED)
6652 flags |= MAP_LOCKED;
6653 if (vma->vm_flags & VM_HUGETLB)
6654 flags |= MAP_HUGETLB;
6655
0a4a9391 6656 if (file) {
13d7a241
SE
6657 struct inode *inode;
6658 dev_t dev;
3ea2f2b9 6659
2c42cfbf 6660 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 6661 if (!buf) {
c7e548b4
ON
6662 name = "//enomem";
6663 goto cpy_name;
0a4a9391 6664 }
413ee3b4 6665 /*
3ea2f2b9 6666 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
6667 * need to add enough zero bytes after the string to handle
6668 * the 64bit alignment we do later.
6669 */
9bf39ab2 6670 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 6671 if (IS_ERR(name)) {
c7e548b4
ON
6672 name = "//toolong";
6673 goto cpy_name;
0a4a9391 6674 }
13d7a241
SE
6675 inode = file_inode(vma->vm_file);
6676 dev = inode->i_sb->s_dev;
6677 ino = inode->i_ino;
6678 gen = inode->i_generation;
6679 maj = MAJOR(dev);
6680 min = MINOR(dev);
f972eb63 6681
c7e548b4 6682 goto got_name;
0a4a9391 6683 } else {
fbe26abe
JO
6684 if (vma->vm_ops && vma->vm_ops->name) {
6685 name = (char *) vma->vm_ops->name(vma);
6686 if (name)
6687 goto cpy_name;
6688 }
6689
2c42cfbf 6690 name = (char *)arch_vma_name(vma);
c7e548b4
ON
6691 if (name)
6692 goto cpy_name;
089dd79d 6693
32c5fb7e 6694 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 6695 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
6696 name = "[heap]";
6697 goto cpy_name;
32c5fb7e
ON
6698 }
6699 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 6700 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
6701 name = "[stack]";
6702 goto cpy_name;
089dd79d
PZ
6703 }
6704
c7e548b4
ON
6705 name = "//anon";
6706 goto cpy_name;
0a4a9391
PZ
6707 }
6708
c7e548b4
ON
6709cpy_name:
6710 strlcpy(tmp, name, sizeof(tmp));
6711 name = tmp;
0a4a9391 6712got_name:
2c42cfbf
PZ
6713 /*
6714 * Since our buffer works in 8-byte units, we need to align our string
6715 * size to a multiple of 8. However, we must guarantee the tail end is
6716 * zeroed out to avoid leaking random bits to userspace.
6717 */
6718 size = strlen(name)+1;
6719 while (!IS_ALIGNED(size, sizeof(u64)))
6720 name[size++] = '\0';
0a4a9391
PZ
6721
6722 mmap_event->file_name = name;
6723 mmap_event->file_size = size;
13d7a241
SE
6724 mmap_event->maj = maj;
6725 mmap_event->min = min;
6726 mmap_event->ino = ino;
6727 mmap_event->ino_generation = gen;
f972eb63
PZ
6728 mmap_event->prot = prot;
6729 mmap_event->flags = flags;
0a4a9391 6730
2fe85427
SE
6731 if (!(vma->vm_flags & VM_EXEC))
6732 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6733
cdd6c482 6734 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 6735
aab5b71e 6736 perf_iterate_sb(perf_event_mmap_output,
52d857a8
JO
6737 mmap_event,
6738 NULL);
665c2142 6739
0a4a9391
PZ
6740 kfree(buf);
6741}
6742
375637bc
AS
6743/*
6744 * Check whether inode and address range match filter criteria.
6745 */
6746static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6747 struct file *file, unsigned long offset,
6748 unsigned long size)
6749{
45063097 6750 if (filter->inode != file_inode(file))
375637bc
AS
6751 return false;
6752
6753 if (filter->offset > offset + size)
6754 return false;
6755
6756 if (filter->offset + filter->size < offset)
6757 return false;
6758
6759 return true;
6760}
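/*
 * Illustrative example, not part of core.c: the three checks above form a
 * plain interval-overlap test against the file range being mapped. A
 * filter covering file offsets [0x1000, 0x1100) matches an mmap() of
 * offset 0x0, length 0x2000 of the same inode, because
 * filter->offset (0x1000) <= offset + size (0x2000) and
 * filter->offset + filter->size (0x1100) >= offset (0x0), so
 * __perf_addr_filters_adjust() below re-anchors the filter to the new
 * vma->vm_start.
 */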
6761
6762static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6763{
6764 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6765 struct vm_area_struct *vma = data;
6766 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6767 struct file *file = vma->vm_file;
6768 struct perf_addr_filter *filter;
6769 unsigned int restart = 0, count = 0;
6770
6771 if (!has_addr_filter(event))
6772 return;
6773
6774 if (!file)
6775 return;
6776
6777 raw_spin_lock_irqsave(&ifh->lock, flags);
6778 list_for_each_entry(filter, &ifh->list, entry) {
6779 if (perf_addr_filter_match(filter, file, off,
6780 vma->vm_end - vma->vm_start)) {
6781 event->addr_filters_offs[count] = vma->vm_start;
6782 restart++;
6783 }
6784
6785 count++;
6786 }
6787
6788 if (restart)
6789 event->addr_filters_gen++;
6790 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6791
6792 if (restart)
767ae086 6793 perf_event_stop(event, 1);
375637bc
AS
6794}
6795
6796/*
6797 * Adjust all task's events' filters to the new vma
6798 */
6799static void perf_addr_filters_adjust(struct vm_area_struct *vma)
6800{
6801 struct perf_event_context *ctx;
6802 int ctxn;
6803
12b40a23
MP
6804 /*
6805 * Data tracing isn't supported yet and as such there is no need
6806 * to keep track of anything that isn't related to executable code:
6807 */
6808 if (!(vma->vm_flags & VM_EXEC))
6809 return;
6810
375637bc
AS
6811 rcu_read_lock();
6812 for_each_task_context_nr(ctxn) {
6813 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6814 if (!ctx)
6815 continue;
6816
aab5b71e 6817 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
375637bc
AS
6818 }
6819 rcu_read_unlock();
6820}
6821
3af9e859 6822void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 6823{
9ee318a7
PZ
6824 struct perf_mmap_event mmap_event;
6825
cdd6c482 6826 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
6827 return;
6828
6829 mmap_event = (struct perf_mmap_event){
089dd79d 6830 .vma = vma,
573402db
PZ
6831 /* .file_name */
6832 /* .file_size */
cdd6c482 6833 .event_id = {
573402db 6834 .header = {
cdd6c482 6835 .type = PERF_RECORD_MMAP,
39447b38 6836 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
6837 /* .size */
6838 },
6839 /* .pid */
6840 /* .tid */
089dd79d
PZ
6841 .start = vma->vm_start,
6842 .len = vma->vm_end - vma->vm_start,
3a0304e9 6843 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 6844 },
13d7a241
SE
6845 /* .maj (attr_mmap2 only) */
6846 /* .min (attr_mmap2 only) */
6847 /* .ino (attr_mmap2 only) */
6848 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
6849 /* .prot (attr_mmap2 only) */
6850 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
6851 };
6852
375637bc 6853 perf_addr_filters_adjust(vma);
cdd6c482 6854 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
6855}
6856
68db7e98
AS
6857void perf_event_aux_event(struct perf_event *event, unsigned long head,
6858 unsigned long size, u64 flags)
6859{
6860 struct perf_output_handle handle;
6861 struct perf_sample_data sample;
6862 struct perf_aux_event {
6863 struct perf_event_header header;
6864 u64 offset;
6865 u64 size;
6866 u64 flags;
6867 } rec = {
6868 .header = {
6869 .type = PERF_RECORD_AUX,
6870 .misc = 0,
6871 .size = sizeof(rec),
6872 },
6873 .offset = head,
6874 .size = size,
6875 .flags = flags,
6876 };
6877 int ret;
6878
6879 perf_event_header__init_id(&rec.header, &sample, event);
6880 ret = perf_output_begin(&handle, event, rec.header.size);
6881
6882 if (ret)
6883 return;
6884
6885 perf_output_put(&handle, rec);
6886 perf_event__output_id_sample(event, &handle, &sample);
6887
6888 perf_output_end(&handle);
6889}
6890
f38b0dbb
KL
6891/*
6892 * Lost/dropped samples logging
6893 */
6894void perf_log_lost_samples(struct perf_event *event, u64 lost)
6895{
6896 struct perf_output_handle handle;
6897 struct perf_sample_data sample;
6898 int ret;
6899
6900 struct {
6901 struct perf_event_header header;
6902 u64 lost;
6903 } lost_samples_event = {
6904 .header = {
6905 .type = PERF_RECORD_LOST_SAMPLES,
6906 .misc = 0,
6907 .size = sizeof(lost_samples_event),
6908 },
6909 .lost = lost,
6910 };
6911
6912 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6913
6914 ret = perf_output_begin(&handle, event,
6915 lost_samples_event.header.size);
6916 if (ret)
6917 return;
6918
6919 perf_output_put(&handle, lost_samples_event);
6920 perf_event__output_id_sample(event, &handle, &sample);
6921 perf_output_end(&handle);
6922}
6923
45ac1403
AH
6924/*
6925 * context_switch tracking
6926 */
6927
6928struct perf_switch_event {
6929 struct task_struct *task;
6930 struct task_struct *next_prev;
6931
6932 struct {
6933 struct perf_event_header header;
6934 u32 next_prev_pid;
6935 u32 next_prev_tid;
6936 } event_id;
6937};
6938
6939static int perf_event_switch_match(struct perf_event *event)
6940{
6941 return event->attr.context_switch;
6942}
6943
6944static void perf_event_switch_output(struct perf_event *event, void *data)
6945{
6946 struct perf_switch_event *se = data;
6947 struct perf_output_handle handle;
6948 struct perf_sample_data sample;
6949 int ret;
6950
6951 if (!perf_event_switch_match(event))
6952 return;
6953
6954 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6955 if (event->ctx->task) {
6956 se->event_id.header.type = PERF_RECORD_SWITCH;
6957 se->event_id.header.size = sizeof(se->event_id.header);
6958 } else {
6959 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6960 se->event_id.header.size = sizeof(se->event_id);
6961 se->event_id.next_prev_pid =
6962 perf_event_pid(event, se->next_prev);
6963 se->event_id.next_prev_tid =
6964 perf_event_tid(event, se->next_prev);
6965 }
6966
6967 perf_event_header__init_id(&se->event_id.header, &sample, event);
6968
6969 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6970 if (ret)
6971 return;
6972
6973 if (event->ctx->task)
6974 perf_output_put(&handle, se->event_id.header);
6975 else
6976 perf_output_put(&handle, se->event_id);
6977
6978 perf_event__output_id_sample(event, &handle, &sample);
6979
6980 perf_output_end(&handle);
6981}
6982
6983static void perf_event_switch(struct task_struct *task,
6984 struct task_struct *next_prev, bool sched_in)
6985{
6986 struct perf_switch_event switch_event;
6987
6988 /* N.B. caller checks nr_switch_events != 0 */
6989
6990 switch_event = (struct perf_switch_event){
6991 .task = task,
6992 .next_prev = next_prev,
6993 .event_id = {
6994 .header = {
6995 /* .type */
6996 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6997 /* .size */
6998 },
6999 /* .next_prev_pid */
7000 /* .next_prev_tid */
7001 },
7002 };
7003
aab5b71e 7004 perf_iterate_sb(perf_event_switch_output,
45ac1403
AH
7005 &switch_event,
7006 NULL);
7007}
7008
a78ac325
PZ
7009/*
7010 * IRQ throttle logging
7011 */
7012
cdd6c482 7013static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
7014{
7015 struct perf_output_handle handle;
c980d109 7016 struct perf_sample_data sample;
a78ac325
PZ
7017 int ret;
7018
7019 struct {
7020 struct perf_event_header header;
7021 u64 time;
cca3f454 7022 u64 id;
7f453c24 7023 u64 stream_id;
a78ac325
PZ
7024 } throttle_event = {
7025 .header = {
cdd6c482 7026 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
7027 .misc = 0,
7028 .size = sizeof(throttle_event),
7029 },
34f43927 7030 .time = perf_event_clock(event),
cdd6c482
IM
7031 .id = primary_event_id(event),
7032 .stream_id = event->id,
a78ac325
PZ
7033 };
7034
966ee4d6 7035 if (enable)
cdd6c482 7036 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 7037
c980d109
ACM
7038 perf_event_header__init_id(&throttle_event.header, &sample, event);
7039
7040 ret = perf_output_begin(&handle, event,
a7ac67ea 7041 throttle_event.header.size);
a78ac325
PZ
7042 if (ret)
7043 return;
7044
7045 perf_output_put(&handle, throttle_event);
c980d109 7046 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
7047 perf_output_end(&handle);
7048}
7049
ec0d7729
AS
7050static void perf_log_itrace_start(struct perf_event *event)
7051{
7052 struct perf_output_handle handle;
7053 struct perf_sample_data sample;
7054 struct perf_aux_event {
7055 struct perf_event_header header;
7056 u32 pid;
7057 u32 tid;
7058 } rec;
7059 int ret;
7060
7061 if (event->parent)
7062 event = event->parent;
7063
7064 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
7065 event->hw.itrace_started)
7066 return;
7067
ec0d7729
AS
7068 rec.header.type = PERF_RECORD_ITRACE_START;
7069 rec.header.misc = 0;
7070 rec.header.size = sizeof(rec);
7071 rec.pid = perf_event_pid(event, current);
7072 rec.tid = perf_event_tid(event, current);
7073
7074 perf_event_header__init_id(&rec.header, &sample, event);
7075 ret = perf_output_begin(&handle, event, rec.header.size);
7076
7077 if (ret)
7078 return;
7079
7080 perf_output_put(&handle, rec);
7081 perf_event__output_id_sample(event, &handle, &sample);
7082
7083 perf_output_end(&handle);
7084}
7085
475113d9
JO
7086static int
7087__perf_event_account_interrupt(struct perf_event *event, int throttle)
f6c7d5fe 7088{
cdd6c482 7089 struct hw_perf_event *hwc = &event->hw;
79f14641 7090 int ret = 0;
475113d9 7091 u64 seq;
96398826 7092
e050e3f0
SE
7093 seq = __this_cpu_read(perf_throttled_seq);
7094 if (seq != hwc->interrupts_seq) {
7095 hwc->interrupts_seq = seq;
7096 hwc->interrupts = 1;
7097 } else {
7098 hwc->interrupts++;
7099 if (unlikely(throttle
7100 && hwc->interrupts >= max_samples_per_tick)) {
7101 __this_cpu_inc(perf_throttled_count);
555e0c1e 7102 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
163ec435
PZ
7103 hwc->interrupts = MAX_INTERRUPTS;
7104 perf_log_throttle(event, 0);
a78ac325
PZ
7105 ret = 1;
7106 }
e050e3f0 7107 }
60db5e09 7108
cdd6c482 7109 if (event->attr.freq) {
def0a9b2 7110 u64 now = perf_clock();
abd50713 7111 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 7112
abd50713 7113 hwc->freq_time_stamp = now;
bd2b5b12 7114
abd50713 7115 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 7116 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
7117 }
7118
475113d9
JO
7119 return ret;
7120}
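/*
 * Rough numbers, not part of core.c: with the default
 * kernel.perf_event_max_sample_rate of 100000 samples/sec and HZ=1000,
 * max_samples_per_tick works out to about 100, so an event that raises
 * more than ~100 PMIs within a single tick is throttled here
 * (PERF_RECORD_THROTTLE is logged and hwc->interrupts is pinned at
 * MAX_INTERRUPTS) until it is unthrottled from the timer tick.
 */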
7121
7122int perf_event_account_interrupt(struct perf_event *event)
7123{
7124 return __perf_event_account_interrupt(event, 1);
7125}
7126
7127/*
7128 * Generic event overflow handling, sampling.
7129 */
7130
7131static int __perf_event_overflow(struct perf_event *event,
7132 int throttle, struct perf_sample_data *data,
7133 struct pt_regs *regs)
7134{
7135 int events = atomic_read(&event->event_limit);
7136 int ret = 0;
7137
7138 /*
7139 * Non-sampling counters might still use the PMI to fold short
7140 * hardware counters, ignore those.
7141 */
7142 if (unlikely(!is_sampling_event(event)))
7143 return 0;
7144
7145 ret = __perf_event_account_interrupt(event, throttle);
7146
2023b359
PZ
7147 /*
7148 * XXX event_limit might not quite work as expected on inherited
cdd6c482 7149 * events
2023b359
PZ
7150 */
7151
cdd6c482
IM
7152 event->pending_kill = POLL_IN;
7153 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 7154 ret = 1;
cdd6c482 7155 event->pending_kill = POLL_HUP;
5aab90ce
JO
7156
7157 perf_event_disable_inatomic(event);
79f14641
PZ
7158 }
7159
aa6a5f3c 7160 READ_ONCE(event->overflow_handler)(event, data, regs);
453f19ee 7161
fed66e2c 7162 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
7163 event->pending_wakeup = 1;
7164 irq_work_queue(&event->pending);
f506b3dc
PZ
7165 }
7166
79f14641 7167 return ret;
f6c7d5fe
PZ
7168}
7169
a8b0ca17 7170int perf_event_overflow(struct perf_event *event,
5622f295
MM
7171 struct perf_sample_data *data,
7172 struct pt_regs *regs)
850bc73f 7173{
a8b0ca17 7174 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
7175}
7176
15dbf27c 7177/*
cdd6c482 7178 * Generic software event infrastructure
15dbf27c
PZ
7179 */
7180
b28ab83c
PZ
7181struct swevent_htable {
7182 struct swevent_hlist *swevent_hlist;
7183 struct mutex hlist_mutex;
7184 int hlist_refcount;
7185
7186 /* Recursion avoidance in each context */
7187 int recursion[PERF_NR_CONTEXTS];
7188};
7189
7190static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7191
7b4b6658 7192/*
cdd6c482
IM
7193 * We directly increment event->count and keep a second value in
7194 * event->hw.period_left to count intervals. This period value
7b4b6658
PZ
7195 * is kept in the range [-sample_period, 0] so that we can use the
7196 * sign as trigger.
7197 */
7198
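/*
 * Example: with sample_period == 100 and period_left == +30 (the counter
 * overshot zero by 30), nr = (100 + 30) / 100 = 1 period has elapsed and
 * period_left is rewound to -70 for the next interval. While period_left
 * is still negative no period has completed and 0 is returned.
 */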
ab573844 7199u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 7200{
cdd6c482 7201 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
7202 u64 period = hwc->last_period;
7203 u64 nr, offset;
7204 s64 old, val;
7205
7206 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
7207
7208again:
e7850595 7209 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
7210 if (val < 0)
7211 return 0;
15dbf27c 7212
7b4b6658
PZ
7213 nr = div64_u64(period + val, period);
7214 offset = nr * period;
7215 val -= offset;
e7850595 7216 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 7217 goto again;
15dbf27c 7218
7b4b6658 7219 return nr;
15dbf27c
PZ
7220}
7221
0cff784a 7222static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 7223 struct perf_sample_data *data,
5622f295 7224 struct pt_regs *regs)
15dbf27c 7225{
cdd6c482 7226 struct hw_perf_event *hwc = &event->hw;
850bc73f 7227 int throttle = 0;
15dbf27c 7228
0cff784a
PZ
7229 if (!overflow)
7230 overflow = perf_swevent_set_period(event);
15dbf27c 7231
7b4b6658
PZ
7232 if (hwc->interrupts == MAX_INTERRUPTS)
7233 return;
15dbf27c 7234
7b4b6658 7235 for (; overflow; overflow--) {
a8b0ca17 7236 if (__perf_event_overflow(event, throttle,
5622f295 7237 data, regs)) {
7b4b6658
PZ
7238 /*
7239 * We inhibit the overflow from happening when
7240 * hwc->interrupts == MAX_INTERRUPTS.
7241 */
7242 break;
7243 }
cf450a73 7244 throttle = 1;
7b4b6658 7245 }
15dbf27c
PZ
7246}
7247
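/*
 * Count @nr occurrences of a software event. Non-sampling events (or
 * samples without register state) only accumulate into event->count.
 * Events that record PERF_SAMPLE_PERIOD (without freq), and period-1
 * events, overflow on every call; anything else folds @nr into
 * period_left and only runs overflow processing once the period has
 * been consumed.
 */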
a4eaf7f1 7248static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 7249 struct perf_sample_data *data,
5622f295 7250 struct pt_regs *regs)
7b4b6658 7251{
cdd6c482 7252 struct hw_perf_event *hwc = &event->hw;
d6d020e9 7253
e7850595 7254 local64_add(nr, &event->count);
d6d020e9 7255
0cff784a
PZ
7256 if (!regs)
7257 return;
7258
6c7e550f 7259 if (!is_sampling_event(event))
7b4b6658 7260 return;
d6d020e9 7261
5d81e5cf
AV
7262 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7263 data->period = nr;
7264 return perf_swevent_overflow(event, 1, data, regs);
7265 } else
7266 data->period = event->hw.last_period;
7267
0cff784a 7268 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 7269 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 7270
e7850595 7271 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 7272 return;
df1a132b 7273
a8b0ca17 7274 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
7275}
7276
f5ffe02e
FW
7277static int perf_exclude_event(struct perf_event *event,
7278 struct pt_regs *regs)
7279{
a4eaf7f1 7280 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 7281 return 1;
a4eaf7f1 7282
f5ffe02e
FW
7283 if (regs) {
7284 if (event->attr.exclude_user && user_mode(regs))
7285 return 1;
7286
7287 if (event->attr.exclude_kernel && !user_mode(regs))
7288 return 1;
7289 }
7290
7291 return 0;
7292}
7293
cdd6c482 7294static int perf_swevent_match(struct perf_event *event,
1c432d89 7295 enum perf_type_id type,
6fb2915d
LZ
7296 u32 event_id,
7297 struct perf_sample_data *data,
7298 struct pt_regs *regs)
15dbf27c 7299{
cdd6c482 7300 if (event->attr.type != type)
a21ca2ca 7301 return 0;
f5ffe02e 7302
cdd6c482 7303 if (event->attr.config != event_id)
15dbf27c
PZ
7304 return 0;
7305
f5ffe02e
FW
7306 if (perf_exclude_event(event, regs))
7307 return 0;
15dbf27c
PZ
7308
7309 return 1;
7310}
7311
76e1d904
FW
7312static inline u64 swevent_hash(u64 type, u32 event_id)
7313{
7314 u64 val = event_id | (type << 32);
7315
7316 return hash_64(val, SWEVENT_HLIST_BITS);
7317}
7318
49f135ed
FW
7319static inline struct hlist_head *
7320__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 7321{
49f135ed
FW
7322 u64 hash = swevent_hash(type, event_id);
7323
7324 return &hlist->heads[hash];
7325}
76e1d904 7326
49f135ed
FW
7327/* For the read side: used when events trigger */
7328static inline struct hlist_head *
b28ab83c 7329find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
7330{
7331 struct swevent_hlist *hlist;
76e1d904 7332
b28ab83c 7333 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
7334 if (!hlist)
7335 return NULL;
7336
49f135ed
FW
7337 return __find_swevent_head(hlist, type, event_id);
7338}
7339
7340/* For the event head insertion and removal in the hlist */
7341static inline struct hlist_head *
b28ab83c 7342find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
7343{
7344 struct swevent_hlist *hlist;
7345 u32 event_id = event->attr.config;
7346 u64 type = event->attr.type;
7347
7348 /*
7349 * Event scheduling is always serialized against hlist allocation
7350 * and release, which makes the protected version suitable here.
7351 * The context lock guarantees that.
7352 */
b28ab83c 7353 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
7354 lockdep_is_held(&event->ctx->lock));
7355 if (!hlist)
7356 return NULL;
7357
7358 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
7359}
7360
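/*
 * Deliver one software event occurrence: look up the hash bucket for
 * (@type, @event_id) under RCU and feed the sample to every event in it
 * that matches on type/config and is not excluded (stopped, or filtered
 * by exclude_user/exclude_kernel).
 */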
7361static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 7362 u64 nr,
76e1d904
FW
7363 struct perf_sample_data *data,
7364 struct pt_regs *regs)
15dbf27c 7365{
4a32fea9 7366 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 7367 struct perf_event *event;
76e1d904 7368 struct hlist_head *head;
15dbf27c 7369
76e1d904 7370 rcu_read_lock();
b28ab83c 7371 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
7372 if (!head)
7373 goto end;
7374
b67bfe0d 7375 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 7376 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 7377 perf_swevent_event(event, nr, data, regs);
15dbf27c 7378 }
76e1d904
FW
7379end:
7380 rcu_read_unlock();
15dbf27c
PZ
7381}
7382
86038c5e
PZI
7383DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7384
4ed7c92d 7385int perf_swevent_get_recursion_context(void)
96f6d444 7386{
4a32fea9 7387 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 7388
b28ab83c 7389 return get_recursion_context(swhash->recursion);
96f6d444 7390}
645e8cc0 7391EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 7392
98b5c2c6 7393void perf_swevent_put_recursion_context(int rctx)
15dbf27c 7394{
4a32fea9 7395 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 7396
b28ab83c 7397 put_recursion_context(swhash->recursion, rctx);
ce71b9df 7398}
15dbf27c 7399
86038c5e 7400void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 7401{
a4234bfc 7402 struct perf_sample_data data;
4ed7c92d 7403
86038c5e 7404 if (WARN_ON_ONCE(!regs))
4ed7c92d 7405 return;
a4234bfc 7406
fd0d000b 7407 perf_sample_data_init(&data, addr, 0);
a8b0ca17 7408 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
7409}
7410
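/*
 * Like ___perf_sw_event() but wrapped in the per-CPU recursion guard:
 * if a software event fires while another one is already being handled
 * in the same context (task/softirq/hardirq/NMI) it is dropped instead
 * of recursing.
 */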
7411void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7412{
7413 int rctx;
7414
7415 preempt_disable_notrace();
7416 rctx = perf_swevent_get_recursion_context();
7417 if (unlikely(rctx < 0))
7418 goto fail;
7419
7420 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
7421
7422 perf_swevent_put_recursion_context(rctx);
86038c5e 7423fail:
1c024eca 7424 preempt_enable_notrace();
b8e83514
PZ
7425}
7426
cdd6c482 7427static void perf_swevent_read(struct perf_event *event)
15dbf27c 7428{
15dbf27c
PZ
7429}
7430
a4eaf7f1 7431static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 7432{
4a32fea9 7433 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 7434 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
7435 struct hlist_head *head;
7436
6c7e550f 7437 if (is_sampling_event(event)) {
7b4b6658 7438 hwc->last_period = hwc->sample_period;
cdd6c482 7439 perf_swevent_set_period(event);
7b4b6658 7440 }
76e1d904 7441
a4eaf7f1
PZ
7442 hwc->state = !(flags & PERF_EF_START);
7443
b28ab83c 7444 head = find_swevent_head(swhash, event);
12ca6ad2 7445 if (WARN_ON_ONCE(!head))
76e1d904
FW
7446 return -EINVAL;
7447
7448 hlist_add_head_rcu(&event->hlist_entry, head);
6a694a60 7449 perf_event_update_userpage(event);
76e1d904 7450
15dbf27c
PZ
7451 return 0;
7452}
7453
a4eaf7f1 7454static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 7455{
76e1d904 7456 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
7457}
7458
a4eaf7f1 7459static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 7460{
a4eaf7f1 7461 event->hw.state = 0;
d6d020e9 7462}
aa9c4c0f 7463
a4eaf7f1 7464static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 7465{
a4eaf7f1 7466 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
7467}
7468
49f135ed
FW
7469/* Deref the hlist from the update side */
7470static inline struct swevent_hlist *
b28ab83c 7471swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 7472{
b28ab83c
PZ
7473 return rcu_dereference_protected(swhash->swevent_hlist,
7474 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
7475}
7476
b28ab83c 7477static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 7478{
b28ab83c 7479 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 7480
49f135ed 7481 if (!hlist)
76e1d904
FW
7482 return;
7483
70691d4a 7484 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
fa4bbc4c 7485 kfree_rcu(hlist, rcu_head);
76e1d904
FW
7486}
7487
3b364d7b 7488static void swevent_hlist_put_cpu(int cpu)
76e1d904 7489{
b28ab83c 7490 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 7491
b28ab83c 7492 mutex_lock(&swhash->hlist_mutex);
76e1d904 7493
b28ab83c
PZ
7494 if (!--swhash->hlist_refcount)
7495 swevent_hlist_release(swhash);
76e1d904 7496
b28ab83c 7497 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
7498}
7499
3b364d7b 7500static void swevent_hlist_put(void)
76e1d904
FW
7501{
7502 int cpu;
7503
76e1d904 7504 for_each_possible_cpu(cpu)
3b364d7b 7505 swevent_hlist_put_cpu(cpu);
76e1d904
FW
7506}
7507
3b364d7b 7508static int swevent_hlist_get_cpu(int cpu)
76e1d904 7509{
b28ab83c 7510 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
7511 int err = 0;
7512
b28ab83c 7513 mutex_lock(&swhash->hlist_mutex);
b28ab83c 7514 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
7515 struct swevent_hlist *hlist;
7516
7517 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7518 if (!hlist) {
7519 err = -ENOMEM;
7520 goto exit;
7521 }
b28ab83c 7522 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 7523 }
b28ab83c 7524 swhash->hlist_refcount++;
9ed6060d 7525exit:
b28ab83c 7526 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
7527
7528 return err;
7529}
7530
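/*
 * Take a reference on the software-event hash table of every possible
 * CPU, allocating the tables that don't exist yet; on allocation failure
 * the references already taken are dropped again.
 */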
3b364d7b 7531static int swevent_hlist_get(void)
76e1d904 7532{
3b364d7b 7533 int err, cpu, failed_cpu;
76e1d904 7534
76e1d904
FW
7535 get_online_cpus();
7536 for_each_possible_cpu(cpu) {
3b364d7b 7537 err = swevent_hlist_get_cpu(cpu);
76e1d904
FW
7538 if (err) {
7539 failed_cpu = cpu;
7540 goto fail;
7541 }
7542 }
7543 put_online_cpus();
7544
7545 return 0;
9ed6060d 7546fail:
76e1d904
FW
7547 for_each_possible_cpu(cpu) {
7548 if (cpu == failed_cpu)
7549 break;
3b364d7b 7550 swevent_hlist_put_cpu(cpu);
76e1d904
FW
7551 }
7552
7553 put_online_cpus();
7554 return err;
7555}
7556
c5905afb 7557struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 7558
b0a873eb
PZ
7559static void sw_perf_event_destroy(struct perf_event *event)
7560{
7561 u64 event_id = event->attr.config;
95476b64 7562
b0a873eb
PZ
7563 WARN_ON(event->parent);
7564
c5905afb 7565 static_key_slow_dec(&perf_swevent_enabled[event_id]);
3b364d7b 7566 swevent_hlist_put();
b0a873eb
PZ
7567}
7568
7569static int perf_swevent_init(struct perf_event *event)
7570{
8176cced 7571 u64 event_id = event->attr.config;
b0a873eb
PZ
7572
7573 if (event->attr.type != PERF_TYPE_SOFTWARE)
7574 return -ENOENT;
7575
2481c5fa
SE
7576 /*
7577 * no branch sampling for software events
7578 */
7579 if (has_branch_stack(event))
7580 return -EOPNOTSUPP;
7581
b0a873eb
PZ
7582 switch (event_id) {
7583 case PERF_COUNT_SW_CPU_CLOCK:
7584 case PERF_COUNT_SW_TASK_CLOCK:
7585 return -ENOENT;
7586
7587 default:
7588 break;
7589 }
7590
ce677831 7591 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
7592 return -ENOENT;
7593
7594 if (!event->parent) {
7595 int err;
7596
3b364d7b 7597 err = swevent_hlist_get();
b0a873eb
PZ
7598 if (err)
7599 return err;
7600
c5905afb 7601 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
7602 event->destroy = sw_perf_event_destroy;
7603 }
7604
7605 return 0;
7606}
7607
7608static struct pmu perf_swevent = {
89a1e187 7609 .task_ctx_nr = perf_sw_context,
95476b64 7610
34f43927
PZ
7611 .capabilities = PERF_PMU_CAP_NO_NMI,
7612
b0a873eb 7613 .event_init = perf_swevent_init,
a4eaf7f1
PZ
7614 .add = perf_swevent_add,
7615 .del = perf_swevent_del,
7616 .start = perf_swevent_start,
7617 .stop = perf_swevent_stop,
1c024eca 7618 .read = perf_swevent_read,
1c024eca
PZ
7619};
7620
b0a873eb
PZ
7621#ifdef CONFIG_EVENT_TRACING
7622
1c024eca
PZ
7623static int perf_tp_filter_match(struct perf_event *event,
7624 struct perf_sample_data *data)
7625{
7e3f977e 7626 void *record = data->raw->frag.data;
1c024eca 7627
b71b437e
PZ
7628 /* only top level events have filters set */
7629 if (event->parent)
7630 event = event->parent;
7631
1c024eca
PZ
7632 if (likely(!event->filter) || filter_match_preds(event->filter, record))
7633 return 1;
7634 return 0;
7635}
7636
7637static int perf_tp_event_match(struct perf_event *event,
7638 struct perf_sample_data *data,
7639 struct pt_regs *regs)
7640{
a0f7d0f7
FW
7641 if (event->hw.state & PERF_HES_STOPPED)
7642 return 0;
580d607c
PZ
7643 /*
7644 * All tracepoints are from kernel-space.
7645 */
7646 if (event->attr.exclude_kernel)
1c024eca
PZ
7647 return 0;
7648
7649 if (!perf_tp_filter_match(event, data))
7650 return 0;
7651
7652 return 1;
7653}
7654
85b67bcb
AS
7655void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7656 struct trace_event_call *call, u64 count,
7657 struct pt_regs *regs, struct hlist_head *head,
7658 struct task_struct *task)
7659{
7660 struct bpf_prog *prog = call->prog;
7661
7662 if (prog) {
7663 *(struct pt_regs **)raw_data = regs;
7664 if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7665 perf_swevent_put_recursion_context(rctx);
7666 return;
7667 }
7668 }
7669 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7670 rctx, task);
7671}
7672EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7673
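/*
 * Entry point for tracepoint events: wrap the raw tracepoint record in a
 * perf_raw_record, deliver it to every event hashed on this tracepoint,
 * and, when a target task is supplied, also walk that task's software
 * context for matching tracepoint events before dropping the recursion
 * context taken by the caller.
 */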
1e1dcd93 7674void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
e6dab5ff
AV
7675 struct pt_regs *regs, struct hlist_head *head, int rctx,
7676 struct task_struct *task)
95476b64
FW
7677{
7678 struct perf_sample_data data;
1c024eca 7679 struct perf_event *event;
1c024eca 7680
95476b64 7681 struct perf_raw_record raw = {
7e3f977e
DB
7682 .frag = {
7683 .size = entry_size,
7684 .data = record,
7685 },
95476b64
FW
7686 };
7687
1e1dcd93 7688 perf_sample_data_init(&data, 0, 0);
95476b64
FW
7689 data.raw = &raw;
7690
1e1dcd93
AS
7691 perf_trace_buf_update(record, event_type);
7692
b67bfe0d 7693 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 7694 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 7695 perf_swevent_event(event, count, &data, regs);
4f41c013 7696 }
ecc55f84 7697
e6dab5ff
AV
7698 /*
7699 * If we got specified a target task, also iterate its context and
7700 * deliver this event there too.
7701 */
7702 if (task && task != current) {
7703 struct perf_event_context *ctx;
7704 struct trace_entry *entry = record;
7705
7706 rcu_read_lock();
7707 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7708 if (!ctx)
7709 goto unlock;
7710
7711 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7712 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7713 continue;
7714 if (event->attr.config != entry->type)
7715 continue;
7716 if (perf_tp_event_match(event, &data, regs))
7717 perf_swevent_event(event, count, &data, regs);
7718 }
7719unlock:
7720 rcu_read_unlock();
7721 }
7722
ecc55f84 7723 perf_swevent_put_recursion_context(rctx);
95476b64
FW
7724}
7725EXPORT_SYMBOL_GPL(perf_tp_event);
7726
cdd6c482 7727static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 7728{
1c024eca 7729 perf_trace_destroy(event);
e077df4f
PZ
7730}
7731
b0a873eb 7732static int perf_tp_event_init(struct perf_event *event)
e077df4f 7733{
76e1d904
FW
7734 int err;
7735
b0a873eb
PZ
7736 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7737 return -ENOENT;
7738
2481c5fa
SE
7739 /*
7740 * no branch sampling for tracepoint events
7741 */
7742 if (has_branch_stack(event))
7743 return -EOPNOTSUPP;
7744
1c024eca
PZ
7745 err = perf_trace_init(event);
7746 if (err)
b0a873eb 7747 return err;
e077df4f 7748
cdd6c482 7749 event->destroy = tp_perf_event_destroy;
e077df4f 7750
b0a873eb
PZ
7751 return 0;
7752}
7753
7754static struct pmu perf_tracepoint = {
89a1e187
PZ
7755 .task_ctx_nr = perf_sw_context,
7756
b0a873eb 7757 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
7758 .add = perf_trace_add,
7759 .del = perf_trace_del,
7760 .start = perf_swevent_start,
7761 .stop = perf_swevent_stop,
b0a873eb 7762 .read = perf_swevent_read,
b0a873eb
PZ
7763};
7764
7765static inline void perf_tp_register(void)
7766{
2e80a82a 7767 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 7768}
6fb2915d 7769
6fb2915d
LZ
7770static void perf_event_free_filter(struct perf_event *event)
7771{
7772 ftrace_profile_free_filter(event);
7773}
7774
aa6a5f3c
AS
7775#ifdef CONFIG_BPF_SYSCALL
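/*
 * Overflow handler used when a BPF program is attached to a hardware or
 * software event: run the program on the sample (guarded against
 * recursion via bpf_prog_active) and only fall through to the original
 * overflow handler when the program returns non-zero.
 */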
7776static void bpf_overflow_handler(struct perf_event *event,
7777 struct perf_sample_data *data,
7778 struct pt_regs *regs)
7779{
7780 struct bpf_perf_event_data_kern ctx = {
7781 .data = data,
7782 .regs = regs,
7783 };
7784 int ret = 0;
7785
7786 preempt_disable();
7787 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
7788 goto out;
7789 rcu_read_lock();
88575199 7790 ret = BPF_PROG_RUN(event->prog, &ctx);
aa6a5f3c
AS
7791 rcu_read_unlock();
7792out:
7793 __this_cpu_dec(bpf_prog_active);
7794 preempt_enable();
7795 if (!ret)
7796 return;
7797
7798 event->orig_overflow_handler(event, data, regs);
7799}
7800
7801static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
7802{
7803 struct bpf_prog *prog;
7804
7805 if (event->overflow_handler_context)
7806 /* hw breakpoint or kernel counter */
7807 return -EINVAL;
7808
7809 if (event->prog)
7810 return -EEXIST;
7811
7812 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
7813 if (IS_ERR(prog))
7814 return PTR_ERR(prog);
7815
7816 event->prog = prog;
7817 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
7818 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
7819 return 0;
7820}
7821
7822static void perf_event_free_bpf_handler(struct perf_event *event)
7823{
7824 struct bpf_prog *prog = event->prog;
7825
7826 if (!prog)
7827 return;
7828
7829 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
7830 event->prog = NULL;
7831 bpf_prog_put(prog);
7832}
7833#else
7834static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
7835{
7836 return -EOPNOTSUPP;
7837}
7838static void perf_event_free_bpf_handler(struct perf_event *event)
7839{
7840}
7841#endif
7842
2541517c
AS
7843static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7844{
98b5c2c6 7845 bool is_kprobe, is_tracepoint;
2541517c
AS
7846 struct bpf_prog *prog;
7847
aa6a5f3c
AS
7848 if (event->attr.type == PERF_TYPE_HARDWARE ||
7849 event->attr.type == PERF_TYPE_SOFTWARE)
7850 return perf_event_set_bpf_handler(event, prog_fd);
7851
2541517c
AS
7852 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7853 return -EINVAL;
7854
7855 if (event->tp_event->prog)
7856 return -EEXIST;
7857
98b5c2c6
AS
7858 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
7859 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
7860 if (!is_kprobe && !is_tracepoint)
7861 /* bpf programs can only be attached to u/kprobe or tracepoint */
2541517c
AS
7862 return -EINVAL;
7863
7864 prog = bpf_prog_get(prog_fd);
7865 if (IS_ERR(prog))
7866 return PTR_ERR(prog);
7867
98b5c2c6
AS
7868 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
7869 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
2541517c
AS
7870 /* valid fd, but invalid bpf program type */
7871 bpf_prog_put(prog);
7872 return -EINVAL;
7873 }
7874
32bbe007
AS
7875 if (is_tracepoint) {
7876 int off = trace_event_get_offsets(event->tp_event);
7877
7878 if (prog->aux->max_ctx_offset > off) {
7879 bpf_prog_put(prog);
7880 return -EACCES;
7881 }
7882 }
2541517c
AS
7883 event->tp_event->prog = prog;
7884
7885 return 0;
7886}
7887
7888static void perf_event_free_bpf_prog(struct perf_event *event)
7889{
7890 struct bpf_prog *prog;
7891
aa6a5f3c
AS
7892 perf_event_free_bpf_handler(event);
7893
2541517c
AS
7894 if (!event->tp_event)
7895 return;
7896
7897 prog = event->tp_event->prog;
7898 if (prog) {
7899 event->tp_event->prog = NULL;
1aacde3d 7900 bpf_prog_put(prog);
2541517c
AS
7901 }
7902}
7903
e077df4f 7904#else
6fb2915d 7905
b0a873eb 7906static inline void perf_tp_register(void)
e077df4f 7907{
e077df4f 7908}
6fb2915d 7909
6fb2915d
LZ
7910static void perf_event_free_filter(struct perf_event *event)
7911{
7912}
7913
2541517c
AS
7914static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7915{
7916 return -ENOENT;
7917}
7918
7919static void perf_event_free_bpf_prog(struct perf_event *event)
7920{
7921}
07b139c8 7922#endif /* CONFIG_EVENT_TRACING */
e077df4f 7923
24f1e32c 7924#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 7925void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 7926{
f5ffe02e
FW
7927 struct perf_sample_data sample;
7928 struct pt_regs *regs = data;
7929
fd0d000b 7930 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 7931
a4eaf7f1 7932 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 7933 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
7934}
7935#endif
7936
375637bc
AS
7937/*
7938 * Allocate a new address filter
7939 */
7940static struct perf_addr_filter *
7941perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
7942{
7943 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
7944 struct perf_addr_filter *filter;
7945
7946 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
7947 if (!filter)
7948 return NULL;
7949
7950 INIT_LIST_HEAD(&filter->entry);
7951 list_add_tail(&filter->entry, filters);
7952
7953 return filter;
7954}
7955
7956static void free_filters_list(struct list_head *filters)
7957{
7958 struct perf_addr_filter *filter, *iter;
7959
7960 list_for_each_entry_safe(filter, iter, filters, entry) {
7961 if (filter->inode)
7962 iput(filter->inode);
7963 list_del(&filter->entry);
7964 kfree(filter);
7965 }
7966}
7967
7968/*
7969 * Free existing address filters and optionally install new ones
7970 */
7971static void perf_addr_filters_splice(struct perf_event *event,
7972 struct list_head *head)
7973{
7974 unsigned long flags;
7975 LIST_HEAD(list);
7976
7977 if (!has_addr_filter(event))
7978 return;
7979
7980 /* don't bother with children, they don't have their own filters */
7981 if (event->parent)
7982 return;
7983
7984 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
7985
7986 list_splice_init(&event->addr_filters.list, &list);
7987 if (head)
7988 list_splice(head, &event->addr_filters.list);
7989
7990 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
7991
7992 free_filters_list(&list);
7993}
7994
7995/*
7996 * Scan through mm's vmas and see if one of them matches the
7997 * @filter; if so, adjust filter's address range.
7998 * Called with mm::mmap_sem down for reading.
7999 */
8000static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
8001 struct mm_struct *mm)
8002{
8003 struct vm_area_struct *vma;
8004
8005 for (vma = mm->mmap; vma; vma = vma->vm_next) {
8006 struct file *file = vma->vm_file;
8007 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8008 unsigned long vma_size = vma->vm_end - vma->vm_start;
8009
8010 if (!file)
8011 continue;
8012
8013 if (!perf_addr_filter_match(filter, file, off, vma_size))
8014 continue;
8015
8016 return vma->vm_start;
8017 }
8018
8019 return 0;
8020}
8021
8022/*
8023 * Update event's address range filters based on the
8024 * task's existing mappings, if any.
8025 */
8026static void perf_event_addr_filters_apply(struct perf_event *event)
8027{
8028 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8029 struct task_struct *task = READ_ONCE(event->ctx->task);
8030 struct perf_addr_filter *filter;
8031 struct mm_struct *mm = NULL;
8032 unsigned int count = 0;
8033 unsigned long flags;
8034
8035 /*
8036 * We may observe TASK_TOMBSTONE, which means that the event tear-down
8037 * will stop on the parent's child_mutex that our caller is also holding
8038 */
8039 if (task == TASK_TOMBSTONE)
8040 return;
8041
8042 mm = get_task_mm(event->ctx->task);
8043 if (!mm)
8044 goto restart;
8045
8046 down_read(&mm->mmap_sem);
8047
8048 raw_spin_lock_irqsave(&ifh->lock, flags);
8049 list_for_each_entry(filter, &ifh->list, entry) {
8050 event->addr_filters_offs[count] = 0;
8051
99f5bc9b
MP
8052 /*
8053 * Adjust base offset if the filter is associated to a binary
8054 * that needs to be mapped:
8055 */
8056 if (filter->inode)
375637bc
AS
8057 event->addr_filters_offs[count] =
8058 perf_addr_filter_apply(filter, mm);
8059
8060 count++;
8061 }
8062
8063 event->addr_filters_gen++;
8064 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8065
8066 up_read(&mm->mmap_sem);
8067
8068 mmput(mm);
8069
8070restart:
767ae086 8071 perf_event_stop(event, 1);
375637bc
AS
8072}
8073
8074/*
8075 * Address range filtering: limiting the data to certain
8076 * instruction address ranges. Filters are ioctl()ed to us from
8078 * userspace as ASCII strings.
8078 *
8079 * Filter string format:
8080 *
8081 * ACTION RANGE_SPEC
8082 * where ACTION is one of the following:
8083 * * "filter": limit the trace to this region
8084 * * "start": start tracing from this address
8085 * * "stop": stop tracing at this address/region;
8086 * RANGE_SPEC is
8087 * * for kernel addresses: <start address>[/<size>]
8088 * * for object files: <start address>[/<size>]@</path/to/object/file>
8089 *
8090 * if <size> is not specified, the range is treated as a single address.
8091 */
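/*
 * For example, "filter 0x1000/0x2000@/path/to/object/file" limits tracing
 * to a 0x2000-byte range starting at 0x1000 within the named object,
 * while "stop 0xffffffff81000000" stops tracing at that single kernel
 * address.
 */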
8092enum {
e96271f3 8093 IF_ACT_NONE = -1,
375637bc
AS
8094 IF_ACT_FILTER,
8095 IF_ACT_START,
8096 IF_ACT_STOP,
8097 IF_SRC_FILE,
8098 IF_SRC_KERNEL,
8099 IF_SRC_FILEADDR,
8100 IF_SRC_KERNELADDR,
8101};
8102
8103enum {
8104 IF_STATE_ACTION = 0,
8105 IF_STATE_SOURCE,
8106 IF_STATE_END,
8107};
8108
8109static const match_table_t if_tokens = {
8110 { IF_ACT_FILTER, "filter" },
8111 { IF_ACT_START, "start" },
8112 { IF_ACT_STOP, "stop" },
8113 { IF_SRC_FILE, "%u/%u@%s" },
8114 { IF_SRC_KERNEL, "%u/%u" },
8115 { IF_SRC_FILEADDR, "%u@%s" },
8116 { IF_SRC_KERNELADDR, "%u" },
e96271f3 8117 { IF_ACT_NONE, NULL },
375637bc
AS
8118};
8119
8120/*
8121 * Address filter string parser
8122 */
8123static int
8124perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
8125 struct list_head *filters)
8126{
8127 struct perf_addr_filter *filter = NULL;
8128 char *start, *orig, *filename = NULL;
8129 struct path path;
8130 substring_t args[MAX_OPT_ARGS];
8131 int state = IF_STATE_ACTION, token;
8132 unsigned int kernel = 0;
8133 int ret = -EINVAL;
8134
8135 orig = fstr = kstrdup(fstr, GFP_KERNEL);
8136 if (!fstr)
8137 return -ENOMEM;
8138
8139 while ((start = strsep(&fstr, " ,\n")) != NULL) {
8140 ret = -EINVAL;
8141
8142 if (!*start)
8143 continue;
8144
8145 /* filter definition begins */
8146 if (state == IF_STATE_ACTION) {
8147 filter = perf_addr_filter_new(event, filters);
8148 if (!filter)
8149 goto fail;
8150 }
8151
8152 token = match_token(start, if_tokens, args);
8153 switch (token) {
8154 case IF_ACT_FILTER:
8155 case IF_ACT_START:
8156 filter->filter = 1;
8157
8158 case IF_ACT_STOP:
8159 if (state != IF_STATE_ACTION)
8160 goto fail;
8161
8162 state = IF_STATE_SOURCE;
8163 break;
8164
8165 case IF_SRC_KERNELADDR:
8166 case IF_SRC_KERNEL:
8167 kernel = 1;
8168
8169 case IF_SRC_FILEADDR:
8170 case IF_SRC_FILE:
8171 if (state != IF_STATE_SOURCE)
8172 goto fail;
8173
8174 if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
8175 filter->range = 1;
8176
8177 *args[0].to = 0;
8178 ret = kstrtoul(args[0].from, 0, &filter->offset);
8179 if (ret)
8180 goto fail;
8181
8182 if (filter->range) {
8183 *args[1].to = 0;
8184 ret = kstrtoul(args[1].from, 0, &filter->size);
8185 if (ret)
8186 goto fail;
8187 }
8188
4059ffd0
MP
8189 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
8190 int fpos = filter->range ? 2 : 1;
8191
8192 filename = match_strdup(&args[fpos]);
375637bc
AS
8193 if (!filename) {
8194 ret = -ENOMEM;
8195 goto fail;
8196 }
8197 }
8198
8199 state = IF_STATE_END;
8200 break;
8201
8202 default:
8203 goto fail;
8204 }
8205
8206 /*
8207 * Filter definition is fully parsed, validate and install it.
8208 * Make sure that it doesn't contradict itself or the event's
8209 * attribute.
8210 */
8211 if (state == IF_STATE_END) {
8212 if (kernel && event->attr.exclude_kernel)
8213 goto fail;
8214
8215 if (!kernel) {
8216 if (!filename)
8217 goto fail;
8218
8219 /* look up the path and grab its inode */
8220 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
8221 if (ret)
8222 goto fail_free_name;
8223
8224 filter->inode = igrab(d_inode(path.dentry));
8225 path_put(&path);
8226 kfree(filename);
8227 filename = NULL;
8228
8229 ret = -EINVAL;
8230 if (!filter->inode ||
8231 !S_ISREG(filter->inode->i_mode))
8232 /* free_filters_list() will iput() */
8233 goto fail;
8234 }
8235
8236 /* ready to consume more filters */
8237 state = IF_STATE_ACTION;
8238 filter = NULL;
8239 }
8240 }
8241
8242 if (state != IF_STATE_ACTION)
8243 goto fail;
8244
8245 kfree(orig);
8246
8247 return 0;
8248
8249fail_free_name:
8250 kfree(filename);
8251fail:
8252 free_filters_list(filters);
8253 kfree(orig);
8254
8255 return ret;
8256}
8257
8258static int
8259perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
8260{
8261 LIST_HEAD(filters);
8262 int ret;
8263
8264 /*
8265 * Since this is called in perf_ioctl() path, we're already holding
8266 * ctx::mutex.
8267 */
8268 lockdep_assert_held(&event->ctx->mutex);
8269
8270 if (WARN_ON_ONCE(event->parent))
8271 return -EINVAL;
8272
8273 /*
8274 * For now, we only support filtering in per-task events; doing so
8275 * for CPU-wide events requires additional context switching trickery,
8276 * since same object code will be mapped at different virtual
8277 * addresses in different processes.
8278 */
8279 if (!event->ctx->task)
8280 return -EOPNOTSUPP;
8281
8282 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8283 if (ret)
8284 return ret;
8285
8286 ret = event->pmu->addr_filters_validate(&filters);
8287 if (ret) {
8288 free_filters_list(&filters);
8289 return ret;
8290 }
8291
8292 /* remove existing filters, if any */
8293 perf_addr_filters_splice(event, &filters);
8294
8295 /* install new filters */
8296 perf_event_for_each_child(event, perf_event_addr_filters_apply);
8297
8298 return ret;
8299}
8300
c796bbbe
AS
8301static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8302{
8303 char *filter_str;
8304 int ret = -EINVAL;
8305
375637bc
AS
8306 if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8307 !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8308 !has_addr_filter(event))
c796bbbe
AS
8309 return -EINVAL;
8310
8311 filter_str = strndup_user(arg, PAGE_SIZE);
8312 if (IS_ERR(filter_str))
8313 return PTR_ERR(filter_str);
8314
8315 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8316 event->attr.type == PERF_TYPE_TRACEPOINT)
8317 ret = ftrace_profile_set_filter(event, event->attr.config,
8318 filter_str);
375637bc
AS
8319 else if (has_addr_filter(event))
8320 ret = perf_event_set_addr_filter(event, filter_str);
c796bbbe
AS
8321
8322 kfree(filter_str);
8323 return ret;
8324}
8325
b0a873eb
PZ
8326/*
8327 * hrtimer based swevent callback
8328 */
f29ac756 8329
b0a873eb 8330static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 8331{
b0a873eb
PZ
8332 enum hrtimer_restart ret = HRTIMER_RESTART;
8333 struct perf_sample_data data;
8334 struct pt_regs *regs;
8335 struct perf_event *event;
8336 u64 period;
f29ac756 8337
b0a873eb 8338 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
8339
8340 if (event->state != PERF_EVENT_STATE_ACTIVE)
8341 return HRTIMER_NORESTART;
8342
b0a873eb 8343 event->pmu->read(event);
f344011c 8344
fd0d000b 8345 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
8346 regs = get_irq_regs();
8347
8348 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 8349 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 8350 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
8351 ret = HRTIMER_NORESTART;
8352 }
24f1e32c 8353
b0a873eb
PZ
8354 period = max_t(u64, 10000, event->hw.sample_period);
8355 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 8356
b0a873eb 8357 return ret;
f29ac756
PZ
8358}
8359
b0a873eb 8360static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 8361{
b0a873eb 8362 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
8363 s64 period;
8364
8365 if (!is_sampling_event(event))
8366 return;
f5ffe02e 8367
5d508e82
FBH
8368 period = local64_read(&hwc->period_left);
8369 if (period) {
8370 if (period < 0)
8371 period = 10000;
fa407f35 8372
5d508e82
FBH
8373 local64_set(&hwc->period_left, 0);
8374 } else {
8375 period = max_t(u64, 10000, hwc->sample_period);
8376 }
3497d206
TG
8377 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8378 HRTIMER_MODE_REL_PINNED);
24f1e32c 8379}
b0a873eb
PZ
8380
8381static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 8382{
b0a873eb
PZ
8383 struct hw_perf_event *hwc = &event->hw;
8384
6c7e550f 8385 if (is_sampling_event(event)) {
b0a873eb 8386 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 8387 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
8388
8389 hrtimer_cancel(&hwc->hrtimer);
8390 }
24f1e32c
FW
8391}
8392
ba3dd36c
PZ
8393static void perf_swevent_init_hrtimer(struct perf_event *event)
8394{
8395 struct hw_perf_event *hwc = &event->hw;
8396
8397 if (!is_sampling_event(event))
8398 return;
8399
8400 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8401 hwc->hrtimer.function = perf_swevent_hrtimer;
8402
8403 /*
8404 * Since hrtimers have a fixed rate, we can do a static freq->period
8405 * mapping and avoid the whole period adjust feedback stuff.
8406 */
8407 if (event->attr.freq) {
8408 long freq = event->attr.sample_freq;
8409
8410 event->attr.sample_period = NSEC_PER_SEC / freq;
8411 hwc->sample_period = event->attr.sample_period;
8412 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 8413 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
8414 event->attr.freq = 0;
8415 }
8416}
8417
b0a873eb
PZ
8418/*
8419 * Software event: cpu wall time clock
8420 */
8421
8422static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 8423{
b0a873eb
PZ
8424 s64 prev;
8425 u64 now;
8426
a4eaf7f1 8427 now = local_clock();
b0a873eb
PZ
8428 prev = local64_xchg(&event->hw.prev_count, now);
8429 local64_add(now - prev, &event->count);
24f1e32c 8430}
24f1e32c 8431
a4eaf7f1 8432static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 8433{
a4eaf7f1 8434 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 8435 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
8436}
8437
a4eaf7f1 8438static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 8439{
b0a873eb
PZ
8440 perf_swevent_cancel_hrtimer(event);
8441 cpu_clock_event_update(event);
8442}
f29ac756 8443
a4eaf7f1
PZ
8444static int cpu_clock_event_add(struct perf_event *event, int flags)
8445{
8446 if (flags & PERF_EF_START)
8447 cpu_clock_event_start(event, flags);
6a694a60 8448 perf_event_update_userpage(event);
a4eaf7f1
PZ
8449
8450 return 0;
8451}
8452
8453static void cpu_clock_event_del(struct perf_event *event, int flags)
8454{
8455 cpu_clock_event_stop(event, flags);
8456}
8457
b0a873eb
PZ
8458static void cpu_clock_event_read(struct perf_event *event)
8459{
8460 cpu_clock_event_update(event);
8461}
f344011c 8462
b0a873eb
PZ
8463static int cpu_clock_event_init(struct perf_event *event)
8464{
8465 if (event->attr.type != PERF_TYPE_SOFTWARE)
8466 return -ENOENT;
8467
8468 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8469 return -ENOENT;
8470
2481c5fa
SE
8471 /*
8472 * no branch sampling for software events
8473 */
8474 if (has_branch_stack(event))
8475 return -EOPNOTSUPP;
8476
ba3dd36c
PZ
8477 perf_swevent_init_hrtimer(event);
8478
b0a873eb 8479 return 0;
f29ac756
PZ
8480}
8481
b0a873eb 8482static struct pmu perf_cpu_clock = {
89a1e187
PZ
8483 .task_ctx_nr = perf_sw_context,
8484
34f43927
PZ
8485 .capabilities = PERF_PMU_CAP_NO_NMI,
8486
b0a873eb 8487 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
8488 .add = cpu_clock_event_add,
8489 .del = cpu_clock_event_del,
8490 .start = cpu_clock_event_start,
8491 .stop = cpu_clock_event_stop,
b0a873eb
PZ
8492 .read = cpu_clock_event_read,
8493};
8494
8495/*
8496 * Software event: task time clock
8497 */
8498
8499static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 8500{
b0a873eb
PZ
8501 u64 prev;
8502 s64 delta;
5c92d124 8503
b0a873eb
PZ
8504 prev = local64_xchg(&event->hw.prev_count, now);
8505 delta = now - prev;
8506 local64_add(delta, &event->count);
8507}
5c92d124 8508
a4eaf7f1 8509static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 8510{
a4eaf7f1 8511 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 8512 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
8513}
8514
a4eaf7f1 8515static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
8516{
8517 perf_swevent_cancel_hrtimer(event);
8518 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
8519}
8520
8521static int task_clock_event_add(struct perf_event *event, int flags)
8522{
8523 if (flags & PERF_EF_START)
8524 task_clock_event_start(event, flags);
6a694a60 8525 perf_event_update_userpage(event);
b0a873eb 8526
a4eaf7f1
PZ
8527 return 0;
8528}
8529
8530static void task_clock_event_del(struct perf_event *event, int flags)
8531{
8532 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
8533}
8534
8535static void task_clock_event_read(struct perf_event *event)
8536{
768a06e2
PZ
8537 u64 now = perf_clock();
8538 u64 delta = now - event->ctx->timestamp;
8539 u64 time = event->ctx->time + delta;
b0a873eb
PZ
8540
8541 task_clock_event_update(event, time);
8542}
8543
8544static int task_clock_event_init(struct perf_event *event)
6fb2915d 8545{
b0a873eb
PZ
8546 if (event->attr.type != PERF_TYPE_SOFTWARE)
8547 return -ENOENT;
8548
8549 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8550 return -ENOENT;
8551
2481c5fa
SE
8552 /*
8553 * no branch sampling for software events
8554 */
8555 if (has_branch_stack(event))
8556 return -EOPNOTSUPP;
8557
ba3dd36c
PZ
8558 perf_swevent_init_hrtimer(event);
8559
b0a873eb 8560 return 0;
6fb2915d
LZ
8561}
8562
b0a873eb 8563static struct pmu perf_task_clock = {
89a1e187
PZ
8564 .task_ctx_nr = perf_sw_context,
8565
34f43927
PZ
8566 .capabilities = PERF_PMU_CAP_NO_NMI,
8567
b0a873eb 8568 .event_init = task_clock_event_init,
a4eaf7f1
PZ
8569 .add = task_clock_event_add,
8570 .del = task_clock_event_del,
8571 .start = task_clock_event_start,
8572 .stop = task_clock_event_stop,
b0a873eb
PZ
8573 .read = task_clock_event_read,
8574};
6fb2915d 8575
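/*
 * Default callbacks installed by perf_pmu_register() for PMUs that do not
 * provide their own: the transaction stubs bracket PERF_PMU_TXN_ADD
 * batches with perf_pmu_disable()/perf_pmu_enable() when the PMU has
 * those, and degrade to no-ops otherwise.
 */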
ad5133b7 8576static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 8577{
e077df4f 8578}
6fb2915d 8579
fbbe0701
SB
8580static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8581{
8582}
8583
ad5133b7 8584static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 8585{
ad5133b7 8586 return 0;
6fb2915d
LZ
8587}
8588
18ab2cd3 8589static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
fbbe0701
SB
8590
8591static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 8592{
fbbe0701
SB
8593 __this_cpu_write(nop_txn_flags, flags);
8594
8595 if (flags & ~PERF_PMU_TXN_ADD)
8596 return;
8597
ad5133b7 8598 perf_pmu_disable(pmu);
6fb2915d
LZ
8599}
8600
ad5133b7
PZ
8601static int perf_pmu_commit_txn(struct pmu *pmu)
8602{
fbbe0701
SB
8603 unsigned int flags = __this_cpu_read(nop_txn_flags);
8604
8605 __this_cpu_write(nop_txn_flags, 0);
8606
8607 if (flags & ~PERF_PMU_TXN_ADD)
8608 return 0;
8609
ad5133b7
PZ
8610 perf_pmu_enable(pmu);
8611 return 0;
8612}
e077df4f 8613
ad5133b7 8614static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 8615{
fbbe0701
SB
8616 unsigned int flags = __this_cpu_read(nop_txn_flags);
8617
8618 __this_cpu_write(nop_txn_flags, 0);
8619
8620 if (flags & ~PERF_PMU_TXN_ADD)
8621 return;
8622
ad5133b7 8623 perf_pmu_enable(pmu);
24f1e32c
FW
8624}
8625
35edc2a5
PZ
8626static int perf_event_idx_default(struct perf_event *event)
8627{
c719f560 8628 return 0;
35edc2a5
PZ
8629}
8630
8dc85d54
PZ
8631/*
8632 * Ensures all contexts with the same task_ctx_nr have the same
8633 * pmu_cpu_context too.
8634 */
9e317041 8635static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 8636{
8dc85d54 8637 struct pmu *pmu;
b326e956 8638
8dc85d54
PZ
8639 if (ctxn < 0)
8640 return NULL;
24f1e32c 8641
8dc85d54
PZ
8642 list_for_each_entry(pmu, &pmus, entry) {
8643 if (pmu->task_ctx_nr == ctxn)
8644 return pmu->pmu_cpu_context;
8645 }
24f1e32c 8646
8dc85d54 8647 return NULL;
24f1e32c
FW
8648}
8649
51676957 8650static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 8651{
51676957
PZ
8652 int cpu;
8653
8654 for_each_possible_cpu(cpu) {
8655 struct perf_cpu_context *cpuctx;
8656
8657 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8658
3f1f3320
PZ
8659 if (cpuctx->unique_pmu == old_pmu)
8660 cpuctx->unique_pmu = pmu;
51676957
PZ
8661 }
8662}
8663
8664static void free_pmu_context(struct pmu *pmu)
8665{
8666 struct pmu *i;
f5ffe02e 8667
8dc85d54 8668 mutex_lock(&pmus_lock);
0475f9ea 8669 /*
8dc85d54 8670 * Like a real lame refcount.
0475f9ea 8671 */
51676957
PZ
8672 list_for_each_entry(i, &pmus, entry) {
8673 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
8674 update_pmu_context(i, pmu);
8dc85d54 8675 goto out;
51676957 8676 }
8dc85d54 8677 }
d6d020e9 8678
51676957 8679 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
8680out:
8681 mutex_unlock(&pmus_lock);
24f1e32c 8682}
6e855cd4
AS
8683
8684/*
8685 * Let userspace know that this PMU supports address range filtering:
8686 */
8687static ssize_t nr_addr_filters_show(struct device *dev,
8688 struct device_attribute *attr,
8689 char *page)
8690{
8691 struct pmu *pmu = dev_get_drvdata(dev);
8692
8693 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8694}
8695DEVICE_ATTR_RO(nr_addr_filters);
8696
2e80a82a 8697static struct idr pmu_idr;
d6d020e9 8698
abe43400
PZ
8699static ssize_t
8700type_show(struct device *dev, struct device_attribute *attr, char *page)
8701{
8702 struct pmu *pmu = dev_get_drvdata(dev);
8703
8704 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8705}
90826ca7 8706static DEVICE_ATTR_RO(type);
abe43400 8707
62b85639
SE
8708static ssize_t
8709perf_event_mux_interval_ms_show(struct device *dev,
8710 struct device_attribute *attr,
8711 char *page)
8712{
8713 struct pmu *pmu = dev_get_drvdata(dev);
8714
8715 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8716}
8717
272325c4
PZ
8718static DEFINE_MUTEX(mux_interval_mutex);
8719
62b85639
SE
8720static ssize_t
8721perf_event_mux_interval_ms_store(struct device *dev,
8722 struct device_attribute *attr,
8723 const char *buf, size_t count)
8724{
8725 struct pmu *pmu = dev_get_drvdata(dev);
8726 int timer, cpu, ret;
8727
8728 ret = kstrtoint(buf, 0, &timer);
8729 if (ret)
8730 return ret;
8731
8732 if (timer < 1)
8733 return -EINVAL;
8734
8735 /* same value, nothing to do */
8736 if (timer == pmu->hrtimer_interval_ms)
8737 return count;
8738
272325c4 8739 mutex_lock(&mux_interval_mutex);
62b85639
SE
8740 pmu->hrtimer_interval_ms = timer;
8741
8742 /* update all cpuctx for this PMU */
272325c4
PZ
8743 get_online_cpus();
8744 for_each_online_cpu(cpu) {
62b85639
SE
8745 struct perf_cpu_context *cpuctx;
8746 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8747 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8748
272325c4
PZ
8749 cpu_function_call(cpu,
8750 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 8751 }
272325c4
PZ
8752 put_online_cpus();
8753 mutex_unlock(&mux_interval_mutex);
62b85639
SE
8754
8755 return count;
8756}
90826ca7 8757static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 8758
90826ca7
GKH
8759static struct attribute *pmu_dev_attrs[] = {
8760 &dev_attr_type.attr,
8761 &dev_attr_perf_event_mux_interval_ms.attr,
8762 NULL,
abe43400 8763};
90826ca7 8764ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
8765
8766static int pmu_bus_running;
8767static struct bus_type pmu_bus = {
8768 .name = "event_source",
90826ca7 8769 .dev_groups = pmu_dev_groups,
abe43400
PZ
8770};
8771
8772static void pmu_dev_release(struct device *dev)
8773{
8774 kfree(dev);
8775}
8776
8777static int pmu_dev_alloc(struct pmu *pmu)
8778{
8779 int ret = -ENOMEM;
8780
8781 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8782 if (!pmu->dev)
8783 goto out;
8784
0c9d42ed 8785 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
8786 device_initialize(pmu->dev);
8787 ret = dev_set_name(pmu->dev, "%s", pmu->name);
8788 if (ret)
8789 goto free_dev;
8790
8791 dev_set_drvdata(pmu->dev, pmu);
8792 pmu->dev->bus = &pmu_bus;
8793 pmu->dev->release = pmu_dev_release;
8794 ret = device_add(pmu->dev);
8795 if (ret)
8796 goto free_dev;
8797
6e855cd4
AS
8798 /* For PMUs with address filters, throw in an extra attribute: */
8799 if (pmu->nr_addr_filters)
8800 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
8801
8802 if (ret)
8803 goto del_dev;
8804
abe43400
PZ
8805out:
8806 return ret;
8807
6e855cd4
AS
8808del_dev:
8809 device_del(pmu->dev);
8810
abe43400
PZ
8811free_dev:
8812 put_device(pmu->dev);
8813 goto out;
8814}
8815
547e9fd7 8816static struct lock_class_key cpuctx_mutex;
facc4307 8817static struct lock_class_key cpuctx_lock;
547e9fd7 8818
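/*
 * Register a new PMU: allocate its per-CPU disable count, assign (or
 * allocate via the idr) its type, create the sysfs device when the bus is
 * up, find or allocate the per-CPU contexts shared by all PMUs with the
 * same task_ctx_nr, fill in default transaction/enable/event_idx
 * callbacks, and finally add it to the global pmus list.
 */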
03d8e80b 8819int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 8820{
108b02cf 8821 int cpu, ret;
24f1e32c 8822
b0a873eb 8823 mutex_lock(&pmus_lock);
33696fc0
PZ
8824 ret = -ENOMEM;
8825 pmu->pmu_disable_count = alloc_percpu(int);
8826 if (!pmu->pmu_disable_count)
8827 goto unlock;
f29ac756 8828
2e80a82a
PZ
8829 pmu->type = -1;
8830 if (!name)
8831 goto skip_type;
8832 pmu->name = name;
8833
8834 if (type < 0) {
0e9c3be2
TH
8835 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
8836 if (type < 0) {
8837 ret = type;
2e80a82a
PZ
8838 goto free_pdc;
8839 }
8840 }
8841 pmu->type = type;
8842
abe43400
PZ
8843 if (pmu_bus_running) {
8844 ret = pmu_dev_alloc(pmu);
8845 if (ret)
8846 goto free_idr;
8847 }
8848
2e80a82a 8849skip_type:
26657848
PZ
8850 if (pmu->task_ctx_nr == perf_hw_context) {
8851 static int hw_context_taken = 0;
8852
5101ef20
MR
8853 /*
8854 * Other than systems with heterogeneous CPUs, it never makes
8855 * sense for two PMUs to share perf_hw_context. PMUs which are
8856 * uncore must use perf_invalid_context.
8857 */
8858 if (WARN_ON_ONCE(hw_context_taken &&
8859 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
26657848
PZ
8860 pmu->task_ctx_nr = perf_invalid_context;
8861
8862 hw_context_taken = 1;
8863 }
8864
8dc85d54
PZ
8865 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
8866 if (pmu->pmu_cpu_context)
8867 goto got_cpu_context;
f29ac756 8868
c4814202 8869 ret = -ENOMEM;
108b02cf
PZ
8870 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
8871 if (!pmu->pmu_cpu_context)
abe43400 8872 goto free_dev;
f344011c 8873
108b02cf
PZ
8874 for_each_possible_cpu(cpu) {
8875 struct perf_cpu_context *cpuctx;
8876
8877 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 8878 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 8879 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 8880 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 8881 cpuctx->ctx.pmu = pmu;
9e630205 8882
272325c4 8883 __perf_mux_hrtimer_init(cpuctx, cpu);
9e630205 8884
3f1f3320 8885 cpuctx->unique_pmu = pmu;
108b02cf 8886 }
76e1d904 8887
8dc85d54 8888got_cpu_context:
ad5133b7
PZ
8889 if (!pmu->start_txn) {
8890 if (pmu->pmu_enable) {
8891 /*
8892 * If we have pmu_enable/pmu_disable calls, install
8893 * transaction stubs that use that to try and batch
8894 * hardware accesses.
8895 */
8896 pmu->start_txn = perf_pmu_start_txn;
8897 pmu->commit_txn = perf_pmu_commit_txn;
8898 pmu->cancel_txn = perf_pmu_cancel_txn;
8899 } else {
fbbe0701 8900 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
8901 pmu->commit_txn = perf_pmu_nop_int;
8902 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 8903 }
5c92d124 8904 }
15dbf27c 8905
ad5133b7
PZ
8906 if (!pmu->pmu_enable) {
8907 pmu->pmu_enable = perf_pmu_nop_void;
8908 pmu->pmu_disable = perf_pmu_nop_void;
8909 }
8910
35edc2a5
PZ
8911 if (!pmu->event_idx)
8912 pmu->event_idx = perf_event_idx_default;
8913
b0a873eb 8914 list_add_rcu(&pmu->entry, &pmus);
bed5b25a 8915 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
8916 ret = 0;
8917unlock:
b0a873eb
PZ
8918 mutex_unlock(&pmus_lock);
8919
33696fc0 8920 return ret;
108b02cf 8921
abe43400
PZ
8922free_dev:
8923 device_del(pmu->dev);
8924 put_device(pmu->dev);
8925
2e80a82a
PZ
8926free_idr:
8927 if (pmu->type >= PERF_TYPE_MAX)
8928 idr_remove(&pmu_idr, pmu->type);
8929
108b02cf
PZ
8930free_pdc:
8931 free_percpu(pmu->pmu_disable_count);
8932 goto unlock;
f29ac756 8933}
c464c76e 8934EXPORT_SYMBOL_GPL(perf_pmu_register);
f29ac756 8935
b0a873eb 8936void perf_pmu_unregister(struct pmu *pmu)
5c92d124 8937{
0933840a
JO
8938 int remove_device;
8939
b0a873eb 8940 mutex_lock(&pmus_lock);
0933840a 8941 remove_device = pmu_bus_running;
b0a873eb
PZ
8942 list_del_rcu(&pmu->entry);
8943 mutex_unlock(&pmus_lock);
5c92d124 8944
0475f9ea 8945 /*
cde8e884
PZ
8946 * We dereference the pmu list under both SRCU and regular RCU, so
8947 * synchronize against both of those.
0475f9ea 8948 */
b0a873eb 8949 synchronize_srcu(&pmus_srcu);
cde8e884 8950 synchronize_rcu();
d6d020e9 8951
33696fc0 8952 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
8953 if (pmu->type >= PERF_TYPE_MAX)
8954 idr_remove(&pmu_idr, pmu->type);
0933840a
JO
8955 if (remove_device) {
8956 if (pmu->nr_addr_filters)
8957 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
8958 device_del(pmu->dev);
8959 put_device(pmu->dev);
8960 }
51676957 8961 free_pmu_context(pmu);
b0a873eb 8962}
c464c76e 8963EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 8964
cc34b98b
MR
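/*
 * Try to initialize @event on @pmu: pin the PMU's module, take the group
 * leader's ctx mutex (nested, to allow the inheritance path) when @event
 * is not itself the leader, call ->event_init(), and drop the module
 * reference again on failure.
 */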
8965static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
8966{
ccd41c86 8967 struct perf_event_context *ctx = NULL;
cc34b98b
MR
8968 int ret;
8969
8970 if (!try_module_get(pmu->module))
8971 return -ENODEV;
ccd41c86
PZ
8972
8973 if (event->group_leader != event) {
8b10c5e2
PZ
8974 /*
8975 * This ctx->mutex can nest when we're called through
8976 * inheritance. See the perf_event_ctx_lock_nested() comment.
8977 */
8978 ctx = perf_event_ctx_lock_nested(event->group_leader,
8979 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
8980 BUG_ON(!ctx);
8981 }
8982
cc34b98b
MR
8983 event->pmu = pmu;
8984 ret = pmu->event_init(event);
ccd41c86
PZ
8985
8986 if (ctx)
8987 perf_event_ctx_unlock(event->group_leader, ctx);
8988
cc34b98b
MR
8989 if (ret)
8990 module_put(pmu->module);
8991
8992 return ret;
8993}
8994
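/*
 * Find a PMU for @event: first try the PMU registered under attr.type in
 * the idr, then fall back to walking the pmus list until some
 * ->event_init() accepts the event; any error other than -ENOENT aborts
 * the search.
 */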
18ab2cd3 8995static struct pmu *perf_init_event(struct perf_event *event)
b0a873eb
PZ
8996{
8997 struct pmu *pmu = NULL;
8998 int idx;
940c5b29 8999 int ret;
b0a873eb
PZ
9000
9001 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
9002
9003 rcu_read_lock();
9004 pmu = idr_find(&pmu_idr, event->attr.type);
9005 rcu_read_unlock();
940c5b29 9006 if (pmu) {
cc34b98b 9007 ret = perf_try_init_event(pmu, event);
940c5b29
LM
9008 if (ret)
9009 pmu = ERR_PTR(ret);
2e80a82a 9010 goto unlock;
940c5b29 9011 }
2e80a82a 9012
b0a873eb 9013 list_for_each_entry_rcu(pmu, &pmus, entry) {
cc34b98b 9014 ret = perf_try_init_event(pmu, event);
b0a873eb 9015 if (!ret)
e5f4d339 9016 goto unlock;
76e1d904 9017
b0a873eb
PZ
9018 if (ret != -ENOENT) {
9019 pmu = ERR_PTR(ret);
e5f4d339 9020 goto unlock;
f344011c 9021 }
5c92d124 9022 }
e5f4d339
PZ
9023 pmu = ERR_PTR(-ENOENT);
9024unlock:
b0a873eb 9025 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 9026
4aeb0b42 9027 return pmu;
5c92d124
IM
9028}
9029
f2fb6bef
KL
9030static void attach_sb_event(struct perf_event *event)
9031{
9032 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
9033
9034 raw_spin_lock(&pel->lock);
9035 list_add_rcu(&event->sb_list, &pel->list);
9036 raw_spin_unlock(&pel->lock);
9037}
9038
aab5b71e
PZ
9039/*
9040 * We keep a list of all !task (and therefore per-cpu) events
9041 * that need to receive side-band records.
9042 *
9043 * This avoids having to scan all the various PMU per-cpu contexts
9044 * looking for them.
9045 */
f2fb6bef
KL
9046static void account_pmu_sb_event(struct perf_event *event)
9047{
a4f144eb 9048 if (is_sb_event(event))
f2fb6bef
KL
9049 attach_sb_event(event);
9050}
9051
4beb31f3
FW
9052static void account_event_cpu(struct perf_event *event, int cpu)
9053{
9054 if (event->parent)
9055 return;
9056
4beb31f3
FW
9057 if (is_cgroup_event(event))
9058 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
9059}
9060
555e0c1e
FW
9061/* Freq events need the tick to stay alive (see perf_event_task_tick). */
9062static void account_freq_event_nohz(void)
9063{
9064#ifdef CONFIG_NO_HZ_FULL
9065 /* Lock so we don't race with concurrent unaccount */
9066 spin_lock(&nr_freq_lock);
9067 if (atomic_inc_return(&nr_freq_events) == 1)
9068 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
9069 spin_unlock(&nr_freq_lock);
9070#endif
9071}
9072
9073static void account_freq_event(void)
9074{
9075 if (tick_nohz_full_enabled())
9076 account_freq_event_nohz();
9077 else
9078 atomic_inc(&nr_freq_events);
9079}
9080
9081
766d6c07
FW
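/*
 * Account a new (non-inherited) event in the global bookkeeping: bump the
 * counters that gate side-band records (mmap/comm/task/switch), keep the
 * tick alive for freq events, enable the perf_sched_events static key for
 * events that need the scheduling hooks (waiting for all CPUs to observe
 * it), and do the per-CPU/cgroup and side-band list accounting.
 */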
9082static void account_event(struct perf_event *event)
9083{
25432ae9
PZ
9084 bool inc = false;
9085
4beb31f3
FW
9086 if (event->parent)
9087 return;
9088
766d6c07 9089 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 9090 inc = true;
766d6c07
FW
9091 if (event->attr.mmap || event->attr.mmap_data)
9092 atomic_inc(&nr_mmap_events);
9093 if (event->attr.comm)
9094 atomic_inc(&nr_comm_events);
9095 if (event->attr.task)
9096 atomic_inc(&nr_task_events);
555e0c1e
FW
9097 if (event->attr.freq)
9098 account_freq_event();
45ac1403
AH
9099 if (event->attr.context_switch) {
9100 atomic_inc(&nr_switch_events);
25432ae9 9101 inc = true;
45ac1403 9102 }
4beb31f3 9103 if (has_branch_stack(event))
25432ae9 9104 inc = true;
4beb31f3 9105 if (is_cgroup_event(event))
25432ae9
PZ
9106 inc = true;
9107
9107c89e
PZ
9108 if (inc) {
9109 if (atomic_inc_not_zero(&perf_sched_count))
9110 goto enabled;
9111
9112 mutex_lock(&perf_sched_mutex);
9113 if (!atomic_read(&perf_sched_count)) {
9114 static_branch_enable(&perf_sched_events);
9115 /*
9116 * Guarantee that all CPUs observe the key change and
9117 * call the perf scheduling hooks before proceeding to
9118 * install events that need them.
9119 */
9120 synchronize_sched();
9121 }
9122 /*
9123 * Now that we have waited for the sync_sched(), allow further
9124 * increments to by-pass the mutex.
9125 */
9126 atomic_inc(&perf_sched_count);
9127 mutex_unlock(&perf_sched_mutex);
9128 }
9129enabled:
4beb31f3
FW
9130
9131 account_event_cpu(event, event->cpu);
f2fb6bef
KL
9132
9133 account_pmu_sb_event(event);
766d6c07
FW
9134}
9135
0793a61d 9136/*
cdd6c482 9137 * Allocate and initialize an event structure
0793a61d 9138 */
cdd6c482 9139static struct perf_event *
c3f00c70 9140perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
9141 struct task_struct *task,
9142 struct perf_event *group_leader,
9143 struct perf_event *parent_event,
4dc0da86 9144 perf_overflow_handler_t overflow_handler,
79dff51e 9145 void *context, int cgroup_fd)
0793a61d 9146{
51b0fe39 9147 struct pmu *pmu;
cdd6c482
IM
9148 struct perf_event *event;
9149 struct hw_perf_event *hwc;
90983b16 9150 long err = -EINVAL;
0793a61d 9151
66832eb4
ON
9152 if ((unsigned)cpu >= nr_cpu_ids) {
9153 if (!task || cpu != -1)
9154 return ERR_PTR(-EINVAL);
9155 }
9156
c3f00c70 9157 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 9158 if (!event)
d5d2bc0d 9159 return ERR_PTR(-ENOMEM);
0793a61d 9160
04289bb9 9161 /*
cdd6c482 9162 * Single events are their own group leaders, with an
04289bb9
IM
9163 * empty sibling list:
9164 */
9165 if (!group_leader)
cdd6c482 9166 group_leader = event;
04289bb9 9167
cdd6c482
IM
9168 mutex_init(&event->child_mutex);
9169 INIT_LIST_HEAD(&event->child_list);
fccc714b 9170
cdd6c482
IM
9171 INIT_LIST_HEAD(&event->group_entry);
9172 INIT_LIST_HEAD(&event->event_entry);
9173 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 9174 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 9175 INIT_LIST_HEAD(&event->active_entry);
375637bc 9176 INIT_LIST_HEAD(&event->addr_filters.list);
f3ae75de
SE
9177 INIT_HLIST_NODE(&event->hlist_entry);
9178
10c6db11 9179
cdd6c482 9180 init_waitqueue_head(&event->waitq);
e360adbe 9181 init_irq_work(&event->pending, perf_pending_event);
0793a61d 9182
cdd6c482 9183 mutex_init(&event->mmap_mutex);
375637bc 9184 raw_spin_lock_init(&event->addr_filters.lock);
7b732a75 9185
a6fa941d 9186 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
9187 event->cpu = cpu;
9188 event->attr = *attr;
9189 event->group_leader = group_leader;
9190 event->pmu = NULL;
cdd6c482 9191 event->oncpu = -1;
a96bbc16 9192
cdd6c482 9193 event->parent = parent_event;
b84fbc9f 9194
17cf22c3 9195 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 9196 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 9197
cdd6c482 9198 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 9199
d580ff86
PZ
9200 if (task) {
9201 event->attach_state = PERF_ATTACH_TASK;
d580ff86 9202 /*
50f16a8b
PZ
9203 * XXX pmu::event_init needs to know what task to account to
9204 * and we cannot use the ctx information because we need the
9205 * pmu before we get a ctx.
d580ff86 9206 */
50f16a8b 9207 event->hw.target = task;
d580ff86
PZ
9208 }
9209
34f43927
PZ
9210 event->clock = &local_clock;
9211 if (parent_event)
9212 event->clock = parent_event->clock;
9213
4dc0da86 9214 if (!overflow_handler && parent_event) {
b326e956 9215 overflow_handler = parent_event->overflow_handler;
4dc0da86 9216 context = parent_event->overflow_handler_context;
f1e4ba5b 9217#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
aa6a5f3c
AS
9218 if (overflow_handler == bpf_overflow_handler) {
9219 struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
9220
9221 if (IS_ERR(prog)) {
9222 err = PTR_ERR(prog);
9223 goto err_ns;
9224 }
9225 event->prog = prog;
9226 event->orig_overflow_handler =
9227 parent_event->orig_overflow_handler;
9228 }
9229#endif
4dc0da86 9230 }
66832eb4 9231
1879445d
WN
9232 if (overflow_handler) {
9233 event->overflow_handler = overflow_handler;
9234 event->overflow_handler_context = context;
9ecda41a
WN
9235 } else if (is_write_backward(event)) {
9236 event->overflow_handler = perf_event_output_backward;
9237 event->overflow_handler_context = NULL;
1879445d 9238 } else {
9ecda41a 9239 event->overflow_handler = perf_event_output_forward;
1879445d
WN
9240 event->overflow_handler_context = NULL;
9241 }
97eaf530 9242
0231bb53 9243 perf_event__state_init(event);
a86ed508 9244
4aeb0b42 9245 pmu = NULL;
b8e83514 9246
cdd6c482 9247 hwc = &event->hw;
bd2b5b12 9248 hwc->sample_period = attr->sample_period;
0d48696f 9249 if (attr->freq && attr->sample_freq)
bd2b5b12 9250 hwc->sample_period = 1;
eced1dfc 9251 hwc->last_period = hwc->sample_period;
bd2b5b12 9252
e7850595 9253 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 9254
2023b359 9255 /*
cdd6c482 9256 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 9257 */
3dab77fb 9258 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 9259 goto err_ns;
a46a2300
YZ
9260
9261 if (!has_branch_stack(event))
9262 event->attr.branch_sample_type = 0;
2023b359 9263
79dff51e
MF
9264 if (cgroup_fd != -1) {
9265 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
9266 if (err)
9267 goto err_ns;
9268 }
9269
b0a873eb 9270 pmu = perf_init_event(event);
4aeb0b42 9271 if (!pmu)
90983b16
FW
9272 goto err_ns;
9273 else if (IS_ERR(pmu)) {
4aeb0b42 9274 err = PTR_ERR(pmu);
90983b16 9275 goto err_ns;
621a01ea 9276 }
d5d2bc0d 9277
bed5b25a
AS
9278 err = exclusive_event_init(event);
9279 if (err)
9280 goto err_pmu;
9281
375637bc
AS
9282 if (has_addr_filter(event)) {
9283 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
9284 sizeof(unsigned long),
9285 GFP_KERNEL);
9286 if (!event->addr_filters_offs)
9287 goto err_per_task;
9288
9289 /* force hw sync on the address filters */
9290 event->addr_filters_gen = 1;
9291 }
9292
cdd6c482 9293 if (!event->parent) {
927c7a9e 9294 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
97c79a38 9295 err = get_callchain_buffers(attr->sample_max_stack);
90983b16 9296 if (err)
375637bc 9297 goto err_addr_filters;
d010b332 9298 }
f344011c 9299 }
9ee318a7 9300
927a5570
AS
9301 /* symmetric to unaccount_event() in _free_event() */
9302 account_event(event);
9303
cdd6c482 9304 return event;
90983b16 9305
375637bc
AS
9306err_addr_filters:
9307 kfree(event->addr_filters_offs);
9308
bed5b25a
AS
9309err_per_task:
9310 exclusive_event_destroy(event);
9311
90983b16
FW
9312err_pmu:
9313 if (event->destroy)
9314 event->destroy(event);
c464c76e 9315 module_put(pmu->module);
90983b16 9316err_ns:
79dff51e
MF
9317 if (is_cgroup_event(event))
9318 perf_detach_cgroup(event);
90983b16
FW
9319 if (event->ns)
9320 put_pid_ns(event->ns);
9321 kfree(event);
9322
9323 return ERR_PTR(err);
0793a61d
TG
9324}
9325
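/*
 * Copy in the attr structure from userspace, coping with older and newer
 * ABI versions: a shorter struct is zero-extended, a larger one is only
 * accepted if all the trailing bytes we do not know about are zero.
 */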
cdd6c482
IM
9326static int perf_copy_attr(struct perf_event_attr __user *uattr,
9327 struct perf_event_attr *attr)
974802ea 9328{
974802ea 9329 u32 size;
cdf8073d 9330 int ret;
974802ea
PZ
9331
9332 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9333 return -EFAULT;
9334
9335 /*
9336 * zero the full structure, so that a short copy leaves the remainder zeroed.
9337 */
9338 memset(attr, 0, sizeof(*attr));
9339
9340 ret = get_user(size, &uattr->size);
9341 if (ret)
9342 return ret;
9343
9344 if (size > PAGE_SIZE) /* silly large */
9345 goto err_size;
9346
9347 if (!size) /* abi compat */
9348 size = PERF_ATTR_SIZE_VER0;
9349
9350 if (size < PERF_ATTR_SIZE_VER0)
9351 goto err_size;
9352
9353 /*
9354 * If we're handed a bigger struct than we know of,
cdf8073d
IS
9355 * ensure all the unknown bits are 0 - i.e. new
9356 * user-space does not rely on any kernel feature
9357 * extensions we don't know about yet.
974802ea
PZ
9358 */
9359 if (size > sizeof(*attr)) {
cdf8073d
IS
9360 unsigned char __user *addr;
9361 unsigned char __user *end;
9362 unsigned char val;
974802ea 9363
cdf8073d
IS
9364 addr = (void __user *)uattr + sizeof(*attr);
9365 end = (void __user *)uattr + size;
974802ea 9366
cdf8073d 9367 for (; addr < end; addr++) {
974802ea
PZ
9368 ret = get_user(val, addr);
9369 if (ret)
9370 return ret;
9371 if (val)
9372 goto err_size;
9373 }
b3e62e35 9374 size = sizeof(*attr);
974802ea
PZ
9375 }
9376
9377 ret = copy_from_user(attr, uattr, size);
9378 if (ret)
9379 return -EFAULT;
9380
cd757645 9381 if (attr->__reserved_1)
974802ea
PZ
9382 return -EINVAL;
9383
9384 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9385 return -EINVAL;
9386
9387 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9388 return -EINVAL;
9389
bce38cd5
SE
9390 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9391 u64 mask = attr->branch_sample_type;
9392
9393 /* only using defined bits */
9394 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9395 return -EINVAL;
9396
9397 /* at least one branch bit must be set */
9398 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9399 return -EINVAL;
9400
bce38cd5
SE
9401 /* propagate priv level, when not set for branch */
9402 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9403
9404 /* exclude_kernel checked on syscall entry */
9405 if (!attr->exclude_kernel)
9406 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9407
9408 if (!attr->exclude_user)
9409 mask |= PERF_SAMPLE_BRANCH_USER;
9410
9411 if (!attr->exclude_hv)
9412 mask |= PERF_SAMPLE_BRANCH_HV;
9413 /*
9414 * adjust user setting (for HW filter setup)
9415 */
9416 attr->branch_sample_type = mask;
9417 }
e712209a
SE
9418 /* privileged levels capture (kernel, hv): check permissions */
9419 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
9420 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9421 return -EACCES;
bce38cd5 9422 }
4018994f 9423
c5ebcedb 9424 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 9425 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
9426 if (ret)
9427 return ret;
9428 }
9429
9430 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9431 if (!arch_perf_have_user_stack_dump())
9432 return -ENOSYS;
9433
9434 /*
9435 * We have __u32 type for the size, but so far
9436 * we can only use __u16 as maximum due to the
9437 * __u16 sample size limit.
9438 */
9439 if (attr->sample_stack_user >= USHRT_MAX)
9440 ret = -EINVAL;
9441 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9442 ret = -EINVAL;
9443 }
4018994f 9444
60e2364e
SE
9445 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9446 ret = perf_reg_validate(attr->sample_regs_intr);
974802ea
PZ
9447out:
9448 return ret;
9449
9450err_size:
9451 put_user(sizeof(*attr), &uattr->size);
9452 ret = -E2BIG;
9453 goto out;
9454}
9455
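/*
 * Redirect @event's output into @output_event's ring buffer (a NULL
 * @output_event detaches the current buffer). Both events must target
 * the same CPU/task, use the same clock and the same write direction,
 * and aux-capable events must share the same PMU.
 */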
ac9721f3
PZ
9456static int
9457perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 9458{
b69cf536 9459 struct ring_buffer *rb = NULL;
a4be7c27
PZ
9460 int ret = -EINVAL;
9461
ac9721f3 9462 if (!output_event)
a4be7c27
PZ
9463 goto set;
9464
ac9721f3
PZ
9465 /* don't allow circular references */
9466 if (event == output_event)
a4be7c27
PZ
9467 goto out;
9468
0f139300
PZ
9469 /*
9470 * Don't allow cross-cpu buffers
9471 */
9472 if (output_event->cpu != event->cpu)
9473 goto out;
9474
9475 /*
76369139 9476 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
9477 */
9478 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9479 goto out;
9480
34f43927
PZ
9481 /*
9482 * Mixing clocks in the same buffer is trouble you don't need.
9483 */
9484 if (output_event->clock != event->clock)
9485 goto out;
9486
9ecda41a
WN
9487 /*
9488 * An event either writes the ring buffer from the beginning or from the
9489 * end; mixing the two directions in one buffer is not allowed.
9490 */
9491 if (is_write_backward(output_event) != is_write_backward(event))
9492 goto out;
9493
45bfb2e5
PZ
9494 /*
9495 * If both events generate aux data, they must be on the same PMU
9496 */
9497 if (has_aux(event) && has_aux(output_event) &&
9498 event->pmu != output_event->pmu)
9499 goto out;
9500
a4be7c27 9501set:
cdd6c482 9502 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
9503 /* Can't redirect output if we've got an active mmap() */
9504 if (atomic_read(&event->mmap_count))
9505 goto unlock;
a4be7c27 9506
ac9721f3 9507 if (output_event) {
76369139
FW
9508 /* get the rb we want to redirect to */
9509 rb = ring_buffer_get(output_event);
9510 if (!rb)
ac9721f3 9511 goto unlock;
a4be7c27
PZ
9512 }
9513
b69cf536 9514 ring_buffer_attach(event, rb);
9bb5d40c 9515
a4be7c27 9516 ret = 0;
ac9721f3
PZ
9517unlock:
9518 mutex_unlock(&event->mmap_mutex);
9519
a4be7c27 9520out:
a4be7c27
PZ
9521 return ret;
9522}
9523
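/*
 * Take two mutexes in a fixed (address) order so that two contexts can
 * never deadlock against each other; the second acquisition is annotated
 * with SINGLE_DEPTH_NESTING to keep lockdep happy.
 */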
f63a8daa
PZ
9524static void mutex_lock_double(struct mutex *a, struct mutex *b)
9525{
9526 if (b < a)
9527 swap(a, b);
9528
9529 mutex_lock(a);
9530 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9531}
9532
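/*
 * Select the clock used to timestamp this event's records. Clocks that
 * are not NMI-safe are only accepted on PMUs that never deliver events
 * from NMI context (PERF_PMU_CAP_NO_NMI).
 */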
34f43927
PZ
9533static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9534{
9535 bool nmi_safe = false;
9536
9537 switch (clk_id) {
9538 case CLOCK_MONOTONIC:
9539 event->clock = &ktime_get_mono_fast_ns;
9540 nmi_safe = true;
9541 break;
9542
9543 case CLOCK_MONOTONIC_RAW:
9544 event->clock = &ktime_get_raw_fast_ns;
9545 nmi_safe = true;
9546 break;
9547
9548 case CLOCK_REALTIME:
9549 event->clock = &ktime_get_real_ns;
9550 break;
9551
9552 case CLOCK_BOOTTIME:
9553 event->clock = &ktime_get_boot_ns;
9554 break;
9555
9556 case CLOCK_TAI:
9557 event->clock = &ktime_get_tai_ns;
9558 break;
9559
9560 default:
9561 return -EINVAL;
9562 }
9563
9564 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9565 return -EINVAL;
9566
9567 return 0;
9568}
9569
321027c1
PZ
9570/*
9571 * Variation on perf_event_ctx_lock_nested(), except we take two context
9572 * mutexes.
9573 */
9574static struct perf_event_context *
9575__perf_event_ctx_lock_double(struct perf_event *group_leader,
9576 struct perf_event_context *ctx)
9577{
9578 struct perf_event_context *gctx;
9579
9580again:
9581 rcu_read_lock();
9582 gctx = READ_ONCE(group_leader->ctx);
9583 if (!atomic_inc_not_zero(&gctx->refcount)) {
9584 rcu_read_unlock();
9585 goto again;
9586 }
9587 rcu_read_unlock();
9588
9589 mutex_lock_double(&gctx->mutex, &ctx->mutex);
9590
9591 if (group_leader->ctx != gctx) {
9592 mutex_unlock(&ctx->mutex);
9593 mutex_unlock(&gctx->mutex);
9594 put_ctx(gctx);
9595 goto again;
9596 }
9597
9598 return gctx;
9599}
9600
0793a61d 9601/**
cdd6c482 9602 * sys_perf_event_open - open a performance event, associate it with a task/cpu
9f66a381 9603 *
cdd6c482 9604 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 9605 * @pid: target pid
9f66a381 9606 * @cpu: target cpu
cdd6c482 9607 * @group_fd: group leader event fd
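 * @flags: PERF_FLAG_* modifiers (cgroup mode, fd output, fd cloexec, ...)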
0793a61d 9608 */
cdd6c482
IM
9609SYSCALL_DEFINE5(perf_event_open,
9610 struct perf_event_attr __user *, attr_uptr,
2743a5b0 9611 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 9612{
b04243ef
PZ
9613 struct perf_event *group_leader = NULL, *output_event = NULL;
9614 struct perf_event *event, *sibling;
cdd6c482 9615 struct perf_event_attr attr;
f63a8daa 9616 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 9617 struct file *event_file = NULL;
2903ff01 9618 struct fd group = {NULL, 0};
38a81da2 9619 struct task_struct *task = NULL;
89a1e187 9620 struct pmu *pmu;
ea635c64 9621 int event_fd;
b04243ef 9622 int move_group = 0;
dc86cabe 9623 int err;
a21b0b35 9624 int f_flags = O_RDWR;
79dff51e 9625 int cgroup_fd = -1;
0793a61d 9626
2743a5b0 9627 /* for future expandability... */
e5d1367f 9628 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
9629 return -EINVAL;
9630
dc86cabe
IM
9631 err = perf_copy_attr(attr_uptr, &attr);
9632 if (err)
9633 return err;
eab656ae 9634
0764771d
PZ
9635 if (!attr.exclude_kernel) {
9636 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9637 return -EACCES;
9638 }
9639
df58ab24 9640 if (attr.freq) {
cdd6c482 9641 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 9642 return -EINVAL;
0819b2e3
PZ
9643 } else {
9644 if (attr.sample_period & (1ULL << 63))
9645 return -EINVAL;
df58ab24
PZ
9646 }
9647
97c79a38
ACM
9648 if (!attr.sample_max_stack)
9649 attr.sample_max_stack = sysctl_perf_event_max_stack;
9650
e5d1367f
SE
9651 /*
9652 * In cgroup mode, the pid argument is used to pass the fd
9653 * opened to the cgroup directory in cgroupfs. The cpu argument
9654 * designates the cpu on which to monitor threads from that
9655 * cgroup.
9656 */
9657 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9658 return -EINVAL;
9659
a21b0b35
YD
9660 if (flags & PERF_FLAG_FD_CLOEXEC)
9661 f_flags |= O_CLOEXEC;
9662
9663 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
9664 if (event_fd < 0)
9665 return event_fd;
9666
ac9721f3 9667 if (group_fd != -1) {
2903ff01
AV
9668 err = perf_fget_light(group_fd, &group);
9669 if (err)
d14b12d7 9670 goto err_fd;
2903ff01 9671 group_leader = group.file->private_data;
ac9721f3
PZ
9672 if (flags & PERF_FLAG_FD_OUTPUT)
9673 output_event = group_leader;
9674 if (flags & PERF_FLAG_FD_NO_GROUP)
9675 group_leader = NULL;
9676 }
9677
e5d1367f 9678 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
9679 task = find_lively_task_by_vpid(pid);
9680 if (IS_ERR(task)) {
9681 err = PTR_ERR(task);
9682 goto err_group_fd;
9683 }
9684 }
9685
1f4ee503
PZ
9686 if (task && group_leader &&
9687 group_leader->attr.inherit != attr.inherit) {
9688 err = -EINVAL;
9689 goto err_task;
9690 }
9691
fbfc623f
YZ
9692 get_online_cpus();
9693
79c9ce57
PZ
9694 if (task) {
9695 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9696 if (err)
9697 goto err_cpus;
9698
9699 /*
9700 * Reuse ptrace permission checks for now.
9701 *
9702 * We must hold cred_guard_mutex across this and any potential
9703 * perf_install_in_context() call for this new event to
9704 * serialize against exec() altering our credentials (and the
9705 * perf_event_exit_task() that could imply).
9706 */
9707 err = -EACCES;
9708 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9709 goto err_cred;
9710 }
9711
79dff51e
MF
9712 if (flags & PERF_FLAG_PID_CGROUP)
9713 cgroup_fd = pid;
9714
4dc0da86 9715 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 9716 NULL, NULL, cgroup_fd);
d14b12d7
SE
9717 if (IS_ERR(event)) {
9718 err = PTR_ERR(event);
79c9ce57 9719 goto err_cred;
d14b12d7
SE
9720 }
9721
53b25335
VW
9722 if (is_sampling_event(event)) {
9723 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
a1396555 9724 err = -EOPNOTSUPP;
53b25335
VW
9725 goto err_alloc;
9726 }
9727 }
9728
89a1e187
PZ
9729 /*
9730 * Special case software events and allow them to be part of
9731 * any hardware group.
9732 */
9733 pmu = event->pmu;
b04243ef 9734
34f43927
PZ
9735 if (attr.use_clockid) {
9736 err = perf_event_set_clock(event, attr.clockid);
9737 if (err)
9738 goto err_alloc;
9739 }
9740
4ff6a8de
DCC
9741 if (pmu->task_ctx_nr == perf_sw_context)
9742 event->event_caps |= PERF_EV_CAP_SOFTWARE;
9743
b04243ef
PZ
9744 if (group_leader &&
9745 (is_software_event(event) != is_software_event(group_leader))) {
9746 if (is_software_event(event)) {
9747 /*
9748 * If event and group_leader are not both a software
9749 * event, and event is, then group leader is not.
9750 *
9751 * Allow the addition of software events to !software
9752 * groups, this is safe because software events never
9753 * fail to schedule.
9754 */
9755 pmu = group_leader->pmu;
9756 } else if (is_software_event(group_leader) &&
4ff6a8de 9757 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
b04243ef
PZ
9758 /*
9759 * In case the group is a pure software group, and we
9760 * try to add a hardware event, move the whole group to
9761 * the hardware context.
9762 */
9763 move_group = 1;
9764 }
9765 }
89a1e187
PZ
9766
9767 /*
9768 * Get the target context (task or percpu):
9769 */
4af57ef2 9770 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
9771 if (IS_ERR(ctx)) {
9772 err = PTR_ERR(ctx);
c6be5a5c 9773 goto err_alloc;
89a1e187
PZ
9774 }
9775
bed5b25a
AS
9776 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
9777 err = -EBUSY;
9778 goto err_context;
9779 }
9780
ccff286d 9781 /*
cdd6c482 9782 * Look up the group leader (we will attach this event to it):
04289bb9 9783 */
ac9721f3 9784 if (group_leader) {
dc86cabe 9785 err = -EINVAL;
04289bb9 9786
04289bb9 9787 /*
ccff286d
IM
9788 * Do not allow a recursive hierarchy (this new sibling
9789 * becoming part of another group-sibling):
9790 */
9791 if (group_leader->group_leader != group_leader)
c3f00c70 9792 goto err_context;
34f43927
PZ
9793
9794 /* All events in a group should have the same clock */
9795 if (group_leader->clock != event->clock)
9796 goto err_context;
9797
ccff286d
IM
9798 /*
9799 * Do not allow to attach to a group in a different
9800 * task or CPU context:
04289bb9 9801 */
b04243ef 9802 if (move_group) {
c3c87e77
PZ
9803 /*
9804 * Make sure we're both on the same task, or both
9805 * per-cpu events.
9806 */
9807 if (group_leader->ctx->task != ctx->task)
9808 goto err_context;
9809
9810 /*
9811 * Make sure we're both events for the same CPU;
9812 * grouping events for different CPUs is broken; since
9813 * you can never concurrently schedule them anyhow.
9814 */
9815 if (group_leader->cpu != event->cpu)
b04243ef
PZ
9816 goto err_context;
9817 } else {
9818 if (group_leader->ctx != ctx)
9819 goto err_context;
9820 }
9821
3b6f9e5c
PM
9822 /*
9823 * Only a group leader can be exclusive or pinned
9824 */
0d48696f 9825 if (attr.exclusive || attr.pinned)
c3f00c70 9826 goto err_context;
ac9721f3
PZ
9827 }
9828
9829 if (output_event) {
9830 err = perf_event_set_output(event, output_event);
9831 if (err)
c3f00c70 9832 goto err_context;
ac9721f3 9833 }
0793a61d 9834
a21b0b35
YD
9835 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
9836 f_flags);
ea635c64
AV
9837 if (IS_ERR(event_file)) {
9838 err = PTR_ERR(event_file);
201c2f85 9839 event_file = NULL;
c3f00c70 9840 goto err_context;
ea635c64 9841 }
9b51f66d 9842
b04243ef 9843 if (move_group) {
321027c1
PZ
9844 gctx = __perf_event_ctx_lock_double(group_leader, ctx);
9845
84c4e620
PZ
9846 if (gctx->task == TASK_TOMBSTONE) {
9847 err = -ESRCH;
9848 goto err_locked;
9849 }
321027c1
PZ
9850
9851 /*
9852 * Check if we raced against another sys_perf_event_open() call
9853 * moving the software group underneath us.
9854 */
9855 if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
9856 /*
9857 * If someone moved the group out from under us, check
9858 * if this new event wound up on the same ctx, if so
9859 * it's the regular !move_group case, otherwise fail.
9860 */
9861 if (gctx != ctx) {
9862 err = -EINVAL;
9863 goto err_locked;
9864 } else {
9865 perf_event_ctx_unlock(group_leader, gctx);
9866 move_group = 0;
9867 }
9868 }
f55fc2a5
PZ
9869 } else {
9870 mutex_lock(&ctx->mutex);
9871 }
9872
84c4e620
PZ
9873 if (ctx->task == TASK_TOMBSTONE) {
9874 err = -ESRCH;
9875 goto err_locked;
9876 }
9877
a723968c
PZ
9878 if (!perf_event_validate_size(event)) {
9879 err = -E2BIG;
9880 goto err_locked;
9881 }
9882
f55fc2a5
PZ
9883 /*
9884 * Must be under the same ctx::mutex as perf_install_in_context(),
9885 * because we need to serialize with concurrent event creation.
9886 */
9887 if (!exclusive_event_installable(event, ctx)) {
9888 /* exclusive and group stuff are assumed mutually exclusive */
9889 WARN_ON_ONCE(move_group);
f63a8daa 9890
f55fc2a5
PZ
9891 err = -EBUSY;
9892 goto err_locked;
9893 }
f63a8daa 9894
f55fc2a5
PZ
9895 WARN_ON_ONCE(ctx->parent_ctx);
9896
79c9ce57
PZ
9897 /*
9898 * This is the point of no return; we cannot fail hereafter. This is
9899 * where we start modifying current state.
9900 */
9901
f55fc2a5 9902 if (move_group) {
f63a8daa
PZ
9903 /*
9904 * See perf_event_ctx_lock() for comments on the details
9905 * of swizzling perf_event::ctx.
9906 */
45a0e07a 9907 perf_remove_from_context(group_leader, 0);
0231bb53 9908
b04243ef
PZ
9909 list_for_each_entry(sibling, &group_leader->sibling_list,
9910 group_entry) {
45a0e07a 9911 perf_remove_from_context(sibling, 0);
b04243ef
PZ
9912 put_ctx(gctx);
9913 }
b04243ef 9914
f63a8daa
PZ
9915 /*
9916 * Wait for everybody to stop referencing the events through
9917 * the old lists, before installing it on new lists.
9918 */
0cda4c02 9919 synchronize_rcu();
f63a8daa 9920
8f95b435
PZI
9921 /*
9922 * Install the group siblings before the group leader.
9923 *
9924 * Because a group leader will try and install the entire group
9925 * (through the sibling list, which is still intact), we can
9926 * end up with siblings installed in the wrong context.
9927 *
9928 * By installing siblings first we NO-OP because they're not
9929 * reachable through the group lists.
9930 */
b04243ef
PZ
9931 list_for_each_entry(sibling, &group_leader->sibling_list,
9932 group_entry) {
8f95b435 9933 perf_event__state_init(sibling);
9fc81d87 9934 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
9935 get_ctx(ctx);
9936 }
8f95b435
PZI
9937
9938 /*
9939 * Removing from the context ends up with a disabled
9940 * event. What we want here is an event in the initial
9941 * startup state, ready to be added into the new context.
9942 */
9943 perf_event__state_init(group_leader);
9944 perf_install_in_context(ctx, group_leader, group_leader->cpu);
9945 get_ctx(ctx);
b04243ef 9946
f55fc2a5
PZ
9947 /*
9948 * Now that all events are installed in @ctx, nothing
9949 * references @gctx anymore, so drop the last reference we have
9950 * on it.
9951 */
9952 put_ctx(gctx);
bed5b25a
AS
9953 }
9954
f73e22ab
PZ
9955 /*
9956 * Precalculate sample_data sizes; do while holding ctx::mutex such
9957 * that we're serialized against further additions and before
9958 * perf_install_in_context() which is the point the event is active and
9959 * can use these values.
9960 */
9961 perf_event__header_size(event);
9962 perf_event__id_header_size(event);
9963
78cd2c74
PZ
9964 event->owner = current;
9965
e2d37cd2 9966 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 9967 perf_unpin_context(ctx);
f63a8daa 9968
f55fc2a5 9969 if (move_group)
321027c1 9970 perf_event_ctx_unlock(group_leader, gctx);
d859e29f 9971 mutex_unlock(&ctx->mutex);
9b51f66d 9972
79c9ce57
PZ
9973 if (task) {
9974 mutex_unlock(&task->signal->cred_guard_mutex);
9975 put_task_struct(task);
9976 }
9977
fbfc623f
YZ
9978 put_online_cpus();
9979
cdd6c482
IM
9980 mutex_lock(&current->perf_event_mutex);
9981 list_add_tail(&event->owner_entry, &current->perf_event_list);
9982 mutex_unlock(&current->perf_event_mutex);
082ff5a2 9983
8a49542c
PZ
9984 /*
9985 * Drop the reference on the group_event after placing the
9986 * new event on the sibling_list. This ensures destruction
9987 * of the group leader will find the pointer to itself in
9988 * perf_group_detach().
9989 */
2903ff01 9990 fdput(group);
ea635c64
AV
9991 fd_install(event_fd, event_file);
9992 return event_fd;
0793a61d 9993
f55fc2a5
PZ
9994err_locked:
9995 if (move_group)
321027c1 9996 perf_event_ctx_unlock(group_leader, gctx);
f55fc2a5
PZ
9997 mutex_unlock(&ctx->mutex);
9998/* err_file: */
9999 fput(event_file);
c3f00c70 10000err_context:
fe4b04fa 10001 perf_unpin_context(ctx);
ea635c64 10002 put_ctx(ctx);
c6be5a5c 10003err_alloc:
13005627
PZ
10004 /*
10005 * If event_file is set, the fput() above will have called ->release()
10006 * and that will take care of freeing the event.
10007 */
10008 if (!event_file)
10009 free_event(event);
79c9ce57
PZ
10010err_cred:
10011 if (task)
10012 mutex_unlock(&task->signal->cred_guard_mutex);
1f4ee503 10013err_cpus:
fbfc623f 10014 put_online_cpus();
1f4ee503 10015err_task:
e7d0bc04
PZ
10016 if (task)
10017 put_task_struct(task);
89a1e187 10018err_group_fd:
2903ff01 10019 fdput(group);
ea635c64
AV
10020err_fd:
10021 put_unused_fd(event_fd);
dc86cabe 10022 return err;
0793a61d
TG
10023}
10024
fb0459d7
AV
10025/**
10026 * perf_event_create_kernel_counter
10027 *
10028 * @attr: attributes of the counter to create
10029 * @cpu: cpu on which the counter is bound
38a81da2 10030 * @task: task to profile (NULL for percpu)
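 * @overflow_handler: callback to trigger when we hit the event
 * @context: context data passed through to @overflow_handler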
fb0459d7
AV
10031 */
10032struct perf_event *
10033perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 10034 struct task_struct *task,
4dc0da86
AK
10035 perf_overflow_handler_t overflow_handler,
10036 void *context)
fb0459d7 10037{
fb0459d7 10038 struct perf_event_context *ctx;
c3f00c70 10039 struct perf_event *event;
fb0459d7 10040 int err;
d859e29f 10041
fb0459d7
AV
10042 /*
10043 * Get the target context (task or percpu):
10044 */
d859e29f 10045
4dc0da86 10046 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 10047 overflow_handler, context, -1);
c3f00c70
PZ
10048 if (IS_ERR(event)) {
10049 err = PTR_ERR(event);
10050 goto err;
10051 }
d859e29f 10052
f8697762 10053 /* Mark owner so we could distinguish it from user events. */
63b6da39 10054 event->owner = TASK_TOMBSTONE;
f8697762 10055
4af57ef2 10056 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
10057 if (IS_ERR(ctx)) {
10058 err = PTR_ERR(ctx);
c3f00c70 10059 goto err_free;
d859e29f 10060 }
fb0459d7 10061
fb0459d7
AV
10062 WARN_ON_ONCE(ctx->parent_ctx);
10063 mutex_lock(&ctx->mutex);
84c4e620
PZ
10064 if (ctx->task == TASK_TOMBSTONE) {
10065 err = -ESRCH;
10066 goto err_unlock;
10067 }
10068
bed5b25a 10069 if (!exclusive_event_installable(event, ctx)) {
bed5b25a 10070 err = -EBUSY;
84c4e620 10071 goto err_unlock;
bed5b25a
AS
10072 }
10073
fb0459d7 10074 perf_install_in_context(ctx, event, cpu);
fe4b04fa 10075 perf_unpin_context(ctx);
fb0459d7
AV
10076 mutex_unlock(&ctx->mutex);
10077
fb0459d7
AV
10078 return event;
10079
84c4e620
PZ
10080err_unlock:
10081 mutex_unlock(&ctx->mutex);
10082 perf_unpin_context(ctx);
10083 put_ctx(ctx);
c3f00c70
PZ
10084err_free:
10085 free_event(event);
10086err:
c6567f64 10087 return ERR_PTR(err);
9b51f66d 10088}
fb0459d7 10089EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9b51f66d 10090
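/*
 * Move every event on @src_cpu's per-PMU context over to @dst_cpu;
 * typically used by uncore-style PMU drivers when the CPU that carries
 * their events goes offline.
 */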
0cda4c02
YZ
10091void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
10092{
10093 struct perf_event_context *src_ctx;
10094 struct perf_event_context *dst_ctx;
10095 struct perf_event *event, *tmp;
10096 LIST_HEAD(events);
10097
10098 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
10099 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
10100
f63a8daa
PZ
10101 /*
10102 * See perf_event_ctx_lock() for comments on the details
10103 * of swizzling perf_event::ctx.
10104 */
10105 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
10106 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
10107 event_entry) {
45a0e07a 10108 perf_remove_from_context(event, 0);
9a545de0 10109 unaccount_event_cpu(event, src_cpu);
0cda4c02 10110 put_ctx(src_ctx);
9886167d 10111 list_add(&event->migrate_entry, &events);
0cda4c02 10112 }
0cda4c02 10113
8f95b435
PZI
10114 /*
10115 * Wait for the events to quiesce before re-instating them.
10116 */
0cda4c02
YZ
10117 synchronize_rcu();
10118
8f95b435
PZI
10119 /*
10120 * Re-instate events in 2 passes.
10121 *
10122 * Skip over group leaders and only install siblings on this first
10123 * pass, siblings will not get enabled without a leader, however a
10124 * leader will enable its siblings, even if those are still on the old
10125 * context.
10126 */
10127 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10128 if (event->group_leader == event)
10129 continue;
10130
10131 list_del(&event->migrate_entry);
10132 if (event->state >= PERF_EVENT_STATE_OFF)
10133 event->state = PERF_EVENT_STATE_INACTIVE;
10134 account_event_cpu(event, dst_cpu);
10135 perf_install_in_context(dst_ctx, event, dst_cpu);
10136 get_ctx(dst_ctx);
10137 }
10138
10139 /*
10140 * Once all the siblings are setup properly, install the group leaders
10141 * to make it go.
10142 */
9886167d
PZ
10143 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10144 list_del(&event->migrate_entry);
0cda4c02
YZ
10145 if (event->state >= PERF_EVENT_STATE_OFF)
10146 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 10147 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
10148 perf_install_in_context(dst_ctx, event, dst_cpu);
10149 get_ctx(dst_ctx);
10150 }
10151 mutex_unlock(&dst_ctx->mutex);
f63a8daa 10152 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
10153}
10154EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
10155
cdd6c482 10156static void sync_child_event(struct perf_event *child_event,
38b200d6 10157 struct task_struct *child)
d859e29f 10158{
cdd6c482 10159 struct perf_event *parent_event = child_event->parent;
8bc20959 10160 u64 child_val;
d859e29f 10161
cdd6c482
IM
10162 if (child_event->attr.inherit_stat)
10163 perf_event_read_event(child_event, child);
38b200d6 10164
b5e58793 10165 child_val = perf_event_count(child_event);
d859e29f
PM
10166
10167 /*
10168 * Add back the child's count to the parent's count:
10169 */
a6e6dea6 10170 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
10171 atomic64_add(child_event->total_time_enabled,
10172 &parent_event->child_total_time_enabled);
10173 atomic64_add(child_event->total_time_running,
10174 &parent_event->child_total_time_running);
d859e29f
PM
10175}
10176
9b51f66d 10177static void
8ba289b8
PZ
10178perf_event_exit_event(struct perf_event *child_event,
10179 struct perf_event_context *child_ctx,
10180 struct task_struct *child)
9b51f66d 10181{
8ba289b8
PZ
10182 struct perf_event *parent_event = child_event->parent;
10183
1903d50c
PZ
10184 /*
10185 * Do not destroy the 'original' grouping; because of the context
10186 * switch optimization the original events could've ended up in a
10187 * random child task.
10188 *
10189 * If we were to destroy the original group, all group related
10190 * operations would cease to function properly after this random
10191 * child dies.
10192 *
10193 * Do destroy all inherited groups, we don't care about those
10194 * and being thorough is better.
10195 */
32132a3d
PZ
10196 raw_spin_lock_irq(&child_ctx->lock);
10197 WARN_ON_ONCE(child_ctx->is_active);
10198
8ba289b8 10199 if (parent_event)
32132a3d
PZ
10200 perf_group_detach(child_event);
10201 list_del_event(child_event, child_ctx);
a69b0ca4 10202 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
32132a3d 10203 raw_spin_unlock_irq(&child_ctx->lock);
0cc0c027 10204
9b51f66d 10205 /*
8ba289b8 10206 * Parent events are governed by their filedesc, retain them.
9b51f66d 10207 */
8ba289b8 10208 if (!parent_event) {
179033b3 10209 perf_event_wakeup(child_event);
8ba289b8 10210 return;
4bcf349a 10211 }
8ba289b8
PZ
10212 /*
10213 * Child events can be cleaned up.
10214 */
10215
10216 sync_child_event(child_event, child);
10217
10218 /*
10219 * Remove this event from the parent's list
10220 */
10221 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
10222 mutex_lock(&parent_event->child_mutex);
10223 list_del_init(&child_event->child_list);
10224 mutex_unlock(&parent_event->child_mutex);
10225
10226 /*
10227 * Kick perf_poll() for is_event_hup().
10228 */
10229 perf_event_wakeup(parent_event);
10230 free_event(child_event);
10231 put_event(parent_event);
9b51f66d
IM
10232}
10233
8dc85d54 10234static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 10235{
211de6eb 10236 struct perf_event_context *child_ctx, *clone_ctx = NULL;
63b6da39 10237 struct perf_event *child_event, *next;
63b6da39
PZ
10238
10239 WARN_ON_ONCE(child != current);
9b51f66d 10240
6a3351b6 10241 child_ctx = perf_pin_task_context(child, ctxn);
63b6da39 10242 if (!child_ctx)
9b51f66d
IM
10243 return;
10244
ad3a37de 10245 /*
6a3351b6
PZ
10246 * In order to reduce the amount of trickiness in ctx tear-down, we hold
10247 * ctx::mutex over the entire thing. This serializes against almost
10248 * everything that wants to access the ctx.
10249 *
10250 * The exception is sys_perf_event_open() /
10251 * perf_event_create_kernel_counter() which does find_get_context()
10252 * without ctx::mutex (it cannot because of the move_group double mutex
10253 * lock thing). See the comments in perf_install_in_context().
ad3a37de 10254 */
6a3351b6 10255 mutex_lock(&child_ctx->mutex);
c93f7669
PM
10256
10257 /*
6a3351b6
PZ
10258 * In a single ctx::lock section, de-schedule the events and detach the
10259 * context from the task such that we cannot ever get it scheduled back
10260 * in.
c93f7669 10261 */
6a3351b6 10262 raw_spin_lock_irq(&child_ctx->lock);
63b6da39 10263 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
4a1c0f26 10264
71a851b4 10265 /*
63b6da39
PZ
10266 * Now that the context is inactive, destroy the task <-> ctx relation
10267 * and mark the context dead.
71a851b4 10268 */
63b6da39
PZ
10269 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
10270 put_ctx(child_ctx); /* cannot be last */
10271 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
10272 put_task_struct(current); /* cannot be last */
4a1c0f26 10273
211de6eb 10274 clone_ctx = unclone_ctx(child_ctx);
6a3351b6 10275 raw_spin_unlock_irq(&child_ctx->lock);
9f498cc5 10276
211de6eb
PZ
10277 if (clone_ctx)
10278 put_ctx(clone_ctx);
4a1c0f26 10279
9f498cc5 10280 /*
cdd6c482
IM
10281 * Report the task dead after unscheduling the events so that we
10282 * won't get any samples after PERF_RECORD_EXIT. We can however still
10283 * get a few PERF_RECORD_READ events.
9f498cc5 10284 */
cdd6c482 10285 perf_event_task(child, child_ctx, 0);
a63eaf34 10286
ebf905fc 10287 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8ba289b8 10288 perf_event_exit_event(child_event, child_ctx, child);
8bc20959 10289
a63eaf34
PM
10290 mutex_unlock(&child_ctx->mutex);
10291
10292 put_ctx(child_ctx);
9b51f66d
IM
10293}
10294
8dc85d54
PZ
10295/*
10296 * When a child task exits, feed back event values to parent events.
79c9ce57
PZ
10297 *
10298 * Can be called with cred_guard_mutex held when called from
10299 * install_exec_creds().
8dc85d54
PZ
10300 */
10301void perf_event_exit_task(struct task_struct *child)
10302{
8882135b 10303 struct perf_event *event, *tmp;
8dc85d54
PZ
10304 int ctxn;
10305
8882135b
PZ
10306 mutex_lock(&child->perf_event_mutex);
10307 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
10308 owner_entry) {
10309 list_del_init(&event->owner_entry);
10310
10311 /*
10312 * Ensure the list deletion is visible before we clear
10313 * the owner, closes a race against perf_release() where
10314 * we need to serialize on the owner->perf_event_mutex.
10315 */
f47c02c0 10316 smp_store_release(&event->owner, NULL);
8882135b
PZ
10317 }
10318 mutex_unlock(&child->perf_event_mutex);
10319
8dc85d54
PZ
10320 for_each_task_context_nr(ctxn)
10321 perf_event_exit_task_context(child, ctxn);
4e93ad60
JO
10322
10323 /*
10324 * The perf_event_exit_task_context calls perf_event_task
10325 * with child's task_ctx, which generates EXIT events for
10326 * child contexts and sets child->perf_event_ctxp[] to NULL.
10327 * At this point we need to send EXIT events to cpu contexts.
10328 */
10329 perf_event_task(child, NULL, 0);
8dc85d54
PZ
10330}
10331
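/*
 * Tear down an inherited event that was never exposed to userspace:
 * unlink it from its parent and its context and free it. Used by the
 * fork() failure path via perf_event_free_task().
 */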
889ff015
FW
10332static void perf_free_event(struct perf_event *event,
10333 struct perf_event_context *ctx)
10334{
10335 struct perf_event *parent = event->parent;
10336
10337 if (WARN_ON_ONCE(!parent))
10338 return;
10339
10340 mutex_lock(&parent->child_mutex);
10341 list_del_init(&event->child_list);
10342 mutex_unlock(&parent->child_mutex);
10343
a6fa941d 10344 put_event(parent);
889ff015 10345
652884fe 10346 raw_spin_lock_irq(&ctx->lock);
8a49542c 10347 perf_group_detach(event);
889ff015 10348 list_del_event(event, ctx);
652884fe 10349 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
10350 free_event(event);
10351}
10352
bbbee908 10353/*
652884fe 10354 * Free an unexposed, unused context as created by inheritance by
8dc85d54 10355 * perf_event_init_task below, used by fork() in case of fail.
652884fe
PZ
10356 *
10357 * Not all locks are strictly required, but take them anyway to be nice and
10358 * help out with the lockdep assertions.
bbbee908 10359 */
cdd6c482 10360void perf_event_free_task(struct task_struct *task)
bbbee908 10361{
8dc85d54 10362 struct perf_event_context *ctx;
cdd6c482 10363 struct perf_event *event, *tmp;
8dc85d54 10364 int ctxn;
bbbee908 10365
8dc85d54
PZ
10366 for_each_task_context_nr(ctxn) {
10367 ctx = task->perf_event_ctxp[ctxn];
10368 if (!ctx)
10369 continue;
bbbee908 10370
8dc85d54 10371 mutex_lock(&ctx->mutex);
bbbee908 10372again:
8dc85d54
PZ
10373 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
10374 group_entry)
10375 perf_free_event(event, ctx);
bbbee908 10376
8dc85d54
PZ
10377 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
10378 group_entry)
10379 perf_free_event(event, ctx);
bbbee908 10380
8dc85d54
PZ
10381 if (!list_empty(&ctx->pinned_groups) ||
10382 !list_empty(&ctx->flexible_groups))
10383 goto again;
bbbee908 10384
8dc85d54 10385 mutex_unlock(&ctx->mutex);
bbbee908 10386
8dc85d54
PZ
10387 put_ctx(ctx);
10388 }
889ff015
FW
10389}
10390
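/*
 * Last-chance sanity check at final task teardown: every perf context
 * must already have been released by perf_event_exit_task() or
 * perf_event_free_task().
 */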
4e231c79
PZ
10391void perf_event_delayed_put(struct task_struct *task)
10392{
10393 int ctxn;
10394
10395 for_each_task_context_nr(ctxn)
10396 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10397}
10398
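/*
 * Resolve a user supplied perf event fd to its struct file; takes a
 * reference that the caller must drop with fput() when done.
 */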
e03e7ee3 10399struct file *perf_event_get(unsigned int fd)
ffe8690c 10400{
e03e7ee3 10401 struct file *file;
ffe8690c 10402
e03e7ee3
AS
10403 file = fget_raw(fd);
10404 if (!file)
10405 return ERR_PTR(-EBADF);
ffe8690c 10406
e03e7ee3
AS
10407 if (file->f_op != &perf_fops) {
10408 fput(file);
10409 return ERR_PTR(-EBADF);
10410 }
ffe8690c 10411
e03e7ee3 10412 return file;
ffe8690c
KX
10413}
10414
10415const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10416{
10417 if (!event)
10418 return ERR_PTR(-EINVAL);
10419
10420 return &event->attr;
10421}
10422
97dee4f3
PZ
10423/*
10424 * inherit an event from parent task to child task:
10425 */
10426static struct perf_event *
10427inherit_event(struct perf_event *parent_event,
10428 struct task_struct *parent,
10429 struct perf_event_context *parent_ctx,
10430 struct task_struct *child,
10431 struct perf_event *group_leader,
10432 struct perf_event_context *child_ctx)
10433{
1929def9 10434 enum perf_event_active_state parent_state = parent_event->state;
97dee4f3 10435 struct perf_event *child_event;
cee010ec 10436 unsigned long flags;
97dee4f3
PZ
10437
10438 /*
10439 * Instead of creating recursive hierarchies of events,
10440 * we link inherited events back to the original parent,
10441 * which has a filp for sure, which we use as the reference
10442 * count:
10443 */
10444 if (parent_event->parent)
10445 parent_event = parent_event->parent;
10446
10447 child_event = perf_event_alloc(&parent_event->attr,
10448 parent_event->cpu,
d580ff86 10449 child,
97dee4f3 10450 group_leader, parent_event,
79dff51e 10451 NULL, NULL, -1);
97dee4f3
PZ
10452 if (IS_ERR(child_event))
10453 return child_event;
a6fa941d 10454
c6e5b732
PZ
10455 /*
10456 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10457 * must be under the same lock in order to serialize against
10458 * perf_event_release_kernel(), such that either we must observe
10459 * is_orphaned_event() or they will observe us on the child_list.
10460 */
10461 mutex_lock(&parent_event->child_mutex);
fadfe7be
JO
10462 if (is_orphaned_event(parent_event) ||
10463 !atomic_long_inc_not_zero(&parent_event->refcount)) {
c6e5b732 10464 mutex_unlock(&parent_event->child_mutex);
a6fa941d
AV
10465 free_event(child_event);
10466 return NULL;
10467 }
10468
97dee4f3
PZ
10469 get_ctx(child_ctx);
10470
10471 /*
10472 * Make the child state follow the state of the parent event,
10473 * not its attr.disabled bit. We hold the parent's mutex,
10474 * so we won't race with perf_event_{en, dis}able_family.
10475 */
1929def9 10476 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
10477 child_event->state = PERF_EVENT_STATE_INACTIVE;
10478 else
10479 child_event->state = PERF_EVENT_STATE_OFF;
10480
10481 if (parent_event->attr.freq) {
10482 u64 sample_period = parent_event->hw.sample_period;
10483 struct hw_perf_event *hwc = &child_event->hw;
10484
10485 hwc->sample_period = sample_period;
10486 hwc->last_period = sample_period;
10487
10488 local64_set(&hwc->period_left, sample_period);
10489 }
10490
10491 child_event->ctx = child_ctx;
10492 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
10493 child_event->overflow_handler_context
10494 = parent_event->overflow_handler_context;
97dee4f3 10495
614b6780
TG
10496 /*
10497 * Precalculate sample_data sizes
10498 */
10499 perf_event__header_size(child_event);
6844c09d 10500 perf_event__id_header_size(child_event);
614b6780 10501
97dee4f3
PZ
10502 /*
10503 * Link it up in the child's context:
10504 */
cee010ec 10505 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 10506 add_event_to_ctx(child_event, child_ctx);
cee010ec 10507 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 10508
97dee4f3
PZ
10509 /*
10510 * Link this into the parent event's child list
10511 */
97dee4f3
PZ
10512 list_add_tail(&child_event->child_list, &parent_event->child_list);
10513 mutex_unlock(&parent_event->child_mutex);
10514
10515 return child_event;
10516}
10517
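/*
 * Inherit an entire group: clone the leader first, then each sibling,
 * attaching all of the clones to @child_ctx.
 */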
10518static int inherit_group(struct perf_event *parent_event,
10519 struct task_struct *parent,
10520 struct perf_event_context *parent_ctx,
10521 struct task_struct *child,
10522 struct perf_event_context *child_ctx)
10523{
10524 struct perf_event *leader;
10525 struct perf_event *sub;
10526 struct perf_event *child_ctr;
10527
10528 leader = inherit_event(parent_event, parent, parent_ctx,
10529 child, NULL, child_ctx);
10530 if (IS_ERR(leader))
10531 return PTR_ERR(leader);
10532 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10533 child_ctr = inherit_event(sub, parent, parent_ctx,
10534 child, leader, child_ctx);
10535 if (IS_ERR(child_ctr))
10536 return PTR_ERR(child_ctr);
10537 }
10538 return 0;
889ff015
FW
10539}
10540
10541static int
10542inherit_task_group(struct perf_event *event, struct task_struct *parent,
10543 struct perf_event_context *parent_ctx,
8dc85d54 10544 struct task_struct *child, int ctxn,
889ff015
FW
10545 int *inherited_all)
10546{
10547 int ret;
8dc85d54 10548 struct perf_event_context *child_ctx;
889ff015
FW
10549
10550 if (!event->attr.inherit) {
10551 *inherited_all = 0;
10552 return 0;
bbbee908
PZ
10553 }
10554
fe4b04fa 10555 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
10556 if (!child_ctx) {
10557 /*
10558 * This is executed from the parent task context, so
10559 * inherit events that have been marked for cloning.
10560 * First allocate and initialize a context for the
10561 * child.
10562 */
bbbee908 10563
734df5ab 10564 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
10565 if (!child_ctx)
10566 return -ENOMEM;
bbbee908 10567
8dc85d54 10568 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
10569 }
10570
10571 ret = inherit_group(event, parent, parent_ctx,
10572 child, child_ctx);
10573
10574 if (ret)
10575 *inherited_all = 0;
10576
10577 return ret;
bbbee908
PZ
10578}
10579
9b51f66d 10580/*
cdd6c482 10581 * Initialize the perf_event context in task_struct
9b51f66d 10582 */
985c8dcb 10583static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 10584{
889ff015 10585 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
10586 struct perf_event_context *cloned_ctx;
10587 struct perf_event *event;
9b51f66d 10588 struct task_struct *parent = current;
564c2b21 10589 int inherited_all = 1;
dddd3379 10590 unsigned long flags;
6ab423e0 10591 int ret = 0;
9b51f66d 10592
8dc85d54 10593 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
10594 return 0;
10595
ad3a37de 10596 /*
25346b93
PM
10597 * If the parent's context is a clone, pin it so it won't get
10598 * swapped under us.
ad3a37de 10599 */
8dc85d54 10600 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
10601 if (!parent_ctx)
10602 return 0;
25346b93 10603
ad3a37de
PM
10604 /*
10605 * No need to check if parent_ctx != NULL here; since we saw
10606 * it non-NULL earlier, the only reason for it to become NULL
10607 * is if we exit, and since we're currently in the middle of
10608 * a fork we can't be exiting at the same time.
10609 */
ad3a37de 10610
9b51f66d
IM
10611 /*
10612 * Lock the parent list. No need to lock the child - not PID
10613 * hashed yet and not running, so nobody can access it.
10614 */
d859e29f 10615 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
10616
10617 /*
10618 * We don't have to disable NMIs - we are only looking at
10619 * the list, not manipulating it:
10620 */
889ff015 10621 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
10622 ret = inherit_task_group(event, parent, parent_ctx,
10623 child, ctxn, &inherited_all);
889ff015
FW
10624 if (ret)
10625 break;
10626 }
b93f7978 10627
dddd3379
TG
10628 /*
10629 * We can't hold ctx->lock when iterating the ->flexible_groups list due
10630 * to allocations, but we need to prevent rotation because
10631 * rotate_ctx() will change the list from interrupt context.
10632 */
10633 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10634 parent_ctx->rotate_disable = 1;
10635 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10636
889ff015 10637 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
10638 ret = inherit_task_group(event, parent, parent_ctx,
10639 child, ctxn, &inherited_all);
889ff015 10640 if (ret)
9b51f66d 10641 break;
564c2b21
PM
10642 }
10643
dddd3379
TG
10644 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10645 parent_ctx->rotate_disable = 0;
dddd3379 10646
8dc85d54 10647 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 10648
05cbaa28 10649 if (child_ctx && inherited_all) {
564c2b21
PM
10650 /*
10651 * Mark the child context as a clone of the parent
10652 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
10653 *
10654 * Note that if the parent is a clone, the holding of
10655 * parent_ctx->lock avoids it from being uncloned.
564c2b21 10656 */
c5ed5145 10657 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
10658 if (cloned_ctx) {
10659 child_ctx->parent_ctx = cloned_ctx;
25346b93 10660 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
10661 } else {
10662 child_ctx->parent_ctx = parent_ctx;
10663 child_ctx->parent_gen = parent_ctx->generation;
10664 }
10665 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
10666 }
10667
c5ed5145 10668 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 10669 mutex_unlock(&parent_ctx->mutex);
6ab423e0 10670
25346b93 10671 perf_unpin_context(parent_ctx);
fe4b04fa 10672 put_ctx(parent_ctx);
ad3a37de 10673
6ab423e0 10674 return ret;
9b51f66d
IM
10675}
10676
8dc85d54
PZ
10677/*
10678 * Initialize the perf_event context in task_struct
10679 */
10680int perf_event_init_task(struct task_struct *child)
10681{
10682 int ctxn, ret;
10683
8550d7cb
ON
10684 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
10685 mutex_init(&child->perf_event_mutex);
10686 INIT_LIST_HEAD(&child->perf_event_list);
10687
8dc85d54
PZ
10688 for_each_task_context_nr(ctxn) {
10689 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
10690 if (ret) {
10691 perf_event_free_task(child);
8dc85d54 10692 return ret;
6c72e350 10693 }
8dc85d54
PZ
10694 }
10695
10696 return 0;
10697}
10698
220b140b
PM
10699static void __init perf_event_init_all_cpus(void)
10700{
b28ab83c 10701 struct swevent_htable *swhash;
220b140b 10702 int cpu;
220b140b
PM
10703
10704 for_each_possible_cpu(cpu) {
b28ab83c
PZ
10705 swhash = &per_cpu(swevent_htable, cpu);
10706 mutex_init(&swhash->hlist_mutex);
2fde4f94 10707 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
f2fb6bef
KL
10708
10709 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
10710 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
e48c1788
PZ
10711
10712 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
220b140b
PM
10713 }
10714}
10715
00e16c3d 10716int perf_event_init_cpu(unsigned int cpu)
0793a61d 10717{
108b02cf 10718 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 10719
b28ab83c 10720 mutex_lock(&swhash->hlist_mutex);
059fcd8c 10721 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
76e1d904
FW
10722 struct swevent_hlist *hlist;
10723
b28ab83c
PZ
10724 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
10725 WARN_ON(!hlist);
10726 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 10727 }
b28ab83c 10728 mutex_unlock(&swhash->hlist_mutex);
00e16c3d 10729 return 0;
0793a61d
TG
10730}
10731
2965faa5 10732#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
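/*
 * Runs on the CPU that is going away (via smp_call_function_single()):
 * detach every event from that CPU's context.
 */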
108b02cf 10733static void __perf_event_exit_context(void *__info)
0793a61d 10734{
108b02cf 10735 struct perf_event_context *ctx = __info;
fae3fde6
PZ
10736 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
10737 struct perf_event *event;
0793a61d 10738
fae3fde6
PZ
10739 raw_spin_lock(&ctx->lock);
10740 list_for_each_entry(event, &ctx->event_list, event_entry)
45a0e07a 10741 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
fae3fde6 10742 raw_spin_unlock(&ctx->lock);
0793a61d 10743}
108b02cf
PZ
10744
10745static void perf_event_exit_cpu_context(int cpu)
10746{
10747 struct perf_event_context *ctx;
10748 struct pmu *pmu;
10749 int idx;
10750
10751 idx = srcu_read_lock(&pmus_srcu);
10752 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 10753 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
10754
10755 mutex_lock(&ctx->mutex);
10756 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
10757 mutex_unlock(&ctx->mutex);
10758 }
10759 srcu_read_unlock(&pmus_srcu, idx);
108b02cf 10760}
00e16c3d
TG
10761#else
10762
10763static void perf_event_exit_cpu_context(int cpu) { }
10764
10765#endif
108b02cf 10766
00e16c3d 10767int perf_event_exit_cpu(unsigned int cpu)
0793a61d 10768{
e3703f8c 10769 perf_event_exit_cpu_context(cpu);
00e16c3d 10770 return 0;
0793a61d 10771}
0793a61d 10772
c277443c
PZ
10773static int
10774perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
10775{
10776 int cpu;
10777
10778 for_each_online_cpu(cpu)
10779 perf_event_exit_cpu(cpu);
10780
10781 return NOTIFY_OK;
10782}
10783
10784/*
10785 * Run the perf reboot notifier at the very last possible moment so that
10786 * the generic watchdog code runs as long as possible.
10787 */
10788static struct notifier_block perf_reboot_notifier = {
10789 .notifier_call = perf_reboot,
10790 .priority = INT_MIN,
10791};
10792
cdd6c482 10793void __init perf_event_init(void)
0793a61d 10794{
3c502e7a
JW
10795 int ret;
10796
2e80a82a
PZ
10797 idr_init(&pmu_idr);
10798
220b140b 10799 perf_event_init_all_cpus();
b0a873eb 10800 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
10801 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
10802 perf_pmu_register(&perf_cpu_clock, NULL, -1);
10803 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb 10804 perf_tp_register();
00e16c3d 10805 perf_event_init_cpu(smp_processor_id());
c277443c 10806 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
10807
10808 ret = init_hw_breakpoint();
10809 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520 10810
b01c3a00
JO
10811 /*
10812 * Build time assertion that we keep the data_head at the intended
10813 * location. IOW, validation we got the __reserved[] size right.
10814 */
10815 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
10816 != 1024);
0793a61d 10817}
abe43400 10818
fd979c01
CS
10819ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
10820 char *page)
10821{
10822 struct perf_pmu_events_attr *pmu_attr =
10823 container_of(attr, struct perf_pmu_events_attr, attr);
10824
10825 if (pmu_attr->event_str)
10826 return sprintf(page, "%s\n", pmu_attr->event_str);
10827
10828 return 0;
10829}
675965b0 10830EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
fd979c01 10831
abe43400
PZ
10832static int __init perf_event_sysfs_init(void)
10833{
10834 struct pmu *pmu;
10835 int ret;
10836
10837 mutex_lock(&pmus_lock);
10838
10839 ret = bus_register(&pmu_bus);
10840 if (ret)
10841 goto unlock;
10842
10843 list_for_each_entry(pmu, &pmus, entry) {
10844 if (!pmu->name || pmu->type < 0)
10845 continue;
10846
10847 ret = pmu_dev_alloc(pmu);
10848 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
10849 }
10850 pmu_bus_running = 1;
10851 ret = 0;
10852
10853unlock:
10854 mutex_unlock(&pmus_lock);
10855
10856 return ret;
10857}
10858device_initcall(perf_event_sysfs_init);
e5d1367f
SE
10859
10860#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
10861static struct cgroup_subsys_state *
10862perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
10863{
10864 struct perf_cgroup *jc;
e5d1367f 10865
1b15d055 10866 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
10867 if (!jc)
10868 return ERR_PTR(-ENOMEM);
10869
e5d1367f
SE
10870 jc->info = alloc_percpu(struct perf_cgroup_info);
10871 if (!jc->info) {
10872 kfree(jc);
10873 return ERR_PTR(-ENOMEM);
10874 }
10875
e5d1367f
SE
10876 return &jc->css;
10877}
10878
eb95419b 10879static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 10880{
eb95419b
TH
10881 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
10882
e5d1367f
SE
10883 free_percpu(jc->info);
10884 kfree(jc);
10885}
10886
10887static int __perf_cgroup_move(void *info)
10888{
10889 struct task_struct *task = info;
ddaaf4e2 10890 rcu_read_lock();
e5d1367f 10891 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
ddaaf4e2 10892 rcu_read_unlock();
e5d1367f
SE
10893 return 0;
10894}
10895
1f7dd3e5 10896static void perf_cgroup_attach(struct cgroup_taskset *tset)
e5d1367f 10897{
bb9d97b6 10898 struct task_struct *task;
1f7dd3e5 10899 struct cgroup_subsys_state *css;
bb9d97b6 10900
1f7dd3e5 10901 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 10902 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
10903}
10904
073219e9 10905struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
10906 .css_alloc = perf_cgroup_css_alloc,
10907 .css_free = perf_cgroup_css_free,
bb9d97b6 10908 .attach = perf_cgroup_attach,
e5d1367f
SE
10909};
10910#endif /* CONFIG_CGROUP_PERF */