git.ipfire.org Git - thirdparty/linux.git / blame - kernel/events/core.c
perf/core: Generalize event->group_flags
0793a61d 1/*
57c0c15b 2 * Performance events core code:
0793a61d 3 *
98144511 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
e7e7ee2e 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
90eec103 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
d36b6910 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7b732a75 8 *
57c0c15b 9 * For licensing details see kernel-base/COPYING
0793a61d
TG
10 */
11
12#include <linux/fs.h>
b9cacc7b 13#include <linux/mm.h>
0793a61d
TG
14#include <linux/cpu.h>
15#include <linux/smp.h>
2e80a82a 16#include <linux/idr.h>
04289bb9 17#include <linux/file.h>
0793a61d 18#include <linux/poll.h>
5a0e3ad6 19#include <linux/slab.h>
76e1d904 20#include <linux/hash.h>
12351ef8 21#include <linux/tick.h>
0793a61d 22#include <linux/sysfs.h>
22a4f650 23#include <linux/dcache.h>
0793a61d 24#include <linux/percpu.h>
22a4f650 25#include <linux/ptrace.h>
c277443c 26#include <linux/reboot.h>
b9cacc7b 27#include <linux/vmstat.h>
abe43400 28#include <linux/device.h>
6e5fdeed 29#include <linux/export.h>
906010b2 30#include <linux/vmalloc.h>
b9cacc7b
PZ
31#include <linux/hardirq.h>
32#include <linux/rculist.h>
0793a61d
TG
33#include <linux/uaccess.h>
34#include <linux/syscalls.h>
35#include <linux/anon_inodes.h>
aa9c4c0f 36#include <linux/kernel_stat.h>
39bed6cb 37#include <linux/cgroup.h>
cdd6c482 38#include <linux/perf_event.h>
af658dca 39#include <linux/trace_events.h>
3c502e7a 40#include <linux/hw_breakpoint.h>
c5ebcedb 41#include <linux/mm_types.h>
c464c76e 42#include <linux/module.h>
f972eb63 43#include <linux/mman.h>
b3f20785 44#include <linux/compat.h>
2541517c
AS
45#include <linux/bpf.h>
46#include <linux/filter.h>
375637bc
AS
47#include <linux/namei.h>
48#include <linux/parser.h>
0793a61d 49
76369139
FW
50#include "internal.h"
51
4e193bd4
TB
52#include <asm/irq_regs.h>
53
272325c4
PZ
54typedef int (*remote_function_f)(void *);
55
fe4b04fa 56struct remote_function_call {
e7e7ee2e 57 struct task_struct *p;
272325c4 58 remote_function_f func;
e7e7ee2e
IM
59 void *info;
60 int ret;
fe4b04fa
PZ
61};
62
63static void remote_function(void *data)
64{
65 struct remote_function_call *tfc = data;
66 struct task_struct *p = tfc->p;
67
68 if (p) {
0da4cf3e
PZ
69 /* -EAGAIN */
70 if (task_cpu(p) != smp_processor_id())
71 return;
72
73 /*
74 * Now that we're on the right CPU with IRQs disabled, we can test
75 * if we hit the right task without races.
76 */
77
78 tfc->ret = -ESRCH; /* No such (running) process */
79 if (p != current)
fe4b04fa
PZ
80 return;
81 }
82
83 tfc->ret = tfc->func(tfc->info);
84}
85
86/**
87 * task_function_call - call a function on the cpu on which a task runs
88 * @p: the task to evaluate
89 * @func: the function to be called
90 * @info: the function call argument
91 *
92 * Calls the function @func when the task is currently running. This might
93 * be on the current CPU, which just calls the function directly.
94 *
95 * returns: @func return value, or
96 * -ESRCH - when the process isn't running
97 * -EAGAIN - when the process moved away
98 */
99static int
272325c4 100task_function_call(struct task_struct *p, remote_function_f func, void *info)
fe4b04fa
PZ
101{
102 struct remote_function_call data = {
e7e7ee2e
IM
103 .p = p,
104 .func = func,
105 .info = info,
0da4cf3e 106 .ret = -EAGAIN,
fe4b04fa 107 };
0da4cf3e 108 int ret;
fe4b04fa 109
0da4cf3e
PZ
110 do {
111 ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
112 if (!ret)
113 ret = data.ret;
114 } while (ret == -EAGAIN);
fe4b04fa 115
0da4cf3e 116 return ret;
fe4b04fa
PZ
117}
118
119/**
120 * cpu_function_call - call a function on the cpu
 * @cpu: the cpu to run @func on
121 * @func: the function to be called
122 * @info: the function call argument
123 *
124 * Calls the function @func on the remote cpu.
125 *
126 * returns: @func return value or -ENXIO when the cpu is offline
127 */
272325c4 128static int cpu_function_call(int cpu, remote_function_f func, void *info)
fe4b04fa
PZ
129{
130 struct remote_function_call data = {
e7e7ee2e
IM
131 .p = NULL,
132 .func = func,
133 .info = info,
134 .ret = -ENXIO, /* No such CPU */
fe4b04fa
PZ
135 };
136
137 smp_call_function_single(cpu, remote_function, &data, 1);
138
139 return data.ret;
140}
141
fae3fde6
PZ
142static inline struct perf_cpu_context *
143__get_cpu_context(struct perf_event_context *ctx)
144{
145 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
146}
147
148static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
149 struct perf_event_context *ctx)
0017960f 150{
fae3fde6
PZ
151 raw_spin_lock(&cpuctx->ctx.lock);
152 if (ctx)
153 raw_spin_lock(&ctx->lock);
154}
155
156static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
157 struct perf_event_context *ctx)
158{
159 if (ctx)
160 raw_spin_unlock(&ctx->lock);
161 raw_spin_unlock(&cpuctx->ctx.lock);
162}
163
63b6da39
PZ
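/*
 * Sentinel stored in ctx->task (and in event->owner for kernel events) once
 * the owning task is gone; it is never a valid task_struct pointer.
 */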
164#define TASK_TOMBSTONE ((void *)-1L)
165
166static bool is_kernel_event(struct perf_event *event)
167{
f47c02c0 168 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
63b6da39
PZ
169}
170
39a43640
PZ
171/*
172 * On task ctx scheduling...
173 *
174 * When !ctx->nr_events a task context will not be scheduled. This means
175 * we can disable the scheduler hooks (for performance) without leaving
176 * pending task ctx state.
177 *
178 * This however results in two special cases:
179 *
 180 * - removing the last event from a task ctx; this is relatively
 181 * straightforward and is done in __perf_remove_from_context.
182 *
183 * - adding the first event to a task ctx; this is tricky because we cannot
184 * rely on ctx->is_active and therefore cannot use event_function_call().
185 * See perf_install_in_context().
186 *
39a43640
PZ
187 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
188 */
189
fae3fde6
PZ
190typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
191 struct perf_event_context *, void *);
192
193struct event_function_struct {
194 struct perf_event *event;
195 event_f func;
196 void *data;
197};
198
199static int event_function(void *info)
200{
201 struct event_function_struct *efs = info;
202 struct perf_event *event = efs->event;
0017960f 203 struct perf_event_context *ctx = event->ctx;
fae3fde6
PZ
204 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
205 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63b6da39 206 int ret = 0;
fae3fde6
PZ
207
208 WARN_ON_ONCE(!irqs_disabled());
209
63b6da39 210 perf_ctx_lock(cpuctx, task_ctx);
fae3fde6
PZ
211 /*
212 * Since we do the IPI call without holding ctx->lock things can have
213 * changed, double check we hit the task we set out to hit.
fae3fde6
PZ
214 */
215 if (ctx->task) {
63b6da39 216 if (ctx->task != current) {
0da4cf3e 217 ret = -ESRCH;
63b6da39
PZ
218 goto unlock;
219 }
fae3fde6 220
fae3fde6
PZ
221 /*
222 * We only use event_function_call() on established contexts,
223 * and event_function() is only ever called when active (or
224 * rather, we'll have bailed in task_function_call() or the
225 * above ctx->task != current test), therefore we must have
226 * ctx->is_active here.
227 */
228 WARN_ON_ONCE(!ctx->is_active);
229 /*
230 * And since we have ctx->is_active, cpuctx->task_ctx must
231 * match.
232 */
63b6da39
PZ
233 WARN_ON_ONCE(task_ctx != ctx);
234 } else {
235 WARN_ON_ONCE(&cpuctx->ctx != ctx);
fae3fde6 236 }
63b6da39 237
fae3fde6 238 efs->func(event, cpuctx, ctx, efs->data);
63b6da39 239unlock:
fae3fde6
PZ
240 perf_ctx_unlock(cpuctx, task_ctx);
241
63b6da39 242 return ret;
fae3fde6
PZ
243}
244
fae3fde6 245static void event_function_call(struct perf_event *event, event_f func, void *data)
0017960f
PZ
246{
247 struct perf_event_context *ctx = event->ctx;
63b6da39 248 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
fae3fde6
PZ
249 struct event_function_struct efs = {
250 .event = event,
251 .func = func,
252 .data = data,
253 };
0017960f 254
c97f4736
PZ
255 if (!event->parent) {
256 /*
257 * If this is a !child event, we must hold ctx::mutex to
 258 * stabilize the event->ctx relation. See
259 * perf_event_ctx_lock().
260 */
261 lockdep_assert_held(&ctx->mutex);
262 }
0017960f
PZ
263
264 if (!task) {
fae3fde6 265 cpu_function_call(event->cpu, event_function, &efs);
0017960f
PZ
266 return;
267 }
268
63b6da39
PZ
269 if (task == TASK_TOMBSTONE)
270 return;
271
a096309b 272again:
fae3fde6 273 if (!task_function_call(task, event_function, &efs))
0017960f
PZ
274 return;
275
276 raw_spin_lock_irq(&ctx->lock);
63b6da39
PZ
277 /*
278 * Reload the task pointer, it might have been changed by
279 * a concurrent perf_event_context_sched_out().
280 */
281 task = ctx->task;
a096309b
PZ
282 if (task == TASK_TOMBSTONE) {
283 raw_spin_unlock_irq(&ctx->lock);
284 return;
0017960f 285 }
a096309b
PZ
286 if (ctx->is_active) {
287 raw_spin_unlock_irq(&ctx->lock);
288 goto again;
289 }
290 func(event, NULL, ctx, data);
0017960f
PZ
291 raw_spin_unlock_irq(&ctx->lock);
292}
293
cca20946
PZ
294/*
295 * Similar to event_function_call() + event_function(), but hard assumes IRQs
296 * are already disabled and we're on the right CPU.
297 */
298static void event_function_local(struct perf_event *event, event_f func, void *data)
299{
300 struct perf_event_context *ctx = event->ctx;
301 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
302 struct task_struct *task = READ_ONCE(ctx->task);
303 struct perf_event_context *task_ctx = NULL;
304
305 WARN_ON_ONCE(!irqs_disabled());
306
307 if (task) {
308 if (task == TASK_TOMBSTONE)
309 return;
310
311 task_ctx = ctx;
312 }
313
314 perf_ctx_lock(cpuctx, task_ctx);
315
316 task = ctx->task;
317 if (task == TASK_TOMBSTONE)
318 goto unlock;
319
320 if (task) {
321 /*
322 * We must be either inactive or active and the right task,
323 * otherwise we're screwed, since we cannot IPI to somewhere
324 * else.
325 */
326 if (ctx->is_active) {
327 if (WARN_ON_ONCE(task != current))
328 goto unlock;
329
330 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
331 goto unlock;
332 }
333 } else {
334 WARN_ON_ONCE(&cpuctx->ctx != ctx);
335 }
336
337 func(event, cpuctx, ctx, data);
338unlock:
339 perf_ctx_unlock(cpuctx, task_ctx);
340}
341
e5d1367f
SE
342#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
343 PERF_FLAG_FD_OUTPUT |\
a21b0b35
YD
344 PERF_FLAG_PID_CGROUP |\
345 PERF_FLAG_FD_CLOEXEC)
e5d1367f 346
bce38cd5
SE
347/*
348 * branch priv levels that need permission checks
349 */
350#define PERF_SAMPLE_BRANCH_PERM_PLM \
351 (PERF_SAMPLE_BRANCH_KERNEL |\
352 PERF_SAMPLE_BRANCH_HV)
353
0b3fcf17
SE
354enum event_type_t {
355 EVENT_FLEXIBLE = 0x1,
356 EVENT_PINNED = 0x2,
3cbaa590 357 EVENT_TIME = 0x4,
0b3fcf17
SE
358 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
359};
360
e5d1367f
SE
361/*
362 * perf_sched_events : >0 events exist
363 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
364 */
9107c89e
PZ
365
366static void perf_sched_delayed(struct work_struct *work);
367DEFINE_STATIC_KEY_FALSE(perf_sched_events);
368static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
369static DEFINE_MUTEX(perf_sched_mutex);
370static atomic_t perf_sched_count;
371
e5d1367f 372static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
ba532500 373static DEFINE_PER_CPU(int, perf_sched_cb_usages);
f2fb6bef 374static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
e5d1367f 375
cdd6c482
IM
376static atomic_t nr_mmap_events __read_mostly;
377static atomic_t nr_comm_events __read_mostly;
378static atomic_t nr_task_events __read_mostly;
948b26b6 379static atomic_t nr_freq_events __read_mostly;
45ac1403 380static atomic_t nr_switch_events __read_mostly;
9ee318a7 381
108b02cf
PZ
382static LIST_HEAD(pmus);
383static DEFINE_MUTEX(pmus_lock);
384static struct srcu_struct pmus_srcu;
385
0764771d 386/*
cdd6c482 387 * perf event paranoia level:
0fbdea19
IM
388 * -1 - not paranoid at all
389 * 0 - disallow raw tracepoint access for unpriv
cdd6c482 390 * 1 - disallow cpu events for unpriv
0fbdea19 391 * 2 - disallow kernel profiling for unpriv
0764771d 392 */
0161028b 393int sysctl_perf_event_paranoid __read_mostly = 2;
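/* Runtime-tunable via /proc/sys/kernel/perf_event_paranoid (sysctl kernel.perf_event_paranoid). */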
0764771d 394
20443384
FW
395/* Minimum for 512 kiB + 1 user control page */
396int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
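/* For example, with 4 KiB pages this evaluates to 512 + 4 = 516 kiB per user. */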
df58ab24
PZ
397
398/*
cdd6c482 399 * max perf event sample rate
df58ab24 400 */
14c63f17
DH
401#define DEFAULT_MAX_SAMPLE_RATE 100000
402#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
403#define DEFAULT_CPU_TIME_MAX_PERCENT 25
404
405int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
406
407static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
408static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
409
d9494cb4
PZ
410static int perf_sample_allowed_ns __read_mostly =
411 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
14c63f17 412
18ab2cd3 413static void update_perf_cpu_limits(void)
14c63f17
DH
414{
415 u64 tmp = perf_sample_period_ns;
416
417 tmp *= sysctl_perf_cpu_time_max_percent;
91a612ee
PZ
418 tmp = div_u64(tmp, 100);
419 if (!tmp)
420 tmp = 1;
421
422 WRITE_ONCE(perf_sample_allowed_ns, tmp);
14c63f17 423}
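/*
 * Worked example with the defaults above: perf_sample_period_ns = 10,000 ns
 * (100,000 samples/sec) and sysctl_perf_cpu_time_max_percent = 25, giving
 * perf_sample_allowed_ns = 10,000 * 25 / 100 = 2,500 ns per sample.
 */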
163ec435 424
9e630205
SE
425static int perf_rotate_context(struct perf_cpu_context *cpuctx);
426
163ec435
PZ
427int perf_proc_update_handler(struct ctl_table *table, int write,
428 void __user *buffer, size_t *lenp,
429 loff_t *ppos)
430{
723478c8 431 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
163ec435
PZ
432
433 if (ret || !write)
434 return ret;
435
ab7fdefb
KL
436 /*
437 * If throttling is disabled don't allow the write:
438 */
439 if (sysctl_perf_cpu_time_max_percent == 100 ||
440 sysctl_perf_cpu_time_max_percent == 0)
441 return -EINVAL;
442
163ec435 443 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
14c63f17
DH
444 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
445 update_perf_cpu_limits();
446
447 return 0;
448}
449
450int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
451
452int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
453 void __user *buffer, size_t *lenp,
454 loff_t *ppos)
455{
456 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
457
458 if (ret || !write)
459 return ret;
460
b303e7c1
PZ
461 if (sysctl_perf_cpu_time_max_percent == 100 ||
462 sysctl_perf_cpu_time_max_percent == 0) {
91a612ee
PZ
463 printk(KERN_WARNING
464 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
465 WRITE_ONCE(perf_sample_allowed_ns, 0);
466 } else {
467 update_perf_cpu_limits();
468 }
163ec435
PZ
469
470 return 0;
471}
1ccd1549 472
14c63f17
DH
473/*
474 * perf samples are done in some very critical code paths (NMIs).
475 * If they take too much CPU time, the system can lock up and not
476 * get any real work done. This will drop the sample rate when
477 * we detect that events are taking too long.
478 */
479#define NR_ACCUMULATED_SAMPLES 128
d9494cb4 480static DEFINE_PER_CPU(u64, running_sample_length);
14c63f17 481
91a612ee
PZ
482static u64 __report_avg;
483static u64 __report_allowed;
484
6a02ad66 485static void perf_duration_warn(struct irq_work *w)
14c63f17 486{
0d87d7ec 487 printk_ratelimited(KERN_INFO
91a612ee
PZ
488 "perf: interrupt took too long (%lld > %lld), lowering "
489 "kernel.perf_event_max_sample_rate to %d\n",
490 __report_avg, __report_allowed,
491 sysctl_perf_event_sample_rate);
6a02ad66
PZ
492}
493
494static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
495
496void perf_sample_event_took(u64 sample_len_ns)
497{
91a612ee
PZ
498 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
499 u64 running_len;
500 u64 avg_len;
501 u32 max;
14c63f17 502
91a612ee 503 if (max_len == 0)
14c63f17
DH
504 return;
505
91a612ee
PZ
506 /* Decay the counter by 1 average sample. */
507 running_len = __this_cpu_read(running_sample_length);
508 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
509 running_len += sample_len_ns;
510 __this_cpu_write(running_sample_length, running_len);
14c63f17
DH
511
512 /*
91a612ee
PZ
 513 * Note: this will be biased artificially low until we have
514 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
14c63f17
DH
515 * from having to maintain a count.
516 */
91a612ee
PZ
517 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
518 if (avg_len <= max_len)
14c63f17
DH
519 return;
520
91a612ee
PZ
521 __report_avg = avg_len;
522 __report_allowed = max_len;
14c63f17 523
91a612ee
PZ
524 /*
525 * Compute a throttle threshold 25% below the current duration.
526 */
527 avg_len += avg_len / 4;
528 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
529 if (avg_len < max)
530 max /= (u32)avg_len;
531 else
532 max = 1;
14c63f17 533
91a612ee
PZ
534 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
535 WRITE_ONCE(max_samples_per_tick, max);
536
537 sysctl_perf_event_sample_rate = max * HZ;
538 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
6a02ad66 539
cd578abb 540 if (!irq_work_queue(&perf_duration_work)) {
91a612ee 541 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
cd578abb 542 "kernel.perf_event_max_sample_rate to %d\n",
91a612ee 543 __report_avg, __report_allowed,
cd578abb
PZ
544 sysctl_perf_event_sample_rate);
545 }
14c63f17
DH
546}
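/*
 * Worked example (assuming HZ=250, i.e. TICK_NSEC = 4,000,000 ns): if the
 * decayed average sample cost reaches 4,000 ns while only 2,500 ns is
 * allowed, the inflated average is 5,000 ns and the per-tick budget is
 * (4,000,000 / 100) * 25 = 1,000,000 ns, so max becomes 1,000,000 / 5,000 =
 * 200 samples/tick and the sample rate drops to 200 * HZ = 50,000 Hz.
 */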
547
cdd6c482 548static atomic64_t perf_event_id;
a96bbc16 549
0b3fcf17
SE
550static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
551 enum event_type_t event_type);
552
553static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
554 enum event_type_t event_type,
555 struct task_struct *task);
556
557static void update_context_time(struct perf_event_context *ctx);
558static u64 perf_event_time(struct perf_event *event);
0b3fcf17 559
cdd6c482 560void __weak perf_event_print_debug(void) { }
0793a61d 561
84c79910 562extern __weak const char *perf_pmu_name(void)
0793a61d 563{
84c79910 564 return "pmu";
0793a61d
TG
565}
566
0b3fcf17
SE
567static inline u64 perf_clock(void)
568{
569 return local_clock();
570}
571
34f43927
PZ
572static inline u64 perf_event_clock(struct perf_event *event)
573{
574 return event->clock();
575}
576
e5d1367f
SE
577#ifdef CONFIG_CGROUP_PERF
578
e5d1367f
SE
579static inline bool
580perf_cgroup_match(struct perf_event *event)
581{
582 struct perf_event_context *ctx = event->ctx;
583 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
584
ef824fa1
TH
585 /* @event doesn't care about cgroup */
586 if (!event->cgrp)
587 return true;
588
589 /* wants specific cgroup scope but @cpuctx isn't associated with any */
590 if (!cpuctx->cgrp)
591 return false;
592
593 /*
594 * Cgroup scoping is recursive. An event enabled for a cgroup is
595 * also enabled for all its descendant cgroups. If @cpuctx's
596 * cgroup is a descendant of @event's (the test covers identity
597 * case), it's a match.
598 */
599 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
600 event->cgrp->css.cgroup);
e5d1367f
SE
601}
602
e5d1367f
SE
603static inline void perf_detach_cgroup(struct perf_event *event)
604{
4e2ba650 605 css_put(&event->cgrp->css);
e5d1367f
SE
606 event->cgrp = NULL;
607}
608
609static inline int is_cgroup_event(struct perf_event *event)
610{
611 return event->cgrp != NULL;
612}
613
614static inline u64 perf_cgroup_event_time(struct perf_event *event)
615{
616 struct perf_cgroup_info *t;
617
618 t = per_cpu_ptr(event->cgrp->info, event->cpu);
619 return t->time;
620}
621
622static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
623{
624 struct perf_cgroup_info *info;
625 u64 now;
626
627 now = perf_clock();
628
629 info = this_cpu_ptr(cgrp->info);
630
631 info->time += now - info->timestamp;
632 info->timestamp = now;
633}
634
635static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
636{
637 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
638 if (cgrp_out)
639 __update_cgrp_time(cgrp_out);
640}
641
642static inline void update_cgrp_time_from_event(struct perf_event *event)
643{
3f7cce3c
SE
644 struct perf_cgroup *cgrp;
645
e5d1367f 646 /*
3f7cce3c
SE
647 * ensure we access cgroup data only when needed and
648 * when we know the cgroup is pinned (css_get)
e5d1367f 649 */
3f7cce3c 650 if (!is_cgroup_event(event))
e5d1367f
SE
651 return;
652
614e4c4e 653 cgrp = perf_cgroup_from_task(current, event->ctx);
3f7cce3c
SE
654 /*
655 * Do not update time when cgroup is not active
656 */
657 if (cgrp == event->cgrp)
658 __update_cgrp_time(event->cgrp);
e5d1367f
SE
659}
660
661static inline void
3f7cce3c
SE
662perf_cgroup_set_timestamp(struct task_struct *task,
663 struct perf_event_context *ctx)
e5d1367f
SE
664{
665 struct perf_cgroup *cgrp;
666 struct perf_cgroup_info *info;
667
3f7cce3c
SE
668 /*
669 * ctx->lock held by caller
670 * ensure we do not access cgroup data
671 * unless we have the cgroup pinned (css_get)
672 */
673 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
674 return;
675
614e4c4e 676 cgrp = perf_cgroup_from_task(task, ctx);
e5d1367f 677 info = this_cpu_ptr(cgrp->info);
3f7cce3c 678 info->timestamp = ctx->timestamp;
e5d1367f
SE
679}
680
681#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
682#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
683
684/*
 685 * reschedule events based on the cgroup constraint of the task.
686 *
687 * mode SWOUT : schedule out everything
688 * mode SWIN : schedule in based on cgroup for next
689 */
18ab2cd3 690static void perf_cgroup_switch(struct task_struct *task, int mode)
e5d1367f
SE
691{
692 struct perf_cpu_context *cpuctx;
693 struct pmu *pmu;
694 unsigned long flags;
695
696 /*
 697 * disable interrupts to avoid getting nr_cgroup
698 * changes via __perf_event_disable(). Also
699 * avoids preemption.
700 */
701 local_irq_save(flags);
702
703 /*
704 * we reschedule only in the presence of cgroup
705 * constrained events.
706 */
e5d1367f
SE
707
708 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f 709 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95cf59ea
PZ
710 if (cpuctx->unique_pmu != pmu)
711 continue; /* ensure we process each cpuctx once */
e5d1367f 712
e5d1367f
SE
713 /*
714 * perf_cgroup_events says at least one
715 * context on this CPU has cgroup events.
716 *
717 * ctx->nr_cgroups reports the number of cgroup
718 * events for a context.
719 */
720 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
721 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
722 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
723
724 if (mode & PERF_CGROUP_SWOUT) {
725 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
726 /*
727 * must not be done before ctxswout due
728 * to event_filter_match() in event_sched_out()
729 */
730 cpuctx->cgrp = NULL;
731 }
732
733 if (mode & PERF_CGROUP_SWIN) {
e566b76e 734 WARN_ON_ONCE(cpuctx->cgrp);
95cf59ea
PZ
735 /*
736 * set cgrp before ctxsw in to allow
737 * event_filter_match() to not have to pass
738 * task around
614e4c4e
SE
739 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 740 * because cgroup events are only per-cpu
e5d1367f 741 */
614e4c4e 742 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
e5d1367f
SE
743 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
744 }
facc4307
PZ
745 perf_pmu_enable(cpuctx->ctx.pmu);
746 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 747 }
e5d1367f
SE
748 }
749
e5d1367f
SE
750 local_irq_restore(flags);
751}
752
a8d757ef
SE
753static inline void perf_cgroup_sched_out(struct task_struct *task,
754 struct task_struct *next)
e5d1367f 755{
a8d757ef
SE
756 struct perf_cgroup *cgrp1;
757 struct perf_cgroup *cgrp2 = NULL;
758
ddaaf4e2 759 rcu_read_lock();
a8d757ef
SE
760 /*
761 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
762 * we do not need to pass the ctx here because we know
763 * we are holding the rcu lock
a8d757ef 764 */
614e4c4e 765 cgrp1 = perf_cgroup_from_task(task, NULL);
70a01657 766 cgrp2 = perf_cgroup_from_task(next, NULL);
a8d757ef
SE
767
768 /*
769 * only schedule out current cgroup events if we know
770 * that we are switching to a different cgroup. Otherwise,
 771 * do not touch the cgroup events.
772 */
773 if (cgrp1 != cgrp2)
774 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
ddaaf4e2
SE
775
776 rcu_read_unlock();
e5d1367f
SE
777}
778
a8d757ef
SE
779static inline void perf_cgroup_sched_in(struct task_struct *prev,
780 struct task_struct *task)
e5d1367f 781{
a8d757ef
SE
782 struct perf_cgroup *cgrp1;
783 struct perf_cgroup *cgrp2 = NULL;
784
ddaaf4e2 785 rcu_read_lock();
a8d757ef
SE
786 /*
787 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
788 * we do not need to pass the ctx here because we know
789 * we are holding the rcu lock
a8d757ef 790 */
614e4c4e 791 cgrp1 = perf_cgroup_from_task(task, NULL);
614e4c4e 792 cgrp2 = perf_cgroup_from_task(prev, NULL);
a8d757ef
SE
793
794 /*
795 * only need to schedule in cgroup events if we are changing
 796 * cgroup during ctxsw. Cgroup events were not scheduled
 797 * out during the previous ctxsw if that was not the case.
798 */
799 if (cgrp1 != cgrp2)
800 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
ddaaf4e2
SE
801
802 rcu_read_unlock();
e5d1367f
SE
803}
804
805static inline int perf_cgroup_connect(int fd, struct perf_event *event,
806 struct perf_event_attr *attr,
807 struct perf_event *group_leader)
808{
809 struct perf_cgroup *cgrp;
810 struct cgroup_subsys_state *css;
2903ff01
AV
811 struct fd f = fdget(fd);
812 int ret = 0;
e5d1367f 813
2903ff01 814 if (!f.file)
e5d1367f
SE
815 return -EBADF;
816
b583043e 817 css = css_tryget_online_from_dir(f.file->f_path.dentry,
ec903c0c 818 &perf_event_cgrp_subsys);
3db272c0
LZ
819 if (IS_ERR(css)) {
820 ret = PTR_ERR(css);
821 goto out;
822 }
e5d1367f
SE
823
824 cgrp = container_of(css, struct perf_cgroup, css);
825 event->cgrp = cgrp;
826
827 /*
828 * all events in a group must monitor
829 * the same cgroup because a task belongs
830 * to only one perf cgroup at a time
831 */
832 if (group_leader && group_leader->cgrp != cgrp) {
833 perf_detach_cgroup(event);
834 ret = -EINVAL;
e5d1367f 835 }
3db272c0 836out:
2903ff01 837 fdput(f);
e5d1367f
SE
838 return ret;
839}
840
841static inline void
842perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
843{
844 struct perf_cgroup_info *t;
845 t = per_cpu_ptr(event->cgrp->info, event->cpu);
846 event->shadow_ctx_time = now - t->timestamp;
847}
848
849static inline void
850perf_cgroup_defer_enabled(struct perf_event *event)
851{
852 /*
853 * when the current task's perf cgroup does not match
854 * the event's, we need to remember to call the
855 * perf_mark_enable() function the first time a task with
856 * a matching perf cgroup is scheduled in.
857 */
858 if (is_cgroup_event(event) && !perf_cgroup_match(event))
859 event->cgrp_defer_enabled = 1;
860}
861
862static inline void
863perf_cgroup_mark_enabled(struct perf_event *event,
864 struct perf_event_context *ctx)
865{
866 struct perf_event *sub;
867 u64 tstamp = perf_event_time(event);
868
869 if (!event->cgrp_defer_enabled)
870 return;
871
872 event->cgrp_defer_enabled = 0;
873
874 event->tstamp_enabled = tstamp - event->total_time_enabled;
875 list_for_each_entry(sub, &event->sibling_list, group_entry) {
876 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
877 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
878 sub->cgrp_defer_enabled = 0;
879 }
880 }
881}
db4a8356
DCC
882
883/*
884 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
885 * cleared when last cgroup event is removed.
886 */
887static inline void
888list_update_cgroup_event(struct perf_event *event,
889 struct perf_event_context *ctx, bool add)
890{
891 struct perf_cpu_context *cpuctx;
892
893 if (!is_cgroup_event(event))
894 return;
895
896 if (add && ctx->nr_cgroups++)
897 return;
898 else if (!add && --ctx->nr_cgroups)
899 return;
900 /*
901 * Because cgroup events are always per-cpu events,
902 * this will always be called from the right CPU.
903 */
904 cpuctx = __get_cpu_context(ctx);
905 cpuctx->cgrp = add ? event->cgrp : NULL;
906}
907
e5d1367f
SE
908#else /* !CONFIG_CGROUP_PERF */
909
910static inline bool
911perf_cgroup_match(struct perf_event *event)
912{
913 return true;
914}
915
916static inline void perf_detach_cgroup(struct perf_event *event)
917{}
918
919static inline int is_cgroup_event(struct perf_event *event)
920{
921 return 0;
922}
923
924static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
925{
926 return 0;
927}
928
929static inline void update_cgrp_time_from_event(struct perf_event *event)
930{
931}
932
933static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
934{
935}
936
a8d757ef
SE
937static inline void perf_cgroup_sched_out(struct task_struct *task,
938 struct task_struct *next)
e5d1367f
SE
939{
940}
941
a8d757ef
SE
942static inline void perf_cgroup_sched_in(struct task_struct *prev,
943 struct task_struct *task)
e5d1367f
SE
944{
945}
946
947static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
948 struct perf_event_attr *attr,
949 struct perf_event *group_leader)
950{
951 return -EINVAL;
952}
953
954static inline void
3f7cce3c
SE
955perf_cgroup_set_timestamp(struct task_struct *task,
956 struct perf_event_context *ctx)
e5d1367f
SE
957{
958}
959
960void
961perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
962{
963}
964
965static inline void
966perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
967{
968}
969
970static inline u64 perf_cgroup_event_time(struct perf_event *event)
971{
972 return 0;
973}
974
975static inline void
976perf_cgroup_defer_enabled(struct perf_event *event)
977{
978}
979
980static inline void
981perf_cgroup_mark_enabled(struct perf_event *event,
982 struct perf_event_context *ctx)
983{
984}
db4a8356
DCC
985
986static inline void
987list_update_cgroup_event(struct perf_event *event,
988 struct perf_event_context *ctx, bool add)
989{
990}
991
e5d1367f
SE
992#endif
993
9e630205
SE
994/*
995 * set default to be dependent on timer tick just
996 * like original code
997 */
998#define PERF_CPU_HRTIMER (1000 / HZ)
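/* e.g. with HZ=250 this evaluates to 4, i.e. a 4 ms default multiplexing interval */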
999/*
1000 * function must be called with interrupts disabled
1001 */
272325c4 1002static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
9e630205
SE
1003{
1004 struct perf_cpu_context *cpuctx;
9e630205
SE
1005 int rotations = 0;
1006
1007 WARN_ON(!irqs_disabled());
1008
1009 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
9e630205
SE
1010 rotations = perf_rotate_context(cpuctx);
1011
4cfafd30
PZ
1012 raw_spin_lock(&cpuctx->hrtimer_lock);
1013 if (rotations)
9e630205 1014 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
4cfafd30
PZ
1015 else
1016 cpuctx->hrtimer_active = 0;
1017 raw_spin_unlock(&cpuctx->hrtimer_lock);
9e630205 1018
4cfafd30 1019 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
9e630205
SE
1020}
1021
272325c4 1022static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
9e630205 1023{
272325c4 1024 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 1025 struct pmu *pmu = cpuctx->ctx.pmu;
272325c4 1026 u64 interval;
9e630205
SE
1027
1028 /* no multiplexing needed for SW PMU */
1029 if (pmu->task_ctx_nr == perf_sw_context)
1030 return;
1031
62b85639
SE
1032 /*
1033 * check default is sane, if not set then force to
1034 * default interval (1/tick)
1035 */
272325c4
PZ
1036 interval = pmu->hrtimer_interval_ms;
1037 if (interval < 1)
1038 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
62b85639 1039
272325c4 1040 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
9e630205 1041
4cfafd30
PZ
1042 raw_spin_lock_init(&cpuctx->hrtimer_lock);
1043 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
272325c4 1044 timer->function = perf_mux_hrtimer_handler;
9e630205
SE
1045}
1046
272325c4 1047static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
9e630205 1048{
272325c4 1049 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 1050 struct pmu *pmu = cpuctx->ctx.pmu;
4cfafd30 1051 unsigned long flags;
9e630205
SE
1052
1053 /* not for SW PMU */
1054 if (pmu->task_ctx_nr == perf_sw_context)
272325c4 1055 return 0;
9e630205 1056
4cfafd30
PZ
1057 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
1058 if (!cpuctx->hrtimer_active) {
1059 cpuctx->hrtimer_active = 1;
1060 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
1061 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
1062 }
1063 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
9e630205 1064
272325c4 1065 return 0;
9e630205
SE
1066}
1067
33696fc0 1068void perf_pmu_disable(struct pmu *pmu)
9e35ad38 1069{
33696fc0
PZ
1070 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1071 if (!(*count)++)
1072 pmu->pmu_disable(pmu);
9e35ad38 1073}
9e35ad38 1074
33696fc0 1075void perf_pmu_enable(struct pmu *pmu)
9e35ad38 1076{
33696fc0
PZ
1077 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1078 if (!--(*count))
1079 pmu->pmu_enable(pmu);
9e35ad38 1080}
9e35ad38 1081
2fde4f94 1082static DEFINE_PER_CPU(struct list_head, active_ctx_list);
e9d2b064
PZ
1083
1084/*
2fde4f94
MR
1085 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1086 * perf_event_task_tick() are fully serialized because they're strictly cpu
1087 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1088 * disabled, while perf_event_task_tick is called from IRQ context.
e9d2b064 1089 */
2fde4f94 1090static void perf_event_ctx_activate(struct perf_event_context *ctx)
9e35ad38 1091{
2fde4f94 1092 struct list_head *head = this_cpu_ptr(&active_ctx_list);
b5ab4cd5 1093
e9d2b064 1094 WARN_ON(!irqs_disabled());
b5ab4cd5 1095
2fde4f94
MR
1096 WARN_ON(!list_empty(&ctx->active_ctx_list));
1097
1098 list_add(&ctx->active_ctx_list, head);
1099}
1100
1101static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1102{
1103 WARN_ON(!irqs_disabled());
1104
1105 WARN_ON(list_empty(&ctx->active_ctx_list));
1106
1107 list_del_init(&ctx->active_ctx_list);
9e35ad38 1108}
9e35ad38 1109
cdd6c482 1110static void get_ctx(struct perf_event_context *ctx)
a63eaf34 1111{
e5289d4a 1112 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
1113}
1114
4af57ef2
YZ
1115static void free_ctx(struct rcu_head *head)
1116{
1117 struct perf_event_context *ctx;
1118
1119 ctx = container_of(head, struct perf_event_context, rcu_head);
1120 kfree(ctx->task_ctx_data);
1121 kfree(ctx);
1122}
1123
cdd6c482 1124static void put_ctx(struct perf_event_context *ctx)
a63eaf34 1125{
564c2b21
PM
1126 if (atomic_dec_and_test(&ctx->refcount)) {
1127 if (ctx->parent_ctx)
1128 put_ctx(ctx->parent_ctx);
63b6da39 1129 if (ctx->task && ctx->task != TASK_TOMBSTONE)
c93f7669 1130 put_task_struct(ctx->task);
4af57ef2 1131 call_rcu(&ctx->rcu_head, free_ctx);
564c2b21 1132 }
a63eaf34
PM
1133}
1134
f63a8daa
PZ
1135/*
1136 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1137 * perf_pmu_migrate_context() we need some magic.
1138 *
1139 * Those places that change perf_event::ctx will hold both
1140 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1141 *
8b10c5e2
PZ
1142 * Lock ordering is by mutex address. There are two other sites where
1143 * perf_event_context::mutex nests and those are:
1144 *
1145 * - perf_event_exit_task_context() [ child , 0 ]
8ba289b8
PZ
1146 * perf_event_exit_event()
1147 * put_event() [ parent, 1 ]
8b10c5e2
PZ
1148 *
1149 * - perf_event_init_context() [ parent, 0 ]
1150 * inherit_task_group()
1151 * inherit_group()
1152 * inherit_event()
1153 * perf_event_alloc()
1154 * perf_init_event()
1155 * perf_try_init_event() [ child , 1 ]
1156 *
1157 * While it appears there is an obvious deadlock here -- the parent and child
1158 * nesting levels are inverted between the two -- this is in fact safe because
1159 * life-time rules separate them. That is, an exiting task cannot fork, and a
1160 * spawning task cannot (yet) exit.
1161 *
1162 * But remember that these are parent<->child context relations, and
1163 * migration does not affect children, therefore these two orderings should not
1164 * interact.
f63a8daa
PZ
1165 *
1166 * The change in perf_event::ctx does not affect children (as claimed above)
1167 * because the sys_perf_event_open() case will install a new event and break
1168 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1169 * concerned with cpuctx and that doesn't have children.
1170 *
1171 * The places that change perf_event::ctx will issue:
1172 *
1173 * perf_remove_from_context();
1174 * synchronize_rcu();
1175 * perf_install_in_context();
1176 *
1177 * to affect the change. The remove_from_context() + synchronize_rcu() should
1178 * quiesce the event, after which we can install it in the new location. This
1179 * means that only external vectors (perf_fops, prctl) can perturb the event
1180 * while in transit. Therefore all such accessors should also acquire
1181 * perf_event_context::mutex to serialize against this.
1182 *
1183 * However; because event->ctx can change while we're waiting to acquire
1184 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1185 * function.
1186 *
1187 * Lock order:
79c9ce57 1188 * cred_guard_mutex
f63a8daa
PZ
1189 * task_struct::perf_event_mutex
1190 * perf_event_context::mutex
f63a8daa 1191 * perf_event::child_mutex;
07c4a776 1192 * perf_event_context::lock
f63a8daa
PZ
1193 * perf_event::mmap_mutex
1194 * mmap_sem
1195 */
a83fe28e
PZ
1196static struct perf_event_context *
1197perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
f63a8daa
PZ
1198{
1199 struct perf_event_context *ctx;
1200
1201again:
1202 rcu_read_lock();
1203 ctx = ACCESS_ONCE(event->ctx);
1204 if (!atomic_inc_not_zero(&ctx->refcount)) {
1205 rcu_read_unlock();
1206 goto again;
1207 }
1208 rcu_read_unlock();
1209
a83fe28e 1210 mutex_lock_nested(&ctx->mutex, nesting);
f63a8daa
PZ
1211 if (event->ctx != ctx) {
1212 mutex_unlock(&ctx->mutex);
1213 put_ctx(ctx);
1214 goto again;
1215 }
1216
1217 return ctx;
1218}
1219
a83fe28e
PZ
1220static inline struct perf_event_context *
1221perf_event_ctx_lock(struct perf_event *event)
1222{
1223 return perf_event_ctx_lock_nested(event, 0);
1224}
1225
f63a8daa
PZ
1226static void perf_event_ctx_unlock(struct perf_event *event,
1227 struct perf_event_context *ctx)
1228{
1229 mutex_unlock(&ctx->mutex);
1230 put_ctx(ctx);
1231}
1232
211de6eb
PZ
1233/*
1234 * This must be done under the ctx->lock, such as to serialize against
1235 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1236 * calling scheduler related locks and ctx->lock nests inside those.
1237 */
1238static __must_check struct perf_event_context *
1239unclone_ctx(struct perf_event_context *ctx)
71a851b4 1240{
211de6eb
PZ
1241 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1242
1243 lockdep_assert_held(&ctx->lock);
1244
1245 if (parent_ctx)
71a851b4 1246 ctx->parent_ctx = NULL;
5a3126d4 1247 ctx->generation++;
211de6eb
PZ
1248
1249 return parent_ctx;
71a851b4
PZ
1250}
1251
6844c09d
ACM
1252static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1253{
1254 /*
1255 * only top level events have the pid namespace they were created in
1256 */
1257 if (event->parent)
1258 event = event->parent;
1259
1260 return task_tgid_nr_ns(p, event->ns);
1261}
1262
1263static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1264{
1265 /*
1266 * only top level events have the pid namespace they were created in
1267 */
1268 if (event->parent)
1269 event = event->parent;
1270
1271 return task_pid_nr_ns(p, event->ns);
1272}
1273
7f453c24 1274/*
cdd6c482 1275 * If we inherit events we want to return the parent event id
7f453c24
PZ
1276 * to userspace.
1277 */
cdd6c482 1278static u64 primary_event_id(struct perf_event *event)
7f453c24 1279{
cdd6c482 1280 u64 id = event->id;
7f453c24 1281
cdd6c482
IM
1282 if (event->parent)
1283 id = event->parent->id;
7f453c24
PZ
1284
1285 return id;
1286}
1287
25346b93 1288/*
cdd6c482 1289 * Get the perf_event_context for a task and lock it.
63b6da39 1290 *
25346b93
PM
1291 * This has to cope with the fact that until it is locked,
1292 * the context could get moved to another task.
1293 */
cdd6c482 1294static struct perf_event_context *
8dc85d54 1295perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 1296{
cdd6c482 1297 struct perf_event_context *ctx;
25346b93 1298
9ed6060d 1299retry:
058ebd0e
PZ
1300 /*
1301 * One of the few rules of preemptible RCU is that one cannot do
1302 * rcu_read_unlock() while holding a scheduler (or nested) lock when
2fd59077 1303 * part of the read side critical section was irqs-enabled -- see
058ebd0e
PZ
1304 * rcu_read_unlock_special().
1305 *
1306 * Since ctx->lock nests under rq->lock we must ensure the entire read
2fd59077 1307 * side critical section has interrupts disabled.
058ebd0e 1308 */
2fd59077 1309 local_irq_save(*flags);
058ebd0e 1310 rcu_read_lock();
8dc85d54 1311 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
1312 if (ctx) {
1313 /*
1314 * If this context is a clone of another, it might
1315 * get swapped for another underneath us by
cdd6c482 1316 * perf_event_task_sched_out, though the
25346b93
PM
1317 * rcu_read_lock() protects us from any context
1318 * getting freed. Lock the context and check if it
1319 * got swapped before we could get the lock, and retry
1320 * if so. If we locked the right context, then it
1321 * can't get swapped on us any more.
1322 */
2fd59077 1323 raw_spin_lock(&ctx->lock);
8dc85d54 1324 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
2fd59077 1325 raw_spin_unlock(&ctx->lock);
058ebd0e 1326 rcu_read_unlock();
2fd59077 1327 local_irq_restore(*flags);
25346b93
PM
1328 goto retry;
1329 }
b49a9e7e 1330
63b6da39
PZ
1331 if (ctx->task == TASK_TOMBSTONE ||
1332 !atomic_inc_not_zero(&ctx->refcount)) {
2fd59077 1333 raw_spin_unlock(&ctx->lock);
b49a9e7e 1334 ctx = NULL;
828b6f0e
PZ
1335 } else {
1336 WARN_ON_ONCE(ctx->task != task);
b49a9e7e 1337 }
25346b93
PM
1338 }
1339 rcu_read_unlock();
2fd59077
PM
1340 if (!ctx)
1341 local_irq_restore(*flags);
25346b93
PM
1342 return ctx;
1343}
1344
1345/*
1346 * Get the context for a task and increment its pin_count so it
1347 * can't get swapped to another task. This also increments its
1348 * reference count so that the context can't get freed.
1349 */
8dc85d54
PZ
1350static struct perf_event_context *
1351perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1352{
cdd6c482 1353 struct perf_event_context *ctx;
25346b93
PM
1354 unsigned long flags;
1355
8dc85d54 1356 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1357 if (ctx) {
1358 ++ctx->pin_count;
e625cce1 1359 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1360 }
1361 return ctx;
1362}
1363
cdd6c482 1364static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1365{
1366 unsigned long flags;
1367
e625cce1 1368 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1369 --ctx->pin_count;
e625cce1 1370 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1371}
1372
f67218c3
PZ
1373/*
1374 * Update the record of the current time in a context.
1375 */
1376static void update_context_time(struct perf_event_context *ctx)
1377{
1378 u64 now = perf_clock();
1379
1380 ctx->time += now - ctx->timestamp;
1381 ctx->timestamp = now;
1382}
1383
4158755d
SE
1384static u64 perf_event_time(struct perf_event *event)
1385{
1386 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1387
1388 if (is_cgroup_event(event))
1389 return perf_cgroup_event_time(event);
1390
4158755d
SE
1391 return ctx ? ctx->time : 0;
1392}
1393
f67218c3
PZ
1394/*
1395 * Update the total_time_enabled and total_time_running fields for an event.
1396 */
1397static void update_event_times(struct perf_event *event)
1398{
1399 struct perf_event_context *ctx = event->ctx;
1400 u64 run_end;
1401
3cbaa590
PZ
1402 lockdep_assert_held(&ctx->lock);
1403
f67218c3
PZ
1404 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1405 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1406 return;
3cbaa590 1407
e5d1367f
SE
1408 /*
1409 * in cgroup mode, time_enabled represents
1410 * the time the event was enabled AND active
1411 * tasks were in the monitored cgroup. This is
1412 * independent of the activity of the context as
1413 * there may be a mix of cgroup and non-cgroup events.
1414 *
1415 * That is why we treat cgroup events differently
1416 * here.
1417 */
1418 if (is_cgroup_event(event))
46cd6a7f 1419 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
1420 else if (ctx->is_active)
1421 run_end = ctx->time;
acd1d7c1
PZ
1422 else
1423 run_end = event->tstamp_stopped;
1424
1425 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
1426
1427 if (event->state == PERF_EVENT_STATE_INACTIVE)
1428 run_end = event->tstamp_stopped;
1429 else
4158755d 1430 run_end = perf_event_time(event);
f67218c3
PZ
1431
1432 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 1433
f67218c3
PZ
1434}
1435
96c21a46
PZ
1436/*
1437 * Update total_time_enabled and total_time_running for all events in a group.
1438 */
1439static void update_group_times(struct perf_event *leader)
1440{
1441 struct perf_event *event;
1442
1443 update_event_times(leader);
1444 list_for_each_entry(event, &leader->sibling_list, group_entry)
1445 update_event_times(event);
1446}
1447
889ff015
FW
1448static struct list_head *
1449ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1450{
1451 if (event->attr.pinned)
1452 return &ctx->pinned_groups;
1453 else
1454 return &ctx->flexible_groups;
1455}
1456
fccc714b 1457/*
cdd6c482 1458 * Add an event to the lists for its context.
fccc714b
PZ
1459 * Must be called with ctx->mutex and ctx->lock held.
1460 */
04289bb9 1461static void
cdd6c482 1462list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1463{
db4a8356 1464
c994d613
PZ
1465 lockdep_assert_held(&ctx->lock);
1466
8a49542c
PZ
1467 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1468 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
1469
1470 /*
8a49542c
PZ
1471 * If we're a stand alone event or group leader, we go to the context
1472 * list, group events are kept attached to the group so that
1473 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1474 */
8a49542c 1475 if (event->group_leader == event) {
889ff015
FW
1476 struct list_head *list;
1477
4ff6a8de 1478 event->group_caps = event->event_caps;
d6f962b5 1479
889ff015
FW
1480 list = ctx_group_list(event, ctx);
1481 list_add_tail(&event->group_entry, list);
5c148194 1482 }
592903cd 1483
db4a8356 1484 list_update_cgroup_event(event, ctx, true);
e5d1367f 1485
cdd6c482
IM
1486 list_add_rcu(&event->event_entry, &ctx->event_list);
1487 ctx->nr_events++;
1488 if (event->attr.inherit_stat)
bfbd3381 1489 ctx->nr_stat++;
5a3126d4
PZ
1490
1491 ctx->generation++;
04289bb9
IM
1492}
1493
0231bb53
JO
1494/*
1495 * Initialize event state based on the perf_event_attr::disabled.
1496 */
1497static inline void perf_event__state_init(struct perf_event *event)
1498{
1499 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1500 PERF_EVENT_STATE_INACTIVE;
1501}
1502
a723968c 1503static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
c320c7b7
ACM
1504{
1505 int entry = sizeof(u64); /* value */
1506 int size = 0;
1507 int nr = 1;
1508
1509 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1510 size += sizeof(u64);
1511
1512 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1513 size += sizeof(u64);
1514
1515 if (event->attr.read_format & PERF_FORMAT_ID)
1516 entry += sizeof(u64);
1517
1518 if (event->attr.read_format & PERF_FORMAT_GROUP) {
a723968c 1519 nr += nr_siblings;
c320c7b7
ACM
1520 size += sizeof(u64);
1521 }
1522
1523 size += entry * nr;
1524 event->read_size = size;
1525}
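/*
 * Worked example: for read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP on a leader with two siblings:
 * entry = 8 + 8 = 16, nr = 3, and size = 8 (time_enabled) + 8 (nr) +
 * 16 * 3 = 64 bytes.
 */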
1526
a723968c 1527static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
c320c7b7
ACM
1528{
1529 struct perf_sample_data *data;
c320c7b7
ACM
1530 u16 size = 0;
1531
c320c7b7
ACM
1532 if (sample_type & PERF_SAMPLE_IP)
1533 size += sizeof(data->ip);
1534
6844c09d
ACM
1535 if (sample_type & PERF_SAMPLE_ADDR)
1536 size += sizeof(data->addr);
1537
1538 if (sample_type & PERF_SAMPLE_PERIOD)
1539 size += sizeof(data->period);
1540
c3feedf2
AK
1541 if (sample_type & PERF_SAMPLE_WEIGHT)
1542 size += sizeof(data->weight);
1543
6844c09d
ACM
1544 if (sample_type & PERF_SAMPLE_READ)
1545 size += event->read_size;
1546
d6be9ad6
SE
1547 if (sample_type & PERF_SAMPLE_DATA_SRC)
1548 size += sizeof(data->data_src.val);
1549
fdfbbd07
AK
1550 if (sample_type & PERF_SAMPLE_TRANSACTION)
1551 size += sizeof(data->txn);
1552
6844c09d
ACM
1553 event->header_size = size;
1554}
1555
a723968c
PZ
1556/*
1557 * Called at perf_event creation and when events are attached/detached from a
1558 * group.
1559 */
1560static void perf_event__header_size(struct perf_event *event)
1561{
1562 __perf_event_read_size(event,
1563 event->group_leader->nr_siblings);
1564 __perf_event_header_size(event, event->attr.sample_type);
1565}
1566
6844c09d
ACM
1567static void perf_event__id_header_size(struct perf_event *event)
1568{
1569 struct perf_sample_data *data;
1570 u64 sample_type = event->attr.sample_type;
1571 u16 size = 0;
1572
c320c7b7
ACM
1573 if (sample_type & PERF_SAMPLE_TID)
1574 size += sizeof(data->tid_entry);
1575
1576 if (sample_type & PERF_SAMPLE_TIME)
1577 size += sizeof(data->time);
1578
ff3d527c
AH
1579 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1580 size += sizeof(data->id);
1581
c320c7b7
ACM
1582 if (sample_type & PERF_SAMPLE_ID)
1583 size += sizeof(data->id);
1584
1585 if (sample_type & PERF_SAMPLE_STREAM_ID)
1586 size += sizeof(data->stream_id);
1587
1588 if (sample_type & PERF_SAMPLE_CPU)
1589 size += sizeof(data->cpu_entry);
1590
6844c09d 1591 event->id_header_size = size;
c320c7b7
ACM
1592}
1593
a723968c
PZ
1594static bool perf_event_validate_size(struct perf_event *event)
1595{
1596 /*
1597 * The values computed here will be over-written when we actually
1598 * attach the event.
1599 */
1600 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1601 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1602 perf_event__id_header_size(event);
1603
1604 /*
1605 * Sum the lot; should not exceed the 64k limit we have on records.
1606 * Conservative limit to allow for callchains and other variable fields.
1607 */
1608 if (event->read_size + event->header_size +
1609 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1610 return false;
1611
1612 return true;
1613}
1614
8a49542c
PZ
1615static void perf_group_attach(struct perf_event *event)
1616{
c320c7b7 1617 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1618
74c3337c
PZ
1619 /*
1620 * We can have double attach due to group movement in perf_event_open.
1621 */
1622 if (event->attach_state & PERF_ATTACH_GROUP)
1623 return;
1624
8a49542c
PZ
1625 event->attach_state |= PERF_ATTACH_GROUP;
1626
1627 if (group_leader == event)
1628 return;
1629
652884fe
PZ
1630 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1631
4ff6a8de 1632 group_leader->group_caps &= event->event_caps;
8a49542c
PZ
1633
1634 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1635 group_leader->nr_siblings++;
c320c7b7
ACM
1636
1637 perf_event__header_size(group_leader);
1638
1639 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1640 perf_event__header_size(pos);
8a49542c
PZ
1641}
1642
a63eaf34 1643/*
cdd6c482 1644 * Remove an event from the lists for its context.
fccc714b 1645 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1646 */
04289bb9 1647static void
cdd6c482 1648list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1649{
652884fe
PZ
1650 WARN_ON_ONCE(event->ctx != ctx);
1651 lockdep_assert_held(&ctx->lock);
1652
8a49542c
PZ
1653 /*
1654 * We can have double detach due to exit/hot-unplug + close.
1655 */
1656 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1657 return;
8a49542c
PZ
1658
1659 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1660
db4a8356 1661 list_update_cgroup_event(event, ctx, false);
e5d1367f 1662
cdd6c482
IM
1663 ctx->nr_events--;
1664 if (event->attr.inherit_stat)
bfbd3381 1665 ctx->nr_stat--;
8bc20959 1666
cdd6c482 1667 list_del_rcu(&event->event_entry);
04289bb9 1668
8a49542c
PZ
1669 if (event->group_leader == event)
1670 list_del_init(&event->group_entry);
5c148194 1671
96c21a46 1672 update_group_times(event);
b2e74a26
SE
1673
1674 /*
1675 * If event was in error state, then keep it
1676 * that way, otherwise bogus counts will be
1677 * returned on read(). The only way to get out
1678 * of error state is by explicit re-enabling
1679 * of the event
1680 */
1681 if (event->state > PERF_EVENT_STATE_OFF)
1682 event->state = PERF_EVENT_STATE_OFF;
5a3126d4
PZ
1683
1684 ctx->generation++;
050735b0
PZ
1685}
1686
8a49542c 1687static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1688{
1689 struct perf_event *sibling, *tmp;
8a49542c
PZ
1690 struct list_head *list = NULL;
1691
1692 /*
1693 * We can have double detach due to exit/hot-unplug + close.
1694 */
1695 if (!(event->attach_state & PERF_ATTACH_GROUP))
1696 return;
1697
1698 event->attach_state &= ~PERF_ATTACH_GROUP;
1699
1700 /*
1701 * If this is a sibling, remove it from its group.
1702 */
1703 if (event->group_leader != event) {
1704 list_del_init(&event->group_entry);
1705 event->group_leader->nr_siblings--;
c320c7b7 1706 goto out;
8a49542c
PZ
1707 }
1708
1709 if (!list_empty(&event->group_entry))
1710 list = &event->group_entry;
2e2af50b 1711
04289bb9 1712 /*
cdd6c482
IM
1713 * If this was a group event with sibling events then
1714 * upgrade the siblings to singleton events by adding them
8a49542c 1715 * to whatever list we are on.
04289bb9 1716 */
cdd6c482 1717 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1718 if (list)
1719 list_move_tail(&sibling->group_entry, list);
04289bb9 1720 sibling->group_leader = sibling;
d6f962b5
FW
1721
1722 /* Inherit group caps from the previous leader */
4ff6a8de 1723 sibling->group_caps = event->group_caps;
652884fe
PZ
1724
1725 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 1726 }
c320c7b7
ACM
1727
1728out:
1729 perf_event__header_size(event->group_leader);
1730
1731 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1732 perf_event__header_size(tmp);
04289bb9
IM
1733}
1734
fadfe7be
JO
1735static bool is_orphaned_event(struct perf_event *event)
1736{
a69b0ca4 1737 return event->state == PERF_EVENT_STATE_DEAD;
fadfe7be
JO
1738}
1739
2c81a647 1740static inline int __pmu_filter_match(struct perf_event *event)
66eb579e
MR
1741{
1742 struct pmu *pmu = event->pmu;
1743 return pmu->filter_match ? pmu->filter_match(event) : 1;
1744}
1745
2c81a647
MR
1746/*
1747 * Check whether we should attempt to schedule an event group based on
1748 * PMU-specific filtering. An event group can consist of HW and SW events,
1749 * potentially with a SW leader, so we must check all the filters, to
1750 * determine whether a group is schedulable:
1751 */
1752static inline int pmu_filter_match(struct perf_event *event)
1753{
1754 struct perf_event *child;
1755
1756 if (!__pmu_filter_match(event))
1757 return 0;
1758
1759 list_for_each_entry(child, &event->sibling_list, group_entry) {
1760 if (!__pmu_filter_match(child))
1761 return 0;
1762 }
1763
1764 return 1;
1765}
1766
fa66f07a
SE
1767static inline int
1768event_filter_match(struct perf_event *event)
1769{
0b8f1e2e
PZ
1770 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1771 perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
1772}
1773
9ffcfa6f
SE
1774static void
1775event_sched_out(struct perf_event *event,
3b6f9e5c 1776 struct perf_cpu_context *cpuctx,
cdd6c482 1777 struct perf_event_context *ctx)
3b6f9e5c 1778{
4158755d 1779 u64 tstamp = perf_event_time(event);
fa66f07a 1780 u64 delta;
652884fe
PZ
1781
1782 WARN_ON_ONCE(event->ctx != ctx);
1783 lockdep_assert_held(&ctx->lock);
1784
fa66f07a
SE
1785 /*
1786 * An event which could not be activated because of
1787 * filter mismatch still needs to have its timings
1788 * maintained, otherwise bogus information is returned
1789 * via read() for time_enabled, time_running:
1790 */
0b8f1e2e
PZ
1791 if (event->state == PERF_EVENT_STATE_INACTIVE &&
1792 !event_filter_match(event)) {
e5d1367f 1793 delta = tstamp - event->tstamp_stopped;
fa66f07a 1794 event->tstamp_running += delta;
4158755d 1795 event->tstamp_stopped = tstamp;
fa66f07a
SE
1796 }
1797
cdd6c482 1798 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1799 return;
3b6f9e5c 1800
44377277
AS
1801 perf_pmu_disable(event->pmu);
1802
28a967c3
PZ
1803 event->tstamp_stopped = tstamp;
1804 event->pmu->del(event, 0);
1805 event->oncpu = -1;
cdd6c482
IM
1806 event->state = PERF_EVENT_STATE_INACTIVE;
1807 if (event->pending_disable) {
1808 event->pending_disable = 0;
1809 event->state = PERF_EVENT_STATE_OFF;
970892a9 1810 }
3b6f9e5c 1811
cdd6c482 1812 if (!is_software_event(event))
3b6f9e5c 1813 cpuctx->active_oncpu--;
2fde4f94
MR
1814 if (!--ctx->nr_active)
1815 perf_event_ctx_deactivate(ctx);
0f5a2601
PZ
1816 if (event->attr.freq && event->attr.sample_freq)
1817 ctx->nr_freq--;
cdd6c482 1818 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 1819 cpuctx->exclusive = 0;
44377277
AS
1820
1821 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
1822}
1823
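/*
 * Sketch of how the tstamp bookkeeping above becomes visible to user
 * space: a counter opened with the TOTAL_TIME_* read format reports
 * time_enabled/time_running, which are derived from the
 * tstamp_enabled/tstamp_stopped values maintained here.  Error
 * handling is omitted; this is an illustration, not a complete tool.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	struct { uint64_t value, enabled, running; } buf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	read(fd, &buf, sizeof(buf));
	printf("value=%llu enabled=%llu running=%llu\n",
	       (unsigned long long)buf.value,
	       (unsigned long long)buf.enabled,
	       (unsigned long long)buf.running);
	close(fd);
	return 0;
}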
d859e29f 1824static void
cdd6c482 1825group_sched_out(struct perf_event *group_event,
d859e29f 1826 struct perf_cpu_context *cpuctx,
cdd6c482 1827 struct perf_event_context *ctx)
d859e29f 1828{
cdd6c482 1829 struct perf_event *event;
fa66f07a 1830 int state = group_event->state;
d859e29f 1831
3f005e7d
MR
1832 perf_pmu_disable(ctx->pmu);
1833
cdd6c482 1834 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1835
1836 /*
1837 * Schedule out siblings (if any):
1838 */
cdd6c482
IM
1839 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1840 event_sched_out(event, cpuctx, ctx);
d859e29f 1841
3f005e7d
MR
1842 perf_pmu_enable(ctx->pmu);
1843
fa66f07a 1844 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1845 cpuctx->exclusive = 0;
1846}
1847
45a0e07a 1848#define DETACH_GROUP 0x01UL
0017960f 1849
0793a61d 1850/*
cdd6c482 1851 * Cross CPU call to remove a performance event
0793a61d 1852 *
cdd6c482 1853 * We disable the event on the hardware level first. After that we
0793a61d
TG
1854 * remove it from the context list.
1855 */
fae3fde6
PZ
1856static void
1857__perf_remove_from_context(struct perf_event *event,
1858 struct perf_cpu_context *cpuctx,
1859 struct perf_event_context *ctx,
1860 void *info)
0793a61d 1861{
45a0e07a 1862 unsigned long flags = (unsigned long)info;
0793a61d 1863
cdd6c482 1864 event_sched_out(event, cpuctx, ctx);
45a0e07a 1865 if (flags & DETACH_GROUP)
46ce0fe9 1866 perf_group_detach(event);
cdd6c482 1867 list_del_event(event, ctx);
39a43640
PZ
1868
1869 if (!ctx->nr_events && ctx->is_active) {
64ce3126 1870 ctx->is_active = 0;
39a43640
PZ
1871 if (ctx->task) {
1872 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1873 cpuctx->task_ctx = NULL;
1874 }
64ce3126 1875 }
0793a61d
TG
1876}
1877
0793a61d 1878/*
cdd6c482 1879 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1880 *
cdd6c482
IM
1881 * If event->ctx is a cloned context, callers must make sure that
1882 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1883 * remains valid. This is OK when called from perf_release since
1884 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1885 * When called from perf_event_exit_task, it's OK because the
c93f7669 1886 * context has been detached from its task.
0793a61d 1887 */
45a0e07a 1888static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
0793a61d 1889{
fae3fde6 1890 lockdep_assert_held(&event->ctx->mutex);
0793a61d 1891
45a0e07a 1892 event_function_call(event, __perf_remove_from_context, (void *)flags);
0793a61d
TG
1893}
1894
d859e29f 1895/*
cdd6c482 1896 * Cross CPU call to disable a performance event
d859e29f 1897 */
fae3fde6
PZ
1898static void __perf_event_disable(struct perf_event *event,
1899 struct perf_cpu_context *cpuctx,
1900 struct perf_event_context *ctx,
1901 void *info)
7b648018 1902{
fae3fde6
PZ
1903 if (event->state < PERF_EVENT_STATE_INACTIVE)
1904 return;
7b648018 1905
fae3fde6
PZ
1906 update_context_time(ctx);
1907 update_cgrp_time_from_event(event);
1908 update_group_times(event);
1909 if (event == event->group_leader)
1910 group_sched_out(event, cpuctx, ctx);
1911 else
1912 event_sched_out(event, cpuctx, ctx);
1913 event->state = PERF_EVENT_STATE_OFF;
7b648018
PZ
1914}
1915
d859e29f 1916/*
cdd6c482 1917 * Disable an event.
c93f7669 1918 *
cdd6c482
IM
1919 * If event->ctx is a cloned context, callers must make sure that
1920 * every task struct that event->ctx->task could possibly point to
c93f7669 1921 * remains valid. This condition is satisfied when called through
cdd6c482
IM
1922 * perf_event_for_each_child or perf_event_for_each because they
1923 * hold the top-level event's child_mutex, so any descendant that
8ba289b8
PZ
1924 * goes to exit will block in perf_event_exit_event().
1925 *
cdd6c482 1926 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1927 * is the current context on this CPU and preemption is disabled,
cdd6c482 1928 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1929 */
f63a8daa 1930static void _perf_event_disable(struct perf_event *event)
d859e29f 1931{
cdd6c482 1932 struct perf_event_context *ctx = event->ctx;
d859e29f 1933
e625cce1 1934 raw_spin_lock_irq(&ctx->lock);
7b648018 1935 if (event->state <= PERF_EVENT_STATE_OFF) {
e625cce1 1936 raw_spin_unlock_irq(&ctx->lock);
7b648018 1937 return;
53cfbf59 1938 }
e625cce1 1939 raw_spin_unlock_irq(&ctx->lock);
7b648018 1940
fae3fde6
PZ
1941 event_function_call(event, __perf_event_disable, NULL);
1942}
1943
1944void perf_event_disable_local(struct perf_event *event)
1945{
1946 event_function_local(event, __perf_event_disable, NULL);
d859e29f 1947}
f63a8daa
PZ
1948
1949/*
1950 * Strictly speaking kernel users cannot create groups and therefore this
1951 * interface does not need the perf_event_ctx_lock() magic.
1952 */
1953void perf_event_disable(struct perf_event *event)
1954{
1955 struct perf_event_context *ctx;
1956
1957 ctx = perf_event_ctx_lock(event);
1958 _perf_event_disable(event);
1959 perf_event_ctx_unlock(event, ctx);
1960}
dcfce4a0 1961EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1962
e5d1367f
SE
1963static void perf_set_shadow_time(struct perf_event *event,
1964 struct perf_event_context *ctx,
1965 u64 tstamp)
1966{
1967 /*
1968 * use the correct time source for the time snapshot
1969 *
1970 * We could get by without this by leveraging the
1971 * fact that to get to this function, the caller
1972 * has most likely already called update_context_time()
1973 * and update_cgrp_time_xx() and thus both timestamp
1974 * are identical (or very close). Given that tstamp is,
1975 * already adjusted for cgroup, we could say that:
1976 * tstamp - ctx->timestamp
1977 * is equivalent to
1978 * tstamp - cgrp->timestamp.
1979 *
1980 * Then, in perf_output_read(), the calculation would
1981 * work with no changes because:
1982 * - event is guaranteed scheduled in
1983 * - no scheduled out in between
1984 * - thus the timestamp would be the same
1985 *
1986 * But this is a bit hairy.
1987 *
1988 * So instead, we have an explicit cgroup call to remain
 1989 * within the time source all along. We believe it
1990 * is cleaner and simpler to understand.
1991 */
1992 if (is_cgroup_event(event))
1993 perf_cgroup_set_shadow_time(event, tstamp);
1994 else
1995 event->shadow_ctx_time = tstamp - ctx->timestamp;
1996}
1997
4fe757dd
PZ
1998#define MAX_INTERRUPTS (~0ULL)
1999
2000static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 2001static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 2002
235c7fc7 2003static int
9ffcfa6f 2004event_sched_in(struct perf_event *event,
235c7fc7 2005 struct perf_cpu_context *cpuctx,
6e37738a 2006 struct perf_event_context *ctx)
235c7fc7 2007{
4158755d 2008 u64 tstamp = perf_event_time(event);
44377277 2009 int ret = 0;
4158755d 2010
63342411
PZ
2011 lockdep_assert_held(&ctx->lock);
2012
cdd6c482 2013 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
2014 return 0;
2015
95ff4ca2
AS
2016 WRITE_ONCE(event->oncpu, smp_processor_id());
2017 /*
2018 * Order event::oncpu write to happen before the ACTIVE state
2019 * is visible.
2020 */
2021 smp_wmb();
2022 WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
4fe757dd
PZ
2023
2024 /*
 2025 * Unthrottle events: since we were just scheduled in, we might have missed
 2026 * several ticks already, and for a heavily scheduling task there is little
2027 * guarantee it'll get a tick in a timely manner.
2028 */
2029 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2030 perf_log_throttle(event, 1);
2031 event->hw.interrupts = 0;
2032 }
2033
235c7fc7
IM
2034 /*
2035 * The new state must be visible before we turn it on in the hardware:
2036 */
2037 smp_wmb();
2038
44377277
AS
2039 perf_pmu_disable(event->pmu);
2040
72f669c0
SL
2041 perf_set_shadow_time(event, ctx, tstamp);
2042
ec0d7729
AS
2043 perf_log_itrace_start(event);
2044
a4eaf7f1 2045 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
2046 event->state = PERF_EVENT_STATE_INACTIVE;
2047 event->oncpu = -1;
44377277
AS
2048 ret = -EAGAIN;
2049 goto out;
235c7fc7
IM
2050 }
2051
00a2916f
PZ
2052 event->tstamp_running += tstamp - event->tstamp_stopped;
2053
cdd6c482 2054 if (!is_software_event(event))
3b6f9e5c 2055 cpuctx->active_oncpu++;
2fde4f94
MR
2056 if (!ctx->nr_active++)
2057 perf_event_ctx_activate(ctx);
0f5a2601
PZ
2058 if (event->attr.freq && event->attr.sample_freq)
2059 ctx->nr_freq++;
235c7fc7 2060
cdd6c482 2061 if (event->attr.exclusive)
3b6f9e5c
PM
2062 cpuctx->exclusive = 1;
2063
44377277
AS
2064out:
2065 perf_pmu_enable(event->pmu);
2066
2067 return ret;
235c7fc7
IM
2068}
2069
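/*
 * Sketch of the pmu::add() contract relied on above, using a
 * hypothetical "hypo" PMU for illustration: add() must install the
 * event and, when PERF_EF_START is passed, start it; a nonzero return
 * makes event_sched_in() roll the event back to INACTIVE (and lets
 * group_sched_in() cancel the whole transaction).  This is not a real
 * driver, just the shape of one.
 */
static void hypo_pmu_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;		/* counter is now counting */
}

static int hypo_pmu_add(struct perf_event *event, int flags)
{
	/*
	 * A real driver would claim a hardware counter here and return
	 * -EAGAIN on failure, which event_sched_in() turns into a
	 * scheduling error for the caller.
	 */
	event->hw.state = PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		hypo_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}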
6751b71e 2070static int
cdd6c482 2071group_sched_in(struct perf_event *group_event,
6751b71e 2072 struct perf_cpu_context *cpuctx,
6e37738a 2073 struct perf_event_context *ctx)
6751b71e 2074{
6bde9b6c 2075 struct perf_event *event, *partial_group = NULL;
4a234593 2076 struct pmu *pmu = ctx->pmu;
d7842da4
SE
2077 u64 now = ctx->time;
2078 bool simulate = false;
6751b71e 2079
cdd6c482 2080 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
2081 return 0;
2082
fbbe0701 2083 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 2084
9ffcfa6f 2085 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 2086 pmu->cancel_txn(pmu);
272325c4 2087 perf_mux_hrtimer_restart(cpuctx);
6751b71e 2088 return -EAGAIN;
90151c35 2089 }
6751b71e
PM
2090
2091 /*
2092 * Schedule in siblings as one group (if any):
2093 */
cdd6c482 2094 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 2095 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 2096 partial_group = event;
6751b71e
PM
2097 goto group_error;
2098 }
2099 }
2100
9ffcfa6f 2101 if (!pmu->commit_txn(pmu))
6e85158c 2102 return 0;
9ffcfa6f 2103
6751b71e
PM
2104group_error:
2105 /*
2106 * Groups can be scheduled in as one unit only, so undo any
2107 * partial group before returning:
d7842da4
SE
2108 * The events up to the failed event are scheduled out normally,
2109 * tstamp_stopped will be updated.
2110 *
2111 * The failed events and the remaining siblings need to have
 2112 * their timings updated as if they had gone through event_sched_in()
2113 * and event_sched_out(). This is required to get consistent timings
2114 * across the group. This also takes care of the case where the group
2115 * could never be scheduled by ensuring tstamp_stopped is set to mark
2116 * the time the event was actually stopped, such that time delta
2117 * calculation in update_event_times() is correct.
6751b71e 2118 */
cdd6c482
IM
2119 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2120 if (event == partial_group)
d7842da4
SE
2121 simulate = true;
2122
2123 if (simulate) {
2124 event->tstamp_running += now - event->tstamp_stopped;
2125 event->tstamp_stopped = now;
2126 } else {
2127 event_sched_out(event, cpuctx, ctx);
2128 }
6751b71e 2129 }
9ffcfa6f 2130 event_sched_out(group_event, cpuctx, ctx);
6751b71e 2131
ad5133b7 2132 pmu->cancel_txn(pmu);
90151c35 2133
272325c4 2134 perf_mux_hrtimer_restart(cpuctx);
9e630205 2135
6751b71e
PM
2136 return -EAGAIN;
2137}
2138
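/*
 * Sketch of what produces such a group from user space: the second
 * perf_event_open() passes the leader's fd as group_fd, so both events
 * become leader and sibling in the lists walked by group_sched_in()
 * and group_sched_out() and are scheduled as one unit.  Error handling
 * omitted.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_group_pair(int *leader, int *sibling)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;				/* start the group disabled */
	*leader = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;				/* siblings follow the leader */
	*sibling = syscall(__NR_perf_event_open, &attr, 0, -1, *leader, 0);

	return (*leader < 0 || *sibling < 0) ? -1 : 0;
}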
3b6f9e5c 2139/*
cdd6c482 2140 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 2141 */
cdd6c482 2142static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
2143 struct perf_cpu_context *cpuctx,
2144 int can_add_hw)
2145{
2146 /*
cdd6c482 2147 * Groups consisting entirely of software events can always go on.
3b6f9e5c 2148 */
4ff6a8de 2149 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
3b6f9e5c
PM
2150 return 1;
2151 /*
2152 * If an exclusive group is already on, no other hardware
cdd6c482 2153 * events can go on.
3b6f9e5c
PM
2154 */
2155 if (cpuctx->exclusive)
2156 return 0;
2157 /*
2158 * If this group is exclusive and there are already
cdd6c482 2159 * events on the CPU, it can't go on.
3b6f9e5c 2160 */
cdd6c482 2161 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
2162 return 0;
2163 /*
2164 * Otherwise, try to add it if all previous groups were able
2165 * to go on.
2166 */
2167 return can_add_hw;
2168}
2169
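/*
 * The attr.exclusive test above corresponds to this user-visible flag
 * (illustrative snippet only): an exclusive event or group asks to be
 * the only hardware event on the PMU while it is scheduled.
 */
#include <linux/perf_event.h>

struct perf_event_attr excl_attr = {
	.size      = sizeof(struct perf_event_attr),
	.type      = PERF_TYPE_HARDWARE,
	.config    = PERF_COUNT_HW_CPU_CYCLES,
	.exclusive = 1,		/* checked by group_can_go_on() */
};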
cdd6c482
IM
2170static void add_event_to_ctx(struct perf_event *event,
2171 struct perf_event_context *ctx)
53cfbf59 2172{
4158755d
SE
2173 u64 tstamp = perf_event_time(event);
2174
cdd6c482 2175 list_add_event(event, ctx);
8a49542c 2176 perf_group_attach(event);
4158755d
SE
2177 event->tstamp_enabled = tstamp;
2178 event->tstamp_running = tstamp;
2179 event->tstamp_stopped = tstamp;
53cfbf59
PM
2180}
2181
bd2afa49
PZ
2182static void ctx_sched_out(struct perf_event_context *ctx,
2183 struct perf_cpu_context *cpuctx,
2184 enum event_type_t event_type);
2c29ef0f
PZ
2185static void
2186ctx_sched_in(struct perf_event_context *ctx,
2187 struct perf_cpu_context *cpuctx,
2188 enum event_type_t event_type,
2189 struct task_struct *task);
fe4b04fa 2190
bd2afa49
PZ
2191static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2192 struct perf_event_context *ctx)
2193{
2194 if (!cpuctx->task_ctx)
2195 return;
2196
2197 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2198 return;
2199
2200 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2201}
2202
dce5855b
PZ
2203static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2204 struct perf_event_context *ctx,
2205 struct task_struct *task)
2206{
2207 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2208 if (ctx)
2209 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2210 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2211 if (ctx)
2212 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2213}
2214
3e349507
PZ
2215static void ctx_resched(struct perf_cpu_context *cpuctx,
2216 struct perf_event_context *task_ctx)
0017960f 2217{
3e349507
PZ
2218 perf_pmu_disable(cpuctx->ctx.pmu);
2219 if (task_ctx)
2220 task_ctx_sched_out(cpuctx, task_ctx);
2221 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2222 perf_event_sched_in(cpuctx, task_ctx, current);
2223 perf_pmu_enable(cpuctx->ctx.pmu);
0017960f
PZ
2224}
2225
0793a61d 2226/*
cdd6c482 2227 * Cross CPU call to install and enable a performance event
682076ae 2228 *
a096309b
PZ
2229 * Very similar to remote_function() + event_function() but cannot assume that
2230 * things like ctx->is_active and cpuctx->task_ctx are set.
0793a61d 2231 */
fe4b04fa 2232static int __perf_install_in_context(void *info)
0793a61d 2233{
a096309b
PZ
2234 struct perf_event *event = info;
2235 struct perf_event_context *ctx = event->ctx;
108b02cf 2236 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f 2237 struct perf_event_context *task_ctx = cpuctx->task_ctx;
a096309b
PZ
2238 bool activate = true;
2239 int ret = 0;
0793a61d 2240
63b6da39 2241 raw_spin_lock(&cpuctx->ctx.lock);
39a43640 2242 if (ctx->task) {
b58f6b0d
PZ
2243 raw_spin_lock(&ctx->lock);
2244 task_ctx = ctx;
a096309b
PZ
2245
2246 /* If we're on the wrong CPU, try again */
2247 if (task_cpu(ctx->task) != smp_processor_id()) {
2248 ret = -ESRCH;
63b6da39 2249 goto unlock;
a096309b 2250 }
b58f6b0d 2251
39a43640 2252 /*
a096309b
PZ
2253 * If we're on the right CPU, see if the task we target is
 2254 * current; if not, we don't have to activate the ctx, a future
2255 * context switch will do that for us.
39a43640 2256 */
a096309b
PZ
2257 if (ctx->task != current)
2258 activate = false;
2259 else
2260 WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2261
63b6da39
PZ
2262 } else if (task_ctx) {
2263 raw_spin_lock(&task_ctx->lock);
2c29ef0f 2264 }
b58f6b0d 2265
a096309b
PZ
2266 if (activate) {
2267 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2268 add_event_to_ctx(event, ctx);
2269 ctx_resched(cpuctx, task_ctx);
2270 } else {
2271 add_event_to_ctx(event, ctx);
2272 }
2273
63b6da39 2274unlock:
2c29ef0f 2275 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa 2276
a096309b 2277 return ret;
0793a61d
TG
2278}
2279
2280/*
a096309b
PZ
2281 * Attach a performance event to a context.
2282 *
2283 * Very similar to event_function_call, see comment there.
0793a61d
TG
2284 */
2285static void
cdd6c482
IM
2286perf_install_in_context(struct perf_event_context *ctx,
2287 struct perf_event *event,
0793a61d
TG
2288 int cpu)
2289{
a096309b 2290 struct task_struct *task = READ_ONCE(ctx->task);
39a43640 2291
fe4b04fa
PZ
2292 lockdep_assert_held(&ctx->mutex);
2293
0cda4c02
YZ
2294 if (event->cpu != -1)
2295 event->cpu = cpu;
c3f00c70 2296
0b8f1e2e
PZ
2297 /*
2298 * Ensures that if we can observe event->ctx, both the event and ctx
2299 * will be 'complete'. See perf_iterate_sb_cpu().
2300 */
2301 smp_store_release(&event->ctx, ctx);
2302
a096309b
PZ
2303 if (!task) {
2304 cpu_function_call(cpu, __perf_install_in_context, event);
2305 return;
2306 }
2307
2308 /*
2309 * Should not happen, we validate the ctx is still alive before calling.
2310 */
2311 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2312 return;
2313
39a43640
PZ
2314 /*
2315 * Installing events is tricky because we cannot rely on ctx->is_active
2316 * to be set in case this is the nr_events 0 -> 1 transition.
39a43640 2317 */
a096309b 2318again:
63b6da39 2319 /*
a096309b
PZ
2320 * Cannot use task_function_call() because we need to run on the task's
 2321 * CPU regardless of whether it is current or not.
63b6da39 2322 */
a096309b
PZ
2323 if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2324 return;
2325
2326 raw_spin_lock_irq(&ctx->lock);
2327 task = ctx->task;
84c4e620 2328 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
a096309b
PZ
2329 /*
2330 * Cannot happen because we already checked above (which also
2331 * cannot happen), and we hold ctx->mutex, which serializes us
2332 * against perf_event_exit_task_context().
2333 */
63b6da39
PZ
2334 raw_spin_unlock_irq(&ctx->lock);
2335 return;
2336 }
39a43640 2337 raw_spin_unlock_irq(&ctx->lock);
39a43640 2338 /*
a096309b
PZ
2339 * Since !ctx->is_active doesn't mean anything, we must IPI
2340 * unconditionally.
39a43640 2341 */
a096309b 2342 goto again;
0793a61d
TG
2343}
2344
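/*
 * Kernel-internal users reach this install path through
 * perf_event_create_kernel_counter() (sketch; error handling trimmed,
 * the return value is an ERR_PTR on failure).  A NULL overflow handler
 * means the event is only read, never sampled.
 */
#include <linux/perf_event.h>

static struct perf_event *open_cycles_on_cpu(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}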
fa289bec 2345/*
cdd6c482 2346 * Put an event into inactive state and update time fields.
fa289bec
PM
2347 * Enabling the leader of a group effectively enables all
2348 * the group members that aren't explicitly disabled, so we
2349 * have to update their ->tstamp_enabled also.
2350 * Note: this works for group members as well as group leaders
2351 * since the non-leader members' sibling_lists will be empty.
2352 */
1d9b482e 2353static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 2354{
cdd6c482 2355 struct perf_event *sub;
4158755d 2356 u64 tstamp = perf_event_time(event);
fa289bec 2357
cdd6c482 2358 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 2359 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 2360 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
2361 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2362 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 2363 }
fa289bec
PM
2364}
2365
d859e29f 2366/*
cdd6c482 2367 * Cross CPU call to enable a performance event
d859e29f 2368 */
fae3fde6
PZ
2369static void __perf_event_enable(struct perf_event *event,
2370 struct perf_cpu_context *cpuctx,
2371 struct perf_event_context *ctx,
2372 void *info)
04289bb9 2373{
cdd6c482 2374 struct perf_event *leader = event->group_leader;
fae3fde6 2375 struct perf_event_context *task_ctx;
04289bb9 2376
6e801e01
PZ
2377 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2378 event->state <= PERF_EVENT_STATE_ERROR)
fae3fde6 2379 return;
3cbed429 2380
bd2afa49
PZ
2381 if (ctx->is_active)
2382 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2383
1d9b482e 2384 __perf_event_mark_enabled(event);
04289bb9 2385
fae3fde6
PZ
2386 if (!ctx->is_active)
2387 return;
2388
e5d1367f 2389 if (!event_filter_match(event)) {
bd2afa49 2390 if (is_cgroup_event(event))
e5d1367f 2391 perf_cgroup_defer_enabled(event);
bd2afa49 2392 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2393 return;
e5d1367f 2394 }
f4c4176f 2395
04289bb9 2396 /*
cdd6c482 2397 * If the event is in a group and isn't the group leader,
d859e29f 2398 * then don't put it on unless the group is on.
04289bb9 2399 */
bd2afa49
PZ
2400 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2401 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2402 return;
bd2afa49 2403 }
fe4b04fa 2404
fae3fde6
PZ
2405 task_ctx = cpuctx->task_ctx;
2406 if (ctx->task)
2407 WARN_ON_ONCE(task_ctx != ctx);
d859e29f 2408
fae3fde6 2409 ctx_resched(cpuctx, task_ctx);
7b648018
PZ
2410}
2411
d859e29f 2412/*
cdd6c482 2413 * Enable an event.
c93f7669 2414 *
cdd6c482
IM
2415 * If event->ctx is a cloned context, callers must make sure that
2416 * every task struct that event->ctx->task could possibly point to
c93f7669 2417 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2418 * perf_event_for_each_child or perf_event_for_each as described
2419 * for perf_event_disable.
d859e29f 2420 */
f63a8daa 2421static void _perf_event_enable(struct perf_event *event)
d859e29f 2422{
cdd6c482 2423 struct perf_event_context *ctx = event->ctx;
d859e29f 2424
7b648018 2425 raw_spin_lock_irq(&ctx->lock);
6e801e01
PZ
2426 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2427 event->state < PERF_EVENT_STATE_ERROR) {
7b648018 2428 raw_spin_unlock_irq(&ctx->lock);
d859e29f
PM
2429 return;
2430 }
2431
d859e29f 2432 /*
cdd6c482 2433 * If the event is in error state, clear that first.
7b648018
PZ
2434 *
2435 * That way, if we see the event in error state below, we know that it
2436 * has gone back into error state, as distinct from the task having
2437 * been scheduled away before the cross-call arrived.
d859e29f 2438 */
cdd6c482
IM
2439 if (event->state == PERF_EVENT_STATE_ERROR)
2440 event->state = PERF_EVENT_STATE_OFF;
e625cce1 2441 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa 2442
fae3fde6 2443 event_function_call(event, __perf_event_enable, NULL);
d859e29f 2444}
f63a8daa
PZ
2445
2446/*
2447 * See perf_event_disable();
2448 */
2449void perf_event_enable(struct perf_event *event)
2450{
2451 struct perf_event_context *ctx;
2452
2453 ctx = perf_event_ctx_lock(event);
2454 _perf_event_enable(event);
2455 perf_event_ctx_unlock(event, ctx);
2456}
dcfce4a0 2457EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2458
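/*
 * User-space counterpart (sketch): the same enable/disable transitions
 * are driven through the perf_event_open() fd, for example to count
 * only a region of interest.  Error handling omitted.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static void count_region(int perf_fd)
{
	ioctl(perf_fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);	/* -> _perf_event_enable() path */
	/* ... workload under measurement ... */
	ioctl(perf_fd, PERF_EVENT_IOC_DISABLE, 0);	/* -> _perf_event_disable() path */
}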
375637bc
AS
2459struct stop_event_data {
2460 struct perf_event *event;
2461 unsigned int restart;
2462};
2463
95ff4ca2
AS
2464static int __perf_event_stop(void *info)
2465{
375637bc
AS
2466 struct stop_event_data *sd = info;
2467 struct perf_event *event = sd->event;
95ff4ca2 2468
375637bc 2469 /* if it's already INACTIVE, do nothing */
95ff4ca2
AS
2470 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2471 return 0;
2472
2473 /* matches smp_wmb() in event_sched_in() */
2474 smp_rmb();
2475
2476 /*
2477 * There is a window with interrupts enabled before we get here,
2478 * so we need to check again lest we try to stop another CPU's event.
2479 */
2480 if (READ_ONCE(event->oncpu) != smp_processor_id())
2481 return -EAGAIN;
2482
2483 event->pmu->stop(event, PERF_EF_UPDATE);
2484
375637bc
AS
2485 /*
2486 * May race with the actual stop (through perf_pmu_output_stop()),
2487 * but it is only used for events with AUX ring buffer, and such
2488 * events will refuse to restart because of rb::aux_mmap_count==0,
2489 * see comments in perf_aux_output_begin().
2490 *
 2491 * Since this is happening on an event-local CPU, no trace is lost
2492 * while restarting.
2493 */
2494 if (sd->restart)
2495 event->pmu->start(event, PERF_EF_START);
2496
95ff4ca2
AS
2497 return 0;
2498}
2499
375637bc
AS
2500static int perf_event_restart(struct perf_event *event)
2501{
2502 struct stop_event_data sd = {
2503 .event = event,
2504 .restart = 1,
2505 };
2506 int ret = 0;
2507
2508 do {
2509 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2510 return 0;
2511
2512 /* matches smp_wmb() in event_sched_in() */
2513 smp_rmb();
2514
2515 /*
2516 * We only want to restart ACTIVE events, so if the event goes
2517 * inactive here (event->oncpu==-1), there's nothing more to do;
2518 * fall through with ret==-ENXIO.
2519 */
2520 ret = cpu_function_call(READ_ONCE(event->oncpu),
2521 __perf_event_stop, &sd);
2522 } while (ret == -EAGAIN);
2523
2524 return ret;
2525}
2526
2527/*
 2528 * In order to contain the amount of racy and tricky code in the address filter
 2529 * configuration management, it is a two-part process:
2530 *
2531 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2532 * we update the addresses of corresponding vmas in
2533 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2534 * (p2) when an event is scheduled in (pmu::add), it calls
2535 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2536 * if the generation has changed since the previous call.
2537 *
2538 * If (p1) happens while the event is active, we restart it to force (p2).
2539 *
2540 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2541 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2542 * ioctl;
2543 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2544 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2545 * for reading;
2546 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2547 * of exec.
2548 */
2549void perf_event_addr_filters_sync(struct perf_event *event)
2550{
2551 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2552
2553 if (!has_addr_filter(event))
2554 return;
2555
2556 raw_spin_lock(&ifh->lock);
2557 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2558 event->pmu->addr_filters_sync(event);
2559 event->hw.addr_filters_gen = event->addr_filters_gen;
2560 }
2561 raw_spin_unlock(&ifh->lock);
2562}
2563EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
2564
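/*
 * The filters that steps (p1)/(p2) above keep in sync are installed
 * from user space with the SET_FILTER ioctl (sketch).  The filter
 * string format shown here ("filter <start>/<size>@<object>") follows
 * the syntax parsed elsewhere in this file; the address range and path
 * are made-up example values.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int set_addr_filter(int perf_fd)
{
	return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER,
		     "filter 0x1000/0x200@/usr/bin/example");
}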
f63a8daa 2565static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2566{
2023b359 2567 /*
cdd6c482 2568 * not supported on inherited events
2023b359 2569 */
2e939d1d 2570 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2571 return -EINVAL;
2572
cdd6c482 2573 atomic_add(refresh, &event->event_limit);
f63a8daa 2574 _perf_event_enable(event);
2023b359
PZ
2575
2576 return 0;
79f14641 2577}
f63a8daa
PZ
2578
2579/*
2580 * See perf_event_disable()
2581 */
2582int perf_event_refresh(struct perf_event *event, int refresh)
2583{
2584 struct perf_event_context *ctx;
2585 int ret;
2586
2587 ctx = perf_event_ctx_lock(event);
2588 ret = _perf_event_refresh(event, refresh);
2589 perf_event_ctx_unlock(event, ctx);
2590
2591 return ret;
2592}
26ca5c11 2593EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 2594
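/*
 * User-space view of the refresh counter (sketch): arm the event for n
 * overflows; once event_limit reaches zero the event disables itself
 * and notifies the owner.  Error handling omitted.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static void arm_for_n_overflows(int perf_fd, int n)
{
	ioctl(perf_fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n);	/* -> _perf_event_refresh(event, n) */
}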
5b0311e1
FW
2595static void ctx_sched_out(struct perf_event_context *ctx,
2596 struct perf_cpu_context *cpuctx,
2597 enum event_type_t event_type)
235c7fc7 2598{
db24d33e 2599 int is_active = ctx->is_active;
c994d613 2600 struct perf_event *event;
235c7fc7 2601
c994d613 2602 lockdep_assert_held(&ctx->lock);
235c7fc7 2603
39a43640
PZ
2604 if (likely(!ctx->nr_events)) {
2605 /*
2606 * See __perf_remove_from_context().
2607 */
2608 WARN_ON_ONCE(ctx->is_active);
2609 if (ctx->task)
2610 WARN_ON_ONCE(cpuctx->task_ctx);
facc4307 2611 return;
39a43640
PZ
2612 }
2613
db24d33e 2614 ctx->is_active &= ~event_type;
3cbaa590
PZ
2615 if (!(ctx->is_active & EVENT_ALL))
2616 ctx->is_active = 0;
2617
63e30d3e
PZ
2618 if (ctx->task) {
2619 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2620 if (!ctx->is_active)
2621 cpuctx->task_ctx = NULL;
2622 }
facc4307 2623
8fdc6539
PZ
2624 /*
2625 * Always update time if it was set; not only when it changes.
2626 * Otherwise we can 'forget' to update time for any but the last
2627 * context we sched out. For example:
2628 *
2629 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2630 * ctx_sched_out(.event_type = EVENT_PINNED)
2631 *
2632 * would only update time for the pinned events.
2633 */
3cbaa590
PZ
2634 if (is_active & EVENT_TIME) {
2635 /* update (and stop) ctx time */
2636 update_context_time(ctx);
2637 update_cgrp_time_from_cpuctx(cpuctx);
2638 }
2639
8fdc6539
PZ
2640 is_active ^= ctx->is_active; /* changed bits */
2641
3cbaa590 2642 if (!ctx->nr_active || !(is_active & EVENT_ALL))
facc4307 2643 return;
5b0311e1 2644
075e0b00 2645 perf_pmu_disable(ctx->pmu);
3cbaa590 2646 if (is_active & EVENT_PINNED) {
889ff015
FW
2647 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2648 group_sched_out(event, cpuctx, ctx);
9ed6060d 2649 }
889ff015 2650
3cbaa590 2651 if (is_active & EVENT_FLEXIBLE) {
889ff015 2652 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2653 group_sched_out(event, cpuctx, ctx);
9ed6060d 2654 }
1b9a644f 2655 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2656}
2657
564c2b21 2658/*
5a3126d4
PZ
2659 * Test whether two contexts are equivalent, i.e. whether they have both been
2660 * cloned from the same version of the same context.
2661 *
2662 * Equivalence is measured using a generation number in the context that is
2663 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2664 * and list_del_event().
564c2b21 2665 */
cdd6c482
IM
2666static int context_equiv(struct perf_event_context *ctx1,
2667 struct perf_event_context *ctx2)
564c2b21 2668{
211de6eb
PZ
2669 lockdep_assert_held(&ctx1->lock);
2670 lockdep_assert_held(&ctx2->lock);
2671
5a3126d4
PZ
2672 /* Pinning disables the swap optimization */
2673 if (ctx1->pin_count || ctx2->pin_count)
2674 return 0;
2675
2676 /* If ctx1 is the parent of ctx2 */
2677 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2678 return 1;
2679
2680 /* If ctx2 is the parent of ctx1 */
2681 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2682 return 1;
2683
2684 /*
2685 * If ctx1 and ctx2 have the same parent; we flatten the parent
2686 * hierarchy, see perf_event_init_context().
2687 */
2688 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2689 ctx1->parent_gen == ctx2->parent_gen)
2690 return 1;
2691
2692 /* Unmatched */
2693 return 0;
564c2b21
PM
2694}
2695
cdd6c482
IM
2696static void __perf_event_sync_stat(struct perf_event *event,
2697 struct perf_event *next_event)
bfbd3381
PZ
2698{
2699 u64 value;
2700
cdd6c482 2701 if (!event->attr.inherit_stat)
bfbd3381
PZ
2702 return;
2703
2704 /*
cdd6c482 2705 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
2706 * because we're in the middle of a context switch and have IRQs
2707 * disabled, which upsets smp_call_function_single(), however
cdd6c482 2708 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2709 * don't need to use it.
2710 */
cdd6c482
IM
2711 switch (event->state) {
2712 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2713 event->pmu->read(event);
2714 /* fall-through */
bfbd3381 2715
cdd6c482
IM
2716 case PERF_EVENT_STATE_INACTIVE:
2717 update_event_times(event);
bfbd3381
PZ
2718 break;
2719
2720 default:
2721 break;
2722 }
2723
2724 /*
cdd6c482 2725 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2726 * values when we flip the contexts.
2727 */
e7850595
PZ
2728 value = local64_read(&next_event->count);
2729 value = local64_xchg(&event->count, value);
2730 local64_set(&next_event->count, value);
bfbd3381 2731
cdd6c482
IM
2732 swap(event->total_time_enabled, next_event->total_time_enabled);
2733 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2734
bfbd3381 2735 /*
19d2e755 2736 * Since we swizzled the values, update the user visible data too.
bfbd3381 2737 */
cdd6c482
IM
2738 perf_event_update_userpage(event);
2739 perf_event_update_userpage(next_event);
bfbd3381
PZ
2740}
2741
cdd6c482
IM
2742static void perf_event_sync_stat(struct perf_event_context *ctx,
2743 struct perf_event_context *next_ctx)
bfbd3381 2744{
cdd6c482 2745 struct perf_event *event, *next_event;
bfbd3381
PZ
2746
2747 if (!ctx->nr_stat)
2748 return;
2749
02ffdbc8
PZ
2750 update_context_time(ctx);
2751
cdd6c482
IM
2752 event = list_first_entry(&ctx->event_list,
2753 struct perf_event, event_entry);
bfbd3381 2754
cdd6c482
IM
2755 next_event = list_first_entry(&next_ctx->event_list,
2756 struct perf_event, event_entry);
bfbd3381 2757
cdd6c482
IM
2758 while (&event->event_entry != &ctx->event_list &&
2759 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2760
cdd6c482 2761 __perf_event_sync_stat(event, next_event);
bfbd3381 2762
cdd6c482
IM
2763 event = list_next_entry(event, event_entry);
2764 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2765 }
2766}
2767
fe4b04fa
PZ
2768static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2769 struct task_struct *next)
0793a61d 2770{
8dc85d54 2771 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2772 struct perf_event_context *next_ctx;
5a3126d4 2773 struct perf_event_context *parent, *next_parent;
108b02cf 2774 struct perf_cpu_context *cpuctx;
c93f7669 2775 int do_switch = 1;
0793a61d 2776
108b02cf
PZ
2777 if (likely(!ctx))
2778 return;
10989fb2 2779
108b02cf
PZ
2780 cpuctx = __get_cpu_context(ctx);
2781 if (!cpuctx->task_ctx)
0793a61d
TG
2782 return;
2783
c93f7669 2784 rcu_read_lock();
8dc85d54 2785 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2786 if (!next_ctx)
2787 goto unlock;
2788
2789 parent = rcu_dereference(ctx->parent_ctx);
2790 next_parent = rcu_dereference(next_ctx->parent_ctx);
2791
 2792 /* If neither context has a parent context, they cannot be clones. */
802c8a61 2793 if (!parent && !next_parent)
5a3126d4
PZ
2794 goto unlock;
2795
2796 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2797 /*
2798 * Looks like the two contexts are clones, so we might be
2799 * able to optimize the context switch. We lock both
2800 * contexts and check that they are clones under the
2801 * lock (including re-checking that neither has been
2802 * uncloned in the meantime). It doesn't matter which
2803 * order we take the locks because no other cpu could
2804 * be trying to lock both of these tasks.
2805 */
e625cce1
TG
2806 raw_spin_lock(&ctx->lock);
2807 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2808 if (context_equiv(ctx, next_ctx)) {
63b6da39
PZ
2809 WRITE_ONCE(ctx->task, next);
2810 WRITE_ONCE(next_ctx->task, task);
5a158c3c
YZ
2811
2812 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2813
63b6da39
PZ
2814 /*
2815 * RCU_INIT_POINTER here is safe because we've not
2816 * modified the ctx and the above modification of
2817 * ctx->task and ctx->task_ctx_data are immaterial
2818 * since those values are always verified under
2819 * ctx->lock which we're now holding.
2820 */
2821 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2822 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2823
c93f7669 2824 do_switch = 0;
bfbd3381 2825
cdd6c482 2826 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2827 }
e625cce1
TG
2828 raw_spin_unlock(&next_ctx->lock);
2829 raw_spin_unlock(&ctx->lock);
564c2b21 2830 }
5a3126d4 2831unlock:
c93f7669 2832 rcu_read_unlock();
564c2b21 2833
c93f7669 2834 if (do_switch) {
facc4307 2835 raw_spin_lock(&ctx->lock);
8833d0e2 2836 task_ctx_sched_out(cpuctx, ctx);
facc4307 2837 raw_spin_unlock(&ctx->lock);
c93f7669 2838 }
0793a61d
TG
2839}
2840
e48c1788
PZ
2841static DEFINE_PER_CPU(struct list_head, sched_cb_list);
2842
ba532500
YZ
2843void perf_sched_cb_dec(struct pmu *pmu)
2844{
e48c1788
PZ
2845 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2846
ba532500 2847 this_cpu_dec(perf_sched_cb_usages);
e48c1788
PZ
2848
2849 if (!--cpuctx->sched_cb_usage)
2850 list_del(&cpuctx->sched_cb_entry);
ba532500
YZ
2851}
2852
e48c1788 2853
ba532500
YZ
2854void perf_sched_cb_inc(struct pmu *pmu)
2855{
e48c1788
PZ
2856 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2857
2858 if (!cpuctx->sched_cb_usage++)
2859 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
2860
ba532500
YZ
2861 this_cpu_inc(perf_sched_cb_usages);
2862}
2863
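/*
 * Sketch of how a PMU driver uses this inc/dec pair (modelled loosely
 * on how the x86 PMU handles multi-event PEBS/LBR; the "hypo" names
 * are hypothetical): the callback is only registered while at least
 * one scheduled-in event needs it, and perf core then invokes
 * ->sched_task() on every context switch.
 */
static void hypo_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (!sched_in) {
		/* flush per-task hardware buffers before the old task leaves */
	}
}

static void hypo_event_add_needs_cb(struct perf_event *event)
{
	perf_sched_cb_inc(event->ctx->pmu);
}

static void hypo_event_del_needs_cb(struct perf_event *event)
{
	perf_sched_cb_dec(event->ctx->pmu);
}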
2864/*
2865 * This function provides the context switch callback to the lower code
2866 * layer. It is invoked ONLY when the context switch callback is enabled.
09e61b4f
PZ
2867 *
2868 * This callback is relevant even to per-cpu events; for example multi event
2869 * PEBS requires this to provide PID/TID information. This requires we flush
2870 * all queued PEBS records before we context switch to a new task.
ba532500
YZ
2871 */
2872static void perf_pmu_sched_task(struct task_struct *prev,
2873 struct task_struct *next,
2874 bool sched_in)
2875{
2876 struct perf_cpu_context *cpuctx;
2877 struct pmu *pmu;
ba532500
YZ
2878
2879 if (prev == next)
2880 return;
2881
e48c1788
PZ
2882 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
2883 pmu = cpuctx->unique_pmu; /* software PMUs will not have sched_task */
ba532500 2884
e48c1788
PZ
2885 if (WARN_ON_ONCE(!pmu->sched_task))
2886 continue;
ba532500 2887
e48c1788
PZ
2888 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2889 perf_pmu_disable(pmu);
ba532500 2890
e48c1788 2891 pmu->sched_task(cpuctx->task_ctx, sched_in);
ba532500 2892
e48c1788
PZ
2893 perf_pmu_enable(pmu);
2894 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
ba532500 2895 }
ba532500
YZ
2896}
2897
45ac1403
AH
2898static void perf_event_switch(struct task_struct *task,
2899 struct task_struct *next_prev, bool sched_in);
2900
8dc85d54
PZ
2901#define for_each_task_context_nr(ctxn) \
2902 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2903
2904/*
2905 * Called from scheduler to remove the events of the current task,
2906 * with interrupts disabled.
2907 *
2908 * We stop each event and update the event value in event->count.
2909 *
2910 * This does not protect us against NMI, but disable()
2911 * sets the disabled bit in the control field of event _before_
 2912 * accessing the event control register. If an NMI hits, then it will
2913 * not restart the event.
2914 */
ab0cce56
JO
2915void __perf_event_task_sched_out(struct task_struct *task,
2916 struct task_struct *next)
8dc85d54
PZ
2917{
2918 int ctxn;
2919
ba532500
YZ
2920 if (__this_cpu_read(perf_sched_cb_usages))
2921 perf_pmu_sched_task(task, next, false);
2922
45ac1403
AH
2923 if (atomic_read(&nr_switch_events))
2924 perf_event_switch(task, next, false);
2925
8dc85d54
PZ
2926 for_each_task_context_nr(ctxn)
2927 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2928
2929 /*
2930 * if cgroup events exist on this CPU, then we need
2931 * to check if we have to switch out PMU state.
 2933 * cgroup events are system-wide mode only
2933 */
4a32fea9 2934 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 2935 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2936}
2937
5b0311e1
FW
2938/*
2939 * Called with IRQs disabled
2940 */
2941static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2942 enum event_type_t event_type)
2943{
2944 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2945}
2946
235c7fc7 2947static void
5b0311e1 2948ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2949 struct perf_cpu_context *cpuctx)
0793a61d 2950{
cdd6c482 2951 struct perf_event *event;
0793a61d 2952
889ff015
FW
2953 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2954 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2955 continue;
5632ab12 2956 if (!event_filter_match(event))
3b6f9e5c
PM
2957 continue;
2958
e5d1367f
SE
2959 /* may need to reset tstamp_enabled */
2960 if (is_cgroup_event(event))
2961 perf_cgroup_mark_enabled(event, ctx);
2962
8c9ed8e1 2963 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2964 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2965
2966 /*
2967 * If this pinned group hasn't been scheduled,
2968 * put it in error state.
2969 */
cdd6c482
IM
2970 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2971 update_group_times(event);
2972 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2973 }
3b6f9e5c 2974 }
5b0311e1
FW
2975}
2976
2977static void
2978ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2979 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2980{
2981 struct perf_event *event;
2982 int can_add_hw = 1;
3b6f9e5c 2983
889ff015
FW
2984 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2985 /* Ignore events in OFF or ERROR state */
2986 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2987 continue;
04289bb9
IM
2988 /*
2989 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2990 * of events:
04289bb9 2991 */
5632ab12 2992 if (!event_filter_match(event))
0793a61d
TG
2993 continue;
2994
e5d1367f
SE
2995 /* may need to reset tstamp_enabled */
2996 if (is_cgroup_event(event))
2997 perf_cgroup_mark_enabled(event, ctx);
2998
9ed6060d 2999 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 3000 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 3001 can_add_hw = 0;
9ed6060d 3002 }
0793a61d 3003 }
5b0311e1
FW
3004}
3005
3006static void
3007ctx_sched_in(struct perf_event_context *ctx,
3008 struct perf_cpu_context *cpuctx,
e5d1367f
SE
3009 enum event_type_t event_type,
3010 struct task_struct *task)
5b0311e1 3011{
db24d33e 3012 int is_active = ctx->is_active;
c994d613
PZ
3013 u64 now;
3014
3015 lockdep_assert_held(&ctx->lock);
e5d1367f 3016
5b0311e1 3017 if (likely(!ctx->nr_events))
facc4307 3018 return;
5b0311e1 3019
3cbaa590 3020 ctx->is_active |= (event_type | EVENT_TIME);
63e30d3e
PZ
3021 if (ctx->task) {
3022 if (!is_active)
3023 cpuctx->task_ctx = ctx;
3024 else
3025 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3026 }
3027
3cbaa590
PZ
3028 is_active ^= ctx->is_active; /* changed bits */
3029
3030 if (is_active & EVENT_TIME) {
3031 /* start ctx time */
3032 now = perf_clock();
3033 ctx->timestamp = now;
3034 perf_cgroup_set_timestamp(task, ctx);
3035 }
3036
5b0311e1
FW
3037 /*
3038 * First go through the list and put on any pinned groups
3039 * in order to give them the best chance of going on.
3040 */
3cbaa590 3041 if (is_active & EVENT_PINNED)
6e37738a 3042 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
3043
3044 /* Then walk through the lower prio flexible groups */
3cbaa590 3045 if (is_active & EVENT_FLEXIBLE)
6e37738a 3046 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
3047}
3048
329c0e01 3049static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
3050 enum event_type_t event_type,
3051 struct task_struct *task)
329c0e01
FW
3052{
3053 struct perf_event_context *ctx = &cpuctx->ctx;
3054
e5d1367f 3055 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
3056}
3057
e5d1367f
SE
3058static void perf_event_context_sched_in(struct perf_event_context *ctx,
3059 struct task_struct *task)
235c7fc7 3060{
108b02cf 3061 struct perf_cpu_context *cpuctx;
235c7fc7 3062
108b02cf 3063 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
3064 if (cpuctx->task_ctx == ctx)
3065 return;
3066
facc4307 3067 perf_ctx_lock(cpuctx, ctx);
1b9a644f 3068 perf_pmu_disable(ctx->pmu);
329c0e01
FW
3069 /*
3070 * We want to keep the following priority order:
3071 * cpu pinned (that don't need to move), task pinned,
3072 * cpu flexible, task flexible.
3073 */
3074 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
63e30d3e 3075 perf_event_sched_in(cpuctx, ctx, task);
facc4307
PZ
3076 perf_pmu_enable(ctx->pmu);
3077 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
3078}
3079
8dc85d54
PZ
3080/*
3081 * Called from scheduler to add the events of the current task
3082 * with interrupts disabled.
3083 *
3084 * We restore the event value and then enable it.
3085 *
3086 * This does not protect us against NMI, but enable()
3087 * sets the enabled bit in the control field of event _before_
 3088 * accessing the event control register. If an NMI hits, then it will
3089 * keep the event running.
3090 */
ab0cce56
JO
3091void __perf_event_task_sched_in(struct task_struct *prev,
3092 struct task_struct *task)
8dc85d54
PZ
3093{
3094 struct perf_event_context *ctx;
3095 int ctxn;
3096
7e41d177
PZ
3097 /*
3098 * If cgroup events exist on this CPU, then we need to check if we have
 3099 * to switch in PMU state; cgroup events are system-wide mode only.
3100 *
3101 * Since cgroup events are CPU events, we must schedule these in before
3102 * we schedule in the task events.
3103 */
3104 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3105 perf_cgroup_sched_in(prev, task);
3106
8dc85d54
PZ
3107 for_each_task_context_nr(ctxn) {
3108 ctx = task->perf_event_ctxp[ctxn];
3109 if (likely(!ctx))
3110 continue;
3111
e5d1367f 3112 perf_event_context_sched_in(ctx, task);
8dc85d54 3113 }
d010b332 3114
45ac1403
AH
3115 if (atomic_read(&nr_switch_events))
3116 perf_event_switch(task, prev, true);
3117
ba532500
YZ
3118 if (__this_cpu_read(perf_sched_cb_usages))
3119 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
3120}
3121
abd50713
PZ
3122static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3123{
3124 u64 frequency = event->attr.sample_freq;
3125 u64 sec = NSEC_PER_SEC;
3126 u64 divisor, dividend;
3127
3128 int count_fls, nsec_fls, frequency_fls, sec_fls;
3129
3130 count_fls = fls64(count);
3131 nsec_fls = fls64(nsec);
3132 frequency_fls = fls64(frequency);
3133 sec_fls = 30;
3134
3135 /*
3136 * We got @count in @nsec, with a target of sample_freq HZ
3137 * the target period becomes:
3138 *
3139 * @count * 10^9
3140 * period = -------------------
3141 * @nsec * sample_freq
3142 *
3143 */
3144
3145 /*
3146 * Reduce accuracy by one bit such that @a and @b converge
3147 * to a similar magnitude.
3148 */
fe4b04fa 3149#define REDUCE_FLS(a, b) \
abd50713
PZ
3150do { \
3151 if (a##_fls > b##_fls) { \
3152 a >>= 1; \
3153 a##_fls--; \
3154 } else { \
3155 b >>= 1; \
3156 b##_fls--; \
3157 } \
3158} while (0)
3159
3160 /*
3161 * Reduce accuracy until either term fits in a u64, then proceed with
3162 * the other, so that finally we can do a u64/u64 division.
3163 */
3164 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3165 REDUCE_FLS(nsec, frequency);
3166 REDUCE_FLS(sec, count);
3167 }
3168
3169 if (count_fls + sec_fls > 64) {
3170 divisor = nsec * frequency;
3171
3172 while (count_fls + sec_fls > 64) {
3173 REDUCE_FLS(count, sec);
3174 divisor >>= 1;
3175 }
3176
3177 dividend = count * sec;
3178 } else {
3179 dividend = count * sec;
3180
3181 while (nsec_fls + frequency_fls > 64) {
3182 REDUCE_FLS(nsec, frequency);
3183 dividend >>= 1;
3184 }
3185
3186 divisor = nsec * frequency;
3187 }
3188
f6ab91ad
PZ
3189 if (!divisor)
3190 return dividend;
3191
abd50713
PZ
3192 return div64_u64(dividend, divisor);
3193}
3194
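/*
 * Worked example of the formula above (illustrative numbers only):
 * with sample_freq = 4000 Hz and a measurement of count = 2,000,000
 * events in nsec = 1,000,000 ns, the target period is
 *
 *	period = count * 10^9 / (nsec * sample_freq)
 *	       = 2e6 * 1e9 / (1e6 * 4000)
 *	       = 500,000 events per sample,
 *
 * i.e. an observed rate of 2e9 events/sec divided by the requested
 * 4000 samples/sec.  The REDUCE_FLS() games exist only to compute
 * this without overflowing 64 bits.
 */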
e050e3f0
SE
3195static DEFINE_PER_CPU(int, perf_throttled_count);
3196static DEFINE_PER_CPU(u64, perf_throttled_seq);
3197
f39d47ff 3198static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 3199{
cdd6c482 3200 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 3201 s64 period, sample_period;
bd2b5b12
PZ
3202 s64 delta;
3203
abd50713 3204 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
3205
3206 delta = (s64)(period - hwc->sample_period);
3207 delta = (delta + 7) / 8; /* low pass filter */
3208
3209 sample_period = hwc->sample_period + delta;
3210
3211 if (!sample_period)
3212 sample_period = 1;
3213
bd2b5b12 3214 hwc->sample_period = sample_period;
abd50713 3215
e7850595 3216 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
3217 if (disable)
3218 event->pmu->stop(event, PERF_EF_UPDATE);
3219
e7850595 3220 local64_set(&hwc->period_left, 0);
f39d47ff
SE
3221
3222 if (disable)
3223 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 3224 }
bd2b5b12
PZ
3225}
3226
e050e3f0
SE
3227/*
3228 * combine freq adjustment with unthrottling to avoid two passes over the
3229 * events. At the same time, make sure, having freq events does not change
3230 * the rate of unthrottling as that would introduce bias.
3231 */
3232static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3233 int needs_unthr)
60db5e09 3234{
cdd6c482
IM
3235 struct perf_event *event;
3236 struct hw_perf_event *hwc;
e050e3f0 3237 u64 now, period = TICK_NSEC;
abd50713 3238 s64 delta;
60db5e09 3239
e050e3f0
SE
3240 /*
 3241 * only need to iterate over all events if:
 3242 * - the context has events in frequency mode (needs freq adjust)
3243 * - there are events to unthrottle on this cpu
3244 */
3245 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
3246 return;
3247
e050e3f0 3248 raw_spin_lock(&ctx->lock);
f39d47ff 3249 perf_pmu_disable(ctx->pmu);
e050e3f0 3250
03541f8b 3251 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 3252 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
3253 continue;
3254
5632ab12 3255 if (!event_filter_match(event))
5d27c23d
PZ
3256 continue;
3257
44377277
AS
3258 perf_pmu_disable(event->pmu);
3259
cdd6c482 3260 hwc = &event->hw;
6a24ed6c 3261
ae23bff1 3262 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 3263 hwc->interrupts = 0;
cdd6c482 3264 perf_log_throttle(event, 1);
a4eaf7f1 3265 event->pmu->start(event, 0);
a78ac325
PZ
3266 }
3267
cdd6c482 3268 if (!event->attr.freq || !event->attr.sample_freq)
44377277 3269 goto next;
60db5e09 3270
e050e3f0
SE
3271 /*
3272 * stop the event and update event->count
3273 */
3274 event->pmu->stop(event, PERF_EF_UPDATE);
3275
e7850595 3276 now = local64_read(&event->count);
abd50713
PZ
3277 delta = now - hwc->freq_count_stamp;
3278 hwc->freq_count_stamp = now;
60db5e09 3279
e050e3f0
SE
3280 /*
3281 * restart the event
3282 * reload only if value has changed
f39d47ff
SE
3283 * we have stopped the event so tell that
3284 * to perf_adjust_period() to avoid stopping it
3285 * twice.
e050e3f0 3286 */
abd50713 3287 if (delta > 0)
f39d47ff 3288 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3289
3290 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3291 next:
3292 perf_pmu_enable(event->pmu);
60db5e09 3293 }
e050e3f0 3294
f39d47ff 3295 perf_pmu_enable(ctx->pmu);
e050e3f0 3296 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3297}
3298
235c7fc7 3299/*
cdd6c482 3300 * Round-robin a context's events:
235c7fc7 3301 */
cdd6c482 3302static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 3303{
dddd3379
TG
3304 /*
3305 * Rotate the first entry last of non-pinned groups. Rotation might be
3306 * disabled by the inheritance code.
3307 */
3308 if (!ctx->rotate_disable)
3309 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
3310}
3311
9e630205 3312static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 3313{
8dc85d54 3314 struct perf_event_context *ctx = NULL;
2fde4f94 3315 int rotate = 0;
7fc23a53 3316
b5ab4cd5 3317 if (cpuctx->ctx.nr_events) {
b5ab4cd5
PZ
3318 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3319 rotate = 1;
3320 }
235c7fc7 3321
8dc85d54 3322 ctx = cpuctx->task_ctx;
b5ab4cd5 3323 if (ctx && ctx->nr_events) {
b5ab4cd5
PZ
3324 if (ctx->nr_events != ctx->nr_active)
3325 rotate = 1;
3326 }
9717e6cd 3327
e050e3f0 3328 if (!rotate)
0f5a2601
PZ
3329 goto done;
3330
facc4307 3331 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 3332 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 3333
e050e3f0
SE
3334 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3335 if (ctx)
3336 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 3337
e050e3f0
SE
3338 rotate_ctx(&cpuctx->ctx);
3339 if (ctx)
3340 rotate_ctx(ctx);
235c7fc7 3341
e050e3f0 3342 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 3343
0f5a2601
PZ
3344 perf_pmu_enable(cpuctx->ctx.pmu);
3345 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 3346done:
9e630205
SE
3347
3348 return rotate;
e9d2b064
PZ
3349}
3350
3351void perf_event_task_tick(void)
3352{
2fde4f94
MR
3353 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3354 struct perf_event_context *ctx, *tmp;
e050e3f0 3355 int throttled;
b5ab4cd5 3356
e9d2b064
PZ
3357 WARN_ON(!irqs_disabled());
3358
e050e3f0
SE
3359 __this_cpu_inc(perf_throttled_seq);
3360 throttled = __this_cpu_xchg(perf_throttled_count, 0);
555e0c1e 3361 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
e050e3f0 3362
2fde4f94 3363 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 3364 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
3365}
3366
889ff015
FW
3367static int event_enable_on_exec(struct perf_event *event,
3368 struct perf_event_context *ctx)
3369{
3370 if (!event->attr.enable_on_exec)
3371 return 0;
3372
3373 event->attr.enable_on_exec = 0;
3374 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3375 return 0;
3376
1d9b482e 3377 __perf_event_mark_enabled(event);
889ff015
FW
3378
3379 return 1;
3380}
3381
57e7986e 3382/*
cdd6c482 3383 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
3384 * This expects task == current.
3385 */
c1274499 3386static void perf_event_enable_on_exec(int ctxn)
57e7986e 3387{
c1274499 3388 struct perf_event_context *ctx, *clone_ctx = NULL;
3e349507 3389 struct perf_cpu_context *cpuctx;
cdd6c482 3390 struct perf_event *event;
57e7986e
PM
3391 unsigned long flags;
3392 int enabled = 0;
3393
3394 local_irq_save(flags);
c1274499 3395 ctx = current->perf_event_ctxp[ctxn];
cdd6c482 3396 if (!ctx || !ctx->nr_events)
57e7986e
PM
3397 goto out;
3398
3e349507
PZ
3399 cpuctx = __get_cpu_context(ctx);
3400 perf_ctx_lock(cpuctx, ctx);
7fce2509 3401 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3e349507
PZ
3402 list_for_each_entry(event, &ctx->event_list, event_entry)
3403 enabled |= event_enable_on_exec(event, ctx);
57e7986e
PM
3404
3405 /*
3e349507 3406 * Unclone and reschedule this context if we enabled any event.
57e7986e 3407 */
3e349507 3408 if (enabled) {
211de6eb 3409 clone_ctx = unclone_ctx(ctx);
3e349507
PZ
3410 ctx_resched(cpuctx, ctx);
3411 }
3412 perf_ctx_unlock(cpuctx, ctx);
57e7986e 3413
9ed6060d 3414out:
57e7986e 3415 local_irq_restore(flags);
211de6eb
PZ
3416
3417 if (clone_ctx)
3418 put_ctx(clone_ctx);
57e7986e
PM
3419}
3420
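/*
 * User-space pattern that relies on the enable-on-exec path above
 * (sketch): open the counter disabled on the target task and let it
 * kick in automatically when that task execs, so fork/exec overhead is
 * not counted.  Error handling omitted.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>

static int open_counting_after_exec(pid_t child)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.enable_on_exec = 1;	/* flipped by perf_event_enable_on_exec() */

	return syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
}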
0492d4c5
PZ
3421struct perf_read_data {
3422 struct perf_event *event;
3423 bool group;
7d88962e 3424 int ret;
0492d4c5
PZ
3425};
3426
0793a61d 3427/*
cdd6c482 3428 * Cross CPU call to read the hardware event
0793a61d 3429 */
cdd6c482 3430static void __perf_event_read(void *info)
0793a61d 3431{
0492d4c5
PZ
3432 struct perf_read_data *data = info;
3433 struct perf_event *sub, *event = data->event;
cdd6c482 3434 struct perf_event_context *ctx = event->ctx;
108b02cf 3435 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4a00c16e 3436 struct pmu *pmu = event->pmu;
621a01ea 3437
e1ac3614
PM
3438 /*
3439 * If this is a task context, we need to check whether it is
 3440 * the current task context of this CPU. If not, it has been
3441 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3442 * event->count would have been updated to a recent sample
3443 * when the event was scheduled out.
e1ac3614
PM
3444 */
3445 if (ctx->task && cpuctx->task_ctx != ctx)
3446 return;
3447
e625cce1 3448 raw_spin_lock(&ctx->lock);
e5d1367f 3449 if (ctx->is_active) {
542e72fc 3450 update_context_time(ctx);
e5d1367f
SE
3451 update_cgrp_time_from_event(event);
3452 }
0492d4c5 3453
cdd6c482 3454 update_event_times(event);
4a00c16e
SB
3455 if (event->state != PERF_EVENT_STATE_ACTIVE)
3456 goto unlock;
0492d4c5 3457
4a00c16e
SB
3458 if (!data->group) {
3459 pmu->read(event);
3460 data->ret = 0;
0492d4c5 3461 goto unlock;
4a00c16e
SB
3462 }
3463
3464 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3465
3466 pmu->read(event);
0492d4c5
PZ
3467
3468 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3469 update_event_times(sub);
4a00c16e
SB
3470 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3471 /*
3472 * Use sibling's PMU rather than @event's since
3473 * sibling could be on different (eg: software) PMU.
3474 */
0492d4c5 3475 sub->pmu->read(sub);
4a00c16e 3476 }
0492d4c5 3477 }
4a00c16e
SB
3478
3479 data->ret = pmu->commit_txn(pmu);
0492d4c5
PZ
3480
3481unlock:
e625cce1 3482 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3483}
3484
b5e58793
PZ
3485static inline u64 perf_event_count(struct perf_event *event)
3486{
eacd3ecc
MF
3487 if (event->pmu->count)
3488 return event->pmu->count(event);
3489
3490 return __perf_event_count(event);
b5e58793
PZ
3491}
3492
ffe8690c
KX
3493/*
3494 * NMI-safe method to read a local event, that is an event that
3495 * is:
3496 * - either for the current task, or for this CPU
 3497 * - does not have inherit set, because inherited task events
3498 * will not be local and we cannot read them atomically
3499 * - must not have a pmu::count method
3500 */
3501u64 perf_event_read_local(struct perf_event *event)
3502{
3503 unsigned long flags;
3504 u64 val;
3505
3506 /*
3507 * Disabling interrupts avoids all counter scheduling (context
3508 * switches, timer based rotation and IPIs).
3509 */
3510 local_irq_save(flags);
3511
3512 /* If this is a per-task event, it must be for current */
3513 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3514 event->hw.target != current);
3515
3516 /* If this is a per-CPU event, it must be for this CPU */
3517 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3518 event->cpu != smp_processor_id());
3519
3520 /*
3521 * It must not be an event with inherit set, we cannot read
3522 * all child counters from atomic context.
3523 */
3524 WARN_ON_ONCE(event->attr.inherit);
3525
3526 /*
3527 * It must not have a pmu::count method, those are not
3528 * NMI safe.
3529 */
3530 WARN_ON_ONCE(event->pmu->count);
3531
3532 /*
 3533 * If the event is currently on this CPU, it's either a per-task event,
 3534 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
3535 * oncpu == -1).
3536 */
3537 if (event->oncpu == smp_processor_id())
3538 event->pmu->read(event);
3539
3540 val = local64_read(&event->count);
3541 local_irq_restore(flags);
3542
3543 return val;
3544}
3545
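As a rough illustration only: a hypothetical kernel-side caller obeying the
constraints above (event local to this CPU or to current, no inherit, no
pmu::count), with the caller assumed to run with preemption disabled so it
cannot migrate away from the event's CPU.

/* hypothetical helper, not part of this file */
static u64 count_section(struct perf_event *ev, void (*section)(void))
{
        u64 before, after;

        before = perf_event_read_local(ev);
        section();
        after = perf_event_read_local(ev);

        return after - before;
}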
7d88962e 3546static int perf_event_read(struct perf_event *event, bool group)
0793a61d 3547{
7d88962e
SB
3548 int ret = 0;
3549
0793a61d 3550 /*
cdd6c482
IM
3551 * If event is enabled and currently active on a CPU, update the
3552 * value in the event structure:
0793a61d 3553 */
cdd6c482 3554 if (event->state == PERF_EVENT_STATE_ACTIVE) {
0492d4c5
PZ
3555 struct perf_read_data data = {
3556 .event = event,
3557 .group = group,
7d88962e 3558 .ret = 0,
0492d4c5 3559 };
71e7bc2b
DCC
3560 ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
3561 /* The event must have been read from an online CPU: */
3562 WARN_ON_ONCE(ret);
3563 ret = ret ? : data.ret;
cdd6c482 3564 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3565 struct perf_event_context *ctx = event->ctx;
3566 unsigned long flags;
3567
e625cce1 3568 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3569 /*
3570 * may read while context is not active
3571 * (e.g., thread is blocked), in that case
3572 * we cannot update context time
3573 */
e5d1367f 3574 if (ctx->is_active) {
c530ccd9 3575 update_context_time(ctx);
e5d1367f
SE
3576 update_cgrp_time_from_event(event);
3577 }
0492d4c5
PZ
3578 if (group)
3579 update_group_times(event);
3580 else
3581 update_event_times(event);
e625cce1 3582 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 3583 }
7d88962e
SB
3584
3585 return ret;
0793a61d
TG
3586}
3587
a63eaf34 3588/*
cdd6c482 3589 * Initialize the perf_event context in a task_struct:
a63eaf34 3590 */
eb184479 3591static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3592{
e625cce1 3593 raw_spin_lock_init(&ctx->lock);
a63eaf34 3594 mutex_init(&ctx->mutex);
2fde4f94 3595 INIT_LIST_HEAD(&ctx->active_ctx_list);
889ff015
FW
3596 INIT_LIST_HEAD(&ctx->pinned_groups);
3597 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3598 INIT_LIST_HEAD(&ctx->event_list);
3599 atomic_set(&ctx->refcount, 1);
eb184479
PZ
3600}
3601
3602static struct perf_event_context *
3603alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3604{
3605 struct perf_event_context *ctx;
3606
3607 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3608 if (!ctx)
3609 return NULL;
3610
3611 __perf_event_init_context(ctx);
3612 if (task) {
3613 ctx->task = task;
3614 get_task_struct(task);
0793a61d 3615 }
eb184479
PZ
3616 ctx->pmu = pmu;
3617
3618 return ctx;
a63eaf34
PM
3619}
3620
2ebd4ffb
MH
3621static struct task_struct *
3622find_lively_task_by_vpid(pid_t vpid)
3623{
3624 struct task_struct *task;
0793a61d
TG
3625
3626 rcu_read_lock();
2ebd4ffb 3627 if (!vpid)
0793a61d
TG
3628 task = current;
3629 else
2ebd4ffb 3630 task = find_task_by_vpid(vpid);
0793a61d
TG
3631 if (task)
3632 get_task_struct(task);
3633 rcu_read_unlock();
3634
3635 if (!task)
3636 return ERR_PTR(-ESRCH);
3637
2ebd4ffb 3638 return task;
2ebd4ffb
MH
3639}
3640
fe4b04fa
PZ
3641/*
3642 * Returns a matching context with refcount and pincount.
3643 */
108b02cf 3644static struct perf_event_context *
4af57ef2
YZ
3645find_get_context(struct pmu *pmu, struct task_struct *task,
3646 struct perf_event *event)
0793a61d 3647{
211de6eb 3648 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 3649 struct perf_cpu_context *cpuctx;
4af57ef2 3650 void *task_ctx_data = NULL;
25346b93 3651 unsigned long flags;
8dc85d54 3652 int ctxn, err;
4af57ef2 3653 int cpu = event->cpu;
0793a61d 3654
22a4ec72 3655 if (!task) {
cdd6c482 3656 /* Must be root to operate on a CPU event: */
0764771d 3657 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3658 return ERR_PTR(-EACCES);
3659
0793a61d 3660 /*
cdd6c482 3661 * We could be clever and allow attaching an event to an
0793a61d
TG
3662 * offline CPU and activate it when the CPU comes up, but
3663 * that's for later.
3664 */
f6325e30 3665 if (!cpu_online(cpu))
0793a61d
TG
3666 return ERR_PTR(-ENODEV);
3667
108b02cf 3668 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3669 ctx = &cpuctx->ctx;
c93f7669 3670 get_ctx(ctx);
fe4b04fa 3671 ++ctx->pin_count;
0793a61d 3672
0793a61d
TG
3673 return ctx;
3674 }
3675
8dc85d54
PZ
3676 err = -EINVAL;
3677 ctxn = pmu->task_ctx_nr;
3678 if (ctxn < 0)
3679 goto errout;
3680
4af57ef2
YZ
3681 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3682 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3683 if (!task_ctx_data) {
3684 err = -ENOMEM;
3685 goto errout;
3686 }
3687 }
3688
9ed6060d 3689retry:
8dc85d54 3690 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3691 if (ctx) {
211de6eb 3692 clone_ctx = unclone_ctx(ctx);
fe4b04fa 3693 ++ctx->pin_count;
4af57ef2
YZ
3694
3695 if (task_ctx_data && !ctx->task_ctx_data) {
3696 ctx->task_ctx_data = task_ctx_data;
3697 task_ctx_data = NULL;
3698 }
e625cce1 3699 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
3700
3701 if (clone_ctx)
3702 put_ctx(clone_ctx);
9137fb28 3703 } else {
eb184479 3704 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3705 err = -ENOMEM;
3706 if (!ctx)
3707 goto errout;
eb184479 3708
4af57ef2
YZ
3709 if (task_ctx_data) {
3710 ctx->task_ctx_data = task_ctx_data;
3711 task_ctx_data = NULL;
3712 }
3713
dbe08d82
ON
3714 err = 0;
3715 mutex_lock(&task->perf_event_mutex);
3716 /*
 3717 * If it has already passed perf_event_exit_task(),
3718 * we must see PF_EXITING, it takes this mutex too.
3719 */
3720 if (task->flags & PF_EXITING)
3721 err = -ESRCH;
3722 else if (task->perf_event_ctxp[ctxn])
3723 err = -EAGAIN;
fe4b04fa 3724 else {
9137fb28 3725 get_ctx(ctx);
fe4b04fa 3726 ++ctx->pin_count;
dbe08d82 3727 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3728 }
dbe08d82
ON
3729 mutex_unlock(&task->perf_event_mutex);
3730
3731 if (unlikely(err)) {
9137fb28 3732 put_ctx(ctx);
dbe08d82
ON
3733
3734 if (err == -EAGAIN)
3735 goto retry;
3736 goto errout;
a63eaf34
PM
3737 }
3738 }
3739
4af57ef2 3740 kfree(task_ctx_data);
0793a61d 3741 return ctx;
c93f7669 3742
9ed6060d 3743errout:
4af57ef2 3744 kfree(task_ctx_data);
c93f7669 3745 return ERR_PTR(err);
0793a61d
TG
3746}
3747
6fb2915d 3748static void perf_event_free_filter(struct perf_event *event);
2541517c 3749static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 3750
cdd6c482 3751static void free_event_rcu(struct rcu_head *head)
592903cd 3752{
cdd6c482 3753 struct perf_event *event;
592903cd 3754
cdd6c482
IM
3755 event = container_of(head, struct perf_event, rcu_head);
3756 if (event->ns)
3757 put_pid_ns(event->ns);
6fb2915d 3758 perf_event_free_filter(event);
cdd6c482 3759 kfree(event);
592903cd
PZ
3760}
3761
b69cf536
PZ
3762static void ring_buffer_attach(struct perf_event *event,
3763 struct ring_buffer *rb);
925d519a 3764
f2fb6bef
KL
3765static void detach_sb_event(struct perf_event *event)
3766{
3767 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3768
3769 raw_spin_lock(&pel->lock);
3770 list_del_rcu(&event->sb_list);
3771 raw_spin_unlock(&pel->lock);
3772}
3773
a4f144eb 3774static bool is_sb_event(struct perf_event *event)
f2fb6bef 3775{
a4f144eb
DCC
3776 struct perf_event_attr *attr = &event->attr;
3777
f2fb6bef 3778 if (event->parent)
a4f144eb 3779 return false;
f2fb6bef
KL
3780
3781 if (event->attach_state & PERF_ATTACH_TASK)
a4f144eb 3782 return false;
f2fb6bef 3783
a4f144eb
DCC
3784 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3785 attr->comm || attr->comm_exec ||
3786 attr->task ||
3787 attr->context_switch)
3788 return true;
3789 return false;
3790}
3791
3792static void unaccount_pmu_sb_event(struct perf_event *event)
3793{
3794 if (is_sb_event(event))
3795 detach_sb_event(event);
f2fb6bef
KL
3796}
3797
4beb31f3 3798static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3799{
4beb31f3
FW
3800 if (event->parent)
3801 return;
3802
4beb31f3
FW
3803 if (is_cgroup_event(event))
3804 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3805}
925d519a 3806
555e0c1e
FW
3807#ifdef CONFIG_NO_HZ_FULL
3808static DEFINE_SPINLOCK(nr_freq_lock);
3809#endif
3810
3811static void unaccount_freq_event_nohz(void)
3812{
3813#ifdef CONFIG_NO_HZ_FULL
3814 spin_lock(&nr_freq_lock);
3815 if (atomic_dec_and_test(&nr_freq_events))
3816 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3817 spin_unlock(&nr_freq_lock);
3818#endif
3819}
3820
3821static void unaccount_freq_event(void)
3822{
3823 if (tick_nohz_full_enabled())
3824 unaccount_freq_event_nohz();
3825 else
3826 atomic_dec(&nr_freq_events);
3827}
3828
4beb31f3
FW
3829static void unaccount_event(struct perf_event *event)
3830{
25432ae9
PZ
3831 bool dec = false;
3832
4beb31f3
FW
3833 if (event->parent)
3834 return;
3835
3836 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 3837 dec = true;
4beb31f3
FW
3838 if (event->attr.mmap || event->attr.mmap_data)
3839 atomic_dec(&nr_mmap_events);
3840 if (event->attr.comm)
3841 atomic_dec(&nr_comm_events);
3842 if (event->attr.task)
3843 atomic_dec(&nr_task_events);
948b26b6 3844 if (event->attr.freq)
555e0c1e 3845 unaccount_freq_event();
45ac1403 3846 if (event->attr.context_switch) {
25432ae9 3847 dec = true;
45ac1403
AH
3848 atomic_dec(&nr_switch_events);
3849 }
4beb31f3 3850 if (is_cgroup_event(event))
25432ae9 3851 dec = true;
4beb31f3 3852 if (has_branch_stack(event))
25432ae9
PZ
3853 dec = true;
3854
9107c89e
PZ
3855 if (dec) {
3856 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3857 schedule_delayed_work(&perf_sched_work, HZ);
3858 }
4beb31f3
FW
3859
3860 unaccount_event_cpu(event, event->cpu);
f2fb6bef
KL
3861
3862 unaccount_pmu_sb_event(event);
4beb31f3 3863}
925d519a 3864
9107c89e
PZ
3865static void perf_sched_delayed(struct work_struct *work)
3866{
3867 mutex_lock(&perf_sched_mutex);
3868 if (atomic_dec_and_test(&perf_sched_count))
3869 static_branch_disable(&perf_sched_events);
3870 mutex_unlock(&perf_sched_mutex);
3871}
3872
bed5b25a
AS
3873/*
3874 * The following implement mutual exclusion of events on "exclusive" pmus
3875 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3876 * at a time, so we disallow creating events that might conflict, namely:
3877 *
3878 * 1) cpu-wide events in the presence of per-task events,
3879 * 2) per-task events in the presence of cpu-wide events,
3880 * 3) two matching events on the same context.
3881 *
3882 * The former two cases are handled in the allocation path (perf_event_alloc(),
a0733e69 3883 * _free_event()), the latter -- before the first perf_install_in_context().
bed5b25a
AS
3884 */
3885static int exclusive_event_init(struct perf_event *event)
3886{
3887 struct pmu *pmu = event->pmu;
3888
3889 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3890 return 0;
3891
3892 /*
3893 * Prevent co-existence of per-task and cpu-wide events on the
3894 * same exclusive pmu.
3895 *
3896 * Negative pmu::exclusive_cnt means there are cpu-wide
3897 * events on this "exclusive" pmu, positive means there are
3898 * per-task events.
3899 *
3900 * Since this is called in perf_event_alloc() path, event::ctx
3901 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3902 * to mean "per-task event", because unlike other attach states it
3903 * never gets cleared.
3904 */
3905 if (event->attach_state & PERF_ATTACH_TASK) {
3906 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3907 return -EBUSY;
3908 } else {
3909 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3910 return -EBUSY;
3911 }
3912
3913 return 0;
3914}
3915
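The sign convention is perhaps easier to see in isolation. A stand-alone
sketch of the same trick (illustrative only, names made up):

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t excl = ATOMIC_INIT(0);

/* 0 -> 1 -> 2 ...; refuses while cpu-wide users hold the count negative */
static int grab_per_task(void)
{
        return atomic_inc_unless_negative(&excl) ? 0 : -EBUSY;
}

/* 0 -> -1 -> -2 ...; refuses while per-task users hold the count positive */
static int grab_cpu_wide(void)
{
        return atomic_dec_unless_positive(&excl) ? 0 : -EBUSY;
}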
3916static void exclusive_event_destroy(struct perf_event *event)
3917{
3918 struct pmu *pmu = event->pmu;
3919
3920 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3921 return;
3922
3923 /* see comment in exclusive_event_init() */
3924 if (event->attach_state & PERF_ATTACH_TASK)
3925 atomic_dec(&pmu->exclusive_cnt);
3926 else
3927 atomic_inc(&pmu->exclusive_cnt);
3928}
3929
3930static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3931{
3932 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3933 (e1->cpu == e2->cpu ||
3934 e1->cpu == -1 ||
3935 e2->cpu == -1))
3936 return true;
3937 return false;
3938}
3939
3940/* Called under the same ctx::mutex as perf_install_in_context() */
3941static bool exclusive_event_installable(struct perf_event *event,
3942 struct perf_event_context *ctx)
3943{
3944 struct perf_event *iter_event;
3945 struct pmu *pmu = event->pmu;
3946
3947 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3948 return true;
3949
3950 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3951 if (exclusive_event_match(iter_event, event))
3952 return false;
3953 }
3954
3955 return true;
3956}
3957
375637bc
AS
3958static void perf_addr_filters_splice(struct perf_event *event,
3959 struct list_head *head);
3960
683ede43 3961static void _free_event(struct perf_event *event)
f1600952 3962{
e360adbe 3963 irq_work_sync(&event->pending);
925d519a 3964
4beb31f3 3965 unaccount_event(event);
9ee318a7 3966
76369139 3967 if (event->rb) {
9bb5d40c
PZ
3968 /*
3969 * Can happen when we close an event with re-directed output.
3970 *
3971 * Since we have a 0 refcount, perf_mmap_close() will skip
3972 * over us; possibly making our ring_buffer_put() the last.
3973 */
3974 mutex_lock(&event->mmap_mutex);
b69cf536 3975 ring_buffer_attach(event, NULL);
9bb5d40c 3976 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3977 }
3978
e5d1367f
SE
3979 if (is_cgroup_event(event))
3980 perf_detach_cgroup(event);
3981
a0733e69
PZ
3982 if (!event->parent) {
3983 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3984 put_callchain_buffers();
3985 }
3986
3987 perf_event_free_bpf_prog(event);
375637bc
AS
3988 perf_addr_filters_splice(event, NULL);
3989 kfree(event->addr_filters_offs);
a0733e69
PZ
3990
3991 if (event->destroy)
3992 event->destroy(event);
3993
3994 if (event->ctx)
3995 put_ctx(event->ctx);
3996
62a92c8f
AS
3997 exclusive_event_destroy(event);
3998 module_put(event->pmu->module);
a0733e69
PZ
3999
4000 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
4001}
4002
683ede43
PZ
4003/*
4004 * Used to free events which have a known refcount of 1, such as in error paths
4005 * where the event isn't exposed yet and inherited events.
4006 */
4007static void free_event(struct perf_event *event)
0793a61d 4008{
683ede43
PZ
4009 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4010 "unexpected event refcount: %ld; ptr=%p\n",
4011 atomic_long_read(&event->refcount), event)) {
4012 /* leak to avoid use-after-free */
4013 return;
4014 }
0793a61d 4015
683ede43 4016 _free_event(event);
0793a61d
TG
4017}
4018
a66a3052 4019/*
f8697762 4020 * Remove user event from the owner task.
a66a3052 4021 */
f8697762 4022static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 4023{
8882135b 4024 struct task_struct *owner;
fb0459d7 4025
8882135b 4026 rcu_read_lock();
8882135b 4027 /*
f47c02c0
PZ
4028 * Matches the smp_store_release() in perf_event_exit_task(). If we
4029 * observe !owner it means the list deletion is complete and we can
4030 * indeed free this event, otherwise we need to serialize on
8882135b
PZ
4031 * owner->perf_event_mutex.
4032 */
f47c02c0 4033 owner = lockless_dereference(event->owner);
8882135b
PZ
4034 if (owner) {
4035 /*
4036 * Since delayed_put_task_struct() also drops the last
4037 * task reference we can safely take a new reference
4038 * while holding the rcu_read_lock().
4039 */
4040 get_task_struct(owner);
4041 }
4042 rcu_read_unlock();
4043
4044 if (owner) {
f63a8daa
PZ
4045 /*
4046 * If we're here through perf_event_exit_task() we're already
4047 * holding ctx->mutex which would be an inversion wrt. the
4048 * normal lock order.
4049 *
4050 * However we can safely take this lock because its the child
4051 * ctx->mutex.
4052 */
4053 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4054
8882135b
PZ
4055 /*
4056 * We have to re-check the event->owner field, if it is cleared
4057 * we raced with perf_event_exit_task(), acquiring the mutex
4058 * ensured they're done, and we can proceed with freeing the
4059 * event.
4060 */
f47c02c0 4061 if (event->owner) {
8882135b 4062 list_del_init(&event->owner_entry);
f47c02c0
PZ
4063 smp_store_release(&event->owner, NULL);
4064 }
8882135b
PZ
4065 mutex_unlock(&owner->perf_event_mutex);
4066 put_task_struct(owner);
4067 }
f8697762
JO
4068}
4069
f8697762
JO
4070static void put_event(struct perf_event *event)
4071{
f8697762
JO
4072 if (!atomic_long_dec_and_test(&event->refcount))
4073 return;
4074
c6e5b732
PZ
4075 _free_event(event);
4076}
4077
4078/*
4079 * Kill an event dead; while event:refcount will preserve the event
4080 * object, it will not preserve its functionality. Once the last 'user'
4081 * gives up the object, we'll destroy the thing.
4082 */
4083int perf_event_release_kernel(struct perf_event *event)
4084{
a4f4bb6d 4085 struct perf_event_context *ctx = event->ctx;
c6e5b732
PZ
4086 struct perf_event *child, *tmp;
4087
a4f4bb6d
PZ
4088 /*
4089 * If we got here through err_file: fput(event_file); we will not have
4090 * attached to a context yet.
4091 */
4092 if (!ctx) {
4093 WARN_ON_ONCE(event->attach_state &
4094 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4095 goto no_ctx;
4096 }
4097
f8697762
JO
4098 if (!is_kernel_event(event))
4099 perf_remove_from_owner(event);
8882135b 4100
5fa7c8ec 4101 ctx = perf_event_ctx_lock(event);
a83fe28e 4102 WARN_ON_ONCE(ctx->parent_ctx);
a69b0ca4 4103 perf_remove_from_context(event, DETACH_GROUP);
683ede43 4104
a69b0ca4 4105 raw_spin_lock_irq(&ctx->lock);
683ede43 4106 /*
a69b0ca4
PZ
 4107 * Mark this event as STATE_DEAD, there is no external reference to it
4108 * anymore.
683ede43 4109 *
a69b0ca4
PZ
4110 * Anybody acquiring event->child_mutex after the below loop _must_
4111 * also see this, most importantly inherit_event() which will avoid
4112 * placing more children on the list.
683ede43 4113 *
c6e5b732
PZ
4114 * Thus this guarantees that we will in fact observe and kill _ALL_
4115 * child events.
683ede43 4116 */
a69b0ca4
PZ
4117 event->state = PERF_EVENT_STATE_DEAD;
4118 raw_spin_unlock_irq(&ctx->lock);
4119
4120 perf_event_ctx_unlock(event, ctx);
683ede43 4121
c6e5b732
PZ
4122again:
4123 mutex_lock(&event->child_mutex);
4124 list_for_each_entry(child, &event->child_list, child_list) {
a6fa941d 4125
c6e5b732
PZ
4126 /*
4127 * Cannot change, child events are not migrated, see the
4128 * comment with perf_event_ctx_lock_nested().
4129 */
4130 ctx = lockless_dereference(child->ctx);
4131 /*
4132 * Since child_mutex nests inside ctx::mutex, we must jump
4133 * through hoops. We start by grabbing a reference on the ctx.
4134 *
4135 * Since the event cannot get freed while we hold the
4136 * child_mutex, the context must also exist and have a !0
4137 * reference count.
4138 */
4139 get_ctx(ctx);
4140
4141 /*
4142 * Now that we have a ctx ref, we can drop child_mutex, and
4143 * acquire ctx::mutex without fear of it going away. Then we
4144 * can re-acquire child_mutex.
4145 */
4146 mutex_unlock(&event->child_mutex);
4147 mutex_lock(&ctx->mutex);
4148 mutex_lock(&event->child_mutex);
4149
4150 /*
4151 * Now that we hold ctx::mutex and child_mutex, revalidate our
4152 * state, if child is still the first entry, it didn't get freed
4153 * and we can continue doing so.
4154 */
4155 tmp = list_first_entry_or_null(&event->child_list,
4156 struct perf_event, child_list);
4157 if (tmp == child) {
4158 perf_remove_from_context(child, DETACH_GROUP);
4159 list_del(&child->child_list);
4160 free_event(child);
4161 /*
4162 * This matches the refcount bump in inherit_event();
4163 * this can't be the last reference.
4164 */
4165 put_event(event);
4166 }
4167
4168 mutex_unlock(&event->child_mutex);
4169 mutex_unlock(&ctx->mutex);
4170 put_ctx(ctx);
4171 goto again;
4172 }
4173 mutex_unlock(&event->child_mutex);
4174
a4f4bb6d
PZ
4175no_ctx:
4176 put_event(event); /* Must be the 'last' reference */
683ede43
PZ
4177 return 0;
4178}
4179EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4180
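A sketch of an in-kernel user of the API exported above, assuming the usual
perf_event_create_kernel_counter() prototype; error handling beyond the
IS_ERR() check is elided.

#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/printk.h>

/* hypothetical: count cycles on @cpu around some workload */
static void count_cycles_on(int cpu)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
                .size   = sizeof(attr),
        };
        struct perf_event *ev;
        u64 enabled, running;

        ev = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
        if (IS_ERR(ev))
                return;

        /* ... run the workload of interest ... */

        pr_info("cycles: %llu\n", perf_event_read_value(ev, &enabled, &running));
        perf_event_release_kernel(ev);
}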
8b10c5e2
PZ
4181/*
4182 * Called when the last reference to the file is gone.
4183 */
a6fa941d
AV
4184static int perf_release(struct inode *inode, struct file *file)
4185{
c6e5b732 4186 perf_event_release_kernel(file->private_data);
a6fa941d 4187 return 0;
fb0459d7 4188}
fb0459d7 4189
59ed446f 4190u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 4191{
cdd6c482 4192 struct perf_event *child;
e53c0994
PZ
4193 u64 total = 0;
4194
59ed446f
PZ
4195 *enabled = 0;
4196 *running = 0;
4197
6f10581a 4198 mutex_lock(&event->child_mutex);
01add3ea 4199
7d88962e 4200 (void)perf_event_read(event, false);
01add3ea
SB
4201 total += perf_event_count(event);
4202
59ed446f
PZ
4203 *enabled += event->total_time_enabled +
4204 atomic64_read(&event->child_total_time_enabled);
4205 *running += event->total_time_running +
4206 atomic64_read(&event->child_total_time_running);
4207
4208 list_for_each_entry(child, &event->child_list, child_list) {
7d88962e 4209 (void)perf_event_read(child, false);
01add3ea 4210 total += perf_event_count(child);
59ed446f
PZ
4211 *enabled += child->total_time_enabled;
4212 *running += child->total_time_running;
4213 }
6f10581a 4214 mutex_unlock(&event->child_mutex);
e53c0994
PZ
4215
4216 return total;
4217}
fb0459d7 4218EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 4219
7d88962e 4220static int __perf_read_group_add(struct perf_event *leader,
fa8c2693 4221 u64 read_format, u64 *values)
3dab77fb 4222{
fa8c2693
PZ
4223 struct perf_event *sub;
4224 int n = 1; /* skip @nr */
7d88962e 4225 int ret;
f63a8daa 4226
7d88962e
SB
4227 ret = perf_event_read(leader, true);
4228 if (ret)
4229 return ret;
abf4868b 4230
fa8c2693
PZ
4231 /*
4232 * Since we co-schedule groups, {enabled,running} times of siblings
4233 * will be identical to those of the leader, so we only publish one
4234 * set.
4235 */
4236 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4237 values[n++] += leader->total_time_enabled +
4238 atomic64_read(&leader->child_total_time_enabled);
4239 }
3dab77fb 4240
fa8c2693
PZ
4241 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4242 values[n++] += leader->total_time_running +
4243 atomic64_read(&leader->child_total_time_running);
4244 }
4245
4246 /*
4247 * Write {count,id} tuples for every sibling.
4248 */
4249 values[n++] += perf_event_count(leader);
abf4868b
PZ
4250 if (read_format & PERF_FORMAT_ID)
4251 values[n++] = primary_event_id(leader);
3dab77fb 4252
fa8c2693
PZ
4253 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4254 values[n++] += perf_event_count(sub);
4255 if (read_format & PERF_FORMAT_ID)
4256 values[n++] = primary_event_id(sub);
4257 }
7d88962e
SB
4258
4259 return 0;
fa8c2693 4260}
3dab77fb 4261
fa8c2693
PZ
4262static int perf_read_group(struct perf_event *event,
4263 u64 read_format, char __user *buf)
4264{
4265 struct perf_event *leader = event->group_leader, *child;
4266 struct perf_event_context *ctx = leader->ctx;
7d88962e 4267 int ret;
fa8c2693 4268 u64 *values;
3dab77fb 4269
fa8c2693 4270 lockdep_assert_held(&ctx->mutex);
3dab77fb 4271
fa8c2693
PZ
4272 values = kzalloc(event->read_size, GFP_KERNEL);
4273 if (!values)
4274 return -ENOMEM;
3dab77fb 4275
fa8c2693
PZ
4276 values[0] = 1 + leader->nr_siblings;
4277
4278 /*
4279 * By locking the child_mutex of the leader we effectively
 4280 * lock the child list of all siblings. XXX explain how.
4281 */
4282 mutex_lock(&leader->child_mutex);
abf4868b 4283
7d88962e
SB
4284 ret = __perf_read_group_add(leader, read_format, values);
4285 if (ret)
4286 goto unlock;
4287
4288 list_for_each_entry(child, &leader->child_list, child_list) {
4289 ret = __perf_read_group_add(child, read_format, values);
4290 if (ret)
4291 goto unlock;
4292 }
abf4868b 4293
fa8c2693 4294 mutex_unlock(&leader->child_mutex);
abf4868b 4295
7d88962e 4296 ret = event->read_size;
fa8c2693
PZ
4297 if (copy_to_user(buf, values, event->read_size))
4298 ret = -EFAULT;
7d88962e 4299 goto out;
fa8c2693 4300
7d88962e
SB
4301unlock:
4302 mutex_unlock(&leader->child_mutex);
4303out:
fa8c2693 4304 kfree(values);
abf4868b 4305 return ret;
3dab77fb
PZ
4306}
4307
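For reference, a user-space reader of the buffer laid out above might look
like this sketch, assuming the group leader was opened with
PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING (struct and helper names are made up):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct group_read {
        uint64_t nr;                            /* values[0]: 1 + nr_siblings */
        uint64_t time_enabled;
        uint64_t time_running;
        struct { uint64_t value, id; } cnt[16]; /* large enough for this sketch */
};

static void dump_group(int group_fd)
{
        struct group_read gr;
        uint64_t i;

        if (read(group_fd, &gr, sizeof(gr)) <= 0)
                return;

        for (i = 0; i < gr.nr; i++)
                printf("id %llu: %llu\n",
                       (unsigned long long)gr.cnt[i].id,
                       (unsigned long long)gr.cnt[i].value);
}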
b15f495b 4308static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
4309 u64 read_format, char __user *buf)
4310{
59ed446f 4311 u64 enabled, running;
3dab77fb
PZ
4312 u64 values[4];
4313 int n = 0;
4314
59ed446f
PZ
4315 values[n++] = perf_event_read_value(event, &enabled, &running);
4316 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4317 values[n++] = enabled;
4318 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4319 values[n++] = running;
3dab77fb 4320 if (read_format & PERF_FORMAT_ID)
cdd6c482 4321 values[n++] = primary_event_id(event);
3dab77fb
PZ
4322
4323 if (copy_to_user(buf, values, n * sizeof(u64)))
4324 return -EFAULT;
4325
4326 return n * sizeof(u64);
4327}
4328
dc633982
JO
4329static bool is_event_hup(struct perf_event *event)
4330{
4331 bool no_children;
4332
a69b0ca4 4333 if (event->state > PERF_EVENT_STATE_EXIT)
dc633982
JO
4334 return false;
4335
4336 mutex_lock(&event->child_mutex);
4337 no_children = list_empty(&event->child_list);
4338 mutex_unlock(&event->child_mutex);
4339 return no_children;
4340}
4341
0793a61d 4342/*
cdd6c482 4343 * Read the performance event - simple non blocking version for now
0793a61d
TG
4344 */
4345static ssize_t
b15f495b 4346__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 4347{
cdd6c482 4348 u64 read_format = event->attr.read_format;
3dab77fb 4349 int ret;
0793a61d 4350
3b6f9e5c 4351 /*
cdd6c482 4352 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
4353 * error state (i.e. because it was pinned but it couldn't be
4354 * scheduled on to the CPU at some point).
4355 */
cdd6c482 4356 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
4357 return 0;
4358
c320c7b7 4359 if (count < event->read_size)
3dab77fb
PZ
4360 return -ENOSPC;
4361
cdd6c482 4362 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 4363 if (read_format & PERF_FORMAT_GROUP)
b15f495b 4364 ret = perf_read_group(event, read_format, buf);
3dab77fb 4365 else
b15f495b 4366 ret = perf_read_one(event, read_format, buf);
0793a61d 4367
3dab77fb 4368 return ret;
0793a61d
TG
4369}
4370
0793a61d
TG
4371static ssize_t
4372perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4373{
cdd6c482 4374 struct perf_event *event = file->private_data;
f63a8daa
PZ
4375 struct perf_event_context *ctx;
4376 int ret;
0793a61d 4377
f63a8daa 4378 ctx = perf_event_ctx_lock(event);
b15f495b 4379 ret = __perf_read(event, buf, count);
f63a8daa
PZ
4380 perf_event_ctx_unlock(event, ctx);
4381
4382 return ret;
0793a61d
TG
4383}
4384
4385static unsigned int perf_poll(struct file *file, poll_table *wait)
4386{
cdd6c482 4387 struct perf_event *event = file->private_data;
76369139 4388 struct ring_buffer *rb;
61b67684 4389 unsigned int events = POLLHUP;
c7138f37 4390
e708d7ad 4391 poll_wait(file, &event->waitq, wait);
179033b3 4392
dc633982 4393 if (is_event_hup(event))
179033b3 4394 return events;
c7138f37 4395
10c6db11 4396 /*
9bb5d40c
PZ
4397 * Pin the event->rb by taking event->mmap_mutex; otherwise
4398 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
4399 */
4400 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
4401 rb = event->rb;
4402 if (rb)
76369139 4403 events = atomic_xchg(&rb->poll, 0);
10c6db11 4404 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
4405 return events;
4406}
4407
f63a8daa 4408static void _perf_event_reset(struct perf_event *event)
6de6a7b9 4409{
7d88962e 4410 (void)perf_event_read(event, false);
e7850595 4411 local64_set(&event->count, 0);
cdd6c482 4412 perf_event_update_userpage(event);
3df5edad
PZ
4413}
4414
c93f7669 4415/*
cdd6c482
IM
4416 * Holding the top-level event's child_mutex means that any
4417 * descendant process that has inherited this event will block
8ba289b8 4418 * in perf_event_exit_event() if it goes to exit, thus satisfying the
cdd6c482 4419 * task existence requirements of perf_event_enable/disable.
c93f7669 4420 */
cdd6c482
IM
4421static void perf_event_for_each_child(struct perf_event *event,
4422 void (*func)(struct perf_event *))
3df5edad 4423{
cdd6c482 4424 struct perf_event *child;
3df5edad 4425
cdd6c482 4426 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 4427
cdd6c482
IM
4428 mutex_lock(&event->child_mutex);
4429 func(event);
4430 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 4431 func(child);
cdd6c482 4432 mutex_unlock(&event->child_mutex);
3df5edad
PZ
4433}
4434
cdd6c482
IM
4435static void perf_event_for_each(struct perf_event *event,
4436 void (*func)(struct perf_event *))
3df5edad 4437{
cdd6c482
IM
4438 struct perf_event_context *ctx = event->ctx;
4439 struct perf_event *sibling;
3df5edad 4440
f63a8daa
PZ
4441 lockdep_assert_held(&ctx->mutex);
4442
cdd6c482 4443 event = event->group_leader;
75f937f2 4444
cdd6c482 4445 perf_event_for_each_child(event, func);
cdd6c482 4446 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 4447 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
4448}
4449
fae3fde6
PZ
4450static void __perf_event_period(struct perf_event *event,
4451 struct perf_cpu_context *cpuctx,
4452 struct perf_event_context *ctx,
4453 void *info)
c7999c6f 4454{
fae3fde6 4455 u64 value = *((u64 *)info);
c7999c6f 4456 bool active;
08247e31 4457
cdd6c482 4458 if (event->attr.freq) {
cdd6c482 4459 event->attr.sample_freq = value;
08247e31 4460 } else {
cdd6c482
IM
4461 event->attr.sample_period = value;
4462 event->hw.sample_period = value;
08247e31 4463 }
bad7192b
PZ
4464
4465 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4466 if (active) {
4467 perf_pmu_disable(ctx->pmu);
1e02cd40
PZ
4468 /*
4469 * We could be throttled; unthrottle now to avoid the tick
4470 * trying to unthrottle while we already re-started the event.
4471 */
4472 if (event->hw.interrupts == MAX_INTERRUPTS) {
4473 event->hw.interrupts = 0;
4474 perf_log_throttle(event, 1);
4475 }
bad7192b
PZ
4476 event->pmu->stop(event, PERF_EF_UPDATE);
4477 }
4478
4479 local64_set(&event->hw.period_left, 0);
4480
4481 if (active) {
4482 event->pmu->start(event, PERF_EF_RELOAD);
4483 perf_pmu_enable(ctx->pmu);
4484 }
c7999c6f
PZ
4485}
4486
4487static int perf_event_period(struct perf_event *event, u64 __user *arg)
4488{
c7999c6f
PZ
4489 u64 value;
4490
4491 if (!is_sampling_event(event))
4492 return -EINVAL;
4493
4494 if (copy_from_user(&value, arg, sizeof(value)))
4495 return -EFAULT;
4496
4497 if (!value)
4498 return -EINVAL;
4499
4500 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4501 return -EINVAL;
4502
fae3fde6 4503 event_function_call(event, __perf_event_period, &value);
08247e31 4504
c7999c6f 4505 return 0;
08247e31
PZ
4506}
4507
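From user space the new period is passed by pointer, matching the
copy_from_user() above; a hypothetical helper:

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* retune a sampling event to one sample every 100000 occurrences */
static int set_period(int fd)
{
        uint64_t period = 100000;

        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}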
ac9721f3
PZ
4508static const struct file_operations perf_fops;
4509
2903ff01 4510static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 4511{
2903ff01
AV
4512 struct fd f = fdget(fd);
4513 if (!f.file)
4514 return -EBADF;
ac9721f3 4515
2903ff01
AV
4516 if (f.file->f_op != &perf_fops) {
4517 fdput(f);
4518 return -EBADF;
ac9721f3 4519 }
2903ff01
AV
4520 *p = f;
4521 return 0;
ac9721f3
PZ
4522}
4523
4524static int perf_event_set_output(struct perf_event *event,
4525 struct perf_event *output_event);
6fb2915d 4526static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 4527static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
a4be7c27 4528
f63a8daa 4529static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 4530{
cdd6c482 4531 void (*func)(struct perf_event *);
3df5edad 4532 u32 flags = arg;
d859e29f
PM
4533
4534 switch (cmd) {
cdd6c482 4535 case PERF_EVENT_IOC_ENABLE:
f63a8daa 4536 func = _perf_event_enable;
d859e29f 4537 break;
cdd6c482 4538 case PERF_EVENT_IOC_DISABLE:
f63a8daa 4539 func = _perf_event_disable;
79f14641 4540 break;
cdd6c482 4541 case PERF_EVENT_IOC_RESET:
f63a8daa 4542 func = _perf_event_reset;
6de6a7b9 4543 break;
3df5edad 4544
cdd6c482 4545 case PERF_EVENT_IOC_REFRESH:
f63a8daa 4546 return _perf_event_refresh(event, arg);
08247e31 4547
cdd6c482
IM
4548 case PERF_EVENT_IOC_PERIOD:
4549 return perf_event_period(event, (u64 __user *)arg);
08247e31 4550
cf4957f1
JO
4551 case PERF_EVENT_IOC_ID:
4552 {
4553 u64 id = primary_event_id(event);
4554
4555 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4556 return -EFAULT;
4557 return 0;
4558 }
4559
cdd6c482 4560 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 4561 {
ac9721f3 4562 int ret;
ac9721f3 4563 if (arg != -1) {
2903ff01
AV
4564 struct perf_event *output_event;
4565 struct fd output;
4566 ret = perf_fget_light(arg, &output);
4567 if (ret)
4568 return ret;
4569 output_event = output.file->private_data;
4570 ret = perf_event_set_output(event, output_event);
4571 fdput(output);
4572 } else {
4573 ret = perf_event_set_output(event, NULL);
ac9721f3 4574 }
ac9721f3
PZ
4575 return ret;
4576 }
a4be7c27 4577
6fb2915d
LZ
4578 case PERF_EVENT_IOC_SET_FILTER:
4579 return perf_event_set_filter(event, (void __user *)arg);
4580
2541517c
AS
4581 case PERF_EVENT_IOC_SET_BPF:
4582 return perf_event_set_bpf_prog(event, arg);
4583
86e7972f
WN
4584 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4585 struct ring_buffer *rb;
4586
4587 rcu_read_lock();
4588 rb = rcu_dereference(event->rb);
4589 if (!rb || !rb->nr_pages) {
4590 rcu_read_unlock();
4591 return -EINVAL;
4592 }
4593 rb_toggle_paused(rb, !!arg);
4594 rcu_read_unlock();
4595 return 0;
4596 }
d859e29f 4597 default:
3df5edad 4598 return -ENOTTY;
d859e29f 4599 }
3df5edad
PZ
4600
4601 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 4602 perf_event_for_each(event, func);
3df5edad 4603 else
cdd6c482 4604 perf_event_for_each_child(event, func);
3df5edad
PZ
4605
4606 return 0;
d859e29f
PM
4607}
4608
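A user-space sketch of the group-wide toggle handled at the end of the
switch above (hypothetical helper):

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static void toggle_group(int leader_fd, int on)
{
        unsigned int cmd = on ? PERF_EVENT_IOC_ENABLE : PERF_EVENT_IOC_DISABLE;

        /* the flag makes the ioctl walk the leader's whole sibling list */
        ioctl(leader_fd, cmd, PERF_IOC_FLAG_GROUP);
}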
f63a8daa
PZ
4609static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4610{
4611 struct perf_event *event = file->private_data;
4612 struct perf_event_context *ctx;
4613 long ret;
4614
4615 ctx = perf_event_ctx_lock(event);
4616 ret = _perf_ioctl(event, cmd, arg);
4617 perf_event_ctx_unlock(event, ctx);
4618
4619 return ret;
4620}
4621
b3f20785
PM
4622#ifdef CONFIG_COMPAT
4623static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4624 unsigned long arg)
4625{
4626 switch (_IOC_NR(cmd)) {
4627 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4628 case _IOC_NR(PERF_EVENT_IOC_ID):
 4629 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4630 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4631 cmd &= ~IOCSIZE_MASK;
4632 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4633 }
4634 break;
4635 }
4636 return perf_ioctl(file, cmd, arg);
4637}
4638#else
4639# define perf_compat_ioctl NULL
4640#endif
4641
cdd6c482 4642int perf_event_task_enable(void)
771d7cde 4643{
f63a8daa 4644 struct perf_event_context *ctx;
cdd6c482 4645 struct perf_event *event;
771d7cde 4646
cdd6c482 4647 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4648 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4649 ctx = perf_event_ctx_lock(event);
4650 perf_event_for_each_child(event, _perf_event_enable);
4651 perf_event_ctx_unlock(event, ctx);
4652 }
cdd6c482 4653 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4654
4655 return 0;
4656}
4657
cdd6c482 4658int perf_event_task_disable(void)
771d7cde 4659{
f63a8daa 4660 struct perf_event_context *ctx;
cdd6c482 4661 struct perf_event *event;
771d7cde 4662
cdd6c482 4663 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4664 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4665 ctx = perf_event_ctx_lock(event);
4666 perf_event_for_each_child(event, _perf_event_disable);
4667 perf_event_ctx_unlock(event, ctx);
4668 }
cdd6c482 4669 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4670
4671 return 0;
4672}
4673
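Assuming these two helpers are reached via the PR_TASK_PERF_EVENTS_ENABLE /
PR_TASK_PERF_EVENTS_DISABLE prctl()s (the call sites are not shown here), a
user-space sketch of bracketing a measured region:

#include <linux/prctl.h>
#include <sys/prctl.h>

static void measure_region(void (*region)(void))
{
        prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
        region();
        prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}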
cdd6c482 4674static int perf_event_index(struct perf_event *event)
194002b2 4675{
a4eaf7f1
PZ
4676 if (event->hw.state & PERF_HES_STOPPED)
4677 return 0;
4678
cdd6c482 4679 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
4680 return 0;
4681
35edc2a5 4682 return event->pmu->event_idx(event);
194002b2
PZ
4683}
4684
c4794295 4685static void calc_timer_values(struct perf_event *event,
e3f3541c 4686 u64 *now,
7f310a5d
EM
4687 u64 *enabled,
4688 u64 *running)
c4794295 4689{
e3f3541c 4690 u64 ctx_time;
c4794295 4691
e3f3541c
PZ
4692 *now = perf_clock();
4693 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
4694 *enabled = ctx_time - event->tstamp_enabled;
4695 *running = ctx_time - event->tstamp_running;
4696}
4697
fa731587
PZ
4698static void perf_event_init_userpage(struct perf_event *event)
4699{
4700 struct perf_event_mmap_page *userpg;
4701 struct ring_buffer *rb;
4702
4703 rcu_read_lock();
4704 rb = rcu_dereference(event->rb);
4705 if (!rb)
4706 goto unlock;
4707
4708 userpg = rb->user_page;
4709
4710 /* Allow new userspace to detect that bit 0 is deprecated */
4711 userpg->cap_bit0_is_deprecated = 1;
4712 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
4713 userpg->data_offset = PAGE_SIZE;
4714 userpg->data_size = perf_data_size(rb);
fa731587
PZ
4715
4716unlock:
4717 rcu_read_unlock();
4718}
4719
c1317ec2
AL
4720void __weak arch_perf_update_userpage(
4721 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
4722{
4723}
4724
38ff667b
PZ
4725/*
4726 * Callers need to ensure there can be no nesting of this function, otherwise
 4727 * the seqlock logic goes bad. We cannot serialize this because the arch
4728 * code calls this from NMI context.
4729 */
cdd6c482 4730void perf_event_update_userpage(struct perf_event *event)
37d81828 4731{
cdd6c482 4732 struct perf_event_mmap_page *userpg;
76369139 4733 struct ring_buffer *rb;
e3f3541c 4734 u64 enabled, running, now;
38ff667b
PZ
4735
4736 rcu_read_lock();
5ec4c599
PZ
4737 rb = rcu_dereference(event->rb);
4738 if (!rb)
4739 goto unlock;
4740
0d641208
EM
4741 /*
4742 * compute total_time_enabled, total_time_running
4743 * based on snapshot values taken when the event
4744 * was last scheduled in.
4745 *
 4746 * we cannot simply call update_context_time()
 4747 * because of locking issues, as we can be called in
4748 * NMI context
4749 */
e3f3541c 4750 calc_timer_values(event, &now, &enabled, &running);
38ff667b 4751
76369139 4752 userpg = rb->user_page;
7b732a75
PZ
4753 /*
4754 * Disable preemption so as to not let the corresponding user-space
4755 * spin too long if we get preempted.
4756 */
4757 preempt_disable();
37d81828 4758 ++userpg->lock;
92f22a38 4759 barrier();
cdd6c482 4760 userpg->index = perf_event_index(event);
b5e58793 4761 userpg->offset = perf_event_count(event);
365a4038 4762 if (userpg->index)
e7850595 4763 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 4764
0d641208 4765 userpg->time_enabled = enabled +
cdd6c482 4766 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 4767
0d641208 4768 userpg->time_running = running +
cdd6c482 4769 atomic64_read(&event->child_total_time_running);
7f8b4e4e 4770
c1317ec2 4771 arch_perf_update_userpage(event, userpg, now);
e3f3541c 4772
92f22a38 4773 barrier();
37d81828 4774 ++userpg->lock;
7b732a75 4775 preempt_enable();
38ff667b 4776unlock:
7b732a75 4777 rcu_read_unlock();
37d81828
PM
4778}
4779
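A user-space counterpart to the writer above, sketched on the assumption
that readers retry on the ->lock sequence counter (helper name made up):

#include <linux/perf_event.h>
#include <stdint.h>

static void read_userpage(volatile struct perf_event_mmap_page *pc,
                          uint64_t *enabled, uint64_t *running)
{
        uint32_t seq;

        do {
                seq = pc->lock;
                __sync_synchronize();   /* pairs with the barrier()s above */

                *enabled = pc->time_enabled;
                *running = pc->time_running;

                __sync_synchronize();
        } while (pc->lock != seq);
}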
906010b2
PZ
4780static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4781{
4782 struct perf_event *event = vma->vm_file->private_data;
76369139 4783 struct ring_buffer *rb;
906010b2
PZ
4784 int ret = VM_FAULT_SIGBUS;
4785
4786 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4787 if (vmf->pgoff == 0)
4788 ret = 0;
4789 return ret;
4790 }
4791
4792 rcu_read_lock();
76369139
FW
4793 rb = rcu_dereference(event->rb);
4794 if (!rb)
906010b2
PZ
4795 goto unlock;
4796
4797 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4798 goto unlock;
4799
76369139 4800 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
4801 if (!vmf->page)
4802 goto unlock;
4803
4804 get_page(vmf->page);
4805 vmf->page->mapping = vma->vm_file->f_mapping;
4806 vmf->page->index = vmf->pgoff;
4807
4808 ret = 0;
4809unlock:
4810 rcu_read_unlock();
4811
4812 return ret;
4813}
4814
10c6db11
PZ
4815static void ring_buffer_attach(struct perf_event *event,
4816 struct ring_buffer *rb)
4817{
b69cf536 4818 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
4819 unsigned long flags;
4820
b69cf536
PZ
4821 if (event->rb) {
4822 /*
4823 * Should be impossible, we set this when removing
4824 * event->rb_entry and wait/clear when adding event->rb_entry.
4825 */
4826 WARN_ON_ONCE(event->rcu_pending);
10c6db11 4827
b69cf536 4828 old_rb = event->rb;
b69cf536
PZ
4829 spin_lock_irqsave(&old_rb->event_lock, flags);
4830 list_del_rcu(&event->rb_entry);
4831 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 4832
2f993cf0
ON
4833 event->rcu_batches = get_state_synchronize_rcu();
4834 event->rcu_pending = 1;
b69cf536 4835 }
10c6db11 4836
b69cf536 4837 if (rb) {
2f993cf0
ON
4838 if (event->rcu_pending) {
4839 cond_synchronize_rcu(event->rcu_batches);
4840 event->rcu_pending = 0;
4841 }
4842
b69cf536
PZ
4843 spin_lock_irqsave(&rb->event_lock, flags);
4844 list_add_rcu(&event->rb_entry, &rb->event_list);
4845 spin_unlock_irqrestore(&rb->event_lock, flags);
4846 }
4847
4848 rcu_assign_pointer(event->rb, rb);
4849
4850 if (old_rb) {
4851 ring_buffer_put(old_rb);
4852 /*
 4853 * Since we detached before setting the new rb (so that we
 4854 * could attach the new rb), we could have missed a wakeup.
4855 * Provide it now.
4856 */
4857 wake_up_all(&event->waitq);
4858 }
10c6db11
PZ
4859}
4860
4861static void ring_buffer_wakeup(struct perf_event *event)
4862{
4863 struct ring_buffer *rb;
4864
4865 rcu_read_lock();
4866 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
4867 if (rb) {
4868 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4869 wake_up_all(&event->waitq);
4870 }
10c6db11
PZ
4871 rcu_read_unlock();
4872}
4873
fdc26706 4874struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 4875{
76369139 4876 struct ring_buffer *rb;
7b732a75 4877
ac9721f3 4878 rcu_read_lock();
76369139
FW
4879 rb = rcu_dereference(event->rb);
4880 if (rb) {
4881 if (!atomic_inc_not_zero(&rb->refcount))
4882 rb = NULL;
ac9721f3
PZ
4883 }
4884 rcu_read_unlock();
4885
76369139 4886 return rb;
ac9721f3
PZ
4887}
4888
fdc26706 4889void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 4890{
76369139 4891 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 4892 return;
7b732a75 4893
9bb5d40c 4894 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 4895
76369139 4896 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
4897}
4898
4899static void perf_mmap_open(struct vm_area_struct *vma)
4900{
cdd6c482 4901 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4902
cdd6c482 4903 atomic_inc(&event->mmap_count);
9bb5d40c 4904 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 4905
45bfb2e5
PZ
4906 if (vma->vm_pgoff)
4907 atomic_inc(&event->rb->aux_mmap_count);
4908
1e0fb9ec
AL
4909 if (event->pmu->event_mapped)
4910 event->pmu->event_mapped(event);
7b732a75
PZ
4911}
4912
95ff4ca2
AS
4913static void perf_pmu_output_stop(struct perf_event *event);
4914
9bb5d40c
PZ
4915/*
4916 * A buffer can be mmap()ed multiple times; either directly through the same
4917 * event, or through other events by use of perf_event_set_output().
4918 *
4919 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4920 * the buffer here, where we still have a VM context. This means we need
4921 * to detach all events redirecting to us.
4922 */
7b732a75
PZ
4923static void perf_mmap_close(struct vm_area_struct *vma)
4924{
cdd6c482 4925 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4926
b69cf536 4927 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
4928 struct user_struct *mmap_user = rb->mmap_user;
4929 int mmap_locked = rb->mmap_locked;
4930 unsigned long size = perf_data_size(rb);
789f90fc 4931
1e0fb9ec
AL
4932 if (event->pmu->event_unmapped)
4933 event->pmu->event_unmapped(event);
4934
45bfb2e5
PZ
4935 /*
4936 * rb->aux_mmap_count will always drop before rb->mmap_count and
4937 * event->mmap_count, so it is ok to use event->mmap_mutex to
4938 * serialize with perf_mmap here.
4939 */
4940 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4941 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
95ff4ca2
AS
4942 /*
4943 * Stop all AUX events that are writing to this buffer,
4944 * so that we can free its AUX pages and corresponding PMU
4945 * data. Note that after rb::aux_mmap_count dropped to zero,
4946 * they won't start any more (see perf_aux_output_begin()).
4947 */
4948 perf_pmu_output_stop(event);
4949
4950 /* now it's safe to free the pages */
45bfb2e5
PZ
4951 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4952 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4953
95ff4ca2 4954 /* this has to be the last one */
45bfb2e5 4955 rb_free_aux(rb);
95ff4ca2
AS
4956 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
4957
45bfb2e5
PZ
4958 mutex_unlock(&event->mmap_mutex);
4959 }
4960
9bb5d40c
PZ
4961 atomic_dec(&rb->mmap_count);
4962
4963 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 4964 goto out_put;
9bb5d40c 4965
b69cf536 4966 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
4967 mutex_unlock(&event->mmap_mutex);
4968
4969 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
4970 if (atomic_read(&rb->mmap_count))
4971 goto out_put;
ac9721f3 4972
9bb5d40c
PZ
4973 /*
4974 * No other mmap()s, detach from all other events that might redirect
4975 * into the now unreachable buffer. Somewhat complicated by the
4976 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4977 */
4978again:
4979 rcu_read_lock();
4980 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4981 if (!atomic_long_inc_not_zero(&event->refcount)) {
4982 /*
4983 * This event is en-route to free_event() which will
4984 * detach it and remove it from the list.
4985 */
4986 continue;
4987 }
4988 rcu_read_unlock();
789f90fc 4989
9bb5d40c
PZ
4990 mutex_lock(&event->mmap_mutex);
4991 /*
4992 * Check we didn't race with perf_event_set_output() which can
4993 * swizzle the rb from under us while we were waiting to
4994 * acquire mmap_mutex.
4995 *
4996 * If we find a different rb; ignore this event, a next
4997 * iteration will no longer find it on the list. We have to
4998 * still restart the iteration to make sure we're not now
4999 * iterating the wrong list.
5000 */
b69cf536
PZ
5001 if (event->rb == rb)
5002 ring_buffer_attach(event, NULL);
5003
cdd6c482 5004 mutex_unlock(&event->mmap_mutex);
9bb5d40c 5005 put_event(event);
ac9721f3 5006
9bb5d40c
PZ
5007 /*
5008 * Restart the iteration; either we're on the wrong list or
 5009 * we destroyed its integrity by doing a deletion.
5010 */
5011 goto again;
7b732a75 5012 }
9bb5d40c
PZ
5013 rcu_read_unlock();
5014
5015 /*
5016 * It could be there's still a few 0-ref events on the list; they'll
5017 * get cleaned up by free_event() -- they'll also still have their
5018 * ref on the rb and will free it whenever they are done with it.
5019 *
5020 * Aside from that, this buffer is 'fully' detached and unmapped,
5021 * undo the VM accounting.
5022 */
5023
5024 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
5025 vma->vm_mm->pinned_vm -= mmap_locked;
5026 free_uid(mmap_user);
5027
b69cf536 5028out_put:
9bb5d40c 5029 ring_buffer_put(rb); /* could be last */
37d81828
PM
5030}
5031
f0f37e2f 5032static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 5033 .open = perf_mmap_open,
45bfb2e5 5034 .close = perf_mmap_close, /* non mergable */
43a21ea8
PZ
5035 .fault = perf_mmap_fault,
5036 .page_mkwrite = perf_mmap_fault,
37d81828
PM
5037};
5038
5039static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5040{
cdd6c482 5041 struct perf_event *event = file->private_data;
22a4f650 5042 unsigned long user_locked, user_lock_limit;
789f90fc 5043 struct user_struct *user = current_user();
22a4f650 5044 unsigned long locked, lock_limit;
45bfb2e5 5045 struct ring_buffer *rb = NULL;
7b732a75
PZ
5046 unsigned long vma_size;
5047 unsigned long nr_pages;
45bfb2e5 5048 long user_extra = 0, extra = 0;
d57e34fd 5049 int ret = 0, flags = 0;
37d81828 5050
c7920614
PZ
5051 /*
5052 * Don't allow mmap() of inherited per-task counters. This would
5053 * create a performance issue due to all children writing to the
76369139 5054 * same rb.
c7920614
PZ
5055 */
5056 if (event->cpu == -1 && event->attr.inherit)
5057 return -EINVAL;
5058
43a21ea8 5059 if (!(vma->vm_flags & VM_SHARED))
37d81828 5060 return -EINVAL;
7b732a75
PZ
5061
5062 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
5063
5064 if (vma->vm_pgoff == 0) {
5065 nr_pages = (vma_size / PAGE_SIZE) - 1;
5066 } else {
5067 /*
5068 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5069 * mapped, all subsequent mappings should have the same size
5070 * and offset. Must be above the normal perf buffer.
5071 */
5072 u64 aux_offset, aux_size;
5073
5074 if (!event->rb)
5075 return -EINVAL;
5076
5077 nr_pages = vma_size / PAGE_SIZE;
5078
5079 mutex_lock(&event->mmap_mutex);
5080 ret = -EINVAL;
5081
5082 rb = event->rb;
5083 if (!rb)
5084 goto aux_unlock;
5085
5086 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5087 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5088
5089 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5090 goto aux_unlock;
5091
5092 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5093 goto aux_unlock;
5094
5095 /* already mapped with a different offset */
5096 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5097 goto aux_unlock;
5098
5099 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5100 goto aux_unlock;
5101
5102 /* already mapped with a different size */
5103 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5104 goto aux_unlock;
5105
5106 if (!is_power_of_2(nr_pages))
5107 goto aux_unlock;
5108
5109 if (!atomic_inc_not_zero(&rb->mmap_count))
5110 goto aux_unlock;
5111
5112 if (rb_has_aux(rb)) {
5113 atomic_inc(&rb->aux_mmap_count);
5114 ret = 0;
5115 goto unlock;
5116 }
5117
5118 atomic_set(&rb->aux_mmap_count, 1);
5119 user_extra = nr_pages;
5120
5121 goto accounting;
5122 }
7b732a75 5123
7730d865 5124 /*
76369139 5125 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
5126 * can do bitmasks instead of modulo.
5127 */
2ed11312 5128 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
5129 return -EINVAL;
5130
7b732a75 5131 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
5132 return -EINVAL;
5133
cdd6c482 5134 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 5135again:
cdd6c482 5136 mutex_lock(&event->mmap_mutex);
76369139 5137 if (event->rb) {
9bb5d40c 5138 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 5139 ret = -EINVAL;
9bb5d40c
PZ
5140 goto unlock;
5141 }
5142
5143 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5144 /*
5145 * Raced against perf_mmap_close() through
5146 * perf_event_set_output(). Try again, hope for better
5147 * luck.
5148 */
5149 mutex_unlock(&event->mmap_mutex);
5150 goto again;
5151 }
5152
ebb3c4c4
PZ
5153 goto unlock;
5154 }
5155
789f90fc 5156 user_extra = nr_pages + 1;
45bfb2e5
PZ
5157
5158accounting:
cdd6c482 5159 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
5160
5161 /*
5162 * Increase the limit linearly with more CPUs:
5163 */
5164 user_lock_limit *= num_online_cpus();
5165
789f90fc 5166 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 5167
789f90fc
PZ
5168 if (user_locked > user_lock_limit)
5169 extra = user_locked - user_lock_limit;
7b732a75 5170
78d7d407 5171 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 5172 lock_limit >>= PAGE_SHIFT;
bc3e53f6 5173 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 5174
459ec28a
IM
5175 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5176 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
5177 ret = -EPERM;
5178 goto unlock;
5179 }
7b732a75 5180
45bfb2e5 5181 WARN_ON(!rb && event->rb);
906010b2 5182
d57e34fd 5183 if (vma->vm_flags & VM_WRITE)
76369139 5184 flags |= RING_BUFFER_WRITABLE;
d57e34fd 5185
76369139 5186 if (!rb) {
45bfb2e5
PZ
5187 rb = rb_alloc(nr_pages,
5188 event->attr.watermark ? event->attr.wakeup_watermark : 0,
5189 event->cpu, flags);
26cb63ad 5190
45bfb2e5
PZ
5191 if (!rb) {
5192 ret = -ENOMEM;
5193 goto unlock;
5194 }
43a21ea8 5195
45bfb2e5
PZ
5196 atomic_set(&rb->mmap_count, 1);
5197 rb->mmap_user = get_current_user();
5198 rb->mmap_locked = extra;
26cb63ad 5199
45bfb2e5 5200 ring_buffer_attach(event, rb);
ac9721f3 5201
45bfb2e5
PZ
5202 perf_event_init_userpage(event);
5203 perf_event_update_userpage(event);
5204 } else {
1a594131
AS
5205 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5206 event->attr.aux_watermark, flags);
45bfb2e5
PZ
5207 if (!ret)
5208 rb->aux_mmap_locked = extra;
5209 }
9a0f05cb 5210
ebb3c4c4 5211unlock:
45bfb2e5
PZ
5212 if (!ret) {
5213 atomic_long_add(user_extra, &user->locked_vm);
5214 vma->vm_mm->pinned_vm += extra;
5215
ac9721f3 5216 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
5217 } else if (rb) {
5218 atomic_dec(&rb->mmap_count);
5219 }
5220aux_unlock:
cdd6c482 5221 mutex_unlock(&event->mmap_mutex);
37d81828 5222
9bb5d40c
PZ
5223 /*
5224 * Since pinned accounting is per vm we cannot allow fork() to copy our
5225 * vma.
5226 */
26cb63ad 5227 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 5228 vma->vm_ops = &perf_mmap_vmops;
7b732a75 5229
1e0fb9ec
AL
5230 if (event->pmu->event_mapped)
5231 event->pmu->event_mapped(event);
5232
7b732a75 5233 return ret;
37d81828
PM
5234}
5235
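From user space the corresponding mapping is one control page plus a
power-of-two number of data pages, per the nr_pages checks above; a
hypothetical helper (error handling elided):

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_ring(int fd, unsigned int data_pages /* power of two */)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);

        return mmap(NULL, (1 + data_pages) * page,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}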
3c446b3d
PZ
5236static int perf_fasync(int fd, struct file *filp, int on)
5237{
496ad9aa 5238 struct inode *inode = file_inode(filp);
cdd6c482 5239 struct perf_event *event = filp->private_data;
3c446b3d
PZ
5240 int retval;
5241
5955102c 5242 inode_lock(inode);
cdd6c482 5243 retval = fasync_helper(fd, filp, on, &event->fasync);
5955102c 5244 inode_unlock(inode);
3c446b3d
PZ
5245
5246 if (retval < 0)
5247 return retval;
5248
5249 return 0;
5250}
5251
0793a61d 5252static const struct file_operations perf_fops = {
3326c1ce 5253 .llseek = no_llseek,
0793a61d
TG
5254 .release = perf_release,
5255 .read = perf_read,
5256 .poll = perf_poll,
d859e29f 5257 .unlocked_ioctl = perf_ioctl,
b3f20785 5258 .compat_ioctl = perf_compat_ioctl,
37d81828 5259 .mmap = perf_mmap,
3c446b3d 5260 .fasync = perf_fasync,
0793a61d
TG
5261};
5262
925d519a 5263/*
cdd6c482 5264 * Perf event wakeup
925d519a
PZ
5265 *
5266 * If there's data, ensure we set the poll() state and publish everything
5267 * to user-space before waking everybody up.
5268 */
5269
fed66e2c
PZ
5270static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5271{
5272 /* only the parent has fasync state */
5273 if (event->parent)
5274 event = event->parent;
5275 return &event->fasync;
5276}
5277
cdd6c482 5278void perf_event_wakeup(struct perf_event *event)
925d519a 5279{
10c6db11 5280 ring_buffer_wakeup(event);
4c9e2542 5281
cdd6c482 5282 if (event->pending_kill) {
fed66e2c 5283 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 5284 event->pending_kill = 0;
4c9e2542 5285 }
925d519a
PZ
5286}
5287
e360adbe 5288static void perf_pending_event(struct irq_work *entry)
79f14641 5289{
cdd6c482
IM
5290 struct perf_event *event = container_of(entry,
5291 struct perf_event, pending);
d525211f
PZ
5292 int rctx;
5293
5294 rctx = perf_swevent_get_recursion_context();
5295 /*
5296 * If we 'fail' here, that's OK, it means recursion is already disabled
5297 * and we won't recurse 'further'.
5298 */
79f14641 5299
cdd6c482
IM
5300 if (event->pending_disable) {
5301 event->pending_disable = 0;
fae3fde6 5302 perf_event_disable_local(event);
79f14641
PZ
5303 }
5304
cdd6c482
IM
5305 if (event->pending_wakeup) {
5306 event->pending_wakeup = 0;
5307 perf_event_wakeup(event);
79f14641 5308 }
d525211f
PZ
5309
5310 if (rctx >= 0)
5311 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
5312}
5313
39447b38
ZY
5314/*
5315 * We assume KVM is currently the only user of these callbacks.
5316 * Later on, we might change this to a list if another
5317 * virtualization implementation needs to support them.
5318 */
5319struct perf_guest_info_callbacks *perf_guest_cbs;
5320
5321int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5322{
5323 perf_guest_cbs = cbs;
5324 return 0;
5325}
5326EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5327
5328int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5329{
5330 perf_guest_cbs = NULL;
5331 return 0;
5332}
5333EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
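/*
 * Hedged sketch (assumes a separate module built against this kernel) of
 * how a hypervisor would register here.  The callback names below mirror
 * what KVM wires up around this time (is_in_guest, is_user_mode,
 * get_guest_ip) and should be treated as illustrative, not as a definitive
 * description of struct perf_guest_info_callbacks.
 */
static int myhv_is_in_guest(void)
{
	return 0;	/* was a vCPU running when the PMI hit? */
}

static int myhv_is_user_mode(void)
{
	return 0;	/* guest user vs. kernel mode at PMI time */
}

static unsigned long myhv_get_guest_ip(void)
{
	return 0;	/* guest instruction pointer for the sample */
}

static struct perf_guest_info_callbacks myhv_guest_cbs = {
	.is_in_guest	= myhv_is_in_guest,
	.is_user_mode	= myhv_is_user_mode,
	.get_guest_ip	= myhv_get_guest_ip,
};

static int __init myhv_init(void)
{
	return perf_register_guest_info_callbacks(&myhv_guest_cbs);
}

static void __exit myhv_exit(void)
{
	perf_unregister_guest_info_callbacks(&myhv_guest_cbs);
}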
5334
4018994f
JO
5335static void
5336perf_output_sample_regs(struct perf_output_handle *handle,
5337 struct pt_regs *regs, u64 mask)
5338{
5339 int bit;
29dd3288 5340 DECLARE_BITMAP(_mask, 64);
4018994f 5341
29dd3288
MS
5342 bitmap_from_u64(_mask, mask);
5343 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
4018994f
JO
5344 u64 val;
5345
5346 val = perf_reg_value(regs, bit);
5347 perf_output_put(handle, val);
5348 }
5349}
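/*
 * Consumer-side sketch (user space, not part of this file): the register
 * values above are emitted one u64 per set bit, in ascending bit order of
 * the sample_regs mask, so a parser can walk them like this.  The function
 * name is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void parse_sample_regs(const uint64_t *vals, uint64_t mask)
{
	int bit, i = 0;

	for (bit = 0; bit < 64; bit++) {
		if (mask & (1ULL << bit))
			printf("reg bit %d = 0x%llx\n", bit,
			       (unsigned long long)vals[i++]);
	}
}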
5350
60e2364e 5351static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
5352 struct pt_regs *regs,
5353 struct pt_regs *regs_user_copy)
4018994f 5354{
88a7c26a
AL
5355 if (user_mode(regs)) {
5356 regs_user->abi = perf_reg_abi(current);
2565711f 5357 regs_user->regs = regs;
88a7c26a
AL
5358 } else if (current->mm) {
5359 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
5360 } else {
5361 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5362 regs_user->regs = NULL;
4018994f
JO
5363 }
5364}
5365
60e2364e
SE
5366static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5367 struct pt_regs *regs)
5368{
5369 regs_intr->regs = regs;
5370 regs_intr->abi = perf_reg_abi(current);
5371}
5372
5373
c5ebcedb
JO
5374/*
5375 * Get remaining task size from user stack pointer.
5376 *
5377 * It'd be better to look at the stack VMA and limit this more
5378 * precisely, but there's no way to get it safely under interrupt,
5379 * so we use TASK_SIZE as the limit.
5380 */
5381static u64 perf_ustack_task_size(struct pt_regs *regs)
5382{
5383 unsigned long addr = perf_user_stack_pointer(regs);
5384
5385 if (!addr || addr >= TASK_SIZE)
5386 return 0;
5387
5388 return TASK_SIZE - addr;
5389}
5390
5391static u16
5392perf_sample_ustack_size(u16 stack_size, u16 header_size,
5393 struct pt_regs *regs)
5394{
5395 u64 task_size;
5396
5397 /* No regs, no stack pointer, no dump. */
5398 if (!regs)
5399 return 0;
5400
5401 /*
5402 * Check whether the requested stack size fits into:
5403 * - TASK_SIZE
5404 * If it doesn't, limit the dump to TASK_SIZE.
5405 *
5406 * - the remaining sample size
5407 * If it doesn't, shrink the stack size so the dump
5408 * fits into the remaining sample size.
5409 */
5410
5411 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5412 stack_size = min(stack_size, (u16) task_size);
5413
5414 /* Current header size plus static size and dynamic size. */
5415 header_size += 2 * sizeof(u64);
5416
5417 /* Does the header plus the stack dump still fit into the u16 sample size? */
5418 if ((u16) (header_size + stack_size) < header_size) {
5419 /*
5420 * If we would overflow the maximum sample size,
5421 * shrink the stack dump size so that it fits.
5422 */
5423 stack_size = USHRT_MAX - header_size - sizeof(u64);
5424 stack_size = round_up(stack_size, sizeof(u64));
5425 }
5426
5427 return stack_size;
5428}
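/*
 * Worked example with illustrative numbers: if header->size is 112 on
 * entry, header_size becomes 112 + 16 = 128 above.  A requested
 * sample_stack_user of 65500 then makes the u16 sum wrap (128 + 65500
 * truncates to 92, which is < 128), so the dump is clamped to
 * 65535 - 128 - 8 = 65399 bytes and rounded up to 65400.
 */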
5429
5430static void
5431perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5432 struct pt_regs *regs)
5433{
5434 /* Case of a kernel thread, nothing to dump */
5435 if (!regs) {
5436 u64 size = 0;
5437 perf_output_put(handle, size);
5438 } else {
5439 unsigned long sp;
5440 unsigned int rem;
5441 u64 dyn_size;
5442
5443 /*
5444 * We dump:
5445 * static size
5446 * - the size requested by the user, or the largest one that
5447 * fits into the maximum sample size
5448 * data
5449 * - user stack dump data
5450 * dynamic size
5451 * - the actual dumped size
5452 */
5453
5454 /* Static size. */
5455 perf_output_put(handle, dump_size);
5456
5457 /* Data. */
5458 sp = perf_user_stack_pointer(regs);
5459 rem = __output_copy_user(handle, (void *) sp, dump_size);
5460 dyn_size = dump_size - rem;
5461
5462 perf_output_skip(handle, rem);
5463
5464 /* Dynamic size. */
5465 perf_output_put(handle, dyn_size);
5466 }
5467}
5468
c980d109
ACM
5469static void __perf_event_header__init_id(struct perf_event_header *header,
5470 struct perf_sample_data *data,
5471 struct perf_event *event)
6844c09d
ACM
5472{
5473 u64 sample_type = event->attr.sample_type;
5474
5475 data->type = sample_type;
5476 header->size += event->id_header_size;
5477
5478 if (sample_type & PERF_SAMPLE_TID) {
5479 /* namespace issues */
5480 data->tid_entry.pid = perf_event_pid(event, current);
5481 data->tid_entry.tid = perf_event_tid(event, current);
5482 }
5483
5484 if (sample_type & PERF_SAMPLE_TIME)
34f43927 5485 data->time = perf_event_clock(event);
6844c09d 5486
ff3d527c 5487 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
5488 data->id = primary_event_id(event);
5489
5490 if (sample_type & PERF_SAMPLE_STREAM_ID)
5491 data->stream_id = event->id;
5492
5493 if (sample_type & PERF_SAMPLE_CPU) {
5494 data->cpu_entry.cpu = raw_smp_processor_id();
5495 data->cpu_entry.reserved = 0;
5496 }
5497}
5498
76369139
FW
5499void perf_event_header__init_id(struct perf_event_header *header,
5500 struct perf_sample_data *data,
5501 struct perf_event *event)
c980d109
ACM
5502{
5503 if (event->attr.sample_id_all)
5504 __perf_event_header__init_id(header, data, event);
5505}
5506
5507static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5508 struct perf_sample_data *data)
5509{
5510 u64 sample_type = data->type;
5511
5512 if (sample_type & PERF_SAMPLE_TID)
5513 perf_output_put(handle, data->tid_entry);
5514
5515 if (sample_type & PERF_SAMPLE_TIME)
5516 perf_output_put(handle, data->time);
5517
5518 if (sample_type & PERF_SAMPLE_ID)
5519 perf_output_put(handle, data->id);
5520
5521 if (sample_type & PERF_SAMPLE_STREAM_ID)
5522 perf_output_put(handle, data->stream_id);
5523
5524 if (sample_type & PERF_SAMPLE_CPU)
5525 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
5526
5527 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5528 perf_output_put(handle, data->id);
c980d109
ACM
5529}
5530
76369139
FW
5531void perf_event__output_id_sample(struct perf_event *event,
5532 struct perf_output_handle *handle,
5533 struct perf_sample_data *sample)
c980d109
ACM
5534{
5535 if (event->attr.sample_id_all)
5536 __perf_event__output_id_sample(handle, sample);
5537}
5538
3dab77fb 5539static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
5540 struct perf_event *event,
5541 u64 enabled, u64 running)
3dab77fb 5542{
cdd6c482 5543 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5544 u64 values[4];
5545 int n = 0;
5546
b5e58793 5547 values[n++] = perf_event_count(event);
3dab77fb 5548 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 5549 values[n++] = enabled +
cdd6c482 5550 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
5551 }
5552 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 5553 values[n++] = running +
cdd6c482 5554 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
5555 }
5556 if (read_format & PERF_FORMAT_ID)
cdd6c482 5557 values[n++] = primary_event_id(event);
3dab77fb 5558
76369139 5559 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5560}
5561
5562/*
cdd6c482 5563 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
5564 */
5565static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
5566 struct perf_event *event,
5567 u64 enabled, u64 running)
3dab77fb 5568{
cdd6c482
IM
5569 struct perf_event *leader = event->group_leader, *sub;
5570 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5571 u64 values[5];
5572 int n = 0;
5573
5574 values[n++] = 1 + leader->nr_siblings;
5575
5576 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 5577 values[n++] = enabled;
3dab77fb
PZ
5578
5579 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 5580 values[n++] = running;
3dab77fb 5581
cdd6c482 5582 if (leader != event)
3dab77fb
PZ
5583 leader->pmu->read(leader);
5584
b5e58793 5585 values[n++] = perf_event_count(leader);
3dab77fb 5586 if (read_format & PERF_FORMAT_ID)
cdd6c482 5587 values[n++] = primary_event_id(leader);
3dab77fb 5588
76369139 5589 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 5590
65abc865 5591 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
5592 n = 0;
5593
6f5ab001
JO
5594 if ((sub != event) &&
5595 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
5596 sub->pmu->read(sub);
5597
b5e58793 5598 values[n++] = perf_event_count(sub);
3dab77fb 5599 if (read_format & PERF_FORMAT_ID)
cdd6c482 5600 values[n++] = primary_event_id(sub);
3dab77fb 5601
76369139 5602 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5603 }
5604}
5605
eed01528
SE
5606#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5607 PERF_FORMAT_TOTAL_TIME_RUNNING)
5608
3dab77fb 5609static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 5610 struct perf_event *event)
3dab77fb 5611{
e3f3541c 5612 u64 enabled = 0, running = 0, now;
eed01528
SE
5613 u64 read_format = event->attr.read_format;
5614
5615 /*
5616 * compute total_time_enabled, total_time_running
5617 * based on snapshot values taken when the event
5618 * was last scheduled in.
5619 *
5620 * We cannot simply call update_context_time()
5621 * because of a locking issue: we are called in
5622 * NMI context.
5623 */
c4794295 5624 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 5625 calc_timer_values(event, &now, &enabled, &running);
eed01528 5626
cdd6c482 5627 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 5628 perf_output_read_group(handle, event, enabled, running);
3dab77fb 5629 else
eed01528 5630 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
5631}
5632
5622f295
MM
5633void perf_output_sample(struct perf_output_handle *handle,
5634 struct perf_event_header *header,
5635 struct perf_sample_data *data,
cdd6c482 5636 struct perf_event *event)
5622f295
MM
5637{
5638 u64 sample_type = data->type;
5639
5640 perf_output_put(handle, *header);
5641
ff3d527c
AH
5642 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5643 perf_output_put(handle, data->id);
5644
5622f295
MM
5645 if (sample_type & PERF_SAMPLE_IP)
5646 perf_output_put(handle, data->ip);
5647
5648 if (sample_type & PERF_SAMPLE_TID)
5649 perf_output_put(handle, data->tid_entry);
5650
5651 if (sample_type & PERF_SAMPLE_TIME)
5652 perf_output_put(handle, data->time);
5653
5654 if (sample_type & PERF_SAMPLE_ADDR)
5655 perf_output_put(handle, data->addr);
5656
5657 if (sample_type & PERF_SAMPLE_ID)
5658 perf_output_put(handle, data->id);
5659
5660 if (sample_type & PERF_SAMPLE_STREAM_ID)
5661 perf_output_put(handle, data->stream_id);
5662
5663 if (sample_type & PERF_SAMPLE_CPU)
5664 perf_output_put(handle, data->cpu_entry);
5665
5666 if (sample_type & PERF_SAMPLE_PERIOD)
5667 perf_output_put(handle, data->period);
5668
5669 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 5670 perf_output_read(handle, event);
5622f295
MM
5671
5672 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5673 if (data->callchain) {
5674 int size = 1;
5675
5676 if (data->callchain)
5677 size += data->callchain->nr;
5678
5679 size *= sizeof(u64);
5680
76369139 5681 __output_copy(handle, data->callchain, size);
5622f295
MM
5682 } else {
5683 u64 nr = 0;
5684 perf_output_put(handle, nr);
5685 }
5686 }
5687
5688 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
5689 struct perf_raw_record *raw = data->raw;
5690
5691 if (raw) {
5692 struct perf_raw_frag *frag = &raw->frag;
5693
5694 perf_output_put(handle, raw->size);
5695 do {
5696 if (frag->copy) {
5697 __output_custom(handle, frag->copy,
5698 frag->data, frag->size);
5699 } else {
5700 __output_copy(handle, frag->data,
5701 frag->size);
5702 }
5703 if (perf_raw_frag_last(frag))
5704 break;
5705 frag = frag->next;
5706 } while (1);
5707 if (frag->pad)
5708 __output_skip(handle, NULL, frag->pad);
5622f295
MM
5709 } else {
5710 struct {
5711 u32 size;
5712 u32 data;
5713 } raw = {
5714 .size = sizeof(u32),
5715 .data = 0,
5716 };
5717 perf_output_put(handle, raw);
5718 }
5719 }
a7ac67ea 5720
bce38cd5
SE
5721 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5722 if (data->br_stack) {
5723 size_t size;
5724
5725 size = data->br_stack->nr
5726 * sizeof(struct perf_branch_entry);
5727
5728 perf_output_put(handle, data->br_stack->nr);
5729 perf_output_copy(handle, data->br_stack->entries, size);
5730 } else {
5731 /*
5732 * we always store at least the value of nr
5733 */
5734 u64 nr = 0;
5735 perf_output_put(handle, nr);
5736 }
5737 }
4018994f
JO
5738
5739 if (sample_type & PERF_SAMPLE_REGS_USER) {
5740 u64 abi = data->regs_user.abi;
5741
5742 /*
5743 * If there are no regs to dump, notice it through
5744 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5745 */
5746 perf_output_put(handle, abi);
5747
5748 if (abi) {
5749 u64 mask = event->attr.sample_regs_user;
5750 perf_output_sample_regs(handle,
5751 data->regs_user.regs,
5752 mask);
5753 }
5754 }
c5ebcedb 5755
a5cdd40c 5756 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
5757 perf_output_sample_ustack(handle,
5758 data->stack_user_size,
5759 data->regs_user.regs);
a5cdd40c 5760 }
c3feedf2
AK
5761
5762 if (sample_type & PERF_SAMPLE_WEIGHT)
5763 perf_output_put(handle, data->weight);
d6be9ad6
SE
5764
5765 if (sample_type & PERF_SAMPLE_DATA_SRC)
5766 perf_output_put(handle, data->data_src.val);
a5cdd40c 5767
fdfbbd07
AK
5768 if (sample_type & PERF_SAMPLE_TRANSACTION)
5769 perf_output_put(handle, data->txn);
5770
60e2364e
SE
5771 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5772 u64 abi = data->regs_intr.abi;
5773 /*
5774 * If there are no regs to dump, notice it through
5775 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5776 */
5777 perf_output_put(handle, abi);
5778
5779 if (abi) {
5780 u64 mask = event->attr.sample_regs_intr;
5781
5782 perf_output_sample_regs(handle,
5783 data->regs_intr.regs,
5784 mask);
5785 }
5786 }
5787
a5cdd40c
PZ
5788 if (!event->attr.watermark) {
5789 int wakeup_events = event->attr.wakeup_events;
5790
5791 if (wakeup_events) {
5792 struct ring_buffer *rb = handle->rb;
5793 int events = local_inc_return(&rb->events);
5794
5795 if (events >= wakeup_events) {
5796 local_sub(wakeup_events, &rb->events);
5797 local_inc(&rb->wakeup);
5798 }
5799 }
5800 }
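	/*
	 * Example for the block above: with attr.watermark == 0 and
	 * attr.wakeup_events == 10, every tenth record written bumps
	 * rb->wakeup, and perf_output_end() then wakes up poll() waiters.
	 */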
5622f295
MM
5801}
5802
5803void perf_prepare_sample(struct perf_event_header *header,
5804 struct perf_sample_data *data,
cdd6c482 5805 struct perf_event *event,
5622f295 5806 struct pt_regs *regs)
7b732a75 5807{
cdd6c482 5808 u64 sample_type = event->attr.sample_type;
7b732a75 5809
cdd6c482 5810 header->type = PERF_RECORD_SAMPLE;
c320c7b7 5811 header->size = sizeof(*header) + event->header_size;
5622f295
MM
5812
5813 header->misc = 0;
5814 header->misc |= perf_misc_flags(regs);
6fab0192 5815
c980d109 5816 __perf_event_header__init_id(header, data, event);
6844c09d 5817
c320c7b7 5818 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
5819 data->ip = perf_instruction_pointer(regs);
5820
b23f3325 5821 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 5822 int size = 1;
394ee076 5823
e6dab5ff 5824 data->callchain = perf_callchain(event, regs);
5622f295
MM
5825
5826 if (data->callchain)
5827 size += data->callchain->nr;
5828
5829 header->size += size * sizeof(u64);
394ee076
PZ
5830 }
5831
3a43ce68 5832 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
5833 struct perf_raw_record *raw = data->raw;
5834 int size;
5835
5836 if (raw) {
5837 struct perf_raw_frag *frag = &raw->frag;
5838 u32 sum = 0;
5839
5840 do {
5841 sum += frag->size;
5842 if (perf_raw_frag_last(frag))
5843 break;
5844 frag = frag->next;
5845 } while (1);
5846
5847 size = round_up(sum + sizeof(u32), sizeof(u64));
5848 raw->size = size - sizeof(u32);
5849 frag->pad = raw->size - sum;
5850 } else {
5851 size = sizeof(u64);
5852 }
a044560c 5853
7e3f977e 5854 header->size += size;
7f453c24 5855 }
bce38cd5
SE
5856
5857 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5858 int size = sizeof(u64); /* nr */
5859 if (data->br_stack) {
5860 size += data->br_stack->nr
5861 * sizeof(struct perf_branch_entry);
5862 }
5863 header->size += size;
5864 }
4018994f 5865
2565711f 5866 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
5867 perf_sample_regs_user(&data->regs_user, regs,
5868 &data->regs_user_copy);
2565711f 5869
4018994f
JO
5870 if (sample_type & PERF_SAMPLE_REGS_USER) {
5871 /* regs dump ABI info */
5872 int size = sizeof(u64);
5873
4018994f
JO
5874 if (data->regs_user.regs) {
5875 u64 mask = event->attr.sample_regs_user;
5876 size += hweight64(mask) * sizeof(u64);
5877 }
5878
5879 header->size += size;
5880 }
c5ebcedb
JO
5881
5882 if (sample_type & PERF_SAMPLE_STACK_USER) {
5883 /*
5884 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
5885 * processed as the last one, or an additional check must be
5886 * added whenever a new sample type is introduced, because we
5887 * could eat up the rest of the sample size.
5888 */
c5ebcedb
JO
5889 u16 stack_size = event->attr.sample_stack_user;
5890 u16 size = sizeof(u64);
5891
c5ebcedb 5892 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 5893 data->regs_user.regs);
c5ebcedb
JO
5894
5895 /*
5896 * If there is something to dump, add space for the dump
5897 * itself and for the field that tells the dynamic size,
5898 * which is how many have been actually dumped.
5899 */
5900 if (stack_size)
5901 size += sizeof(u64) + stack_size;
5902
5903 data->stack_user_size = stack_size;
5904 header->size += size;
5905 }
60e2364e
SE
5906
5907 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5908 /* regs dump ABI info */
5909 int size = sizeof(u64);
5910
5911 perf_sample_regs_intr(&data->regs_intr, regs);
5912
5913 if (data->regs_intr.regs) {
5914 u64 mask = event->attr.sample_regs_intr;
5915
5916 size += hweight64(mask) * sizeof(u64);
5917 }
5918
5919 header->size += size;
5920 }
5622f295 5921}
7f453c24 5922
9ecda41a
WN
5923static void __always_inline
5924__perf_event_output(struct perf_event *event,
5925 struct perf_sample_data *data,
5926 struct pt_regs *regs,
5927 int (*output_begin)(struct perf_output_handle *,
5928 struct perf_event *,
5929 unsigned int))
5622f295
MM
5930{
5931 struct perf_output_handle handle;
5932 struct perf_event_header header;
689802b2 5933
927c7a9e
FW
5934 /* protect the callchain buffers */
5935 rcu_read_lock();
5936
cdd6c482 5937 perf_prepare_sample(&header, data, event, regs);
5c148194 5938
9ecda41a 5939 if (output_begin(&handle, event, header.size))
927c7a9e 5940 goto exit;
0322cd6e 5941
cdd6c482 5942 perf_output_sample(&handle, &header, data, event);
f413cdb8 5943
8a057d84 5944 perf_output_end(&handle);
927c7a9e
FW
5945
5946exit:
5947 rcu_read_unlock();
0322cd6e
PZ
5948}
5949
9ecda41a
WN
5950void
5951perf_event_output_forward(struct perf_event *event,
5952 struct perf_sample_data *data,
5953 struct pt_regs *regs)
5954{
5955 __perf_event_output(event, data, regs, perf_output_begin_forward);
5956}
5957
5958void
5959perf_event_output_backward(struct perf_event *event,
5960 struct perf_sample_data *data,
5961 struct pt_regs *regs)
5962{
5963 __perf_event_output(event, data, regs, perf_output_begin_backward);
5964}
5965
5966void
5967perf_event_output(struct perf_event *event,
5968 struct perf_sample_data *data,
5969 struct pt_regs *regs)
5970{
5971 __perf_event_output(event, data, regs, perf_output_begin);
5972}
5973
38b200d6 5974/*
cdd6c482 5975 * read event_id
38b200d6
PZ
5976 */
5977
5978struct perf_read_event {
5979 struct perf_event_header header;
5980
5981 u32 pid;
5982 u32 tid;
38b200d6
PZ
5983};
5984
5985static void
cdd6c482 5986perf_event_read_event(struct perf_event *event,
38b200d6
PZ
5987 struct task_struct *task)
5988{
5989 struct perf_output_handle handle;
c980d109 5990 struct perf_sample_data sample;
dfc65094 5991 struct perf_read_event read_event = {
38b200d6 5992 .header = {
cdd6c482 5993 .type = PERF_RECORD_READ,
38b200d6 5994 .misc = 0,
c320c7b7 5995 .size = sizeof(read_event) + event->read_size,
38b200d6 5996 },
cdd6c482
IM
5997 .pid = perf_event_pid(event, task),
5998 .tid = perf_event_tid(event, task),
38b200d6 5999 };
3dab77fb 6000 int ret;
38b200d6 6001
c980d109 6002 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 6003 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
6004 if (ret)
6005 return;
6006
dfc65094 6007 perf_output_put(&handle, read_event);
cdd6c482 6008 perf_output_read(&handle, event);
c980d109 6009 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 6010
38b200d6
PZ
6011 perf_output_end(&handle);
6012}
6013
aab5b71e 6014typedef void (perf_iterate_f)(struct perf_event *event, void *data);
52d857a8
JO
6015
6016static void
aab5b71e
PZ
6017perf_iterate_ctx(struct perf_event_context *ctx,
6018 perf_iterate_f output,
b73e4fef 6019 void *data, bool all)
52d857a8
JO
6020{
6021 struct perf_event *event;
6022
6023 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
b73e4fef
AS
6024 if (!all) {
6025 if (event->state < PERF_EVENT_STATE_INACTIVE)
6026 continue;
6027 if (!event_filter_match(event))
6028 continue;
6029 }
6030
67516844 6031 output(event, data);
52d857a8
JO
6032 }
6033}
6034
aab5b71e 6035static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
f2fb6bef
KL
6036{
6037 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
6038 struct perf_event *event;
6039
6040 list_for_each_entry_rcu(event, &pel->list, sb_list) {
0b8f1e2e
PZ
6041 /*
6042 * Skip events that are not fully formed yet; ensure that
6043 * if we observe event->ctx, both event and ctx will be
6044 * complete enough. See perf_install_in_context().
6045 */
6046 if (!smp_load_acquire(&event->ctx))
6047 continue;
6048
f2fb6bef
KL
6049 if (event->state < PERF_EVENT_STATE_INACTIVE)
6050 continue;
6051 if (!event_filter_match(event))
6052 continue;
6053 output(event, data);
6054 }
6055}
6056
aab5b71e
PZ
6057/*
6058 * Iterate all events that need to receive side-band events.
6059 *
6060 * For new callers; ensure that account_pmu_sb_event() includes
6061 * your event, otherwise it might not get delivered.
6062 */
52d857a8 6063static void
aab5b71e 6064perf_iterate_sb(perf_iterate_f output, void *data,
52d857a8
JO
6065 struct perf_event_context *task_ctx)
6066{
52d857a8 6067 struct perf_event_context *ctx;
52d857a8
JO
6068 int ctxn;
6069
aab5b71e
PZ
6070 rcu_read_lock();
6071 preempt_disable();
6072
4e93ad60 6073 /*
aab5b71e
PZ
6074 * If we have task_ctx != NULL we only notify the task context itself.
6075 * The task_ctx is set only for EXIT events before releasing task
4e93ad60
JO
6076 * context.
6077 */
6078 if (task_ctx) {
aab5b71e
PZ
6079 perf_iterate_ctx(task_ctx, output, data, false);
6080 goto done;
4e93ad60
JO
6081 }
6082
aab5b71e 6083 perf_iterate_sb_cpu(output, data);
f2fb6bef
KL
6084
6085 for_each_task_context_nr(ctxn) {
52d857a8
JO
6086 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6087 if (ctx)
aab5b71e 6088 perf_iterate_ctx(ctx, output, data, false);
52d857a8 6089 }
aab5b71e 6090done:
f2fb6bef 6091 preempt_enable();
52d857a8 6092 rcu_read_unlock();
95ff4ca2
AS
6093}
6094
375637bc
AS
6095/*
6096 * Clear all file-based filters at exec, they'll have to be
6097 * re-instated when/if these objects are mmapped again.
6098 */
6099static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6100{
6101 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6102 struct perf_addr_filter *filter;
6103 unsigned int restart = 0, count = 0;
6104 unsigned long flags;
6105
6106 if (!has_addr_filter(event))
6107 return;
6108
6109 raw_spin_lock_irqsave(&ifh->lock, flags);
6110 list_for_each_entry(filter, &ifh->list, entry) {
6111 if (filter->inode) {
6112 event->addr_filters_offs[count] = 0;
6113 restart++;
6114 }
6115
6116 count++;
6117 }
6118
6119 if (restart)
6120 event->addr_filters_gen++;
6121 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6122
6123 if (restart)
6124 perf_event_restart(event);
6125}
6126
6127void perf_event_exec(void)
6128{
6129 struct perf_event_context *ctx;
6130 int ctxn;
6131
6132 rcu_read_lock();
6133 for_each_task_context_nr(ctxn) {
6134 ctx = current->perf_event_ctxp[ctxn];
6135 if (!ctx)
6136 continue;
6137
6138 perf_event_enable_on_exec(ctxn);
6139
aab5b71e 6140 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
375637bc
AS
6141 true);
6142 }
6143 rcu_read_unlock();
6144}
6145
95ff4ca2
AS
6146struct remote_output {
6147 struct ring_buffer *rb;
6148 int err;
6149};
6150
6151static void __perf_event_output_stop(struct perf_event *event, void *data)
6152{
6153 struct perf_event *parent = event->parent;
6154 struct remote_output *ro = data;
6155 struct ring_buffer *rb = ro->rb;
375637bc
AS
6156 struct stop_event_data sd = {
6157 .event = event,
6158 };
95ff4ca2
AS
6159
6160 if (!has_aux(event))
6161 return;
6162
6163 if (!parent)
6164 parent = event;
6165
6166 /*
6167 * In case of inheritance, it will be the parent that links to the
6168 * ring-buffer, but it will be the child that's actually using it:
6169 */
6170 if (rcu_dereference(parent->rb) == rb)
375637bc 6171 ro->err = __perf_event_stop(&sd);
95ff4ca2
AS
6172}
6173
6174static int __perf_pmu_output_stop(void *info)
6175{
6176 struct perf_event *event = info;
6177 struct pmu *pmu = event->pmu;
6178 struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
6179 struct remote_output ro = {
6180 .rb = event->rb,
6181 };
6182
6183 rcu_read_lock();
aab5b71e 6184 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
95ff4ca2 6185 if (cpuctx->task_ctx)
aab5b71e 6186 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
b73e4fef 6187 &ro, false);
95ff4ca2
AS
6188 rcu_read_unlock();
6189
6190 return ro.err;
6191}
6192
6193static void perf_pmu_output_stop(struct perf_event *event)
6194{
6195 struct perf_event *iter;
6196 int err, cpu;
6197
6198restart:
6199 rcu_read_lock();
6200 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6201 /*
6202 * For per-CPU events, we need to make sure that neither they
6203 * nor their children are running; for cpu==-1 events it's
6204 * sufficient to stop the event itself if it's active, since
6205 * it can't have children.
6206 */
6207 cpu = iter->cpu;
6208 if (cpu == -1)
6209 cpu = READ_ONCE(iter->oncpu);
6210
6211 if (cpu == -1)
6212 continue;
6213
6214 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6215 if (err == -EAGAIN) {
6216 rcu_read_unlock();
6217 goto restart;
6218 }
6219 }
6220 rcu_read_unlock();
52d857a8
JO
6221}
6222
60313ebe 6223/*
9f498cc5
PZ
6224 * task tracking -- fork/exit
6225 *
13d7a241 6226 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
6227 */
6228
9f498cc5 6229struct perf_task_event {
3a80b4a3 6230 struct task_struct *task;
cdd6c482 6231 struct perf_event_context *task_ctx;
60313ebe
PZ
6232
6233 struct {
6234 struct perf_event_header header;
6235
6236 u32 pid;
6237 u32 ppid;
9f498cc5
PZ
6238 u32 tid;
6239 u32 ptid;
393b2ad8 6240 u64 time;
cdd6c482 6241 } event_id;
60313ebe
PZ
6242};
6243
67516844
JO
6244static int perf_event_task_match(struct perf_event *event)
6245{
13d7a241
SE
6246 return event->attr.comm || event->attr.mmap ||
6247 event->attr.mmap2 || event->attr.mmap_data ||
6248 event->attr.task;
67516844
JO
6249}
6250
cdd6c482 6251static void perf_event_task_output(struct perf_event *event,
52d857a8 6252 void *data)
60313ebe 6253{
52d857a8 6254 struct perf_task_event *task_event = data;
60313ebe 6255 struct perf_output_handle handle;
c980d109 6256 struct perf_sample_data sample;
9f498cc5 6257 struct task_struct *task = task_event->task;
c980d109 6258 int ret, size = task_event->event_id.header.size;
8bb39f9a 6259
67516844
JO
6260 if (!perf_event_task_match(event))
6261 return;
6262
c980d109 6263 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 6264
c980d109 6265 ret = perf_output_begin(&handle, event,
a7ac67ea 6266 task_event->event_id.header.size);
ef60777c 6267 if (ret)
c980d109 6268 goto out;
60313ebe 6269
cdd6c482
IM
6270 task_event->event_id.pid = perf_event_pid(event, task);
6271 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 6272
cdd6c482
IM
6273 task_event->event_id.tid = perf_event_tid(event, task);
6274 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 6275
34f43927
PZ
6276 task_event->event_id.time = perf_event_clock(event);
6277
cdd6c482 6278 perf_output_put(&handle, task_event->event_id);
393b2ad8 6279
c980d109
ACM
6280 perf_event__output_id_sample(event, &handle, &sample);
6281
60313ebe 6282 perf_output_end(&handle);
c980d109
ACM
6283out:
6284 task_event->event_id.header.size = size;
60313ebe
PZ
6285}
6286
cdd6c482
IM
6287static void perf_event_task(struct task_struct *task,
6288 struct perf_event_context *task_ctx,
3a80b4a3 6289 int new)
60313ebe 6290{
9f498cc5 6291 struct perf_task_event task_event;
60313ebe 6292
cdd6c482
IM
6293 if (!atomic_read(&nr_comm_events) &&
6294 !atomic_read(&nr_mmap_events) &&
6295 !atomic_read(&nr_task_events))
60313ebe
PZ
6296 return;
6297
9f498cc5 6298 task_event = (struct perf_task_event){
3a80b4a3
PZ
6299 .task = task,
6300 .task_ctx = task_ctx,
cdd6c482 6301 .event_id = {
60313ebe 6302 .header = {
cdd6c482 6303 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 6304 .misc = 0,
cdd6c482 6305 .size = sizeof(task_event.event_id),
60313ebe 6306 },
573402db
PZ
6307 /* .pid */
6308 /* .ppid */
9f498cc5
PZ
6309 /* .tid */
6310 /* .ptid */
34f43927 6311 /* .time */
60313ebe
PZ
6312 },
6313 };
6314
aab5b71e 6315 perf_iterate_sb(perf_event_task_output,
52d857a8
JO
6316 &task_event,
6317 task_ctx);
9f498cc5
PZ
6318}
6319
cdd6c482 6320void perf_event_fork(struct task_struct *task)
9f498cc5 6321{
cdd6c482 6322 perf_event_task(task, NULL, 1);
60313ebe
PZ
6323}
6324
8d1b2d93
PZ
6325/*
6326 * comm tracking
6327 */
6328
6329struct perf_comm_event {
22a4f650
IM
6330 struct task_struct *task;
6331 char *comm;
8d1b2d93
PZ
6332 int comm_size;
6333
6334 struct {
6335 struct perf_event_header header;
6336
6337 u32 pid;
6338 u32 tid;
cdd6c482 6339 } event_id;
8d1b2d93
PZ
6340};
6341
67516844
JO
6342static int perf_event_comm_match(struct perf_event *event)
6343{
6344 return event->attr.comm;
6345}
6346
cdd6c482 6347static void perf_event_comm_output(struct perf_event *event,
52d857a8 6348 void *data)
8d1b2d93 6349{
52d857a8 6350 struct perf_comm_event *comm_event = data;
8d1b2d93 6351 struct perf_output_handle handle;
c980d109 6352 struct perf_sample_data sample;
cdd6c482 6353 int size = comm_event->event_id.header.size;
c980d109
ACM
6354 int ret;
6355
67516844
JO
6356 if (!perf_event_comm_match(event))
6357 return;
6358
c980d109
ACM
6359 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6360 ret = perf_output_begin(&handle, event,
a7ac67ea 6361 comm_event->event_id.header.size);
8d1b2d93
PZ
6362
6363 if (ret)
c980d109 6364 goto out;
8d1b2d93 6365
cdd6c482
IM
6366 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6367 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 6368
cdd6c482 6369 perf_output_put(&handle, comm_event->event_id);
76369139 6370 __output_copy(&handle, comm_event->comm,
8d1b2d93 6371 comm_event->comm_size);
c980d109
ACM
6372
6373 perf_event__output_id_sample(event, &handle, &sample);
6374
8d1b2d93 6375 perf_output_end(&handle);
c980d109
ACM
6376out:
6377 comm_event->event_id.header.size = size;
8d1b2d93
PZ
6378}
6379
cdd6c482 6380static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 6381{
413ee3b4 6382 char comm[TASK_COMM_LEN];
8d1b2d93 6383 unsigned int size;
8d1b2d93 6384
413ee3b4 6385 memset(comm, 0, sizeof(comm));
96b02d78 6386 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 6387 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
6388
6389 comm_event->comm = comm;
6390 comm_event->comm_size = size;
6391
cdd6c482 6392 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 6393
aab5b71e 6394 perf_iterate_sb(perf_event_comm_output,
52d857a8
JO
6395 comm_event,
6396 NULL);
8d1b2d93
PZ
6397}
6398
82b89778 6399void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 6400{
9ee318a7
PZ
6401 struct perf_comm_event comm_event;
6402
cdd6c482 6403 if (!atomic_read(&nr_comm_events))
9ee318a7 6404 return;
a63eaf34 6405
9ee318a7 6406 comm_event = (struct perf_comm_event){
8d1b2d93 6407 .task = task,
573402db
PZ
6408 /* .comm */
6409 /* .comm_size */
cdd6c482 6410 .event_id = {
573402db 6411 .header = {
cdd6c482 6412 .type = PERF_RECORD_COMM,
82b89778 6413 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
6414 /* .size */
6415 },
6416 /* .pid */
6417 /* .tid */
8d1b2d93
PZ
6418 },
6419 };
6420
cdd6c482 6421 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
6422}
6423
0a4a9391
PZ
6424/*
6425 * mmap tracking
6426 */
6427
6428struct perf_mmap_event {
089dd79d
PZ
6429 struct vm_area_struct *vma;
6430
6431 const char *file_name;
6432 int file_size;
13d7a241
SE
6433 int maj, min;
6434 u64 ino;
6435 u64 ino_generation;
f972eb63 6436 u32 prot, flags;
0a4a9391
PZ
6437
6438 struct {
6439 struct perf_event_header header;
6440
6441 u32 pid;
6442 u32 tid;
6443 u64 start;
6444 u64 len;
6445 u64 pgoff;
cdd6c482 6446 } event_id;
0a4a9391
PZ
6447};
6448
67516844
JO
6449static int perf_event_mmap_match(struct perf_event *event,
6450 void *data)
6451{
6452 struct perf_mmap_event *mmap_event = data;
6453 struct vm_area_struct *vma = mmap_event->vma;
6454 int executable = vma->vm_flags & VM_EXEC;
6455
6456 return (!executable && event->attr.mmap_data) ||
13d7a241 6457 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
6458}
6459
cdd6c482 6460static void perf_event_mmap_output(struct perf_event *event,
52d857a8 6461 void *data)
0a4a9391 6462{
52d857a8 6463 struct perf_mmap_event *mmap_event = data;
0a4a9391 6464 struct perf_output_handle handle;
c980d109 6465 struct perf_sample_data sample;
cdd6c482 6466 int size = mmap_event->event_id.header.size;
c980d109 6467 int ret;
0a4a9391 6468
67516844
JO
6469 if (!perf_event_mmap_match(event, data))
6470 return;
6471
13d7a241
SE
6472 if (event->attr.mmap2) {
6473 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6474 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6475 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6476 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 6477 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
6478 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6479 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
6480 }
6481
c980d109
ACM
6482 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6483 ret = perf_output_begin(&handle, event,
a7ac67ea 6484 mmap_event->event_id.header.size);
0a4a9391 6485 if (ret)
c980d109 6486 goto out;
0a4a9391 6487
cdd6c482
IM
6488 mmap_event->event_id.pid = perf_event_pid(event, current);
6489 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 6490
cdd6c482 6491 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
6492
6493 if (event->attr.mmap2) {
6494 perf_output_put(&handle, mmap_event->maj);
6495 perf_output_put(&handle, mmap_event->min);
6496 perf_output_put(&handle, mmap_event->ino);
6497 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
6498 perf_output_put(&handle, mmap_event->prot);
6499 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
6500 }
6501
76369139 6502 __output_copy(&handle, mmap_event->file_name,
0a4a9391 6503 mmap_event->file_size);
c980d109
ACM
6504
6505 perf_event__output_id_sample(event, &handle, &sample);
6506
78d613eb 6507 perf_output_end(&handle);
c980d109
ACM
6508out:
6509 mmap_event->event_id.header.size = size;
0a4a9391
PZ
6510}
6511
cdd6c482 6512static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 6513{
089dd79d
PZ
6514 struct vm_area_struct *vma = mmap_event->vma;
6515 struct file *file = vma->vm_file;
13d7a241
SE
6516 int maj = 0, min = 0;
6517 u64 ino = 0, gen = 0;
f972eb63 6518 u32 prot = 0, flags = 0;
0a4a9391
PZ
6519 unsigned int size;
6520 char tmp[16];
6521 char *buf = NULL;
2c42cfbf 6522 char *name;
413ee3b4 6523
0a4a9391 6524 if (file) {
13d7a241
SE
6525 struct inode *inode;
6526 dev_t dev;
3ea2f2b9 6527
2c42cfbf 6528 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 6529 if (!buf) {
c7e548b4
ON
6530 name = "//enomem";
6531 goto cpy_name;
0a4a9391 6532 }
413ee3b4 6533 /*
3ea2f2b9 6534 * d_path() works from the end of the buffer backwards, so we
413ee3b4
AB
6535 * need to add enough zero bytes after the string to handle
6536 * the 64bit alignment we do later.
6537 */
9bf39ab2 6538 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 6539 if (IS_ERR(name)) {
c7e548b4
ON
6540 name = "//toolong";
6541 goto cpy_name;
0a4a9391 6542 }
13d7a241
SE
6543 inode = file_inode(vma->vm_file);
6544 dev = inode->i_sb->s_dev;
6545 ino = inode->i_ino;
6546 gen = inode->i_generation;
6547 maj = MAJOR(dev);
6548 min = MINOR(dev);
f972eb63
PZ
6549
6550 if (vma->vm_flags & VM_READ)
6551 prot |= PROT_READ;
6552 if (vma->vm_flags & VM_WRITE)
6553 prot |= PROT_WRITE;
6554 if (vma->vm_flags & VM_EXEC)
6555 prot |= PROT_EXEC;
6556
6557 if (vma->vm_flags & VM_MAYSHARE)
6558 flags = MAP_SHARED;
6559 else
6560 flags = MAP_PRIVATE;
6561
6562 if (vma->vm_flags & VM_DENYWRITE)
6563 flags |= MAP_DENYWRITE;
6564 if (vma->vm_flags & VM_MAYEXEC)
6565 flags |= MAP_EXECUTABLE;
6566 if (vma->vm_flags & VM_LOCKED)
6567 flags |= MAP_LOCKED;
6568 if (vma->vm_flags & VM_HUGETLB)
6569 flags |= MAP_HUGETLB;
6570
c7e548b4 6571 goto got_name;
0a4a9391 6572 } else {
fbe26abe
JO
6573 if (vma->vm_ops && vma->vm_ops->name) {
6574 name = (char *) vma->vm_ops->name(vma);
6575 if (name)
6576 goto cpy_name;
6577 }
6578
2c42cfbf 6579 name = (char *)arch_vma_name(vma);
c7e548b4
ON
6580 if (name)
6581 goto cpy_name;
089dd79d 6582
32c5fb7e 6583 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 6584 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
6585 name = "[heap]";
6586 goto cpy_name;
32c5fb7e
ON
6587 }
6588 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 6589 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
6590 name = "[stack]";
6591 goto cpy_name;
089dd79d
PZ
6592 }
6593
c7e548b4
ON
6594 name = "//anon";
6595 goto cpy_name;
0a4a9391
PZ
6596 }
6597
c7e548b4
ON
6598cpy_name:
6599 strlcpy(tmp, name, sizeof(tmp));
6600 name = tmp;
0a4a9391 6601got_name:
2c42cfbf
PZ
6602 /*
6603 * Since our buffer works in 8 byte units we need to align our string
6604 * size to a multiple of 8. However, we must guarantee the tail end is
6605 * zero'd out to avoid leaking random bits to userspace.
6606 */
6607 size = strlen(name)+1;
6608 while (!IS_ALIGNED(size, sizeof(u64)))
6609 name[size++] = '\0';
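	/*
	 * Example: "libfoo.so" has strlen 9, so size starts at 10 and is
	 * padded with NUL bytes up to 16, the next multiple of 8.
	 */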
0a4a9391
PZ
6610
6611 mmap_event->file_name = name;
6612 mmap_event->file_size = size;
13d7a241
SE
6613 mmap_event->maj = maj;
6614 mmap_event->min = min;
6615 mmap_event->ino = ino;
6616 mmap_event->ino_generation = gen;
f972eb63
PZ
6617 mmap_event->prot = prot;
6618 mmap_event->flags = flags;
0a4a9391 6619
2fe85427
SE
6620 if (!(vma->vm_flags & VM_EXEC))
6621 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6622
cdd6c482 6623 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 6624
aab5b71e 6625 perf_iterate_sb(perf_event_mmap_output,
52d857a8
JO
6626 mmap_event,
6627 NULL);
665c2142 6628
0a4a9391
PZ
6629 kfree(buf);
6630}
6631
375637bc
AS
6632/*
6633 * Check whether inode and address range match filter criteria.
6634 */
6635static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6636 struct file *file, unsigned long offset,
6637 unsigned long size)
6638{
6639 if (filter->inode != file->f_inode)
6640 return false;
6641
6642 if (filter->offset > offset + size)
6643 return false;
6644
6645 if (filter->offset + filter->size < offset)
6646 return false;
6647
6648 return true;
6649}
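/*
 * Worked example: a filter covering file offsets [0x1000, 0x1100) matches
 * any mapping of the same inode whose file range overlaps it -- a VMA
 * mapping offsets [0x0, 0x2000) matches, while one mapping
 * [0x2000, 0x3000) does not.
 */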
6650
6651static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6652{
6653 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6654 struct vm_area_struct *vma = data;
6655 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6656 struct file *file = vma->vm_file;
6657 struct perf_addr_filter *filter;
6658 unsigned int restart = 0, count = 0;
6659
6660 if (!has_addr_filter(event))
6661 return;
6662
6663 if (!file)
6664 return;
6665
6666 raw_spin_lock_irqsave(&ifh->lock, flags);
6667 list_for_each_entry(filter, &ifh->list, entry) {
6668 if (perf_addr_filter_match(filter, file, off,
6669 vma->vm_end - vma->vm_start)) {
6670 event->addr_filters_offs[count] = vma->vm_start;
6671 restart++;
6672 }
6673
6674 count++;
6675 }
6676
6677 if (restart)
6678 event->addr_filters_gen++;
6679 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6680
6681 if (restart)
6682 perf_event_restart(event);
6683}
6684
6685/*
6686 * Adjust all task's events' filters to the new vma
6687 */
6688static void perf_addr_filters_adjust(struct vm_area_struct *vma)
6689{
6690 struct perf_event_context *ctx;
6691 int ctxn;
6692
12b40a23
MP
6693 /*
6694 * Data tracing isn't supported yet and as such there is no need
6695 * to keep track of anything that isn't related to executable code:
6696 */
6697 if (!(vma->vm_flags & VM_EXEC))
6698 return;
6699
375637bc
AS
6700 rcu_read_lock();
6701 for_each_task_context_nr(ctxn) {
6702 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6703 if (!ctx)
6704 continue;
6705
aab5b71e 6706 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
375637bc
AS
6707 }
6708 rcu_read_unlock();
6709}
6710
3af9e859 6711void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 6712{
9ee318a7
PZ
6713 struct perf_mmap_event mmap_event;
6714
cdd6c482 6715 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
6716 return;
6717
6718 mmap_event = (struct perf_mmap_event){
089dd79d 6719 .vma = vma,
573402db
PZ
6720 /* .file_name */
6721 /* .file_size */
cdd6c482 6722 .event_id = {
573402db 6723 .header = {
cdd6c482 6724 .type = PERF_RECORD_MMAP,
39447b38 6725 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
6726 /* .size */
6727 },
6728 /* .pid */
6729 /* .tid */
089dd79d
PZ
6730 .start = vma->vm_start,
6731 .len = vma->vm_end - vma->vm_start,
3a0304e9 6732 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 6733 },
13d7a241
SE
6734 /* .maj (attr_mmap2 only) */
6735 /* .min (attr_mmap2 only) */
6736 /* .ino (attr_mmap2 only) */
6737 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
6738 /* .prot (attr_mmap2 only) */
6739 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
6740 };
6741
375637bc 6742 perf_addr_filters_adjust(vma);
cdd6c482 6743 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
6744}
6745
68db7e98
AS
6746void perf_event_aux_event(struct perf_event *event, unsigned long head,
6747 unsigned long size, u64 flags)
6748{
6749 struct perf_output_handle handle;
6750 struct perf_sample_data sample;
6751 struct perf_aux_event {
6752 struct perf_event_header header;
6753 u64 offset;
6754 u64 size;
6755 u64 flags;
6756 } rec = {
6757 .header = {
6758 .type = PERF_RECORD_AUX,
6759 .misc = 0,
6760 .size = sizeof(rec),
6761 },
6762 .offset = head,
6763 .size = size,
6764 .flags = flags,
6765 };
6766 int ret;
6767
6768 perf_event_header__init_id(&rec.header, &sample, event);
6769 ret = perf_output_begin(&handle, event, rec.header.size);
6770
6771 if (ret)
6772 return;
6773
6774 perf_output_put(&handle, rec);
6775 perf_event__output_id_sample(event, &handle, &sample);
6776
6777 perf_output_end(&handle);
6778}
6779
f38b0dbb
KL
6780/*
6781 * Lost/dropped samples logging
6782 */
6783void perf_log_lost_samples(struct perf_event *event, u64 lost)
6784{
6785 struct perf_output_handle handle;
6786 struct perf_sample_data sample;
6787 int ret;
6788
6789 struct {
6790 struct perf_event_header header;
6791 u64 lost;
6792 } lost_samples_event = {
6793 .header = {
6794 .type = PERF_RECORD_LOST_SAMPLES,
6795 .misc = 0,
6796 .size = sizeof(lost_samples_event),
6797 },
6798 .lost = lost,
6799 };
6800
6801 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6802
6803 ret = perf_output_begin(&handle, event,
6804 lost_samples_event.header.size);
6805 if (ret)
6806 return;
6807
6808 perf_output_put(&handle, lost_samples_event);
6809 perf_event__output_id_sample(event, &handle, &sample);
6810 perf_output_end(&handle);
6811}
6812
45ac1403
AH
6813/*
6814 * context_switch tracking
6815 */
6816
6817struct perf_switch_event {
6818 struct task_struct *task;
6819 struct task_struct *next_prev;
6820
6821 struct {
6822 struct perf_event_header header;
6823 u32 next_prev_pid;
6824 u32 next_prev_tid;
6825 } event_id;
6826};
6827
6828static int perf_event_switch_match(struct perf_event *event)
6829{
6830 return event->attr.context_switch;
6831}
6832
6833static void perf_event_switch_output(struct perf_event *event, void *data)
6834{
6835 struct perf_switch_event *se = data;
6836 struct perf_output_handle handle;
6837 struct perf_sample_data sample;
6838 int ret;
6839
6840 if (!perf_event_switch_match(event))
6841 return;
6842
6843 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6844 if (event->ctx->task) {
6845 se->event_id.header.type = PERF_RECORD_SWITCH;
6846 se->event_id.header.size = sizeof(se->event_id.header);
6847 } else {
6848 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6849 se->event_id.header.size = sizeof(se->event_id);
6850 se->event_id.next_prev_pid =
6851 perf_event_pid(event, se->next_prev);
6852 se->event_id.next_prev_tid =
6853 perf_event_tid(event, se->next_prev);
6854 }
6855
6856 perf_event_header__init_id(&se->event_id.header, &sample, event);
6857
6858 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6859 if (ret)
6860 return;
6861
6862 if (event->ctx->task)
6863 perf_output_put(&handle, se->event_id.header);
6864 else
6865 perf_output_put(&handle, se->event_id);
6866
6867 perf_event__output_id_sample(event, &handle, &sample);
6868
6869 perf_output_end(&handle);
6870}
6871
6872static void perf_event_switch(struct task_struct *task,
6873 struct task_struct *next_prev, bool sched_in)
6874{
6875 struct perf_switch_event switch_event;
6876
6877 /* N.B. caller checks nr_switch_events != 0 */
6878
6879 switch_event = (struct perf_switch_event){
6880 .task = task,
6881 .next_prev = next_prev,
6882 .event_id = {
6883 .header = {
6884 /* .type */
6885 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6886 /* .size */
6887 },
6888 /* .next_prev_pid */
6889 /* .next_prev_tid */
6890 },
6891 };
6892
aab5b71e 6893 perf_iterate_sb(perf_event_switch_output,
45ac1403
AH
6894 &switch_event,
6895 NULL);
6896}
6897
a78ac325
PZ
6898/*
6899 * IRQ throttle logging
6900 */
6901
cdd6c482 6902static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
6903{
6904 struct perf_output_handle handle;
c980d109 6905 struct perf_sample_data sample;
a78ac325
PZ
6906 int ret;
6907
6908 struct {
6909 struct perf_event_header header;
6910 u64 time;
cca3f454 6911 u64 id;
7f453c24 6912 u64 stream_id;
a78ac325
PZ
6913 } throttle_event = {
6914 .header = {
cdd6c482 6915 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
6916 .misc = 0,
6917 .size = sizeof(throttle_event),
6918 },
34f43927 6919 .time = perf_event_clock(event),
cdd6c482
IM
6920 .id = primary_event_id(event),
6921 .stream_id = event->id,
a78ac325
PZ
6922 };
6923
966ee4d6 6924 if (enable)
cdd6c482 6925 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 6926
c980d109
ACM
6927 perf_event_header__init_id(&throttle_event.header, &sample, event);
6928
6929 ret = perf_output_begin(&handle, event,
a7ac67ea 6930 throttle_event.header.size);
a78ac325
PZ
6931 if (ret)
6932 return;
6933
6934 perf_output_put(&handle, throttle_event);
c980d109 6935 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
6936 perf_output_end(&handle);
6937}
6938
ec0d7729
AS
6939static void perf_log_itrace_start(struct perf_event *event)
6940{
6941 struct perf_output_handle handle;
6942 struct perf_sample_data sample;
6943 struct perf_aux_event {
6944 struct perf_event_header header;
6945 u32 pid;
6946 u32 tid;
6947 } rec;
6948 int ret;
6949
6950 if (event->parent)
6951 event = event->parent;
6952
6953 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6954 event->hw.itrace_started)
6955 return;
6956
ec0d7729
AS
6957 rec.header.type = PERF_RECORD_ITRACE_START;
6958 rec.header.misc = 0;
6959 rec.header.size = sizeof(rec);
6960 rec.pid = perf_event_pid(event, current);
6961 rec.tid = perf_event_tid(event, current);
6962
6963 perf_event_header__init_id(&rec.header, &sample, event);
6964 ret = perf_output_begin(&handle, event, rec.header.size);
6965
6966 if (ret)
6967 return;
6968
6969 perf_output_put(&handle, rec);
6970 perf_event__output_id_sample(event, &handle, &sample);
6971
6972 perf_output_end(&handle);
6973}
6974
f6c7d5fe 6975/*
cdd6c482 6976 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
6977 */
6978
a8b0ca17 6979static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
6980 int throttle, struct perf_sample_data *data,
6981 struct pt_regs *regs)
f6c7d5fe 6982{
cdd6c482
IM
6983 int events = atomic_read(&event->event_limit);
6984 struct hw_perf_event *hwc = &event->hw;
e050e3f0 6985 u64 seq;
79f14641
PZ
6986 int ret = 0;
6987
96398826
PZ
6988 /*
6989 * Non-sampling counters might still use the PMI to fold short
6990 * hardware counters, ignore those.
6991 */
6992 if (unlikely(!is_sampling_event(event)))
6993 return 0;
6994
e050e3f0
SE
6995 seq = __this_cpu_read(perf_throttled_seq);
6996 if (seq != hwc->interrupts_seq) {
6997 hwc->interrupts_seq = seq;
6998 hwc->interrupts = 1;
6999 } else {
7000 hwc->interrupts++;
7001 if (unlikely(throttle
7002 && hwc->interrupts >= max_samples_per_tick)) {
7003 __this_cpu_inc(perf_throttled_count);
555e0c1e 7004 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
163ec435
PZ
7005 hwc->interrupts = MAX_INTERRUPTS;
7006 perf_log_throttle(event, 0);
a78ac325
PZ
7007 ret = 1;
7008 }
e050e3f0 7009 }
60db5e09 7010
cdd6c482 7011 if (event->attr.freq) {
def0a9b2 7012 u64 now = perf_clock();
abd50713 7013 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 7014
abd50713 7015 hwc->freq_time_stamp = now;
bd2b5b12 7016
abd50713 7017 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 7018 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
7019 }
7020
2023b359
PZ
7021 /*
7022 * XXX event_limit might not quite work as expected on inherited
cdd6c482 7023 * events
2023b359
PZ
7024 */
7025
cdd6c482
IM
7026 event->pending_kill = POLL_IN;
7027 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 7028 ret = 1;
cdd6c482 7029 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
7030 event->pending_disable = 1;
7031 irq_work_queue(&event->pending);
79f14641
PZ
7032 }
7033
1879445d 7034 event->overflow_handler(event, data, regs);
453f19ee 7035
fed66e2c 7036 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
7037 event->pending_wakeup = 1;
7038 irq_work_queue(&event->pending);
f506b3dc
PZ
7039 }
7040
79f14641 7041 return ret;
f6c7d5fe
PZ
7042}
7043
a8b0ca17 7044int perf_event_overflow(struct perf_event *event,
5622f295
MM
7045 struct perf_sample_data *data,
7046 struct pt_regs *regs)
850bc73f 7047{
a8b0ca17 7048 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
7049}
7050
15dbf27c 7051/*
cdd6c482 7052 * Generic software event infrastructure
15dbf27c
PZ
7053 */
7054
b28ab83c
PZ
7055struct swevent_htable {
7056 struct swevent_hlist *swevent_hlist;
7057 struct mutex hlist_mutex;
7058 int hlist_refcount;
7059
7060 /* Recursion avoidance in each contexts */
7061 int recursion[PERF_NR_CONTEXTS];
7062};
7063
7064static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7065
7b4b6658 7066/*
cdd6c482
IM
7067 * We directly increment event->count and keep a second value in
7068 * event->hw.period_left to count intervals. This period counter
7b4b6658
PZ
7069 * is kept in the range [-sample_period, 0] so that we can use the
7070 * sign as trigger.
7071 */
7072
ab573844 7073u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 7074{
cdd6c482 7075 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
7076 u64 period = hwc->last_period;
7077 u64 nr, offset;
7078 s64 old, val;
7079
7080 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
7081
7082again:
e7850595 7083 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
7084 if (val < 0)
7085 return 0;
15dbf27c 7086
7b4b6658
PZ
7087 nr = div64_u64(period + val, period);
7088 offset = nr * period;
7089 val -= offset;
e7850595 7090 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 7091 goto again;
15dbf27c 7092
7b4b6658 7093 return nr;
15dbf27c
PZ
7094}
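/*
 * Worked example: with sample_period = 100 and period_left having climbed
 * to +250 (e.g. after a large count was added at once), nr = (100 + 250) /
 * 100 = 3 overflows are reported and period_left is rewound to
 * 250 - 300 = -50, i.e. 50 events short of the next trigger.
 */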
7095
0cff784a 7096static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 7097 struct perf_sample_data *data,
5622f295 7098 struct pt_regs *regs)
15dbf27c 7099{
cdd6c482 7100 struct hw_perf_event *hwc = &event->hw;
850bc73f 7101 int throttle = 0;
15dbf27c 7102
0cff784a
PZ
7103 if (!overflow)
7104 overflow = perf_swevent_set_period(event);
15dbf27c 7105
7b4b6658
PZ
7106 if (hwc->interrupts == MAX_INTERRUPTS)
7107 return;
15dbf27c 7108
7b4b6658 7109 for (; overflow; overflow--) {
a8b0ca17 7110 if (__perf_event_overflow(event, throttle,
5622f295 7111 data, regs)) {
7b4b6658
PZ
7112 /*
7113 * We inhibit the overflow from happening when
7114 * hwc->interrupts == MAX_INTERRUPTS.
7115 */
7116 break;
7117 }
cf450a73 7118 throttle = 1;
7b4b6658 7119 }
15dbf27c
PZ
7120}
7121
a4eaf7f1 7122static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 7123 struct perf_sample_data *data,
5622f295 7124 struct pt_regs *regs)
7b4b6658 7125{
cdd6c482 7126 struct hw_perf_event *hwc = &event->hw;
d6d020e9 7127
e7850595 7128 local64_add(nr, &event->count);
d6d020e9 7129
0cff784a
PZ
7130 if (!regs)
7131 return;
7132
6c7e550f 7133 if (!is_sampling_event(event))
7b4b6658 7134 return;
d6d020e9 7135
5d81e5cf
AV
7136 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7137 data->period = nr;
7138 return perf_swevent_overflow(event, 1, data, regs);
7139 } else
7140 data->period = event->hw.last_period;
7141
0cff784a 7142 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 7143 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 7144
e7850595 7145 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 7146 return;
df1a132b 7147
a8b0ca17 7148 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
7149}
7150
f5ffe02e
FW
7151static int perf_exclude_event(struct perf_event *event,
7152 struct pt_regs *regs)
7153{
a4eaf7f1 7154 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 7155 return 1;
a4eaf7f1 7156
f5ffe02e
FW
7157 if (regs) {
7158 if (event->attr.exclude_user && user_mode(regs))
7159 return 1;
7160
7161 if (event->attr.exclude_kernel && !user_mode(regs))
7162 return 1;
7163 }
7164
7165 return 0;
7166}
7167
cdd6c482 7168static int perf_swevent_match(struct perf_event *event,
1c432d89 7169 enum perf_type_id type,
6fb2915d
LZ
7170 u32 event_id,
7171 struct perf_sample_data *data,
7172 struct pt_regs *regs)
15dbf27c 7173{
cdd6c482 7174 if (event->attr.type != type)
a21ca2ca 7175 return 0;
f5ffe02e 7176
cdd6c482 7177 if (event->attr.config != event_id)
15dbf27c
PZ
7178 return 0;
7179
f5ffe02e
FW
7180 if (perf_exclude_event(event, regs))
7181 return 0;
15dbf27c
PZ
7182
7183 return 1;
7184}
7185
76e1d904
FW
7186static inline u64 swevent_hash(u64 type, u32 event_id)
7187{
7188 u64 val = event_id | (type << 32);
7189
7190 return hash_64(val, SWEVENT_HLIST_BITS);
7191}
7192
49f135ed
FW
7193static inline struct hlist_head *
7194__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 7195{
49f135ed
FW
7196 u64 hash = swevent_hash(type, event_id);
7197
7198 return &hlist->heads[hash];
7199}
76e1d904 7200
49f135ed
FW
7201/* For the read side: events when they trigger */
7202static inline struct hlist_head *
b28ab83c 7203find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
7204{
7205 struct swevent_hlist *hlist;
76e1d904 7206
b28ab83c 7207 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
7208 if (!hlist)
7209 return NULL;
7210
49f135ed
FW
7211 return __find_swevent_head(hlist, type, event_id);
7212}
7213
7214/* For the event head insertion and removal in the hlist */
7215static inline struct hlist_head *
b28ab83c 7216find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
7217{
7218 struct swevent_hlist *hlist;
7219 u32 event_id = event->attr.config;
7220 u64 type = event->attr.type;
7221
7222 /*
7223 * Event scheduling is always serialized against hlist allocation
 7224 * and release, which makes the protected version suitable here.
7225 * The context lock guarantees that.
7226 */
b28ab83c 7227 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
7228 lockdep_is_held(&event->ctx->lock));
7229 if (!hlist)
7230 return NULL;
7231
7232 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
7233}
7234
7235static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 7236 u64 nr,
76e1d904
FW
7237 struct perf_sample_data *data,
7238 struct pt_regs *regs)
15dbf27c 7239{
4a32fea9 7240 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 7241 struct perf_event *event;
76e1d904 7242 struct hlist_head *head;
15dbf27c 7243
76e1d904 7244 rcu_read_lock();
b28ab83c 7245 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
7246 if (!head)
7247 goto end;
7248
b67bfe0d 7249 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 7250 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 7251 perf_swevent_event(event, nr, data, regs);
15dbf27c 7252 }
76e1d904
FW
7253end:
7254 rcu_read_unlock();
15dbf27c
PZ
7255}
7256
86038c5e
PZI
7257DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7258
4ed7c92d 7259int perf_swevent_get_recursion_context(void)
96f6d444 7260{
4a32fea9 7261 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 7262
b28ab83c 7263 return get_recursion_context(swhash->recursion);
96f6d444 7264}
645e8cc0 7265EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 7266
98b5c2c6 7267void perf_swevent_put_recursion_context(int rctx)
15dbf27c 7268{
4a32fea9 7269 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 7270
b28ab83c 7271 put_recursion_context(swhash->recursion, rctx);
ce71b9df 7272}
15dbf27c 7273
86038c5e 7274void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 7275{
a4234bfc 7276 struct perf_sample_data data;
4ed7c92d 7277
86038c5e 7278 if (WARN_ON_ONCE(!regs))
4ed7c92d 7279 return;
a4234bfc 7280
fd0d000b 7281 perf_sample_data_init(&data, addr, 0);
a8b0ca17 7282 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
7283}
7284
7285void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7286{
7287 int rctx;
7288
7289 preempt_disable_notrace();
7290 rctx = perf_swevent_get_recursion_context();
7291 if (unlikely(rctx < 0))
7292 goto fail;
7293
7294 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
7295
7296 perf_swevent_put_recursion_context(rctx);
86038c5e 7297fail:
1c024eca 7298 preempt_enable_notrace();
b8e83514
PZ
7299}
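/*
 * Callers typically go through the perf_sw_event() wrapper (see
 * <linux/perf_event.h>), which checks the perf_swevent_enabled static key
 * declared further below before calling in here.  An illustrative use
 * from a page fault handler looks roughly like:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */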
7300
cdd6c482 7301static void perf_swevent_read(struct perf_event *event)
15dbf27c 7302{
15dbf27c
PZ
7303}
7304
a4eaf7f1 7305static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 7306{
4a32fea9 7307 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 7308 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
7309 struct hlist_head *head;
7310
6c7e550f 7311 if (is_sampling_event(event)) {
7b4b6658 7312 hwc->last_period = hwc->sample_period;
cdd6c482 7313 perf_swevent_set_period(event);
7b4b6658 7314 }
76e1d904 7315
a4eaf7f1
PZ
7316 hwc->state = !(flags & PERF_EF_START);
7317
b28ab83c 7318 head = find_swevent_head(swhash, event);
12ca6ad2 7319 if (WARN_ON_ONCE(!head))
76e1d904
FW
7320 return -EINVAL;
7321
7322 hlist_add_head_rcu(&event->hlist_entry, head);
6a694a60 7323 perf_event_update_userpage(event);
76e1d904 7324
15dbf27c
PZ
7325 return 0;
7326}
7327
a4eaf7f1 7328static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 7329{
76e1d904 7330 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
7331}
7332
a4eaf7f1 7333static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 7334{
a4eaf7f1 7335 event->hw.state = 0;
d6d020e9 7336}
aa9c4c0f 7337
a4eaf7f1 7338static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 7339{
a4eaf7f1 7340 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
7341}
7342
49f135ed
FW
7343/* Deref the hlist from the update side */
7344static inline struct swevent_hlist *
b28ab83c 7345swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 7346{
b28ab83c
PZ
7347 return rcu_dereference_protected(swhash->swevent_hlist,
7348 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
7349}
7350
b28ab83c 7351static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 7352{
b28ab83c 7353 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 7354
49f135ed 7355 if (!hlist)
76e1d904
FW
7356 return;
7357
70691d4a 7358 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
fa4bbc4c 7359 kfree_rcu(hlist, rcu_head);
76e1d904
FW
7360}
7361
3b364d7b 7362static void swevent_hlist_put_cpu(int cpu)
76e1d904 7363{
b28ab83c 7364 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 7365
b28ab83c 7366 mutex_lock(&swhash->hlist_mutex);
76e1d904 7367
b28ab83c
PZ
7368 if (!--swhash->hlist_refcount)
7369 swevent_hlist_release(swhash);
76e1d904 7370
b28ab83c 7371 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
7372}
7373
3b364d7b 7374static void swevent_hlist_put(void)
76e1d904
FW
7375{
7376 int cpu;
7377
76e1d904 7378 for_each_possible_cpu(cpu)
3b364d7b 7379 swevent_hlist_put_cpu(cpu);
76e1d904
FW
7380}
7381
3b364d7b 7382static int swevent_hlist_get_cpu(int cpu)
76e1d904 7383{
b28ab83c 7384 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
7385 int err = 0;
7386
b28ab83c 7387 mutex_lock(&swhash->hlist_mutex);
b28ab83c 7388 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
7389 struct swevent_hlist *hlist;
7390
7391 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7392 if (!hlist) {
7393 err = -ENOMEM;
7394 goto exit;
7395 }
b28ab83c 7396 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 7397 }
b28ab83c 7398 swhash->hlist_refcount++;
9ed6060d 7399exit:
b28ab83c 7400 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
7401
7402 return err;
7403}
7404
3b364d7b 7405static int swevent_hlist_get(void)
76e1d904 7406{
3b364d7b 7407 int err, cpu, failed_cpu;
76e1d904 7408
76e1d904
FW
7409 get_online_cpus();
7410 for_each_possible_cpu(cpu) {
3b364d7b 7411 err = swevent_hlist_get_cpu(cpu);
76e1d904
FW
7412 if (err) {
7413 failed_cpu = cpu;
7414 goto fail;
7415 }
7416 }
7417 put_online_cpus();
7418
7419 return 0;
9ed6060d 7420fail:
76e1d904
FW
7421 for_each_possible_cpu(cpu) {
7422 if (cpu == failed_cpu)
7423 break;
3b364d7b 7424 swevent_hlist_put_cpu(cpu);
76e1d904
FW
7425 }
7426
7427 put_online_cpus();
7428 return err;
7429}
7430
c5905afb 7431struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 7432
b0a873eb
PZ
7433static void sw_perf_event_destroy(struct perf_event *event)
7434{
7435 u64 event_id = event->attr.config;
95476b64 7436
b0a873eb
PZ
7437 WARN_ON(event->parent);
7438
c5905afb 7439 static_key_slow_dec(&perf_swevent_enabled[event_id]);
3b364d7b 7440 swevent_hlist_put();
b0a873eb
PZ
7441}
7442
7443static int perf_swevent_init(struct perf_event *event)
7444{
8176cced 7445 u64 event_id = event->attr.config;
b0a873eb
PZ
7446
7447 if (event->attr.type != PERF_TYPE_SOFTWARE)
7448 return -ENOENT;
7449
2481c5fa
SE
7450 /*
7451 * no branch sampling for software events
7452 */
7453 if (has_branch_stack(event))
7454 return -EOPNOTSUPP;
7455
b0a873eb
PZ
7456 switch (event_id) {
7457 case PERF_COUNT_SW_CPU_CLOCK:
7458 case PERF_COUNT_SW_TASK_CLOCK:
7459 return -ENOENT;
7460
7461 default:
7462 break;
7463 }
7464
ce677831 7465 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
7466 return -ENOENT;
7467
7468 if (!event->parent) {
7469 int err;
7470
3b364d7b 7471 err = swevent_hlist_get();
b0a873eb
PZ
7472 if (err)
7473 return err;
7474
c5905afb 7475 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
7476 event->destroy = sw_perf_event_destroy;
7477 }
7478
7479 return 0;
7480}
7481
7482static struct pmu perf_swevent = {
89a1e187 7483 .task_ctx_nr = perf_sw_context,
95476b64 7484
34f43927
PZ
7485 .capabilities = PERF_PMU_CAP_NO_NMI,
7486
b0a873eb 7487 .event_init = perf_swevent_init,
a4eaf7f1
PZ
7488 .add = perf_swevent_add,
7489 .del = perf_swevent_del,
7490 .start = perf_swevent_start,
7491 .stop = perf_swevent_stop,
1c024eca 7492 .read = perf_swevent_read,
1c024eca
PZ
7493};
7494
b0a873eb
PZ
7495#ifdef CONFIG_EVENT_TRACING
7496
1c024eca
PZ
7497static int perf_tp_filter_match(struct perf_event *event,
7498 struct perf_sample_data *data)
7499{
7e3f977e 7500 void *record = data->raw->frag.data;
1c024eca 7501
b71b437e
PZ
7502 /* only top level events have filters set */
7503 if (event->parent)
7504 event = event->parent;
7505
1c024eca
PZ
7506 if (likely(!event->filter) || filter_match_preds(event->filter, record))
7507 return 1;
7508 return 0;
7509}
7510
7511static int perf_tp_event_match(struct perf_event *event,
7512 struct perf_sample_data *data,
7513 struct pt_regs *regs)
7514{
a0f7d0f7
FW
7515 if (event->hw.state & PERF_HES_STOPPED)
7516 return 0;
580d607c
PZ
7517 /*
7518 * All tracepoints are from kernel-space.
7519 */
7520 if (event->attr.exclude_kernel)
1c024eca
PZ
7521 return 0;
7522
7523 if (!perf_tp_filter_match(event, data))
7524 return 0;
7525
7526 return 1;
7527}
7528
85b67bcb
AS
7529void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7530 struct trace_event_call *call, u64 count,
7531 struct pt_regs *regs, struct hlist_head *head,
7532 struct task_struct *task)
7533{
7534 struct bpf_prog *prog = call->prog;
7535
7536 if (prog) {
7537 *(struct pt_regs **)raw_data = regs;
7538 if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7539 perf_swevent_put_recursion_context(rctx);
7540 return;
7541 }
7542 }
7543 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7544 rctx, task);
7545}
7546EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7547
1e1dcd93 7548void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
e6dab5ff
AV
7549 struct pt_regs *regs, struct hlist_head *head, int rctx,
7550 struct task_struct *task)
95476b64
FW
7551{
7552 struct perf_sample_data data;
1c024eca 7553 struct perf_event *event;
1c024eca 7554
95476b64 7555 struct perf_raw_record raw = {
7e3f977e
DB
7556 .frag = {
7557 .size = entry_size,
7558 .data = record,
7559 },
95476b64
FW
7560 };
7561
1e1dcd93 7562 perf_sample_data_init(&data, 0, 0);
95476b64
FW
7563 data.raw = &raw;
7564
1e1dcd93
AS
7565 perf_trace_buf_update(record, event_type);
7566
b67bfe0d 7567 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 7568 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 7569 perf_swevent_event(event, count, &data, regs);
4f41c013 7570 }
ecc55f84 7571
e6dab5ff
AV
7572 /*
7573 * If we got specified a target task, also iterate its context and
7574 * deliver this event there too.
7575 */
7576 if (task && task != current) {
7577 struct perf_event_context *ctx;
7578 struct trace_entry *entry = record;
7579
7580 rcu_read_lock();
7581 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7582 if (!ctx)
7583 goto unlock;
7584
7585 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7586 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7587 continue;
7588 if (event->attr.config != entry->type)
7589 continue;
7590 if (perf_tp_event_match(event, &data, regs))
7591 perf_swevent_event(event, count, &data, regs);
7592 }
7593unlock:
7594 rcu_read_unlock();
7595 }
7596
ecc55f84 7597 perf_swevent_put_recursion_context(rctx);
95476b64
FW
7598}
7599EXPORT_SYMBOL_GPL(perf_tp_event);
7600
cdd6c482 7601static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 7602{
1c024eca 7603 perf_trace_destroy(event);
e077df4f
PZ
7604}
7605
b0a873eb 7606static int perf_tp_event_init(struct perf_event *event)
e077df4f 7607{
76e1d904
FW
7608 int err;
7609
b0a873eb
PZ
7610 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7611 return -ENOENT;
7612
2481c5fa
SE
7613 /*
7614 * no branch sampling for tracepoint events
7615 */
7616 if (has_branch_stack(event))
7617 return -EOPNOTSUPP;
7618
1c024eca
PZ
7619 err = perf_trace_init(event);
7620 if (err)
b0a873eb 7621 return err;
e077df4f 7622
cdd6c482 7623 event->destroy = tp_perf_event_destroy;
e077df4f 7624
b0a873eb
PZ
7625 return 0;
7626}
7627
7628static struct pmu perf_tracepoint = {
89a1e187
PZ
7629 .task_ctx_nr = perf_sw_context,
7630
b0a873eb 7631 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
7632 .add = perf_trace_add,
7633 .del = perf_trace_del,
7634 .start = perf_swevent_start,
7635 .stop = perf_swevent_stop,
b0a873eb 7636 .read = perf_swevent_read,
b0a873eb
PZ
7637};
7638
7639static inline void perf_tp_register(void)
7640{
2e80a82a 7641 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 7642}
6fb2915d 7643
6fb2915d
LZ
7644static void perf_event_free_filter(struct perf_event *event)
7645{
7646 ftrace_profile_free_filter(event);
7647}
7648
2541517c
AS
7649static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7650{
98b5c2c6 7651 bool is_kprobe, is_tracepoint;
2541517c
AS
7652 struct bpf_prog *prog;
7653
7654 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7655 return -EINVAL;
7656
7657 if (event->tp_event->prog)
7658 return -EEXIST;
7659
98b5c2c6
AS
7660 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
7661 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
7662 if (!is_kprobe && !is_tracepoint)
7663 /* bpf programs can only be attached to u/kprobe or tracepoint */
2541517c
AS
7664 return -EINVAL;
7665
7666 prog = bpf_prog_get(prog_fd);
7667 if (IS_ERR(prog))
7668 return PTR_ERR(prog);
7669
98b5c2c6
AS
7670 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
7671 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
2541517c
AS
7672 /* valid fd, but invalid bpf program type */
7673 bpf_prog_put(prog);
7674 return -EINVAL;
7675 }
7676
32bbe007
AS
7677 if (is_tracepoint) {
7678 int off = trace_event_get_offsets(event->tp_event);
7679
7680 if (prog->aux->max_ctx_offset > off) {
7681 bpf_prog_put(prog);
7682 return -EACCES;
7683 }
7684 }
2541517c
AS
7685 event->tp_event->prog = prog;
7686
7687 return 0;
7688}
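/*
 * Userspace reaches this through the PERF_EVENT_IOC_SET_BPF ioctl on a
 * tracepoint or kprobe perf event fd; a minimal sketch (values and
 * descriptors are examples only):
 *
 *	prog_fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr));   -- BPF_PROG_TYPE_KPROBE program
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 */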
7689
7690static void perf_event_free_bpf_prog(struct perf_event *event)
7691{
7692 struct bpf_prog *prog;
7693
7694 if (!event->tp_event)
7695 return;
7696
7697 prog = event->tp_event->prog;
7698 if (prog) {
7699 event->tp_event->prog = NULL;
1aacde3d 7700 bpf_prog_put(prog);
2541517c
AS
7701 }
7702}
7703
e077df4f 7704#else
6fb2915d 7705
b0a873eb 7706static inline void perf_tp_register(void)
e077df4f 7707{
e077df4f 7708}
6fb2915d 7709
6fb2915d
LZ
7710static void perf_event_free_filter(struct perf_event *event)
7711{
7712}
7713
2541517c
AS
7714static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7715{
7716 return -ENOENT;
7717}
7718
7719static void perf_event_free_bpf_prog(struct perf_event *event)
7720{
7721}
07b139c8 7722#endif /* CONFIG_EVENT_TRACING */
e077df4f 7723
24f1e32c 7724#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 7725void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 7726{
f5ffe02e
FW
7727 struct perf_sample_data sample;
7728 struct pt_regs *regs = data;
7729
fd0d000b 7730 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 7731
a4eaf7f1 7732 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 7733 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
7734}
7735#endif
7736
375637bc
AS
7737/*
7738 * Allocate a new address filter
7739 */
7740static struct perf_addr_filter *
7741perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
7742{
7743 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
7744 struct perf_addr_filter *filter;
7745
7746 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
7747 if (!filter)
7748 return NULL;
7749
7750 INIT_LIST_HEAD(&filter->entry);
7751 list_add_tail(&filter->entry, filters);
7752
7753 return filter;
7754}
7755
7756static void free_filters_list(struct list_head *filters)
7757{
7758 struct perf_addr_filter *filter, *iter;
7759
7760 list_for_each_entry_safe(filter, iter, filters, entry) {
7761 if (filter->inode)
7762 iput(filter->inode);
7763 list_del(&filter->entry);
7764 kfree(filter);
7765 }
7766}
7767
7768/*
7769 * Free existing address filters and optionally install new ones
7770 */
7771static void perf_addr_filters_splice(struct perf_event *event,
7772 struct list_head *head)
7773{
7774 unsigned long flags;
7775 LIST_HEAD(list);
7776
7777 if (!has_addr_filter(event))
7778 return;
7779
7780 /* don't bother with children, they don't have their own filters */
7781 if (event->parent)
7782 return;
7783
7784 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
7785
7786 list_splice_init(&event->addr_filters.list, &list);
7787 if (head)
7788 list_splice(head, &event->addr_filters.list);
7789
7790 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
7791
7792 free_filters_list(&list);
7793}
7794
7795/*
7796 * Scan through mm's vmas and see if one of them matches the
7797 * @filter; if so, adjust filter's address range.
7798 * Called with mm::mmap_sem down for reading.
7799 */
7800static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
7801 struct mm_struct *mm)
7802{
7803 struct vm_area_struct *vma;
7804
7805 for (vma = mm->mmap; vma; vma = vma->vm_next) {
7806 struct file *file = vma->vm_file;
7807 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
7808 unsigned long vma_size = vma->vm_end - vma->vm_start;
7809
7810 if (!file)
7811 continue;
7812
7813 if (!perf_addr_filter_match(filter, file, off, vma_size))
7814 continue;
7815
7816 return vma->vm_start;
7817 }
7818
7819 return 0;
7820}
7821
7822/*
7823 * Update event's address range filters based on the
7824 * task's existing mappings, if any.
7825 */
7826static void perf_event_addr_filters_apply(struct perf_event *event)
7827{
7828 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7829 struct task_struct *task = READ_ONCE(event->ctx->task);
7830 struct perf_addr_filter *filter;
7831 struct mm_struct *mm = NULL;
7832 unsigned int count = 0;
7833 unsigned long flags;
7834
7835 /*
7836 * We may observe TASK_TOMBSTONE, which means that the event tear-down
7837 * will stop on the parent's child_mutex that our caller is also holding
7838 */
7839 if (task == TASK_TOMBSTONE)
7840 return;
7841
7842 mm = get_task_mm(event->ctx->task);
7843 if (!mm)
7844 goto restart;
7845
7846 down_read(&mm->mmap_sem);
7847
7848 raw_spin_lock_irqsave(&ifh->lock, flags);
7849 list_for_each_entry(filter, &ifh->list, entry) {
7850 event->addr_filters_offs[count] = 0;
7851
99f5bc9b
MP
7852 /*
7853 * Adjust base offset if the filter is associated to a binary
7854 * that needs to be mapped:
7855 */
7856 if (filter->inode)
375637bc
AS
7857 event->addr_filters_offs[count] =
7858 perf_addr_filter_apply(filter, mm);
7859
7860 count++;
7861 }
7862
7863 event->addr_filters_gen++;
7864 raw_spin_unlock_irqrestore(&ifh->lock, flags);
7865
7866 up_read(&mm->mmap_sem);
7867
7868 mmput(mm);
7869
7870restart:
7871 perf_event_restart(event);
7872}
7873
7874/*
7875 * Address range filtering: limiting the data to certain
7876 * instruction address ranges. Filters are ioctl()ed to us from
 7877 * userspace as ASCII strings.
7878 *
7879 * Filter string format:
7880 *
7881 * ACTION RANGE_SPEC
7882 * where ACTION is one of the
7883 * * "filter": limit the trace to this region
7884 * * "start": start tracing from this address
7885 * * "stop": stop tracing at this address/region;
7886 * RANGE_SPEC is
7887 * * for kernel addresses: <start address>[/<size>]
7888 * * for object files: <start address>[/<size>]@</path/to/object/file>
7889 *
7890 * if <size> is not specified, the range is treated as a single address.
7891 */
7892enum {
7893 IF_ACT_FILTER,
7894 IF_ACT_START,
7895 IF_ACT_STOP,
7896 IF_SRC_FILE,
7897 IF_SRC_KERNEL,
7898 IF_SRC_FILEADDR,
7899 IF_SRC_KERNELADDR,
7900};
7901
7902enum {
7903 IF_STATE_ACTION = 0,
7904 IF_STATE_SOURCE,
7905 IF_STATE_END,
7906};
7907
7908static const match_table_t if_tokens = {
7909 { IF_ACT_FILTER, "filter" },
7910 { IF_ACT_START, "start" },
7911 { IF_ACT_STOP, "stop" },
7912 { IF_SRC_FILE, "%u/%u@%s" },
7913 { IF_SRC_KERNEL, "%u/%u" },
7914 { IF_SRC_FILEADDR, "%u@%s" },
7915 { IF_SRC_KERNELADDR, "%u" },
7916};
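/*
 * Putting the grammar above and these token patterns together, some
 * illustrative filter strings (paths and values are examples only;
 * offsets and sizes are parsed with kstrtoul(..., 0, ...) below):
 *
 *	"filter 4096/8192@/usr/lib/libfoo.so"	-- limit tracing to an 8KiB range of an object file
 *	"start 65536@/usr/bin/app"		-- start tracing at one address in an object file
 *	"stop 1048576/4096"			-- stop region given as a kernel address range
 */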
7917
7918/*
7919 * Address filter string parser
7920 */
7921static int
7922perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
7923 struct list_head *filters)
7924{
7925 struct perf_addr_filter *filter = NULL;
7926 char *start, *orig, *filename = NULL;
7927 struct path path;
7928 substring_t args[MAX_OPT_ARGS];
7929 int state = IF_STATE_ACTION, token;
7930 unsigned int kernel = 0;
7931 int ret = -EINVAL;
7932
7933 orig = fstr = kstrdup(fstr, GFP_KERNEL);
7934 if (!fstr)
7935 return -ENOMEM;
7936
7937 while ((start = strsep(&fstr, " ,\n")) != NULL) {
7938 ret = -EINVAL;
7939
7940 if (!*start)
7941 continue;
7942
7943 /* filter definition begins */
7944 if (state == IF_STATE_ACTION) {
7945 filter = perf_addr_filter_new(event, filters);
7946 if (!filter)
7947 goto fail;
7948 }
7949
7950 token = match_token(start, if_tokens, args);
7951 switch (token) {
7952 case IF_ACT_FILTER:
7953 case IF_ACT_START:
7954 filter->filter = 1;
7955
7956 case IF_ACT_STOP:
7957 if (state != IF_STATE_ACTION)
7958 goto fail;
7959
7960 state = IF_STATE_SOURCE;
7961 break;
7962
7963 case IF_SRC_KERNELADDR:
7964 case IF_SRC_KERNEL:
7965 kernel = 1;
7966
7967 case IF_SRC_FILEADDR:
7968 case IF_SRC_FILE:
7969 if (state != IF_STATE_SOURCE)
7970 goto fail;
7971
7972 if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
7973 filter->range = 1;
7974
7975 *args[0].to = 0;
7976 ret = kstrtoul(args[0].from, 0, &filter->offset);
7977 if (ret)
7978 goto fail;
7979
7980 if (filter->range) {
7981 *args[1].to = 0;
7982 ret = kstrtoul(args[1].from, 0, &filter->size);
7983 if (ret)
7984 goto fail;
7985 }
7986
4059ffd0
MP
7987 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
7988 int fpos = filter->range ? 2 : 1;
7989
7990 filename = match_strdup(&args[fpos]);
375637bc
AS
7991 if (!filename) {
7992 ret = -ENOMEM;
7993 goto fail;
7994 }
7995 }
7996
7997 state = IF_STATE_END;
7998 break;
7999
8000 default:
8001 goto fail;
8002 }
8003
8004 /*
8005 * Filter definition is fully parsed, validate and install it.
8006 * Make sure that it doesn't contradict itself or the event's
8007 * attribute.
8008 */
8009 if (state == IF_STATE_END) {
8010 if (kernel && event->attr.exclude_kernel)
8011 goto fail;
8012
8013 if (!kernel) {
8014 if (!filename)
8015 goto fail;
8016
8017 /* look up the path and grab its inode */
8018 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
8019 if (ret)
8020 goto fail_free_name;
8021
8022 filter->inode = igrab(d_inode(path.dentry));
8023 path_put(&path);
8024 kfree(filename);
8025 filename = NULL;
8026
8027 ret = -EINVAL;
8028 if (!filter->inode ||
8029 !S_ISREG(filter->inode->i_mode))
8030 /* free_filters_list() will iput() */
8031 goto fail;
8032 }
8033
8034 /* ready to consume more filters */
8035 state = IF_STATE_ACTION;
8036 filter = NULL;
8037 }
8038 }
8039
8040 if (state != IF_STATE_ACTION)
8041 goto fail;
8042
8043 kfree(orig);
8044
8045 return 0;
8046
8047fail_free_name:
8048 kfree(filename);
8049fail:
8050 free_filters_list(filters);
8051 kfree(orig);
8052
8053 return ret;
8054}
8055
8056static int
8057perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
8058{
8059 LIST_HEAD(filters);
8060 int ret;
8061
8062 /*
8063 * Since this is called in perf_ioctl() path, we're already holding
8064 * ctx::mutex.
8065 */
8066 lockdep_assert_held(&event->ctx->mutex);
8067
8068 if (WARN_ON_ONCE(event->parent))
8069 return -EINVAL;
8070
8071 /*
8072 * For now, we only support filtering in per-task events; doing so
8073 * for CPU-wide events requires additional context switching trickery,
 8074 * since the same object code will be mapped at different virtual
8075 * addresses in different processes.
8076 */
8077 if (!event->ctx->task)
8078 return -EOPNOTSUPP;
8079
8080 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8081 if (ret)
8082 return ret;
8083
8084 ret = event->pmu->addr_filters_validate(&filters);
8085 if (ret) {
8086 free_filters_list(&filters);
8087 return ret;
8088 }
8089
8090 /* remove existing filters, if any */
8091 perf_addr_filters_splice(event, &filters);
8092
8093 /* install new filters */
8094 perf_event_for_each_child(event, perf_event_addr_filters_apply);
8095
8096 return ret;
8097}
8098
c796bbbe
AS
8099static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8100{
8101 char *filter_str;
8102 int ret = -EINVAL;
8103
375637bc
AS
8104 if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8105 !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8106 !has_addr_filter(event))
c796bbbe
AS
8107 return -EINVAL;
8108
8109 filter_str = strndup_user(arg, PAGE_SIZE);
8110 if (IS_ERR(filter_str))
8111 return PTR_ERR(filter_str);
8112
8113 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8114 event->attr.type == PERF_TYPE_TRACEPOINT)
8115 ret = ftrace_profile_set_filter(event, event->attr.config,
8116 filter_str);
375637bc
AS
8117 else if (has_addr_filter(event))
8118 ret = perf_event_set_addr_filter(event, filter_str);
c796bbbe
AS
8119
8120 kfree(filter_str);
8121 return ret;
8122}
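/*
 * Both filter flavours arrive via the PERF_EVENT_IOC_SET_FILTER ioctl;
 * illustrative calls (descriptors and strings are examples only):
 *
 *	ioctl(tp_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
 *	ioctl(pt_fd, PERF_EVENT_IOC_SET_FILTER, "filter 4096/8192@/usr/bin/app");
 */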
8123
b0a873eb
PZ
8124/*
8125 * hrtimer based swevent callback
8126 */
f29ac756 8127
b0a873eb 8128static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 8129{
b0a873eb
PZ
8130 enum hrtimer_restart ret = HRTIMER_RESTART;
8131 struct perf_sample_data data;
8132 struct pt_regs *regs;
8133 struct perf_event *event;
8134 u64 period;
f29ac756 8135
b0a873eb 8136 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
8137
8138 if (event->state != PERF_EVENT_STATE_ACTIVE)
8139 return HRTIMER_NORESTART;
8140
b0a873eb 8141 event->pmu->read(event);
f344011c 8142
fd0d000b 8143 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
8144 regs = get_irq_regs();
8145
8146 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 8147 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 8148 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
8149 ret = HRTIMER_NORESTART;
8150 }
24f1e32c 8151
b0a873eb
PZ
8152 period = max_t(u64, 10000, event->hw.sample_period);
8153 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 8154
b0a873eb 8155 return ret;
f29ac756
PZ
8156}
8157
b0a873eb 8158static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 8159{
b0a873eb 8160 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
8161 s64 period;
8162
8163 if (!is_sampling_event(event))
8164 return;
f5ffe02e 8165
5d508e82
FBH
8166 period = local64_read(&hwc->period_left);
8167 if (period) {
8168 if (period < 0)
8169 period = 10000;
fa407f35 8170
5d508e82
FBH
8171 local64_set(&hwc->period_left, 0);
8172 } else {
8173 period = max_t(u64, 10000, hwc->sample_period);
8174 }
3497d206
TG
8175 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8176 HRTIMER_MODE_REL_PINNED);
24f1e32c 8177}
b0a873eb
PZ
8178
8179static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 8180{
b0a873eb
PZ
8181 struct hw_perf_event *hwc = &event->hw;
8182
6c7e550f 8183 if (is_sampling_event(event)) {
b0a873eb 8184 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 8185 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
8186
8187 hrtimer_cancel(&hwc->hrtimer);
8188 }
24f1e32c
FW
8189}
8190
ba3dd36c
PZ
8191static void perf_swevent_init_hrtimer(struct perf_event *event)
8192{
8193 struct hw_perf_event *hwc = &event->hw;
8194
8195 if (!is_sampling_event(event))
8196 return;
8197
8198 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8199 hwc->hrtimer.function = perf_swevent_hrtimer;
8200
8201 /*
8202 * Since hrtimers have a fixed rate, we can do a static freq->period
8203 * mapping and avoid the whole period adjust feedback stuff.
8204 */
8205 if (event->attr.freq) {
8206 long freq = event->attr.sample_freq;
8207
8208 event->attr.sample_period = NSEC_PER_SEC / freq;
8209 hwc->sample_period = event->attr.sample_period;
8210 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 8211 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
8212 event->attr.freq = 0;
8213 }
8214}
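/*
 * Worked example of the static mapping above: a request of
 * attr.sample_freq = 4000 (4 kHz) becomes a fixed
 * sample_period = NSEC_PER_SEC / 4000 = 250000 ns, and attr.freq is
 * cleared so the usual period-adjustment feedback never runs.
 */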
8215
b0a873eb
PZ
8216/*
8217 * Software event: cpu wall time clock
8218 */
8219
8220static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 8221{
b0a873eb
PZ
8222 s64 prev;
8223 u64 now;
8224
a4eaf7f1 8225 now = local_clock();
b0a873eb
PZ
8226 prev = local64_xchg(&event->hw.prev_count, now);
8227 local64_add(now - prev, &event->count);
24f1e32c 8228}
24f1e32c 8229
a4eaf7f1 8230static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 8231{
a4eaf7f1 8232 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 8233 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
8234}
8235
a4eaf7f1 8236static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 8237{
b0a873eb
PZ
8238 perf_swevent_cancel_hrtimer(event);
8239 cpu_clock_event_update(event);
8240}
f29ac756 8241
a4eaf7f1
PZ
8242static int cpu_clock_event_add(struct perf_event *event, int flags)
8243{
8244 if (flags & PERF_EF_START)
8245 cpu_clock_event_start(event, flags);
6a694a60 8246 perf_event_update_userpage(event);
a4eaf7f1
PZ
8247
8248 return 0;
8249}
8250
8251static void cpu_clock_event_del(struct perf_event *event, int flags)
8252{
8253 cpu_clock_event_stop(event, flags);
8254}
8255
b0a873eb
PZ
8256static void cpu_clock_event_read(struct perf_event *event)
8257{
8258 cpu_clock_event_update(event);
8259}
f344011c 8260
b0a873eb
PZ
8261static int cpu_clock_event_init(struct perf_event *event)
8262{
8263 if (event->attr.type != PERF_TYPE_SOFTWARE)
8264 return -ENOENT;
8265
8266 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8267 return -ENOENT;
8268
2481c5fa
SE
8269 /*
8270 * no branch sampling for software events
8271 */
8272 if (has_branch_stack(event))
8273 return -EOPNOTSUPP;
8274
ba3dd36c
PZ
8275 perf_swevent_init_hrtimer(event);
8276
b0a873eb 8277 return 0;
f29ac756
PZ
8278}
8279
b0a873eb 8280static struct pmu perf_cpu_clock = {
89a1e187
PZ
8281 .task_ctx_nr = perf_sw_context,
8282
34f43927
PZ
8283 .capabilities = PERF_PMU_CAP_NO_NMI,
8284
b0a873eb 8285 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
8286 .add = cpu_clock_event_add,
8287 .del = cpu_clock_event_del,
8288 .start = cpu_clock_event_start,
8289 .stop = cpu_clock_event_stop,
b0a873eb
PZ
8290 .read = cpu_clock_event_read,
8291};
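/*
 * From userspace this is the plain cpu-clock software counter; a minimal
 * sketch of opening and reading it (error handling omitted):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	u64 count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	read(fd, &count, sizeof(count));	-- count is in nanoseconds
 */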
8292
8293/*
8294 * Software event: task time clock
8295 */
8296
8297static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 8298{
b0a873eb
PZ
8299 u64 prev;
8300 s64 delta;
5c92d124 8301
b0a873eb
PZ
8302 prev = local64_xchg(&event->hw.prev_count, now);
8303 delta = now - prev;
8304 local64_add(delta, &event->count);
8305}
5c92d124 8306
a4eaf7f1 8307static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 8308{
a4eaf7f1 8309 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 8310 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
8311}
8312
a4eaf7f1 8313static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
8314{
8315 perf_swevent_cancel_hrtimer(event);
8316 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
8317}
8318
8319static int task_clock_event_add(struct perf_event *event, int flags)
8320{
8321 if (flags & PERF_EF_START)
8322 task_clock_event_start(event, flags);
6a694a60 8323 perf_event_update_userpage(event);
b0a873eb 8324
a4eaf7f1
PZ
8325 return 0;
8326}
8327
8328static void task_clock_event_del(struct perf_event *event, int flags)
8329{
8330 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
8331}
8332
8333static void task_clock_event_read(struct perf_event *event)
8334{
768a06e2
PZ
8335 u64 now = perf_clock();
8336 u64 delta = now - event->ctx->timestamp;
8337 u64 time = event->ctx->time + delta;
b0a873eb
PZ
8338
8339 task_clock_event_update(event, time);
8340}
8341
8342static int task_clock_event_init(struct perf_event *event)
6fb2915d 8343{
b0a873eb
PZ
8344 if (event->attr.type != PERF_TYPE_SOFTWARE)
8345 return -ENOENT;
8346
8347 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8348 return -ENOENT;
8349
2481c5fa
SE
8350 /*
8351 * no branch sampling for software events
8352 */
8353 if (has_branch_stack(event))
8354 return -EOPNOTSUPP;
8355
ba3dd36c
PZ
8356 perf_swevent_init_hrtimer(event);
8357
b0a873eb 8358 return 0;
6fb2915d
LZ
8359}
8360
b0a873eb 8361static struct pmu perf_task_clock = {
89a1e187
PZ
8362 .task_ctx_nr = perf_sw_context,
8363
34f43927
PZ
8364 .capabilities = PERF_PMU_CAP_NO_NMI,
8365
b0a873eb 8366 .event_init = task_clock_event_init,
a4eaf7f1
PZ
8367 .add = task_clock_event_add,
8368 .del = task_clock_event_del,
8369 .start = task_clock_event_start,
8370 .stop = task_clock_event_stop,
b0a873eb
PZ
8371 .read = task_clock_event_read,
8372};
6fb2915d 8373
ad5133b7 8374static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 8375{
e077df4f 8376}
6fb2915d 8377
fbbe0701
SB
8378static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8379{
8380}
8381
ad5133b7 8382static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 8383{
ad5133b7 8384 return 0;
6fb2915d
LZ
8385}
8386
18ab2cd3 8387static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
fbbe0701
SB
8388
8389static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 8390{
fbbe0701
SB
8391 __this_cpu_write(nop_txn_flags, flags);
8392
8393 if (flags & ~PERF_PMU_TXN_ADD)
8394 return;
8395
ad5133b7 8396 perf_pmu_disable(pmu);
6fb2915d
LZ
8397}
8398
ad5133b7
PZ
8399static int perf_pmu_commit_txn(struct pmu *pmu)
8400{
fbbe0701
SB
8401 unsigned int flags = __this_cpu_read(nop_txn_flags);
8402
8403 __this_cpu_write(nop_txn_flags, 0);
8404
8405 if (flags & ~PERF_PMU_TXN_ADD)
8406 return 0;
8407
ad5133b7
PZ
8408 perf_pmu_enable(pmu);
8409 return 0;
8410}
e077df4f 8411
ad5133b7 8412static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 8413{
fbbe0701
SB
8414 unsigned int flags = __this_cpu_read(nop_txn_flags);
8415
8416 __this_cpu_write(nop_txn_flags, 0);
8417
8418 if (flags & ~PERF_PMU_TXN_ADD)
8419 return;
8420
ad5133b7 8421 perf_pmu_enable(pmu);
24f1e32c
FW
8422}
8423
35edc2a5
PZ
8424static int perf_event_idx_default(struct perf_event *event)
8425{
c719f560 8426 return 0;
35edc2a5
PZ
8427}
8428
8dc85d54
PZ
8429/*
8430 * Ensures all contexts with the same task_ctx_nr have the same
8431 * pmu_cpu_context too.
8432 */
9e317041 8433static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 8434{
8dc85d54 8435 struct pmu *pmu;
b326e956 8436
8dc85d54
PZ
8437 if (ctxn < 0)
8438 return NULL;
24f1e32c 8439
8dc85d54
PZ
8440 list_for_each_entry(pmu, &pmus, entry) {
8441 if (pmu->task_ctx_nr == ctxn)
8442 return pmu->pmu_cpu_context;
8443 }
24f1e32c 8444
8dc85d54 8445 return NULL;
24f1e32c
FW
8446}
8447
51676957 8448static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 8449{
51676957
PZ
8450 int cpu;
8451
8452 for_each_possible_cpu(cpu) {
8453 struct perf_cpu_context *cpuctx;
8454
8455 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8456
3f1f3320
PZ
8457 if (cpuctx->unique_pmu == old_pmu)
8458 cpuctx->unique_pmu = pmu;
51676957
PZ
8459 }
8460}
8461
8462static void free_pmu_context(struct pmu *pmu)
8463{
8464 struct pmu *i;
f5ffe02e 8465
8dc85d54 8466 mutex_lock(&pmus_lock);
0475f9ea 8467 /*
8dc85d54 8468 * Like a real lame refcount.
0475f9ea 8469 */
51676957
PZ
8470 list_for_each_entry(i, &pmus, entry) {
8471 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
8472 update_pmu_context(i, pmu);
8dc85d54 8473 goto out;
51676957 8474 }
8dc85d54 8475 }
d6d020e9 8476
51676957 8477 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
8478out:
8479 mutex_unlock(&pmus_lock);
24f1e32c 8480}
6e855cd4
AS
8481
8482/*
8483 * Let userspace know that this PMU supports address range filtering:
8484 */
8485static ssize_t nr_addr_filters_show(struct device *dev,
8486 struct device_attribute *attr,
8487 char *page)
8488{
8489 struct pmu *pmu = dev_get_drvdata(dev);
8490
8491 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8492}
8493DEVICE_ATTR_RO(nr_addr_filters);
8494
2e80a82a 8495static struct idr pmu_idr;
d6d020e9 8496
abe43400
PZ
8497static ssize_t
8498type_show(struct device *dev, struct device_attribute *attr, char *page)
8499{
8500 struct pmu *pmu = dev_get_drvdata(dev);
8501
8502 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8503}
90826ca7 8504static DEVICE_ATTR_RO(type);
abe43400 8505
62b85639
SE
8506static ssize_t
8507perf_event_mux_interval_ms_show(struct device *dev,
8508 struct device_attribute *attr,
8509 char *page)
8510{
8511 struct pmu *pmu = dev_get_drvdata(dev);
8512
8513 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8514}
8515
272325c4
PZ
8516static DEFINE_MUTEX(mux_interval_mutex);
8517
62b85639
SE
8518static ssize_t
8519perf_event_mux_interval_ms_store(struct device *dev,
8520 struct device_attribute *attr,
8521 const char *buf, size_t count)
8522{
8523 struct pmu *pmu = dev_get_drvdata(dev);
8524 int timer, cpu, ret;
8525
8526 ret = kstrtoint(buf, 0, &timer);
8527 if (ret)
8528 return ret;
8529
8530 if (timer < 1)
8531 return -EINVAL;
8532
 8533 /* same value, nothing to do */
8534 if (timer == pmu->hrtimer_interval_ms)
8535 return count;
8536
272325c4 8537 mutex_lock(&mux_interval_mutex);
62b85639
SE
8538 pmu->hrtimer_interval_ms = timer;
8539
8540 /* update all cpuctx for this PMU */
272325c4
PZ
8541 get_online_cpus();
8542 for_each_online_cpu(cpu) {
62b85639
SE
8543 struct perf_cpu_context *cpuctx;
8544 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8545 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8546
272325c4
PZ
8547 cpu_function_call(cpu,
8548 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 8549 }
272325c4
PZ
8550 put_online_cpus();
8551 mutex_unlock(&mux_interval_mutex);
62b85639
SE
8552
8553 return count;
8554}
90826ca7 8555static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
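/*
 * Exposed under the event_source bus, so (assuming a PMU registered with
 * the name "cpu") the multiplexing interval can be tuned from userspace
 * with e.g.:
 *
 *	echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 *
 * which reprograms the mux hrtimer on every online CPU via the loop above.
 */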
62b85639 8556
90826ca7
GKH
8557static struct attribute *pmu_dev_attrs[] = {
8558 &dev_attr_type.attr,
8559 &dev_attr_perf_event_mux_interval_ms.attr,
8560 NULL,
abe43400 8561};
90826ca7 8562ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
8563
8564static int pmu_bus_running;
8565static struct bus_type pmu_bus = {
8566 .name = "event_source",
90826ca7 8567 .dev_groups = pmu_dev_groups,
abe43400
PZ
8568};
8569
8570static void pmu_dev_release(struct device *dev)
8571{
8572 kfree(dev);
8573}
8574
8575static int pmu_dev_alloc(struct pmu *pmu)
8576{
8577 int ret = -ENOMEM;
8578
8579 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8580 if (!pmu->dev)
8581 goto out;
8582
0c9d42ed 8583 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
8584 device_initialize(pmu->dev);
8585 ret = dev_set_name(pmu->dev, "%s", pmu->name);
8586 if (ret)
8587 goto free_dev;
8588
8589 dev_set_drvdata(pmu->dev, pmu);
8590 pmu->dev->bus = &pmu_bus;
8591 pmu->dev->release = pmu_dev_release;
8592 ret = device_add(pmu->dev);
8593 if (ret)
8594 goto free_dev;
8595
6e855cd4
AS
8596 /* For PMUs with address filters, throw in an extra attribute: */
8597 if (pmu->nr_addr_filters)
8598 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
8599
8600 if (ret)
8601 goto del_dev;
8602
abe43400
PZ
8603out:
8604 return ret;
8605
6e855cd4
AS
8606del_dev:
8607 device_del(pmu->dev);
8608
abe43400
PZ
8609free_dev:
8610 put_device(pmu->dev);
8611 goto out;
8612}
8613
547e9fd7 8614static struct lock_class_key cpuctx_mutex;
facc4307 8615static struct lock_class_key cpuctx_lock;
547e9fd7 8616
03d8e80b 8617int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 8618{
108b02cf 8619 int cpu, ret;
24f1e32c 8620
b0a873eb 8621 mutex_lock(&pmus_lock);
33696fc0
PZ
8622 ret = -ENOMEM;
8623 pmu->pmu_disable_count = alloc_percpu(int);
8624 if (!pmu->pmu_disable_count)
8625 goto unlock;
f29ac756 8626
2e80a82a
PZ
8627 pmu->type = -1;
8628 if (!name)
8629 goto skip_type;
8630 pmu->name = name;
8631
8632 if (type < 0) {
0e9c3be2
TH
8633 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
8634 if (type < 0) {
8635 ret = type;
2e80a82a
PZ
8636 goto free_pdc;
8637 }
8638 }
8639 pmu->type = type;
8640
abe43400
PZ
8641 if (pmu_bus_running) {
8642 ret = pmu_dev_alloc(pmu);
8643 if (ret)
8644 goto free_idr;
8645 }
8646
2e80a82a 8647skip_type:
26657848
PZ
8648 if (pmu->task_ctx_nr == perf_hw_context) {
8649 static int hw_context_taken = 0;
8650
5101ef20
MR
8651 /*
8652 * Other than systems with heterogeneous CPUs, it never makes
8653 * sense for two PMUs to share perf_hw_context. PMUs which are
8654 * uncore must use perf_invalid_context.
8655 */
8656 if (WARN_ON_ONCE(hw_context_taken &&
8657 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
26657848
PZ
8658 pmu->task_ctx_nr = perf_invalid_context;
8659
8660 hw_context_taken = 1;
8661 }
8662
8dc85d54
PZ
8663 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
8664 if (pmu->pmu_cpu_context)
8665 goto got_cpu_context;
f29ac756 8666
c4814202 8667 ret = -ENOMEM;
108b02cf
PZ
8668 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
8669 if (!pmu->pmu_cpu_context)
abe43400 8670 goto free_dev;
f344011c 8671
108b02cf
PZ
8672 for_each_possible_cpu(cpu) {
8673 struct perf_cpu_context *cpuctx;
8674
8675 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 8676 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 8677 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 8678 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 8679 cpuctx->ctx.pmu = pmu;
9e630205 8680
272325c4 8681 __perf_mux_hrtimer_init(cpuctx, cpu);
9e630205 8682
3f1f3320 8683 cpuctx->unique_pmu = pmu;
108b02cf 8684 }
76e1d904 8685
8dc85d54 8686got_cpu_context:
ad5133b7
PZ
8687 if (!pmu->start_txn) {
8688 if (pmu->pmu_enable) {
8689 /*
8690 * If we have pmu_enable/pmu_disable calls, install
8691 * transaction stubs that use that to try and batch
8692 * hardware accesses.
8693 */
8694 pmu->start_txn = perf_pmu_start_txn;
8695 pmu->commit_txn = perf_pmu_commit_txn;
8696 pmu->cancel_txn = perf_pmu_cancel_txn;
8697 } else {
fbbe0701 8698 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
8699 pmu->commit_txn = perf_pmu_nop_int;
8700 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 8701 }
5c92d124 8702 }
15dbf27c 8703
ad5133b7
PZ
8704 if (!pmu->pmu_enable) {
8705 pmu->pmu_enable = perf_pmu_nop_void;
8706 pmu->pmu_disable = perf_pmu_nop_void;
8707 }
8708
35edc2a5
PZ
8709 if (!pmu->event_idx)
8710 pmu->event_idx = perf_event_idx_default;
8711
b0a873eb 8712 list_add_rcu(&pmu->entry, &pmus);
bed5b25a 8713 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
8714 ret = 0;
8715unlock:
b0a873eb
PZ
8716 mutex_unlock(&pmus_lock);
8717
33696fc0 8718 return ret;
108b02cf 8719
abe43400
PZ
8720free_dev:
8721 device_del(pmu->dev);
8722 put_device(pmu->dev);
8723
2e80a82a
PZ
8724free_idr:
8725 if (pmu->type >= PERF_TYPE_MAX)
8726 idr_remove(&pmu_idr, pmu->type);
8727
108b02cf
PZ
8728free_pdc:
8729 free_percpu(pmu->pmu_disable_count);
8730 goto unlock;
f29ac756 8731}
c464c76e 8732EXPORT_SYMBOL_GPL(perf_pmu_register);
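/*
 * A minimal registration sketch for a driver-side PMU (names and
 * callbacks are hypothetical, shown only to illustrate the call):
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,	-- uncore-style PMU
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);	-- -1: allocate a dynamic type
 */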
f29ac756 8733
b0a873eb 8734void perf_pmu_unregister(struct pmu *pmu)
5c92d124 8735{
b0a873eb
PZ
8736 mutex_lock(&pmus_lock);
8737 list_del_rcu(&pmu->entry);
8738 mutex_unlock(&pmus_lock);
5c92d124 8739
0475f9ea 8740 /*
cde8e884
PZ
8741 * We dereference the pmu list under both SRCU and regular RCU, so
8742 * synchronize against both of those.
0475f9ea 8743 */
b0a873eb 8744 synchronize_srcu(&pmus_srcu);
cde8e884 8745 synchronize_rcu();
d6d020e9 8746
33696fc0 8747 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
8748 if (pmu->type >= PERF_TYPE_MAX)
8749 idr_remove(&pmu_idr, pmu->type);
6e855cd4
AS
8750 if (pmu->nr_addr_filters)
8751 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
abe43400
PZ
8752 device_del(pmu->dev);
8753 put_device(pmu->dev);
51676957 8754 free_pmu_context(pmu);
b0a873eb 8755}
c464c76e 8756EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 8757
cc34b98b
MR
8758static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
8759{
ccd41c86 8760 struct perf_event_context *ctx = NULL;
cc34b98b
MR
8761 int ret;
8762
8763 if (!try_module_get(pmu->module))
8764 return -ENODEV;
ccd41c86
PZ
8765
8766 if (event->group_leader != event) {
8b10c5e2
PZ
8767 /*
8768 * This ctx->mutex can nest when we're called through
8769 * inheritance. See the perf_event_ctx_lock_nested() comment.
8770 */
8771 ctx = perf_event_ctx_lock_nested(event->group_leader,
8772 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
8773 BUG_ON(!ctx);
8774 }
8775
cc34b98b
MR
8776 event->pmu = pmu;
8777 ret = pmu->event_init(event);
ccd41c86
PZ
8778
8779 if (ctx)
8780 perf_event_ctx_unlock(event->group_leader, ctx);
8781
cc34b98b
MR
8782 if (ret)
8783 module_put(pmu->module);
8784
8785 return ret;
8786}
8787
18ab2cd3 8788static struct pmu *perf_init_event(struct perf_event *event)
b0a873eb
PZ
8789{
8790 struct pmu *pmu = NULL;
8791 int idx;
940c5b29 8792 int ret;
b0a873eb
PZ
8793
8794 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
8795
8796 rcu_read_lock();
8797 pmu = idr_find(&pmu_idr, event->attr.type);
8798 rcu_read_unlock();
940c5b29 8799 if (pmu) {
cc34b98b 8800 ret = perf_try_init_event(pmu, event);
940c5b29
LM
8801 if (ret)
8802 pmu = ERR_PTR(ret);
2e80a82a 8803 goto unlock;
940c5b29 8804 }
2e80a82a 8805
b0a873eb 8806 list_for_each_entry_rcu(pmu, &pmus, entry) {
cc34b98b 8807 ret = perf_try_init_event(pmu, event);
b0a873eb 8808 if (!ret)
e5f4d339 8809 goto unlock;
76e1d904 8810
b0a873eb
PZ
8811 if (ret != -ENOENT) {
8812 pmu = ERR_PTR(ret);
e5f4d339 8813 goto unlock;
f344011c 8814 }
5c92d124 8815 }
e5f4d339
PZ
8816 pmu = ERR_PTR(-ENOENT);
8817unlock:
b0a873eb 8818 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 8819
4aeb0b42 8820 return pmu;
5c92d124
IM
8821}
8822
f2fb6bef
KL
8823static void attach_sb_event(struct perf_event *event)
8824{
8825 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
8826
8827 raw_spin_lock(&pel->lock);
8828 list_add_rcu(&event->sb_list, &pel->list);
8829 raw_spin_unlock(&pel->lock);
8830}
8831
aab5b71e
PZ
8832/*
8833 * We keep a list of all !task (and therefore per-cpu) events
8834 * that need to receive side-band records.
8835 *
8836 * This avoids having to scan all the various PMU per-cpu contexts
8837 * looking for them.
8838 */
f2fb6bef
KL
8839static void account_pmu_sb_event(struct perf_event *event)
8840{
a4f144eb 8841 if (is_sb_event(event))
f2fb6bef
KL
8842 attach_sb_event(event);
8843}
8844
4beb31f3
FW
8845static void account_event_cpu(struct perf_event *event, int cpu)
8846{
8847 if (event->parent)
8848 return;
8849
4beb31f3
FW
8850 if (is_cgroup_event(event))
8851 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
8852}
8853
555e0c1e
FW
8854/* Freq events need the tick to stay alive (see perf_event_task_tick). */
8855static void account_freq_event_nohz(void)
8856{
8857#ifdef CONFIG_NO_HZ_FULL
8858 /* Lock so we don't race with concurrent unaccount */
8859 spin_lock(&nr_freq_lock);
8860 if (atomic_inc_return(&nr_freq_events) == 1)
8861 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
8862 spin_unlock(&nr_freq_lock);
8863#endif
8864}
8865
8866static void account_freq_event(void)
8867{
8868 if (tick_nohz_full_enabled())
8869 account_freq_event_nohz();
8870 else
8871 atomic_inc(&nr_freq_events);
8872}
8873
8874
766d6c07
FW
8875static void account_event(struct perf_event *event)
8876{
25432ae9
PZ
8877 bool inc = false;
8878
4beb31f3
FW
8879 if (event->parent)
8880 return;
8881
766d6c07 8882 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 8883 inc = true;
766d6c07
FW
8884 if (event->attr.mmap || event->attr.mmap_data)
8885 atomic_inc(&nr_mmap_events);
8886 if (event->attr.comm)
8887 atomic_inc(&nr_comm_events);
8888 if (event->attr.task)
8889 atomic_inc(&nr_task_events);
555e0c1e
FW
8890 if (event->attr.freq)
8891 account_freq_event();
45ac1403
AH
8892 if (event->attr.context_switch) {
8893 atomic_inc(&nr_switch_events);
25432ae9 8894 inc = true;
45ac1403 8895 }
4beb31f3 8896 if (has_branch_stack(event))
25432ae9 8897 inc = true;
4beb31f3 8898 if (is_cgroup_event(event))
25432ae9
PZ
8899 inc = true;
8900
9107c89e
PZ
8901 if (inc) {
8902 if (atomic_inc_not_zero(&perf_sched_count))
8903 goto enabled;
8904
8905 mutex_lock(&perf_sched_mutex);
8906 if (!atomic_read(&perf_sched_count)) {
8907 static_branch_enable(&perf_sched_events);
8908 /*
 8909 * Guarantee that all CPUs observe the key change and
8910 * call the perf scheduling hooks before proceeding to
8911 * install events that need them.
8912 */
8913 synchronize_sched();
8914 }
8915 /*
8916 * Now that we have waited for the sync_sched(), allow further
8917 * increments to by-pass the mutex.
8918 */
8919 atomic_inc(&perf_sched_count);
8920 mutex_unlock(&perf_sched_mutex);
8921 }
8922enabled:
4beb31f3
FW
8923
8924 account_event_cpu(event, event->cpu);
f2fb6bef
KL
8925
8926 account_pmu_sb_event(event);
766d6c07
FW
8927}
8928
0793a61d 8929/*
cdd6c482 8930 * Allocate and initialize an event structure
0793a61d 8931 */
cdd6c482 8932static struct perf_event *
c3f00c70 8933perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
8934 struct task_struct *task,
8935 struct perf_event *group_leader,
8936 struct perf_event *parent_event,
4dc0da86 8937 perf_overflow_handler_t overflow_handler,
79dff51e 8938 void *context, int cgroup_fd)
0793a61d 8939{
51b0fe39 8940 struct pmu *pmu;
cdd6c482
IM
8941 struct perf_event *event;
8942 struct hw_perf_event *hwc;
90983b16 8943 long err = -EINVAL;
0793a61d 8944
66832eb4
ON
8945 if ((unsigned)cpu >= nr_cpu_ids) {
8946 if (!task || cpu != -1)
8947 return ERR_PTR(-EINVAL);
8948 }
8949
c3f00c70 8950 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 8951 if (!event)
d5d2bc0d 8952 return ERR_PTR(-ENOMEM);
0793a61d 8953
04289bb9 8954 /*
cdd6c482 8955 * Single events are their own group leaders, with an
04289bb9
IM
8956 * empty sibling list:
8957 */
8958 if (!group_leader)
cdd6c482 8959 group_leader = event;
04289bb9 8960
cdd6c482
IM
8961 mutex_init(&event->child_mutex);
8962 INIT_LIST_HEAD(&event->child_list);
fccc714b 8963
cdd6c482
IM
8964 INIT_LIST_HEAD(&event->group_entry);
8965 INIT_LIST_HEAD(&event->event_entry);
8966 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 8967 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 8968 INIT_LIST_HEAD(&event->active_entry);
375637bc 8969 INIT_LIST_HEAD(&event->addr_filters.list);
f3ae75de
SE
8970 INIT_HLIST_NODE(&event->hlist_entry);
8971
10c6db11 8972
cdd6c482 8973 init_waitqueue_head(&event->waitq);
e360adbe 8974 init_irq_work(&event->pending, perf_pending_event);
0793a61d 8975
cdd6c482 8976 mutex_init(&event->mmap_mutex);
375637bc 8977 raw_spin_lock_init(&event->addr_filters.lock);
7b732a75 8978
a6fa941d 8979 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
8980 event->cpu = cpu;
8981 event->attr = *attr;
8982 event->group_leader = group_leader;
8983 event->pmu = NULL;
cdd6c482 8984 event->oncpu = -1;
a96bbc16 8985
cdd6c482 8986 event->parent = parent_event;
b84fbc9f 8987
17cf22c3 8988 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 8989 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 8990
cdd6c482 8991 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 8992
d580ff86
PZ
8993 if (task) {
8994 event->attach_state = PERF_ATTACH_TASK;
d580ff86 8995 /*
50f16a8b
PZ
8996 * XXX pmu::event_init needs to know what task to account to
8997 * and we cannot use the ctx information because we need the
8998 * pmu before we get a ctx.
d580ff86 8999 */
50f16a8b 9000 event->hw.target = task;
d580ff86
PZ
9001 }
9002
34f43927
PZ
9003 event->clock = &local_clock;
9004 if (parent_event)
9005 event->clock = parent_event->clock;
9006
4dc0da86 9007 if (!overflow_handler && parent_event) {
b326e956 9008 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
9009 context = parent_event->overflow_handler_context;
9010 }
66832eb4 9011
1879445d
WN
9012 if (overflow_handler) {
9013 event->overflow_handler = overflow_handler;
9014 event->overflow_handler_context = context;
9ecda41a
WN
9015 } else if (is_write_backward(event)){
9016 event->overflow_handler = perf_event_output_backward;
9017 event->overflow_handler_context = NULL;
1879445d 9018 } else {
9ecda41a 9019 event->overflow_handler = perf_event_output_forward;
1879445d
WN
9020 event->overflow_handler_context = NULL;
9021 }
97eaf530 9022
0231bb53 9023 perf_event__state_init(event);
a86ed508 9024
4aeb0b42 9025 pmu = NULL;
b8e83514 9026
cdd6c482 9027 hwc = &event->hw;
bd2b5b12 9028 hwc->sample_period = attr->sample_period;
0d48696f 9029 if (attr->freq && attr->sample_freq)
bd2b5b12 9030 hwc->sample_period = 1;
eced1dfc 9031 hwc->last_period = hwc->sample_period;
bd2b5b12 9032
e7850595 9033 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 9034
2023b359 9035 /*
cdd6c482 9036 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 9037 */
3dab77fb 9038 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 9039 goto err_ns;
a46a2300
YZ
9040
9041 if (!has_branch_stack(event))
9042 event->attr.branch_sample_type = 0;
2023b359 9043
79dff51e
MF
9044 if (cgroup_fd != -1) {
9045 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
9046 if (err)
9047 goto err_ns;
9048 }
9049
b0a873eb 9050 pmu = perf_init_event(event);
4aeb0b42 9051 if (!pmu)
90983b16
FW
9052 goto err_ns;
9053 else if (IS_ERR(pmu)) {
4aeb0b42 9054 err = PTR_ERR(pmu);
90983b16 9055 goto err_ns;
621a01ea 9056 }
d5d2bc0d 9057
bed5b25a
AS
9058 err = exclusive_event_init(event);
9059 if (err)
9060 goto err_pmu;
9061
375637bc
AS
9062 if (has_addr_filter(event)) {
9063 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
9064 sizeof(unsigned long),
9065 GFP_KERNEL);
9066 if (!event->addr_filters_offs)
9067 goto err_per_task;
9068
9069 /* force hw sync on the address filters */
9070 event->addr_filters_gen = 1;
9071 }
9072
cdd6c482 9073 if (!event->parent) {
927c7a9e 9074 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
97c79a38 9075 err = get_callchain_buffers(attr->sample_max_stack);
90983b16 9076 if (err)
375637bc 9077 goto err_addr_filters;
d010b332 9078 }
f344011c 9079 }
9ee318a7 9080
927a5570
AS
9081 /* symmetric to unaccount_event() in _free_event() */
9082 account_event(event);
9083
cdd6c482 9084 return event;
90983b16 9085
375637bc
AS
9086err_addr_filters:
9087 kfree(event->addr_filters_offs);
9088
bed5b25a
AS
9089err_per_task:
9090 exclusive_event_destroy(event);
9091
90983b16
FW
9092err_pmu:
9093 if (event->destroy)
9094 event->destroy(event);
c464c76e 9095 module_put(pmu->module);
90983b16 9096err_ns:
79dff51e
MF
9097 if (is_cgroup_event(event))
9098 perf_detach_cgroup(event);
90983b16
FW
9099 if (event->ns)
9100 put_pid_ns(event->ns);
9101 kfree(event);
9102
9103 return ERR_PTR(err);
0793a61d
TG
9104}
9105
cdd6c482
IM
9106static int perf_copy_attr(struct perf_event_attr __user *uattr,
9107 struct perf_event_attr *attr)
974802ea 9108{
974802ea 9109 u32 size;
cdf8073d 9110 int ret;
974802ea
PZ
9111
9112 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9113 return -EFAULT;
9114
9115 /*
9116 * zero the full structure, so that a short copy will be nice.
9117 */
9118 memset(attr, 0, sizeof(*attr));
9119
9120 ret = get_user(size, &uattr->size);
9121 if (ret)
9122 return ret;
9123
9124 if (size > PAGE_SIZE) /* silly large */
9125 goto err_size;
9126
9127 if (!size) /* abi compat */
9128 size = PERF_ATTR_SIZE_VER0;
9129
9130 if (size < PERF_ATTR_SIZE_VER0)
9131 goto err_size;
9132
9133 /*
9134 * If we're handed a bigger struct than we know of,
cdf8073d
IS
9135 * ensure all the unknown bits are 0 - i.e. new
9136 * user-space does not rely on any kernel feature
 9137 * extensions we don't know about yet.
974802ea
PZ
9138 */
9139 if (size > sizeof(*attr)) {
cdf8073d
IS
9140 unsigned char __user *addr;
9141 unsigned char __user *end;
9142 unsigned char val;
974802ea 9143
cdf8073d
IS
9144 addr = (void __user *)uattr + sizeof(*attr);
9145 end = (void __user *)uattr + size;
974802ea 9146
cdf8073d 9147 for (; addr < end; addr++) {
974802ea
PZ
9148 ret = get_user(val, addr);
9149 if (ret)
9150 return ret;
9151 if (val)
9152 goto err_size;
9153 }
b3e62e35 9154 size = sizeof(*attr);
974802ea
PZ
9155 }
9156
9157 ret = copy_from_user(attr, uattr, size);
9158 if (ret)
9159 return -EFAULT;
9160
cd757645 9161 if (attr->__reserved_1)
974802ea
PZ
9162 return -EINVAL;
9163
9164 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9165 return -EINVAL;
9166
9167 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9168 return -EINVAL;
9169
bce38cd5
SE
9170 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9171 u64 mask = attr->branch_sample_type;
9172
9173 /* only using defined bits */
9174 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9175 return -EINVAL;
9176
9177 /* at least one branch bit must be set */
9178 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9179 return -EINVAL;
9180
bce38cd5
SE
9181 /* propagate priv level, when not set for branch */
9182 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9183
9184 /* exclude_kernel checked on syscall entry */
9185 if (!attr->exclude_kernel)
9186 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9187
9188 if (!attr->exclude_user)
9189 mask |= PERF_SAMPLE_BRANCH_USER;
9190
9191 if (!attr->exclude_hv)
9192 mask |= PERF_SAMPLE_BRANCH_HV;
9193 /*
9194 * adjust user setting (for HW filter setup)
9195 */
9196 attr->branch_sample_type = mask;
9197 }
e712209a
SE
9198 /* privileged levels capture (kernel, hv): check permissions */
9199 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
9200 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9201 return -EACCES;
bce38cd5 9202 }
4018994f 9203
c5ebcedb 9204 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 9205 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
9206 if (ret)
9207 return ret;
9208 }
9209
9210 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9211 if (!arch_perf_have_user_stack_dump())
9212 return -ENOSYS;
9213
9214 /*
9215 * We have __u32 type for the size, but so far
9216 * we can only use __u16 as maximum due to the
9217 * __u16 sample size limit.
9218 */
9219 if (attr->sample_stack_user >= USHRT_MAX)
9220 ret = -EINVAL;
9221 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9222 ret = -EINVAL;
9223 }
4018994f 9224
60e2364e
SE
9225 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9226 ret = perf_reg_validate(attr->sample_regs_intr);
974802ea
PZ
9227out:
9228 return ret;
9229
9230err_size:
9231 put_user(sizeof(*attr), &uattr->size);
9232 ret = -E2BIG;
9233 goto out;
9234}
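/*
 * Illustrative sketch (not part of the kernel source): the forward-compat
 * rule enforced by perf_copy_attr() above, in plain C. A caller handing in
 * a larger structure than we know about is accepted only if every byte
 * beyond the known layout is zero. All names below are made up.
 */
#include <stddef.h>

struct known_attr { unsigned int size; unsigned long long flags; };

static int trailing_bytes_are_zero(const unsigned char *buf, size_t user_size)
{
	size_t i;

	for (i = sizeof(struct known_attr); i < user_size; i++)
		if (buf[i])
			return 0;	/* unknown feature bits set: reject */
	return 1;			/* safe to treat as the known layout */
}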
9235
ac9721f3
PZ
9236static int
9237perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 9238{
b69cf536 9239 struct ring_buffer *rb = NULL;
a4be7c27
PZ
9240 int ret = -EINVAL;
9241
ac9721f3 9242 if (!output_event)
a4be7c27
PZ
9243 goto set;
9244
ac9721f3
PZ
9245 /* don't allow circular references */
9246 if (event == output_event)
a4be7c27
PZ
9247 goto out;
9248
0f139300
PZ
9249 /*
9250 * Don't allow cross-cpu buffers
9251 */
9252 if (output_event->cpu != event->cpu)
9253 goto out;
9254
9255 /*
76369139 9256 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
9257 */
9258 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9259 goto out;
9260
34f43927
PZ
9261 /*
9262 * Mixing clocks in the same buffer is trouble you don't need.
9263 */
9264 if (output_event->clock != event->clock)
9265 goto out;
9266
9ecda41a
WN
9267 /*
9268 * Events write the ring buffer either from the beginning or from the end.
9269 * Mixing is not allowed.
9270 */
9271 if (is_write_backward(output_event) != is_write_backward(event))
9272 goto out;
9273
45bfb2e5
PZ
9274 /*
9275 * If both events generate aux data, they must be on the same PMU
9276 */
9277 if (has_aux(event) && has_aux(output_event) &&
9278 event->pmu != output_event->pmu)
9279 goto out;
9280
a4be7c27 9281set:
cdd6c482 9282 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
9283 /* Can't redirect output if we've got an active mmap() */
9284 if (atomic_read(&event->mmap_count))
9285 goto unlock;
a4be7c27 9286
ac9721f3 9287 if (output_event) {
76369139
FW
9288 /* get the rb we want to redirect to */
9289 rb = ring_buffer_get(output_event);
9290 if (!rb)
ac9721f3 9291 goto unlock;
a4be7c27
PZ
9292 }
9293
b69cf536 9294 ring_buffer_attach(event, rb);
9bb5d40c 9295
a4be7c27 9296 ret = 0;
ac9721f3
PZ
9297unlock:
9298 mutex_unlock(&event->mmap_mutex);
9299
a4be7c27 9300out:
a4be7c27
PZ
9301 return ret;
9302}
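/*
 * Userspace sketch (assumption: fd_leader and fd_aux are perf event fds
 * opened on the same CPU/task with compatible settings): route the second
 * event's records into the first event's ring buffer so only one mmap()
 * is needed. The same effect is available at open time via
 * PERF_FLAG_FD_OUTPUT; the ioctl below is the after-the-fact variant.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int share_ring_buffer(int fd_leader, int fd_aux)
{
	/* mmap() only fd_leader; fd_aux then writes into that buffer */
	return ioctl(fd_aux, PERF_EVENT_IOC_SET_OUTPUT, fd_leader);
}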
9303
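/*
 * Lock ordering note: mutex_lock_double() below always takes the mutex at
 * the lower address first, so any two tasks locking the same pair agree on
 * the order and cannot ABBA-deadlock; the nested annotation keeps lockdep
 * happy about holding two locks of the same class.
 */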
f63a8daa
PZ
9304static void mutex_lock_double(struct mutex *a, struct mutex *b)
9305{
9306 if (b < a)
9307 swap(a, b);
9308
9309 mutex_lock(a);
9310 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9311}
9312
34f43927
PZ
9313static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9314{
9315 bool nmi_safe = false;
9316
9317 switch (clk_id) {
9318 case CLOCK_MONOTONIC:
9319 event->clock = &ktime_get_mono_fast_ns;
9320 nmi_safe = true;
9321 break;
9322
9323 case CLOCK_MONOTONIC_RAW:
9324 event->clock = &ktime_get_raw_fast_ns;
9325 nmi_safe = true;
9326 break;
9327
9328 case CLOCK_REALTIME:
9329 event->clock = &ktime_get_real_ns;
9330 break;
9331
9332 case CLOCK_BOOTTIME:
9333 event->clock = &ktime_get_boot_ns;
9334 break;
9335
9336 case CLOCK_TAI:
9337 event->clock = &ktime_get_tai_ns;
9338 break;
9339
9340 default:
9341 return -EINVAL;
9342 }
9343
9344 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9345 return -EINVAL;
9346
9347 return 0;
9348}
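/*
 * Userspace sketch: request that sample timestamps use CLOCK_MONOTONIC_RAW
 * so they can be correlated with other raw-clock readings. use_clockid and
 * clockid are real perf_event_attr fields; as perf_event_set_clock() above
 * shows, clocks that are not NMI-safe are refused unless the PMU does not
 * use NMIs.
 */
#include <string.h>
#include <time.h>
#include <linux/perf_event.h>

static void request_raw_clock(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size        = sizeof(*attr);
	attr->use_clockid = 1;
	attr->clockid     = CLOCK_MONOTONIC_RAW;
}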
9349
0793a61d 9350/**
cdd6c482 9351 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 9352 *
cdd6c482 9353 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 9354 * @pid: target pid
9f66a381 9355 * @cpu: target cpu
cdd6c482 9356 * @group_fd: group leader event fd
0793a61d 9357 */
cdd6c482
IM
9358SYSCALL_DEFINE5(perf_event_open,
9359 struct perf_event_attr __user *, attr_uptr,
2743a5b0 9360 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 9361{
b04243ef
PZ
9362 struct perf_event *group_leader = NULL, *output_event = NULL;
9363 struct perf_event *event, *sibling;
cdd6c482 9364 struct perf_event_attr attr;
f63a8daa 9365 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 9366 struct file *event_file = NULL;
2903ff01 9367 struct fd group = {NULL, 0};
38a81da2 9368 struct task_struct *task = NULL;
89a1e187 9369 struct pmu *pmu;
ea635c64 9370 int event_fd;
b04243ef 9371 int move_group = 0;
dc86cabe 9372 int err;
a21b0b35 9373 int f_flags = O_RDWR;
79dff51e 9374 int cgroup_fd = -1;
0793a61d 9375
2743a5b0 9376 /* for future expandability... */
e5d1367f 9377 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
9378 return -EINVAL;
9379
dc86cabe
IM
9380 err = perf_copy_attr(attr_uptr, &attr);
9381 if (err)
9382 return err;
eab656ae 9383
0764771d
PZ
9384 if (!attr.exclude_kernel) {
9385 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9386 return -EACCES;
9387 }
9388
df58ab24 9389 if (attr.freq) {
cdd6c482 9390 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 9391 return -EINVAL;
0819b2e3
PZ
9392 } else {
9393 if (attr.sample_period & (1ULL << 63))
9394 return -EINVAL;
df58ab24
PZ
9395 }
9396
97c79a38
ACM
9397 if (!attr.sample_max_stack)
9398 attr.sample_max_stack = sysctl_perf_event_max_stack;
9399
e5d1367f
SE
9400 /*
9401 * In cgroup mode, the pid argument is used to pass the fd
9402 * opened to the cgroup directory in cgroupfs. The cpu argument
9403 * designates the cpu on which to monitor threads from that
9404 * cgroup.
9405 */
9406 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9407 return -EINVAL;
9408
a21b0b35
YD
9409 if (flags & PERF_FLAG_FD_CLOEXEC)
9410 f_flags |= O_CLOEXEC;
9411
9412 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
9413 if (event_fd < 0)
9414 return event_fd;
9415
ac9721f3 9416 if (group_fd != -1) {
2903ff01
AV
9417 err = perf_fget_light(group_fd, &group);
9418 if (err)
d14b12d7 9419 goto err_fd;
2903ff01 9420 group_leader = group.file->private_data;
ac9721f3
PZ
9421 if (flags & PERF_FLAG_FD_OUTPUT)
9422 output_event = group_leader;
9423 if (flags & PERF_FLAG_FD_NO_GROUP)
9424 group_leader = NULL;
9425 }
9426
e5d1367f 9427 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
9428 task = find_lively_task_by_vpid(pid);
9429 if (IS_ERR(task)) {
9430 err = PTR_ERR(task);
9431 goto err_group_fd;
9432 }
9433 }
9434
1f4ee503
PZ
9435 if (task && group_leader &&
9436 group_leader->attr.inherit != attr.inherit) {
9437 err = -EINVAL;
9438 goto err_task;
9439 }
9440
fbfc623f
YZ
9441 get_online_cpus();
9442
79c9ce57
PZ
9443 if (task) {
9444 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9445 if (err)
9446 goto err_cpus;
9447
9448 /*
9449 * Reuse ptrace permission checks for now.
9450 *
9451 * We must hold cred_guard_mutex across this and any potential
9452 * perf_install_in_context() call for this new event to
9453 * serialize against exec() altering our credentials (and the
9454 * perf_event_exit_task() that could imply).
9455 */
9456 err = -EACCES;
9457 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9458 goto err_cred;
9459 }
9460
79dff51e
MF
9461 if (flags & PERF_FLAG_PID_CGROUP)
9462 cgroup_fd = pid;
9463
4dc0da86 9464 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 9465 NULL, NULL, cgroup_fd);
d14b12d7
SE
9466 if (IS_ERR(event)) {
9467 err = PTR_ERR(event);
79c9ce57 9468 goto err_cred;
d14b12d7
SE
9469 }
9470
53b25335
VW
9471 if (is_sampling_event(event)) {
9472 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
a1396555 9473 err = -EOPNOTSUPP;
53b25335
VW
9474 goto err_alloc;
9475 }
9476 }
9477
89a1e187
PZ
9478 /*
9479 * Special case software events and allow them to be part of
9480 * any hardware group.
9481 */
9482 pmu = event->pmu;
b04243ef 9483
34f43927
PZ
9484 if (attr.use_clockid) {
9485 err = perf_event_set_clock(event, attr.clockid);
9486 if (err)
9487 goto err_alloc;
9488 }
9489
4ff6a8de
DCC
9490 if (pmu->task_ctx_nr == perf_sw_context)
9491 event->event_caps |= PERF_EV_CAP_SOFTWARE;
9492
b04243ef
PZ
9493 if (group_leader &&
9494 (is_software_event(event) != is_software_event(group_leader))) {
9495 if (is_software_event(event)) {
9496 /*
9497 * If event and group_leader are not both a software
9498 * event, and event is, then group leader is not.
9499 *
9500 * Allow the addition of software events to !software
9501 * groups, this is safe because software events never
9502 * fail to schedule.
9503 */
9504 pmu = group_leader->pmu;
9505 } else if (is_software_event(group_leader) &&
4ff6a8de 9506 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
b04243ef
PZ
9507 /*
9508 * In case the group is a pure software group, and we
9509 * try to add a hardware event, move the whole group to
9510 * the hardware context.
9511 */
9512 move_group = 1;
9513 }
9514 }
89a1e187
PZ
9515
9516 /*
9517 * Get the target context (task or percpu):
9518 */
4af57ef2 9519 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
9520 if (IS_ERR(ctx)) {
9521 err = PTR_ERR(ctx);
c6be5a5c 9522 goto err_alloc;
89a1e187
PZ
9523 }
9524
bed5b25a
AS
9525 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
9526 err = -EBUSY;
9527 goto err_context;
9528 }
9529
ccff286d 9530 /*
cdd6c482 9531 * Look up the group leader (we will attach this event to it):
04289bb9 9532 */
ac9721f3 9533 if (group_leader) {
dc86cabe 9534 err = -EINVAL;
04289bb9 9535
04289bb9 9536 /*
ccff286d
IM
9537 * Do not allow a recursive hierarchy (this new sibling
9538 * becoming part of another group-sibling):
9539 */
9540 if (group_leader->group_leader != group_leader)
c3f00c70 9541 goto err_context;
34f43927
PZ
9542
9543 /* All events in a group should have the same clock */
9544 if (group_leader->clock != event->clock)
9545 goto err_context;
9546
ccff286d
IM
9547 /*
9548 * Do not allow to attach to a group in a different
9549 * task or CPU context:
04289bb9 9550 */
b04243ef 9551 if (move_group) {
c3c87e77
PZ
9552 /*
9553 * Make sure we're both on the same task, or both
9554 * per-cpu events.
9555 */
9556 if (group_leader->ctx->task != ctx->task)
9557 goto err_context;
9558
9559 /*
9560 * Make sure we're both events for the same CPU;
9561 * grouping events for different CPUs is broken; since
9562 * you can never concurrently schedule them anyhow.
9563 */
9564 if (group_leader->cpu != event->cpu)
b04243ef
PZ
9565 goto err_context;
9566 } else {
9567 if (group_leader->ctx != ctx)
9568 goto err_context;
9569 }
9570
3b6f9e5c
PM
9571 /*
9572 * Only a group leader can be exclusive or pinned
9573 */
0d48696f 9574 if (attr.exclusive || attr.pinned)
c3f00c70 9575 goto err_context;
ac9721f3
PZ
9576 }
9577
9578 if (output_event) {
9579 err = perf_event_set_output(event, output_event);
9580 if (err)
c3f00c70 9581 goto err_context;
ac9721f3 9582 }
0793a61d 9583
a21b0b35
YD
9584 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
9585 f_flags);
ea635c64
AV
9586 if (IS_ERR(event_file)) {
9587 err = PTR_ERR(event_file);
201c2f85 9588 event_file = NULL;
c3f00c70 9589 goto err_context;
ea635c64 9590 }
9b51f66d 9591
b04243ef 9592 if (move_group) {
f63a8daa 9593 gctx = group_leader->ctx;
f55fc2a5 9594 mutex_lock_double(&gctx->mutex, &ctx->mutex);
84c4e620
PZ
9595 if (gctx->task == TASK_TOMBSTONE) {
9596 err = -ESRCH;
9597 goto err_locked;
9598 }
f55fc2a5
PZ
9599 } else {
9600 mutex_lock(&ctx->mutex);
9601 }
9602
84c4e620
PZ
9603 if (ctx->task == TASK_TOMBSTONE) {
9604 err = -ESRCH;
9605 goto err_locked;
9606 }
9607
a723968c
PZ
9608 if (!perf_event_validate_size(event)) {
9609 err = -E2BIG;
9610 goto err_locked;
9611 }
9612
f55fc2a5
PZ
9613 /*
9614 * Must be under the same ctx::mutex as perf_install_in_context(),
9615 * because we need to serialize with concurrent event creation.
9616 */
9617 if (!exclusive_event_installable(event, ctx)) {
9618 /* exclusive and group stuff are assumed mutually exclusive */
9619 WARN_ON_ONCE(move_group);
f63a8daa 9620
f55fc2a5
PZ
9621 err = -EBUSY;
9622 goto err_locked;
9623 }
f63a8daa 9624
f55fc2a5
PZ
9625 WARN_ON_ONCE(ctx->parent_ctx);
9626
79c9ce57
PZ
9627 /*
9628 * This is the point of no return; we cannot fail hereafter. This is
9629 * where we start modifying current state.
9630 */
9631
f55fc2a5 9632 if (move_group) {
f63a8daa
PZ
9633 /*
9634 * See perf_event_ctx_lock() for comments on the details
9635 * of swizzling perf_event::ctx.
9636 */
45a0e07a 9637 perf_remove_from_context(group_leader, 0);
0231bb53 9638
b04243ef
PZ
9639 list_for_each_entry(sibling, &group_leader->sibling_list,
9640 group_entry) {
45a0e07a 9641 perf_remove_from_context(sibling, 0);
b04243ef
PZ
9642 put_ctx(gctx);
9643 }
b04243ef 9644
f63a8daa
PZ
9645 /*
9646 * Wait for everybody to stop referencing the events through
9647 * the old lists, before installing it on new lists.
9648 */
0cda4c02 9649 synchronize_rcu();
f63a8daa 9650
8f95b435
PZI
9651 /*
9652 * Install the group siblings before the group leader.
9653 *
9654 * Because a group leader will try and install the entire group
9655 * (through the sibling list, which is still intact), we can
9656 * end up with siblings installed in the wrong context.
9657 *
9658 * By installing siblings first we NO-OP because they're not
9659 * reachable through the group lists.
9660 */
b04243ef
PZ
9661 list_for_each_entry(sibling, &group_leader->sibling_list,
9662 group_entry) {
8f95b435 9663 perf_event__state_init(sibling);
9fc81d87 9664 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
9665 get_ctx(ctx);
9666 }
8f95b435
PZI
9667
9668 /*
9669 * Removing from the context ends up with a disabled
9670 * event. What we want here is the event in the initial
9671 * startup state, ready to be added into the new context.
9672 */
9673 perf_event__state_init(group_leader);
9674 perf_install_in_context(ctx, group_leader, group_leader->cpu);
9675 get_ctx(ctx);
b04243ef 9676
f55fc2a5
PZ
9677 /*
9678 * Now that all events are installed in @ctx, nothing
9679 * references @gctx anymore, so drop the last reference we have
9680 * on it.
9681 */
9682 put_ctx(gctx);
bed5b25a
AS
9683 }
9684
f73e22ab
PZ
9685 /*
9686 * Precalculate sample_data sizes; do while holding ctx::mutex such
9687 * that we're serialized against further additions and before
9688 * perf_install_in_context() which is the point the event is active and
9689 * can use these values.
9690 */
9691 perf_event__header_size(event);
9692 perf_event__id_header_size(event);
9693
78cd2c74
PZ
9694 event->owner = current;
9695
e2d37cd2 9696 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 9697 perf_unpin_context(ctx);
f63a8daa 9698
f55fc2a5 9699 if (move_group)
f63a8daa 9700 mutex_unlock(&gctx->mutex);
d859e29f 9701 mutex_unlock(&ctx->mutex);
9b51f66d 9702
79c9ce57
PZ
9703 if (task) {
9704 mutex_unlock(&task->signal->cred_guard_mutex);
9705 put_task_struct(task);
9706 }
9707
fbfc623f
YZ
9708 put_online_cpus();
9709
cdd6c482
IM
9710 mutex_lock(&current->perf_event_mutex);
9711 list_add_tail(&event->owner_entry, &current->perf_event_list);
9712 mutex_unlock(&current->perf_event_mutex);
082ff5a2 9713
8a49542c
PZ
9714 /*
9715 * Drop the reference on the group_event after placing the
9716 * new event on the sibling_list. This ensures destruction
9717 * of the group leader will find the pointer to itself in
9718 * perf_group_detach().
9719 */
2903ff01 9720 fdput(group);
ea635c64
AV
9721 fd_install(event_fd, event_file);
9722 return event_fd;
0793a61d 9723
f55fc2a5
PZ
9724err_locked:
9725 if (move_group)
9726 mutex_unlock(&gctx->mutex);
9727 mutex_unlock(&ctx->mutex);
9728/* err_file: */
9729 fput(event_file);
c3f00c70 9730err_context:
fe4b04fa 9731 perf_unpin_context(ctx);
ea635c64 9732 put_ctx(ctx);
c6be5a5c 9733err_alloc:
13005627
PZ
9734 /*
9735 * If event_file is set, the fput() above will have called ->release()
9736 * and that will take care of freeing the event.
9737 */
9738 if (!event_file)
9739 free_event(event);
79c9ce57
PZ
9740err_cred:
9741 if (task)
9742 mutex_unlock(&task->signal->cred_guard_mutex);
1f4ee503 9743err_cpus:
fbfc623f 9744 put_online_cpus();
1f4ee503 9745err_task:
e7d0bc04
PZ
9746 if (task)
9747 put_task_struct(task);
89a1e187 9748err_group_fd:
2903ff01 9749 fdput(group);
ea635c64
AV
9750err_fd:
9751 put_unused_fd(event_fd);
dc86cabe 9752 return err;
0793a61d
TG
9753}
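/*
 * Userspace sketch, close to the perf_event_open(2) man page example:
 * count instructions retired in user space by the calling thread. There is
 * no glibc wrapper, so the raw syscall is used; error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type	    = PERF_TYPE_HARDWARE;
	attr.size	    = sizeof(attr);
	attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled	    = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv	    = 1;

	/* pid == 0: this thread, cpu == -1: any CPU, no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	printf("measuring this printf\n");
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}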
9754
fb0459d7
AV
9755/**
9756 * perf_event_create_kernel_counter
9757 *
9758 * @attr: attributes of the counter to create
9759 * @cpu: cpu on which the counter is bound
38a81da2 9760 * @task: task to profile (NULL for percpu)
fb0459d7
AV
9761 */
9762struct perf_event *
9763perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 9764 struct task_struct *task,
4dc0da86
AK
9765 perf_overflow_handler_t overflow_handler,
9766 void *context)
fb0459d7 9767{
fb0459d7 9768 struct perf_event_context *ctx;
c3f00c70 9769 struct perf_event *event;
fb0459d7 9770 int err;
d859e29f 9771
fb0459d7
AV
9772 /*
9773 * Get the target context (task or percpu):
9774 */
d859e29f 9775
4dc0da86 9776 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 9777 overflow_handler, context, -1);
c3f00c70
PZ
9778 if (IS_ERR(event)) {
9779 err = PTR_ERR(event);
9780 goto err;
9781 }
d859e29f 9782
f8697762 9783 /* Mark owner so we could distinguish it from user events. */
63b6da39 9784 event->owner = TASK_TOMBSTONE;
f8697762 9785
4af57ef2 9786 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
9787 if (IS_ERR(ctx)) {
9788 err = PTR_ERR(ctx);
c3f00c70 9789 goto err_free;
d859e29f 9790 }
fb0459d7 9791
fb0459d7
AV
9792 WARN_ON_ONCE(ctx->parent_ctx);
9793 mutex_lock(&ctx->mutex);
84c4e620
PZ
9794 if (ctx->task == TASK_TOMBSTONE) {
9795 err = -ESRCH;
9796 goto err_unlock;
9797 }
9798
bed5b25a 9799 if (!exclusive_event_installable(event, ctx)) {
bed5b25a 9800 err = -EBUSY;
84c4e620 9801 goto err_unlock;
bed5b25a
AS
9802 }
9803
fb0459d7 9804 perf_install_in_context(ctx, event, cpu);
fe4b04fa 9805 perf_unpin_context(ctx);
fb0459d7
AV
9806 mutex_unlock(&ctx->mutex);
9807
fb0459d7
AV
9808 return event;
9809
84c4e620
PZ
9810err_unlock:
9811 mutex_unlock(&ctx->mutex);
9812 perf_unpin_context(ctx);
9813 put_ctx(ctx);
c3f00c70
PZ
9814err_free:
9815 free_event(event);
9816err:
c6567f64 9817 return ERR_PTR(err);
9b51f66d 9818}
fb0459d7 9819EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
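/*
 * In-kernel sketch (hypothetical module code, error handling trimmed):
 * create a per-CPU cycles counter with an overflow callback, the general
 * pattern used by in-tree callers such as the hardlockup watchdog.
 */
#include <linux/printk.h>
#include <linux/perf_event.h>

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* called from PMU interrupt context every sample_period cycles */
	pr_info_ratelimited("cycle counter overflow on CPU%d\n", event->cpu);
}

static struct perf_event *start_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 10000000,
	};

	/* task == NULL: per-CPU counter; the context pointer is unused here */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow, NULL);
}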
9b51f66d 9820
0cda4c02
YZ
9821void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
9822{
9823 struct perf_event_context *src_ctx;
9824 struct perf_event_context *dst_ctx;
9825 struct perf_event *event, *tmp;
9826 LIST_HEAD(events);
9827
9828 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
9829 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
9830
f63a8daa
PZ
9831 /*
9832 * See perf_event_ctx_lock() for comments on the details
9833 * of swizzling perf_event::ctx.
9834 */
9835 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
9836 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
9837 event_entry) {
45a0e07a 9838 perf_remove_from_context(event, 0);
9a545de0 9839 unaccount_event_cpu(event, src_cpu);
0cda4c02 9840 put_ctx(src_ctx);
9886167d 9841 list_add(&event->migrate_entry, &events);
0cda4c02 9842 }
0cda4c02 9843
8f95b435
PZI
9844 /*
9845 * Wait for the events to quiesce before re-instating them.
9846 */
0cda4c02
YZ
9847 synchronize_rcu();
9848
8f95b435
PZI
9849 /*
9850 * Re-instate events in 2 passes.
9851 *
9852 * Skip over group leaders and only install siblings on this first
9853 * pass, siblings will not get enabled without a leader, however a
9854 * leader will enable its siblings, even if those are still on the old
9855 * context.
9856 */
9857 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
9858 if (event->group_leader == event)
9859 continue;
9860
9861 list_del(&event->migrate_entry);
9862 if (event->state >= PERF_EVENT_STATE_OFF)
9863 event->state = PERF_EVENT_STATE_INACTIVE;
9864 account_event_cpu(event, dst_cpu);
9865 perf_install_in_context(dst_ctx, event, dst_cpu);
9866 get_ctx(dst_ctx);
9867 }
9868
9869 /*
9870 * Once all the siblings are setup properly, install the group leaders
9871 * to make it go.
9872 */
9886167d
PZ
9873 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
9874 list_del(&event->migrate_entry);
0cda4c02
YZ
9875 if (event->state >= PERF_EVENT_STATE_OFF)
9876 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 9877 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
9878 perf_install_in_context(dst_ctx, event, dst_cpu);
9879 get_ctx(dst_ctx);
9880 }
9881 mutex_unlock(&dst_ctx->mutex);
f63a8daa 9882 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
9883}
9884EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
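/*
 * Hypothetical driver sketch: an uncore-style PMU that counts on one CPU
 * per package can call perf_pmu_migrate_context() from its CPU-offline
 * callback to move all events to a surviving CPU. All "my_*" names are
 * illustrative, not existing kernel symbols.
 */
static unsigned int my_collecting_cpu;
static struct pmu my_pmu;

static int my_pmu_offline_cpu(unsigned int cpu)
{
	unsigned int target;

	if (cpu != my_collecting_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;		/* no online CPU left to migrate to */

	perf_pmu_migrate_context(&my_pmu, cpu, target);
	my_collecting_cpu = target;
	return 0;
}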
9885
cdd6c482 9886static void sync_child_event(struct perf_event *child_event,
38b200d6 9887 struct task_struct *child)
d859e29f 9888{
cdd6c482 9889 struct perf_event *parent_event = child_event->parent;
8bc20959 9890 u64 child_val;
d859e29f 9891
cdd6c482
IM
9892 if (child_event->attr.inherit_stat)
9893 perf_event_read_event(child_event, child);
38b200d6 9894
b5e58793 9895 child_val = perf_event_count(child_event);
d859e29f
PM
9896
9897 /*
9898 * Add back the child's count to the parent's count:
9899 */
a6e6dea6 9900 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
9901 atomic64_add(child_event->total_time_enabled,
9902 &parent_event->child_total_time_enabled);
9903 atomic64_add(child_event->total_time_running,
9904 &parent_event->child_total_time_running);
d859e29f
PM
9905}
9906
9b51f66d 9907static void
8ba289b8
PZ
9908perf_event_exit_event(struct perf_event *child_event,
9909 struct perf_event_context *child_ctx,
9910 struct task_struct *child)
9b51f66d 9911{
8ba289b8
PZ
9912 struct perf_event *parent_event = child_event->parent;
9913
1903d50c
PZ
9914 /*
9915 * Do not destroy the 'original' grouping; because of the context
9916 * switch optimization the original events could've ended up in a
9917 * random child task.
9918 *
9919 * If we were to destroy the original group, all group related
9920 * operations would cease to function properly after this random
9921 * child dies.
9922 *
9923 * Do destroy all inherited groups, we don't care about those
9924 * and being thorough is better.
9925 */
32132a3d
PZ
9926 raw_spin_lock_irq(&child_ctx->lock);
9927 WARN_ON_ONCE(child_ctx->is_active);
9928
8ba289b8 9929 if (parent_event)
32132a3d
PZ
9930 perf_group_detach(child_event);
9931 list_del_event(child_event, child_ctx);
a69b0ca4 9932 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
32132a3d 9933 raw_spin_unlock_irq(&child_ctx->lock);
0cc0c027 9934
9b51f66d 9935 /*
8ba289b8 9936 * Parent events are governed by their filedesc, retain them.
9b51f66d 9937 */
8ba289b8 9938 if (!parent_event) {
179033b3 9939 perf_event_wakeup(child_event);
8ba289b8 9940 return;
4bcf349a 9941 }
8ba289b8
PZ
9942 /*
9943 * Child events can be cleaned up.
9944 */
9945
9946 sync_child_event(child_event, child);
9947
9948 /*
9949 * Remove this event from the parent's list
9950 */
9951 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
9952 mutex_lock(&parent_event->child_mutex);
9953 list_del_init(&child_event->child_list);
9954 mutex_unlock(&parent_event->child_mutex);
9955
9956 /*
9957 * Kick perf_poll() for is_event_hup().
9958 */
9959 perf_event_wakeup(parent_event);
9960 free_event(child_event);
9961 put_event(parent_event);
9b51f66d
IM
9962}
9963
8dc85d54 9964static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 9965{
211de6eb 9966 struct perf_event_context *child_ctx, *clone_ctx = NULL;
63b6da39 9967 struct perf_event *child_event, *next;
63b6da39
PZ
9968
9969 WARN_ON_ONCE(child != current);
9b51f66d 9970
6a3351b6 9971 child_ctx = perf_pin_task_context(child, ctxn);
63b6da39 9972 if (!child_ctx)
9b51f66d
IM
9973 return;
9974
ad3a37de 9975 /*
6a3351b6
PZ
9976 * In order to reduce the amount of trickiness in ctx tear-down, we hold
9977 * ctx::mutex over the entire thing. This serializes against almost
9978 * everything that wants to access the ctx.
9979 *
9980 * The exception is sys_perf_event_open() /
9981 * perf_event_create_kernel_counter() which does find_get_context()
9982 * without ctx::mutex (it cannot because of the move_group double mutex
9983 * lock thing). See the comments in perf_install_in_context().
ad3a37de 9984 */
6a3351b6 9985 mutex_lock(&child_ctx->mutex);
c93f7669
PM
9986
9987 /*
6a3351b6
PZ
9988 * In a single ctx::lock section, de-schedule the events and detach the
9989 * context from the task such that we cannot ever get it scheduled back
9990 * in.
c93f7669 9991 */
6a3351b6 9992 raw_spin_lock_irq(&child_ctx->lock);
63b6da39 9993 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
4a1c0f26 9994
71a851b4 9995 /*
63b6da39
PZ
9996 * Now that the context is inactive, destroy the task <-> ctx relation
9997 * and mark the context dead.
71a851b4 9998 */
63b6da39
PZ
9999 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
10000 put_ctx(child_ctx); /* cannot be last */
10001 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
10002 put_task_struct(current); /* cannot be last */
4a1c0f26 10003
211de6eb 10004 clone_ctx = unclone_ctx(child_ctx);
6a3351b6 10005 raw_spin_unlock_irq(&child_ctx->lock);
9f498cc5 10006
211de6eb
PZ
10007 if (clone_ctx)
10008 put_ctx(clone_ctx);
4a1c0f26 10009
9f498cc5 10010 /*
cdd6c482
IM
10011 * Report the task dead after unscheduling the events so that we
10012 * won't get any samples after PERF_RECORD_EXIT. We can however still
10013 * get a few PERF_RECORD_READ events.
9f498cc5 10014 */
cdd6c482 10015 perf_event_task(child, child_ctx, 0);
a63eaf34 10016
ebf905fc 10017 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8ba289b8 10018 perf_event_exit_event(child_event, child_ctx, child);
8bc20959 10019
a63eaf34
PM
10020 mutex_unlock(&child_ctx->mutex);
10021
10022 put_ctx(child_ctx);
9b51f66d
IM
10023}
10024
8dc85d54
PZ
10025/*
10026 * When a child task exits, feed back event values to parent events.
79c9ce57
PZ
10027 *
10028 * Can be called with cred_guard_mutex held when called from
10029 * install_exec_creds().
8dc85d54
PZ
10030 */
10031void perf_event_exit_task(struct task_struct *child)
10032{
8882135b 10033 struct perf_event *event, *tmp;
8dc85d54
PZ
10034 int ctxn;
10035
8882135b
PZ
10036 mutex_lock(&child->perf_event_mutex);
10037 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
10038 owner_entry) {
10039 list_del_init(&event->owner_entry);
10040
10041 /*
10042 * Ensure the list deletion is visible before we clear
10043 * the owner, closes a race against perf_release() where
10044 * we need to serialize on the owner->perf_event_mutex.
10045 */
f47c02c0 10046 smp_store_release(&event->owner, NULL);
8882135b
PZ
10047 }
10048 mutex_unlock(&child->perf_event_mutex);
10049
8dc85d54
PZ
10050 for_each_task_context_nr(ctxn)
10051 perf_event_exit_task_context(child, ctxn);
4e93ad60
JO
10052
10053 /*
10054 * The perf_event_exit_task_context calls perf_event_task
10055 * with child's task_ctx, which generates EXIT events for
10056 * child contexts and sets child->perf_event_ctxp[] to NULL.
10057 * At this point we need to send EXIT events to cpu contexts.
10058 */
10059 perf_event_task(child, NULL, 0);
8dc85d54
PZ
10060}
10061
889ff015
FW
10062static void perf_free_event(struct perf_event *event,
10063 struct perf_event_context *ctx)
10064{
10065 struct perf_event *parent = event->parent;
10066
10067 if (WARN_ON_ONCE(!parent))
10068 return;
10069
10070 mutex_lock(&parent->child_mutex);
10071 list_del_init(&event->child_list);
10072 mutex_unlock(&parent->child_mutex);
10073
a6fa941d 10074 put_event(parent);
889ff015 10075
652884fe 10076 raw_spin_lock_irq(&ctx->lock);
8a49542c 10077 perf_group_detach(event);
889ff015 10078 list_del_event(event, ctx);
652884fe 10079 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
10080 free_event(event);
10081}
10082
bbbee908 10083/*
652884fe 10084 * Free an unexposed, unused context as created by inheritance by
8dc85d54 10085 * perf_event_init_task below, used by fork() in case of fail.
652884fe
PZ
10086 *
10087 * Not all locks are strictly required, but take them anyway to be nice and
10088 * help out with the lockdep assertions.
bbbee908 10089 */
cdd6c482 10090void perf_event_free_task(struct task_struct *task)
bbbee908 10091{
8dc85d54 10092 struct perf_event_context *ctx;
cdd6c482 10093 struct perf_event *event, *tmp;
8dc85d54 10094 int ctxn;
bbbee908 10095
8dc85d54
PZ
10096 for_each_task_context_nr(ctxn) {
10097 ctx = task->perf_event_ctxp[ctxn];
10098 if (!ctx)
10099 continue;
bbbee908 10100
8dc85d54 10101 mutex_lock(&ctx->mutex);
bbbee908 10102again:
8dc85d54
PZ
10103 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
10104 group_entry)
10105 perf_free_event(event, ctx);
bbbee908 10106
8dc85d54
PZ
10107 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
10108 group_entry)
10109 perf_free_event(event, ctx);
bbbee908 10110
8dc85d54
PZ
10111 if (!list_empty(&ctx->pinned_groups) ||
10112 !list_empty(&ctx->flexible_groups))
10113 goto again;
bbbee908 10114
8dc85d54 10115 mutex_unlock(&ctx->mutex);
bbbee908 10116
8dc85d54
PZ
10117 put_ctx(ctx);
10118 }
889ff015
FW
10119}
10120
4e231c79
PZ
10121void perf_event_delayed_put(struct task_struct *task)
10122{
10123 int ctxn;
10124
10125 for_each_task_context_nr(ctxn)
10126 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10127}
10128
e03e7ee3 10129struct file *perf_event_get(unsigned int fd)
ffe8690c 10130{
e03e7ee3 10131 struct file *file;
ffe8690c 10132
e03e7ee3
AS
10133 file = fget_raw(fd);
10134 if (!file)
10135 return ERR_PTR(-EBADF);
ffe8690c 10136
e03e7ee3
AS
10137 if (file->f_op != &perf_fops) {
10138 fput(file);
10139 return ERR_PTR(-EBADF);
10140 }
ffe8690c 10141
e03e7ee3 10142 return file;
ffe8690c
KX
10143}
10144
10145const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10146{
10147 if (!event)
10148 return ERR_PTR(-EINVAL);
10149
10150 return &event->attr;
10151}
10152
97dee4f3
PZ
10153/*
10154 * inherit an event from parent task to child task:
10155 */
10156static struct perf_event *
10157inherit_event(struct perf_event *parent_event,
10158 struct task_struct *parent,
10159 struct perf_event_context *parent_ctx,
10160 struct task_struct *child,
10161 struct perf_event *group_leader,
10162 struct perf_event_context *child_ctx)
10163{
1929def9 10164 enum perf_event_active_state parent_state = parent_event->state;
97dee4f3 10165 struct perf_event *child_event;
cee010ec 10166 unsigned long flags;
97dee4f3
PZ
10167
10168 /*
10169 * Instead of creating recursive hierarchies of events,
10170 * we link inherited events back to the original parent,
10171 * which has a filp for sure, which we use as the reference
10172 * count:
10173 */
10174 if (parent_event->parent)
10175 parent_event = parent_event->parent;
10176
10177 child_event = perf_event_alloc(&parent_event->attr,
10178 parent_event->cpu,
d580ff86 10179 child,
97dee4f3 10180 group_leader, parent_event,
79dff51e 10181 NULL, NULL, -1);
97dee4f3
PZ
10182 if (IS_ERR(child_event))
10183 return child_event;
a6fa941d 10184
c6e5b732
PZ
10185 /*
10186 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10187 * must be under the same lock in order to serialize against
10188 * perf_event_release_kernel(), such that either we must observe
10189 * is_orphaned_event() or they will observe us on the child_list.
10190 */
10191 mutex_lock(&parent_event->child_mutex);
fadfe7be
JO
10192 if (is_orphaned_event(parent_event) ||
10193 !atomic_long_inc_not_zero(&parent_event->refcount)) {
c6e5b732 10194 mutex_unlock(&parent_event->child_mutex);
a6fa941d
AV
10195 free_event(child_event);
10196 return NULL;
10197 }
10198
97dee4f3
PZ
10199 get_ctx(child_ctx);
10200
10201 /*
10202 * Make the child state follow the state of the parent event,
10203 * not its attr.disabled bit. We hold the parent's mutex,
10204 * so we won't race with perf_event_{en, dis}able_family.
10205 */
1929def9 10206 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
10207 child_event->state = PERF_EVENT_STATE_INACTIVE;
10208 else
10209 child_event->state = PERF_EVENT_STATE_OFF;
10210
10211 if (parent_event->attr.freq) {
10212 u64 sample_period = parent_event->hw.sample_period;
10213 struct hw_perf_event *hwc = &child_event->hw;
10214
10215 hwc->sample_period = sample_period;
10216 hwc->last_period = sample_period;
10217
10218 local64_set(&hwc->period_left, sample_period);
10219 }
10220
10221 child_event->ctx = child_ctx;
10222 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
10223 child_event->overflow_handler_context
10224 = parent_event->overflow_handler_context;
97dee4f3 10225
614b6780
TG
10226 /*
10227 * Precalculate sample_data sizes
10228 */
10229 perf_event__header_size(child_event);
6844c09d 10230 perf_event__id_header_size(child_event);
614b6780 10231
97dee4f3
PZ
10232 /*
10233 * Link it up in the child's context:
10234 */
cee010ec 10235 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 10236 add_event_to_ctx(child_event, child_ctx);
cee010ec 10237 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 10238
97dee4f3
PZ
10239 /*
10240 * Link this into the parent event's child list
10241 */
97dee4f3
PZ
10242 list_add_tail(&child_event->child_list, &parent_event->child_list);
10243 mutex_unlock(&parent_event->child_mutex);
10244
10245 return child_event;
10246}
10247
10248static int inherit_group(struct perf_event *parent_event,
10249 struct task_struct *parent,
10250 struct perf_event_context *parent_ctx,
10251 struct task_struct *child,
10252 struct perf_event_context *child_ctx)
10253{
10254 struct perf_event *leader;
10255 struct perf_event *sub;
10256 struct perf_event *child_ctr;
10257
10258 leader = inherit_event(parent_event, parent, parent_ctx,
10259 child, NULL, child_ctx);
10260 if (IS_ERR(leader))
10261 return PTR_ERR(leader);
10262 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10263 child_ctr = inherit_event(sub, parent, parent_ctx,
10264 child, leader, child_ctx);
10265 if (IS_ERR(child_ctr))
10266 return PTR_ERR(child_ctr);
10267 }
10268 return 0;
889ff015
FW
10269}
10270
10271static int
10272inherit_task_group(struct perf_event *event, struct task_struct *parent,
10273 struct perf_event_context *parent_ctx,
8dc85d54 10274 struct task_struct *child, int ctxn,
889ff015
FW
10275 int *inherited_all)
10276{
10277 int ret;
8dc85d54 10278 struct perf_event_context *child_ctx;
889ff015
FW
10279
10280 if (!event->attr.inherit) {
10281 *inherited_all = 0;
10282 return 0;
bbbee908
PZ
10283 }
10284
fe4b04fa 10285 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
10286 if (!child_ctx) {
10287 /*
10288 * This is executed from the parent task context, so
10289 * inherit events that have been marked for cloning.
10290 * First allocate and initialize a context for the
10291 * child.
10292 */
bbbee908 10293
734df5ab 10294 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
10295 if (!child_ctx)
10296 return -ENOMEM;
bbbee908 10297
8dc85d54 10298 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
10299 }
10300
10301 ret = inherit_group(event, parent, parent_ctx,
10302 child, child_ctx);
10303
10304 if (ret)
10305 *inherited_all = 0;
10306
10307 return ret;
bbbee908
PZ
10308}
10309
9b51f66d 10310/*
cdd6c482 10311 * Initialize the perf_event context in task_struct
9b51f66d 10312 */
985c8dcb 10313static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 10314{
889ff015 10315 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
10316 struct perf_event_context *cloned_ctx;
10317 struct perf_event *event;
9b51f66d 10318 struct task_struct *parent = current;
564c2b21 10319 int inherited_all = 1;
dddd3379 10320 unsigned long flags;
6ab423e0 10321 int ret = 0;
9b51f66d 10322
8dc85d54 10323 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
10324 return 0;
10325
ad3a37de 10326 /*
25346b93
PM
10327 * If the parent's context is a clone, pin it so it won't get
10328 * swapped under us.
ad3a37de 10329 */
8dc85d54 10330 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
10331 if (!parent_ctx)
10332 return 0;
25346b93 10333
ad3a37de
PM
10334 /*
10335 * No need to check if parent_ctx != NULL here; since we saw
10336 * it non-NULL earlier, the only reason for it to become NULL
10337 * is if we exit, and since we're currently in the middle of
10338 * a fork we can't be exiting at the same time.
10339 */
ad3a37de 10340
9b51f66d
IM
10341 /*
10342 * Lock the parent list. No need to lock the child - not PID
10343 * hashed yet and not running, so nobody can access it.
10344 */
d859e29f 10345 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
10346
10347 /*
10348 * We don't have to disable NMIs - we are only looking at
10349 * the list, not manipulating it:
10350 */
889ff015 10351 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
10352 ret = inherit_task_group(event, parent, parent_ctx,
10353 child, ctxn, &inherited_all);
889ff015
FW
10354 if (ret)
10355 break;
10356 }
b93f7978 10357
dddd3379
TG
10358 /*
10359 * We can't hold ctx->lock when iterating the ->flexible_groups list due
10360 * to allocations, but we need to prevent rotation because
10361 * rotate_ctx() will change the list from interrupt context.
10362 */
10363 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10364 parent_ctx->rotate_disable = 1;
10365 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10366
889ff015 10367 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
10368 ret = inherit_task_group(event, parent, parent_ctx,
10369 child, ctxn, &inherited_all);
889ff015 10370 if (ret)
9b51f66d 10371 break;
564c2b21
PM
10372 }
10373
dddd3379
TG
10374 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10375 parent_ctx->rotate_disable = 0;
dddd3379 10376
8dc85d54 10377 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 10378
05cbaa28 10379 if (child_ctx && inherited_all) {
564c2b21
PM
10380 /*
10381 * Mark the child context as a clone of the parent
10382 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
10383 *
10384 * Note that if the parent is a clone, the holding of
10385 * parent_ctx->lock avoids it from being uncloned.
564c2b21 10386 */
c5ed5145 10387 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
10388 if (cloned_ctx) {
10389 child_ctx->parent_ctx = cloned_ctx;
25346b93 10390 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
10391 } else {
10392 child_ctx->parent_ctx = parent_ctx;
10393 child_ctx->parent_gen = parent_ctx->generation;
10394 }
10395 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
10396 }
10397
c5ed5145 10398 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 10399 mutex_unlock(&parent_ctx->mutex);
6ab423e0 10400
25346b93 10401 perf_unpin_context(parent_ctx);
fe4b04fa 10402 put_ctx(parent_ctx);
ad3a37de 10403
6ab423e0 10404 return ret;
9b51f66d
IM
10405}
10406
8dc85d54
PZ
10407/*
10408 * Initialize the perf_event context in task_struct
10409 */
10410int perf_event_init_task(struct task_struct *child)
10411{
10412 int ctxn, ret;
10413
8550d7cb
ON
10414 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
10415 mutex_init(&child->perf_event_mutex);
10416 INIT_LIST_HEAD(&child->perf_event_list);
10417
8dc85d54
PZ
10418 for_each_task_context_nr(ctxn) {
10419 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
10420 if (ret) {
10421 perf_event_free_task(child);
8dc85d54 10422 return ret;
6c72e350 10423 }
8dc85d54
PZ
10424 }
10425
10426 return 0;
10427}
10428
220b140b
PM
10429static void __init perf_event_init_all_cpus(void)
10430{
b28ab83c 10431 struct swevent_htable *swhash;
220b140b 10432 int cpu;
220b140b
PM
10433
10434 for_each_possible_cpu(cpu) {
b28ab83c
PZ
10435 swhash = &per_cpu(swevent_htable, cpu);
10436 mutex_init(&swhash->hlist_mutex);
2fde4f94 10437 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
f2fb6bef
KL
10438
10439 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
10440 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
e48c1788
PZ
10441
10442 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
220b140b
PM
10443 }
10444}
10445
00e16c3d 10446int perf_event_init_cpu(unsigned int cpu)
0793a61d 10447{
108b02cf 10448 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 10449
b28ab83c 10450 mutex_lock(&swhash->hlist_mutex);
059fcd8c 10451 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
76e1d904
FW
10452 struct swevent_hlist *hlist;
10453
b28ab83c
PZ
10454 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
10455 WARN_ON(!hlist);
10456 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 10457 }
b28ab83c 10458 mutex_unlock(&swhash->hlist_mutex);
00e16c3d 10459 return 0;
0793a61d
TG
10460}
10461
2965faa5 10462#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
108b02cf 10463static void __perf_event_exit_context(void *__info)
0793a61d 10464{
108b02cf 10465 struct perf_event_context *ctx = __info;
fae3fde6
PZ
10466 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
10467 struct perf_event *event;
0793a61d 10468
fae3fde6
PZ
10469 raw_spin_lock(&ctx->lock);
10470 list_for_each_entry(event, &ctx->event_list, event_entry)
45a0e07a 10471 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
fae3fde6 10472 raw_spin_unlock(&ctx->lock);
0793a61d 10473}
108b02cf
PZ
10474
10475static void perf_event_exit_cpu_context(int cpu)
10476{
10477 struct perf_event_context *ctx;
10478 struct pmu *pmu;
10479 int idx;
10480
10481 idx = srcu_read_lock(&pmus_srcu);
10482 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 10483 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
10484
10485 mutex_lock(&ctx->mutex);
10486 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
10487 mutex_unlock(&ctx->mutex);
10488 }
10489 srcu_read_unlock(&pmus_srcu, idx);
108b02cf 10490}
00e16c3d
TG
10491#else
10492
10493static void perf_event_exit_cpu_context(int cpu) { }
10494
10495#endif
108b02cf 10496
00e16c3d 10497int perf_event_exit_cpu(unsigned int cpu)
0793a61d 10498{
e3703f8c 10499 perf_event_exit_cpu_context(cpu);
00e16c3d 10500 return 0;
0793a61d 10501}
0793a61d 10502
c277443c
PZ
10503static int
10504perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
10505{
10506 int cpu;
10507
10508 for_each_online_cpu(cpu)
10509 perf_event_exit_cpu(cpu);
10510
10511 return NOTIFY_OK;
10512}
10513
10514/*
10515 * Run the perf reboot notifier at the very last possible moment so that
10516 * the generic watchdog code runs as long as possible.
10517 */
10518static struct notifier_block perf_reboot_notifier = {
10519 .notifier_call = perf_reboot,
10520 .priority = INT_MIN,
10521};
10522
cdd6c482 10523void __init perf_event_init(void)
0793a61d 10524{
3c502e7a
JW
10525 int ret;
10526
2e80a82a
PZ
10527 idr_init(&pmu_idr);
10528
220b140b 10529 perf_event_init_all_cpus();
b0a873eb 10530 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
10531 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
10532 perf_pmu_register(&perf_cpu_clock, NULL, -1);
10533 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb 10534 perf_tp_register();
00e16c3d 10535 perf_event_init_cpu(smp_processor_id());
c277443c 10536 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
10537
10538 ret = init_hw_breakpoint();
10539 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520 10540
b01c3a00
JO
10541 /*
10542 * Build time assertion that we keep the data_head at the intended
10543 * location. IOW, validation we got the __reserved[] size right.
10544 */
10545 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
10546 != 1024);
0793a61d 10547}
abe43400 10548
fd979c01
CS
10549ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
10550 char *page)
10551{
10552 struct perf_pmu_events_attr *pmu_attr =
10553 container_of(attr, struct perf_pmu_events_attr, attr);
10554
10555 if (pmu_attr->event_str)
10556 return sprintf(page, "%s\n", pmu_attr->event_str);
10557
10558 return 0;
10559}
675965b0 10560EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
fd979c01 10561
abe43400
PZ
10562static int __init perf_event_sysfs_init(void)
10563{
10564 struct pmu *pmu;
10565 int ret;
10566
10567 mutex_lock(&pmus_lock);
10568
10569 ret = bus_register(&pmu_bus);
10570 if (ret)
10571 goto unlock;
10572
10573 list_for_each_entry(pmu, &pmus, entry) {
10574 if (!pmu->name || pmu->type < 0)
10575 continue;
10576
10577 ret = pmu_dev_alloc(pmu);
10578 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
10579 }
10580 pmu_bus_running = 1;
10581 ret = 0;
10582
10583unlock:
10584 mutex_unlock(&pmus_lock);
10585
10586 return ret;
10587}
10588device_initcall(perf_event_sysfs_init);
e5d1367f
SE
10589
10590#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
10591static struct cgroup_subsys_state *
10592perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
10593{
10594 struct perf_cgroup *jc;
e5d1367f 10595
1b15d055 10596 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
10597 if (!jc)
10598 return ERR_PTR(-ENOMEM);
10599
e5d1367f
SE
10600 jc->info = alloc_percpu(struct perf_cgroup_info);
10601 if (!jc->info) {
10602 kfree(jc);
10603 return ERR_PTR(-ENOMEM);
10604 }
10605
e5d1367f
SE
10606 return &jc->css;
10607}
10608
eb95419b 10609static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 10610{
eb95419b
TH
10611 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
10612
e5d1367f
SE
10613 free_percpu(jc->info);
10614 kfree(jc);
10615}
10616
10617static int __perf_cgroup_move(void *info)
10618{
10619 struct task_struct *task = info;
ddaaf4e2 10620 rcu_read_lock();
e5d1367f 10621 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
ddaaf4e2 10622 rcu_read_unlock();
e5d1367f
SE
10623 return 0;
10624}
10625
1f7dd3e5 10626static void perf_cgroup_attach(struct cgroup_taskset *tset)
e5d1367f 10627{
bb9d97b6 10628 struct task_struct *task;
1f7dd3e5 10629 struct cgroup_subsys_state *css;
bb9d97b6 10630
1f7dd3e5 10631 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 10632 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
10633}
10634
073219e9 10635struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
10636 .css_alloc = perf_cgroup_css_alloc,
10637 .css_free = perf_cgroup_css_free,
bb9d97b6 10638 .attach = perf_cgroup_attach,
e5d1367f
SE
10639};
10640#endif /* CONFIG_CGROUP_PERF */