kernel/events/core.c
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on the right CPU with IRQs disabled, we can
		 * test if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func while the task @p is running on its CPU. If that
 * happens to be the current CPU, the function is simply called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on a given CPU
 * @cpu:	the CPU on which to run the function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}
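
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * pairs these helpers with a small callback. The names my_func/my_data
 * below are hypothetical:
 *
 *	struct my_data { u64 value; };
 *
 *	static int my_func(void *info)
 *	{
 *		struct my_data *d = info;
 *
 *		d->value = 42;	// runs on the target task's CPU, IRQs off
 *		return 0;
 *	}
 *
 *	err = task_function_call(p, my_func, &d);
 *
 * task_function_call() keeps retrying while the task migrates (-EAGAIN)
 * and returns -ESRCH once the task is no longer running.
 */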
145
fae3fde6
PZ
146static inline struct perf_cpu_context *
147__get_cpu_context(struct perf_event_context *ctx)
148{
149 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
150}
151
152static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
153 struct perf_event_context *ctx)
0017960f 154{
fae3fde6
PZ
155 raw_spin_lock(&cpuctx->ctx.lock);
156 if (ctx)
157 raw_spin_lock(&ctx->lock);
158}
159
160static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
161 struct perf_event_context *ctx)
162{
163 if (ctx)
164 raw_spin_unlock(&ctx->lock);
165 raw_spin_unlock(&cpuctx->ctx.lock);
166}
167
63b6da39
PZ
168#define TASK_TOMBSTONE ((void *)-1L)
169
170static bool is_kernel_event(struct perf_event *event)
171{
f47c02c0 172 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
63b6da39
PZ
173}
174
39a43640
PZ
/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 * - removing the last event from a task ctx; this is relatively
 *   straightforward and is done in __perf_remove_from_context.
 *
 * - adding the first event to a task ctx; this is tricky because we cannot
 *   rely on ctx->is_active and therefore cannot use event_function_call().
 *   See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */
193
fae3fde6
PZ
194typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
195 struct perf_event_context *, void *);
196
197struct event_function_struct {
198 struct perf_event *event;
199 event_f func;
200 void *data;
201};
202
203static int event_function(void *info)
204{
205 struct event_function_struct *efs = info;
206 struct perf_event *event = efs->event;
0017960f 207 struct perf_event_context *ctx = event->ctx;
fae3fde6
PZ
208 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
209 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63b6da39 210 int ret = 0;
fae3fde6 211
16444645 212 lockdep_assert_irqs_disabled();
fae3fde6 213
63b6da39 214 perf_ctx_lock(cpuctx, task_ctx);
fae3fde6
PZ
215 /*
216 * Since we do the IPI call without holding ctx->lock things can have
217 * changed, double check we hit the task we set out to hit.
fae3fde6
PZ
218 */
219 if (ctx->task) {
63b6da39 220 if (ctx->task != current) {
0da4cf3e 221 ret = -ESRCH;
63b6da39
PZ
222 goto unlock;
223 }
fae3fde6 224
fae3fde6
PZ
225 /*
226 * We only use event_function_call() on established contexts,
227 * and event_function() is only ever called when active (or
228 * rather, we'll have bailed in task_function_call() or the
229 * above ctx->task != current test), therefore we must have
230 * ctx->is_active here.
231 */
232 WARN_ON_ONCE(!ctx->is_active);
233 /*
234 * And since we have ctx->is_active, cpuctx->task_ctx must
235 * match.
236 */
63b6da39
PZ
237 WARN_ON_ONCE(task_ctx != ctx);
238 } else {
239 WARN_ON_ONCE(&cpuctx->ctx != ctx);
fae3fde6 240 }
63b6da39 241
fae3fde6 242 efs->func(event, cpuctx, ctx, efs->data);
63b6da39 243unlock:
fae3fde6
PZ
244 perf_ctx_unlock(cpuctx, task_ctx);
245
63b6da39 246 return ret;
fae3fde6
PZ
247}
248
fae3fde6 249static void event_function_call(struct perf_event *event, event_f func, void *data)
0017960f
PZ
250{
251 struct perf_event_context *ctx = event->ctx;
63b6da39 252 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
fae3fde6
PZ
253 struct event_function_struct efs = {
254 .event = event,
255 .func = func,
256 .data = data,
257 };
0017960f 258
	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}
0017960f
PZ
267
268 if (!task) {
fae3fde6 269 cpu_function_call(event->cpu, event_function, &efs);
0017960f
PZ
270 return;
271 }
272
63b6da39
PZ
273 if (task == TASK_TOMBSTONE)
274 return;
275
a096309b 276again:
fae3fde6 277 if (!task_function_call(task, event_function, &efs))
0017960f
PZ
278 return;
279
280 raw_spin_lock_irq(&ctx->lock);
63b6da39
PZ
281 /*
282 * Reload the task pointer, it might have been changed by
283 * a concurrent perf_event_context_sched_out().
284 */
285 task = ctx->task;
a096309b
PZ
286 if (task == TASK_TOMBSTONE) {
287 raw_spin_unlock_irq(&ctx->lock);
288 return;
0017960f 289 }
a096309b
PZ
290 if (ctx->is_active) {
291 raw_spin_unlock_irq(&ctx->lock);
292 goto again;
293 }
294 func(event, NULL, ctx, data);
0017960f
PZ
295 raw_spin_unlock_irq(&ctx->lock);
296}
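
/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * users of this helper pass an event_f callback that runs with the
 * relevant ctx locks held, either via IPI or directly under ctx->lock:
 *
 *	static void __my_event_op(struct perf_event *event,
 *				  struct perf_cpu_context *cpuctx,
 *				  struct perf_event_context *ctx,
 *				  void *info)
 *	{
 *		// operate on the event; ctx (and cpuctx) are locked here
 *	}
 *
 *	event_function_call(event, __my_event_op, NULL);
 *
 * This is the pattern the real callers (enable/disable/remove) follow
 * further down in this file.
 */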
297
cca20946
PZ
298/*
299 * Similar to event_function_call() + event_function(), but hard assumes IRQs
300 * are already disabled and we're on the right CPU.
301 */
302static void event_function_local(struct perf_event *event, event_f func, void *data)
303{
304 struct perf_event_context *ctx = event->ctx;
305 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
306 struct task_struct *task = READ_ONCE(ctx->task);
307 struct perf_event_context *task_ctx = NULL;
308
16444645 309 lockdep_assert_irqs_disabled();
cca20946
PZ
310
311 if (task) {
312 if (task == TASK_TOMBSTONE)
313 return;
314
315 task_ctx = ctx;
316 }
317
318 perf_ctx_lock(cpuctx, task_ctx);
319
320 task = ctx->task;
321 if (task == TASK_TOMBSTONE)
322 goto unlock;
323
324 if (task) {
325 /*
326 * We must be either inactive or active and the right task,
327 * otherwise we're screwed, since we cannot IPI to somewhere
328 * else.
329 */
330 if (ctx->is_active) {
331 if (WARN_ON_ONCE(task != current))
332 goto unlock;
333
334 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
335 goto unlock;
336 }
337 } else {
338 WARN_ON_ONCE(&cpuctx->ctx != ctx);
339 }
340
341 func(event, cpuctx, ctx, data);
342unlock:
343 perf_ctx_unlock(cpuctx, task_ctx);
344}
345
e5d1367f
SE
346#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
347 PERF_FLAG_FD_OUTPUT |\
a21b0b35
YD
348 PERF_FLAG_PID_CGROUP |\
349 PERF_FLAG_FD_CLOEXEC)
e5d1367f 350
bce38cd5
SE
351/*
352 * branch priv levels that need permission checks
353 */
354#define PERF_SAMPLE_BRANCH_PERM_PLM \
355 (PERF_SAMPLE_BRANCH_KERNEL |\
356 PERF_SAMPLE_BRANCH_HV)
357
0b3fcf17
SE
358enum event_type_t {
359 EVENT_FLEXIBLE = 0x1,
360 EVENT_PINNED = 0x2,
3cbaa590 361 EVENT_TIME = 0x4,
487f05e1
AS
362 /* see ctx_resched() for details */
363 EVENT_CPU = 0x8,
0b3fcf17
SE
364 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
365};
366
e5d1367f
SE
367/*
368 * perf_sched_events : >0 events exist
369 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
370 */
9107c89e
PZ
371
372static void perf_sched_delayed(struct work_struct *work);
373DEFINE_STATIC_KEY_FALSE(perf_sched_events);
374static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
375static DEFINE_MUTEX(perf_sched_mutex);
376static atomic_t perf_sched_count;
377
e5d1367f 378static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
ba532500 379static DEFINE_PER_CPU(int, perf_sched_cb_usages);
f2fb6bef 380static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
e5d1367f 381
cdd6c482
IM
382static atomic_t nr_mmap_events __read_mostly;
383static atomic_t nr_comm_events __read_mostly;
e4222673 384static atomic_t nr_namespaces_events __read_mostly;
cdd6c482 385static atomic_t nr_task_events __read_mostly;
948b26b6 386static atomic_t nr_freq_events __read_mostly;
45ac1403 387static atomic_t nr_switch_events __read_mostly;
9ee318a7 388
108b02cf
PZ
389static LIST_HEAD(pmus);
390static DEFINE_MUTEX(pmus_lock);
391static struct srcu_struct pmus_srcu;
a63fbed7 392static cpumask_var_t perf_online_mask;
108b02cf 393
0764771d 394/*
cdd6c482 395 * perf event paranoia level:
0fbdea19
IM
396 * -1 - not paranoid at all
397 * 0 - disallow raw tracepoint access for unpriv
cdd6c482 398 * 1 - disallow cpu events for unpriv
0fbdea19 399 * 2 - disallow kernel profiling for unpriv
0764771d 400 */
0161028b 401int sysctl_perf_event_paranoid __read_mostly = 2;
0764771d 402
20443384
FW
403/* Minimum for 512 kiB + 1 user control page */
404int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
df58ab24
PZ
405
406/*
cdd6c482 407 * max perf event sample rate
df58ab24 408 */
14c63f17
DH
409#define DEFAULT_MAX_SAMPLE_RATE 100000
410#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
411#define DEFAULT_CPU_TIME_MAX_PERCENT 25
412
413int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
414
415static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
416static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
417
d9494cb4
PZ
418static int perf_sample_allowed_ns __read_mostly =
419 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
14c63f17 420
18ab2cd3 421static void update_perf_cpu_limits(void)
14c63f17
DH
422{
423 u64 tmp = perf_sample_period_ns;
424
425 tmp *= sysctl_perf_cpu_time_max_percent;
91a612ee
PZ
426 tmp = div_u64(tmp, 100);
427 if (!tmp)
428 tmp = 1;
429
430 WRITE_ONCE(perf_sample_allowed_ns, tmp);
14c63f17 431}
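
/*
 * Worked example (illustrative only): with the defaults above,
 * perf_sample_period_ns = 10,000 ns (100,000 samples/sec) and
 * sysctl_perf_cpu_time_max_percent = 25, so
 * perf_sample_allowed_ns = 10,000 * 25 / 100 = 2,500 ns of sample
 * processing allowed per sample period.
 */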
163ec435 432
8d5bce0c 433static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
9e630205 434
163ec435
PZ
435int perf_proc_update_handler(struct ctl_table *table, int write,
436 void __user *buffer, size_t *lenp,
437 loff_t *ppos)
438{
723478c8 439 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
163ec435
PZ
440
441 if (ret || !write)
442 return ret;
443
ab7fdefb
KL
444 /*
445 * If throttling is disabled don't allow the write:
446 */
447 if (sysctl_perf_cpu_time_max_percent == 100 ||
448 sysctl_perf_cpu_time_max_percent == 0)
449 return -EINVAL;
450
163ec435 451 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
14c63f17
DH
452 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
453 update_perf_cpu_limits();
454
455 return 0;
456}
457
458int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
459
460int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
461 void __user *buffer, size_t *lenp,
462 loff_t *ppos)
463{
1572e45a 464 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
14c63f17
DH
465
466 if (ret || !write)
467 return ret;
468
b303e7c1
PZ
469 if (sysctl_perf_cpu_time_max_percent == 100 ||
470 sysctl_perf_cpu_time_max_percent == 0) {
91a612ee
PZ
471 printk(KERN_WARNING
472 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
473 WRITE_ONCE(perf_sample_allowed_ns, 0);
474 } else {
475 update_perf_cpu_limits();
476 }
163ec435
PZ
477
478 return 0;
479}
1ccd1549 480
14c63f17
DH
481/*
482 * perf samples are done in some very critical code paths (NMIs).
483 * If they take too much CPU time, the system can lock up and not
484 * get any real work done. This will drop the sample rate when
485 * we detect that events are taking too long.
486 */
487#define NR_ACCUMULATED_SAMPLES 128
d9494cb4 488static DEFINE_PER_CPU(u64, running_sample_length);
14c63f17 489
91a612ee
PZ
490static u64 __report_avg;
491static u64 __report_allowed;
492
6a02ad66 493static void perf_duration_warn(struct irq_work *w)
14c63f17 494{
0d87d7ec 495 printk_ratelimited(KERN_INFO
91a612ee
PZ
496 "perf: interrupt took too long (%lld > %lld), lowering "
497 "kernel.perf_event_max_sample_rate to %d\n",
498 __report_avg, __report_allowed,
499 sysctl_perf_event_sample_rate);
6a02ad66
PZ
500}
501
502static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
503
504void perf_sample_event_took(u64 sample_len_ns)
505{
91a612ee
PZ
506 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
507 u64 running_len;
508 u64 avg_len;
509 u32 max;
14c63f17 510
91a612ee 511 if (max_len == 0)
14c63f17
DH
512 return;
513
91a612ee
PZ
514 /* Decay the counter by 1 average sample. */
515 running_len = __this_cpu_read(running_sample_length);
516 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
517 running_len += sample_len_ns;
518 __this_cpu_write(running_sample_length, running_len);
14c63f17
DH
519
	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
91a612ee
PZ
525 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
526 if (avg_len <= max_len)
14c63f17
DH
527 return;
528
91a612ee
PZ
529 __report_avg = avg_len;
530 __report_allowed = max_len;
14c63f17 531
91a612ee
PZ
532 /*
533 * Compute a throttle threshold 25% below the current duration.
534 */
535 avg_len += avg_len / 4;
536 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
537 if (avg_len < max)
538 max /= (u32)avg_len;
539 else
540 max = 1;
14c63f17 541
91a612ee
PZ
542 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
543 WRITE_ONCE(max_samples_per_tick, max);
544
545 sysctl_perf_event_sample_rate = max * HZ;
546 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
6a02ad66 547
cd578abb 548 if (!irq_work_queue(&perf_duration_work)) {
91a612ee 549 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
cd578abb 550 "kernel.perf_event_max_sample_rate to %d\n",
91a612ee 551 __report_avg, __report_allowed,
cd578abb
PZ
552 sysctl_perf_event_sample_rate);
553 }
14c63f17
DH
554}
555
cdd6c482 556static atomic64_t perf_event_id;
a96bbc16 557
0b3fcf17
SE
558static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
559 enum event_type_t event_type);
560
561static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
562 enum event_type_t event_type,
563 struct task_struct *task);
564
565static void update_context_time(struct perf_event_context *ctx);
566static u64 perf_event_time(struct perf_event *event);
0b3fcf17 567
cdd6c482 568void __weak perf_event_print_debug(void) { }
0793a61d 569
84c79910 570extern __weak const char *perf_pmu_name(void)
0793a61d 571{
84c79910 572 return "pmu";
0793a61d
TG
573}
574
0b3fcf17
SE
575static inline u64 perf_clock(void)
576{
577 return local_clock();
578}
579
34f43927
PZ
580static inline u64 perf_event_clock(struct perf_event *event)
581{
582 return event->clock();
583}
584
0d3d73aa
PZ
/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */
606
607static __always_inline enum perf_event_state
608__perf_effective_state(struct perf_event *event)
609{
610 struct perf_event *leader = event->group_leader;
611
612 if (leader->state <= PERF_EVENT_STATE_OFF)
613 return leader->state;
614
615 return event->state;
616}
617
618static __always_inline void
619__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
620{
621 enum perf_event_state state = __perf_effective_state(event);
622 u64 delta = now - event->tstamp;
623
624 *enabled = event->total_time_enabled;
625 if (state >= PERF_EVENT_STATE_INACTIVE)
626 *enabled += delta;
627
628 *running = event->total_time_running;
629 if (state >= PERF_EVENT_STATE_ACTIVE)
630 *running += delta;
631}
632
633static void perf_event_update_time(struct perf_event *event)
634{
635 u64 now = perf_event_time(event);
636
637 __perf_update_times(event, now, &event->total_time_enabled,
638 &event->total_time_running);
639 event->tstamp = now;
640}
641
642static void perf_event_update_sibling_time(struct perf_event *leader)
643{
644 struct perf_event *sibling;
645
edb39592 646 for_each_sibling_event(sibling, leader)
0d3d73aa
PZ
647 perf_event_update_time(sibling);
648}
649
650static void
651perf_event_set_state(struct perf_event *event, enum perf_event_state state)
652{
653 if (event->state == state)
654 return;
655
656 perf_event_update_time(event);
657 /*
658 * If a group leader gets enabled/disabled all its siblings
659 * are affected too.
660 */
661 if ((event->state < 0) ^ (state < 0))
662 perf_event_update_sibling_time(event);
663
664 WRITE_ONCE(event->state, state);
665}
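
/*
 * Illustrative timeline (not from the original source): suppose an event
 * is enabled (INACTIVE) from t=0, scheduled in (ACTIVE) at t=2, scheduled
 * out (INACTIVE) at t=5, and read at t=7. Per __perf_update_times():
 *
 *	total_time_enabled = 7 - 0 = 7	(INACTIVE or better the whole time)
 *	total_time_running = 5 - 2 = 3	(only while ACTIVE)
 *
 * Each state change goes through perf_event_update_time(), so each delta
 * is accumulated against the state that was in effect while it elapsed.
 */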
666
e5d1367f
SE
667#ifdef CONFIG_CGROUP_PERF
668
e5d1367f
SE
669static inline bool
670perf_cgroup_match(struct perf_event *event)
671{
672 struct perf_event_context *ctx = event->ctx;
673 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
674
ef824fa1
TH
675 /* @event doesn't care about cgroup */
676 if (!event->cgrp)
677 return true;
678
679 /* wants specific cgroup scope but @cpuctx isn't associated with any */
680 if (!cpuctx->cgrp)
681 return false;
682
683 /*
684 * Cgroup scoping is recursive. An event enabled for a cgroup is
685 * also enabled for all its descendant cgroups. If @cpuctx's
686 * cgroup is a descendant of @event's (the test covers identity
687 * case), it's a match.
688 */
689 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
690 event->cgrp->css.cgroup);
e5d1367f
SE
691}
692
e5d1367f
SE
693static inline void perf_detach_cgroup(struct perf_event *event)
694{
4e2ba650 695 css_put(&event->cgrp->css);
e5d1367f
SE
696 event->cgrp = NULL;
697}
698
699static inline int is_cgroup_event(struct perf_event *event)
700{
701 return event->cgrp != NULL;
702}
703
704static inline u64 perf_cgroup_event_time(struct perf_event *event)
705{
706 struct perf_cgroup_info *t;
707
708 t = per_cpu_ptr(event->cgrp->info, event->cpu);
709 return t->time;
710}
711
712static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
713{
714 struct perf_cgroup_info *info;
715 u64 now;
716
717 now = perf_clock();
718
719 info = this_cpu_ptr(cgrp->info);
720
721 info->time += now - info->timestamp;
722 info->timestamp = now;
723}
724
725static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
726{
c917e0f2
SL
727 struct perf_cgroup *cgrp = cpuctx->cgrp;
728 struct cgroup_subsys_state *css;
729
730 if (cgrp) {
731 for (css = &cgrp->css; css; css = css->parent) {
732 cgrp = container_of(css, struct perf_cgroup, css);
733 __update_cgrp_time(cgrp);
734 }
735 }
e5d1367f
SE
736}
737
738static inline void update_cgrp_time_from_event(struct perf_event *event)
739{
3f7cce3c
SE
740 struct perf_cgroup *cgrp;
741
e5d1367f 742 /*
3f7cce3c
SE
743 * ensure we access cgroup data only when needed and
744 * when we know the cgroup is pinned (css_get)
e5d1367f 745 */
3f7cce3c 746 if (!is_cgroup_event(event))
e5d1367f
SE
747 return;
748
614e4c4e 749 cgrp = perf_cgroup_from_task(current, event->ctx);
3f7cce3c
SE
750 /*
751 * Do not update time when cgroup is not active
752 */
28fa741c 753 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
3f7cce3c 754 __update_cgrp_time(event->cgrp);
e5d1367f
SE
755}
756
757static inline void
3f7cce3c
SE
758perf_cgroup_set_timestamp(struct task_struct *task,
759 struct perf_event_context *ctx)
e5d1367f
SE
760{
761 struct perf_cgroup *cgrp;
762 struct perf_cgroup_info *info;
c917e0f2 763 struct cgroup_subsys_state *css;
e5d1367f 764
3f7cce3c
SE
765 /*
766 * ctx->lock held by caller
767 * ensure we do not access cgroup data
768 * unless we have the cgroup pinned (css_get)
769 */
770 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
771 return;
772
614e4c4e 773 cgrp = perf_cgroup_from_task(task, ctx);
c917e0f2
SL
774
775 for (css = &cgrp->css; css; css = css->parent) {
776 cgrp = container_of(css, struct perf_cgroup, css);
777 info = this_cpu_ptr(cgrp->info);
778 info->timestamp = ctx->timestamp;
779 }
e5d1367f
SE
780}
781
058fe1c0
DCC
782static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
783
e5d1367f
SE
784#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
785#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
786
787/*
788 * reschedule events based on the cgroup constraint of task.
789 *
790 * mode SWOUT : schedule out everything
791 * mode SWIN : schedule in based on cgroup for next
792 */
18ab2cd3 793static void perf_cgroup_switch(struct task_struct *task, int mode)
e5d1367f
SE
794{
795 struct perf_cpu_context *cpuctx;
058fe1c0 796 struct list_head *list;
e5d1367f
SE
797 unsigned long flags;
798
799 /*
058fe1c0
DCC
800 * Disable interrupts and preemption to avoid this CPU's
801 * cgrp_cpuctx_entry to change under us.
e5d1367f
SE
802 */
803 local_irq_save(flags);
804
058fe1c0
DCC
805 list = this_cpu_ptr(&cgrp_cpuctx_list);
806 list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
807 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
e5d1367f 808
058fe1c0
DCC
809 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
810 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f 811
058fe1c0
DCC
812 if (mode & PERF_CGROUP_SWOUT) {
813 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
814 /*
815 * must not be done before ctxswout due
816 * to event_filter_match() in event_sched_out()
817 */
818 cpuctx->cgrp = NULL;
819 }
e5d1367f 820
058fe1c0
DCC
821 if (mode & PERF_CGROUP_SWIN) {
822 WARN_ON_ONCE(cpuctx->cgrp);
			/*
			 * Set cgrp before ctxsw in, to allow
			 * event_filter_match() to not have to pass
			 * the task around. We pass the cpuctx->ctx
			 * to perf_cgroup_from_task() because cgroup
			 * events are only per-cpu.
			 */
830 cpuctx->cgrp = perf_cgroup_from_task(task,
831 &cpuctx->ctx);
832 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
e5d1367f 833 }
058fe1c0
DCC
834 perf_pmu_enable(cpuctx->ctx.pmu);
835 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f
SE
836 }
837
e5d1367f
SE
838 local_irq_restore(flags);
839}
840
a8d757ef
SE
841static inline void perf_cgroup_sched_out(struct task_struct *task,
842 struct task_struct *next)
e5d1367f 843{
a8d757ef
SE
844 struct perf_cgroup *cgrp1;
845 struct perf_cgroup *cgrp2 = NULL;
846
ddaaf4e2 847 rcu_read_lock();
a8d757ef
SE
848 /*
849 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
850 * we do not need to pass the ctx here because we know
851 * we are holding the rcu lock
a8d757ef 852 */
614e4c4e 853 cgrp1 = perf_cgroup_from_task(task, NULL);
70a01657 854 cgrp2 = perf_cgroup_from_task(next, NULL);
a8d757ef
SE
855
	/*
	 * Only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
861 if (cgrp1 != cgrp2)
862 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
ddaaf4e2
SE
863
864 rcu_read_unlock();
e5d1367f
SE
865}
866
a8d757ef
SE
867static inline void perf_cgroup_sched_in(struct task_struct *prev,
868 struct task_struct *task)
e5d1367f 869{
a8d757ef
SE
870 struct perf_cgroup *cgrp1;
871 struct perf_cgroup *cgrp2 = NULL;
872
ddaaf4e2 873 rcu_read_lock();
a8d757ef
SE
874 /*
875 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
876 * we do not need to pass the ctx here because we know
877 * we are holding the rcu lock
a8d757ef 878 */
614e4c4e 879 cgrp1 = perf_cgroup_from_task(task, NULL);
614e4c4e 880 cgrp2 = perf_cgroup_from_task(prev, NULL);
a8d757ef
SE
881
	/*
	 * Only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw. Cgroup events were not scheduled
	 * out at the previous ctxsw if that was not the case.
	 */
887 if (cgrp1 != cgrp2)
888 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
ddaaf4e2
SE
889
890 rcu_read_unlock();
e5d1367f
SE
891}
892
893static inline int perf_cgroup_connect(int fd, struct perf_event *event,
894 struct perf_event_attr *attr,
895 struct perf_event *group_leader)
896{
897 struct perf_cgroup *cgrp;
898 struct cgroup_subsys_state *css;
2903ff01
AV
899 struct fd f = fdget(fd);
900 int ret = 0;
e5d1367f 901
2903ff01 902 if (!f.file)
e5d1367f
SE
903 return -EBADF;
904
b583043e 905 css = css_tryget_online_from_dir(f.file->f_path.dentry,
ec903c0c 906 &perf_event_cgrp_subsys);
3db272c0
LZ
907 if (IS_ERR(css)) {
908 ret = PTR_ERR(css);
909 goto out;
910 }
e5d1367f
SE
911
912 cgrp = container_of(css, struct perf_cgroup, css);
913 event->cgrp = cgrp;
914
915 /*
916 * all events in a group must monitor
917 * the same cgroup because a task belongs
918 * to only one perf cgroup at a time
919 */
920 if (group_leader && group_leader->cgrp != cgrp) {
921 perf_detach_cgroup(event);
922 ret = -EINVAL;
e5d1367f 923 }
3db272c0 924out:
2903ff01 925 fdput(f);
e5d1367f
SE
926 return ret;
927}
928
929static inline void
930perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
931{
932 struct perf_cgroup_info *t;
933 t = per_cpu_ptr(event->cgrp->info, event->cpu);
934 event->shadow_ctx_time = now - t->timestamp;
935}
936
db4a8356
DCC
937/*
938 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
939 * cleared when last cgroup event is removed.
940 */
941static inline void
942list_update_cgroup_event(struct perf_event *event,
943 struct perf_event_context *ctx, bool add)
944{
945 struct perf_cpu_context *cpuctx;
058fe1c0 946 struct list_head *cpuctx_entry;
db4a8356
DCC
947
948 if (!is_cgroup_event(event))
949 return;
950
db4a8356
DCC
951 /*
952 * Because cgroup events are always per-cpu events,
953 * this will always be called from the right CPU.
954 */
955 cpuctx = __get_cpu_context(ctx);
33801b94 956
957 /*
958 * Since setting cpuctx->cgrp is conditional on the current @cgrp
959 * matching the event's cgroup, we must do this for every new event,
960 * because if the first would mismatch, the second would not try again
961 * and we would leave cpuctx->cgrp unset.
962 */
963 if (add && !cpuctx->cgrp) {
be96b316
TH
964 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
965
be96b316
TH
966 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
967 cpuctx->cgrp = cgrp;
058fe1c0 968 }
33801b94 969
970 if (add && ctx->nr_cgroups++)
971 return;
972 else if (!add && --ctx->nr_cgroups)
973 return;
974
975 /* no cgroup running */
976 if (!add)
977 cpuctx->cgrp = NULL;
978
979 cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
980 if (add)
981 list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
982 else
983 list_del(cpuctx_entry);
db4a8356
DCC
984}
985
e5d1367f
SE
986#else /* !CONFIG_CGROUP_PERF */
987
988static inline bool
989perf_cgroup_match(struct perf_event *event)
990{
991 return true;
992}
993
994static inline void perf_detach_cgroup(struct perf_event *event)
995{}
996
997static inline int is_cgroup_event(struct perf_event *event)
998{
999 return 0;
1000}
1001
e5d1367f
SE
1002static inline void update_cgrp_time_from_event(struct perf_event *event)
1003{
1004}
1005
1006static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
1007{
1008}
1009
a8d757ef
SE
1010static inline void perf_cgroup_sched_out(struct task_struct *task,
1011 struct task_struct *next)
e5d1367f
SE
1012{
1013}
1014
a8d757ef
SE
1015static inline void perf_cgroup_sched_in(struct task_struct *prev,
1016 struct task_struct *task)
e5d1367f
SE
1017{
1018}
1019
1020static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1021 struct perf_event_attr *attr,
1022 struct perf_event *group_leader)
1023{
1024 return -EINVAL;
1025}
1026
1027static inline void
3f7cce3c
SE
1028perf_cgroup_set_timestamp(struct task_struct *task,
1029 struct perf_event_context *ctx)
e5d1367f
SE
1030{
1031}
1032
1033void
1034perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
1035{
1036}
1037
1038static inline void
1039perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
1040{
1041}
1042
1043static inline u64 perf_cgroup_event_time(struct perf_event *event)
1044{
1045 return 0;
1046}
1047
db4a8356
DCC
1048static inline void
1049list_update_cgroup_event(struct perf_event *event,
1050 struct perf_event_context *ctx, bool add)
1051{
1052}
1053
e5d1367f
SE
1054#endif
1055
9e630205
SE
/*
 * Set the default multiplexing interval to depend on the timer tick,
 * just like the original code (1000/HZ is the tick period in ms).
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
1061/*
8a1115ff 1062 * function must be called with interrupts disabled
9e630205 1063 */
272325c4 1064static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
9e630205
SE
1065{
1066 struct perf_cpu_context *cpuctx;
8d5bce0c 1067 bool rotations;
9e630205 1068
16444645 1069 lockdep_assert_irqs_disabled();
9e630205
SE
1070
1071 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
9e630205
SE
1072 rotations = perf_rotate_context(cpuctx);
1073
4cfafd30
PZ
1074 raw_spin_lock(&cpuctx->hrtimer_lock);
1075 if (rotations)
9e630205 1076 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
4cfafd30
PZ
1077 else
1078 cpuctx->hrtimer_active = 0;
1079 raw_spin_unlock(&cpuctx->hrtimer_lock);
9e630205 1080
4cfafd30 1081 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
9e630205
SE
1082}
1083
272325c4 1084static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
9e630205 1085{
272325c4 1086 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 1087 struct pmu *pmu = cpuctx->ctx.pmu;
272325c4 1088 u64 interval;
9e630205
SE
1089
1090 /* no multiplexing needed for SW PMU */
1091 if (pmu->task_ctx_nr == perf_sw_context)
1092 return;
1093
62b85639
SE
1094 /*
1095 * check default is sane, if not set then force to
1096 * default interval (1/tick)
1097 */
272325c4
PZ
1098 interval = pmu->hrtimer_interval_ms;
1099 if (interval < 1)
1100 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
62b85639 1101
272325c4 1102 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
9e630205 1103
4cfafd30
PZ
1104 raw_spin_lock_init(&cpuctx->hrtimer_lock);
1105 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
272325c4 1106 timer->function = perf_mux_hrtimer_handler;
9e630205
SE
1107}
1108
272325c4 1109static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
9e630205 1110{
272325c4 1111 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 1112 struct pmu *pmu = cpuctx->ctx.pmu;
4cfafd30 1113 unsigned long flags;
9e630205
SE
1114
1115 /* not for SW PMU */
1116 if (pmu->task_ctx_nr == perf_sw_context)
272325c4 1117 return 0;
9e630205 1118
4cfafd30
PZ
1119 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
1120 if (!cpuctx->hrtimer_active) {
1121 cpuctx->hrtimer_active = 1;
1122 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
1123 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
1124 }
1125 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
9e630205 1126
272325c4 1127 return 0;
9e630205
SE
1128}
1129
33696fc0 1130void perf_pmu_disable(struct pmu *pmu)
9e35ad38 1131{
33696fc0
PZ
1132 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1133 if (!(*count)++)
1134 pmu->pmu_disable(pmu);
9e35ad38 1135}
9e35ad38 1136
33696fc0 1137void perf_pmu_enable(struct pmu *pmu)
9e35ad38 1138{
33696fc0
PZ
1139 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1140 if (!--(*count))
1141 pmu->pmu_enable(pmu);
9e35ad38 1142}
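
/*
 * Illustrative note (not part of the original source): the per-cpu
 * pmu_disable_count makes these calls nestable, e.g.:
 *
 *	perf_pmu_disable(pmu);		// count 0 -> 1, PMU disabled
 *	perf_pmu_disable(pmu);		// count 1 -> 2, no-op
 *	perf_pmu_enable(pmu);		// count 2 -> 1, no-op
 *	perf_pmu_enable(pmu);		// count 1 -> 0, PMU re-enabled
 *
 * Only the outermost disable/enable pair actually touches the hardware.
 */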
9e35ad38 1143
2fde4f94 1144static DEFINE_PER_CPU(struct list_head, active_ctx_list);
e9d2b064
PZ
1145
1146/*
2fde4f94
MR
1147 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1148 * perf_event_task_tick() are fully serialized because they're strictly cpu
1149 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1150 * disabled, while perf_event_task_tick is called from IRQ context.
e9d2b064 1151 */
2fde4f94 1152static void perf_event_ctx_activate(struct perf_event_context *ctx)
9e35ad38 1153{
2fde4f94 1154 struct list_head *head = this_cpu_ptr(&active_ctx_list);
b5ab4cd5 1155
16444645 1156 lockdep_assert_irqs_disabled();
b5ab4cd5 1157
2fde4f94
MR
1158 WARN_ON(!list_empty(&ctx->active_ctx_list));
1159
1160 list_add(&ctx->active_ctx_list, head);
1161}
1162
1163static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1164{
16444645 1165 lockdep_assert_irqs_disabled();
2fde4f94
MR
1166
1167 WARN_ON(list_empty(&ctx->active_ctx_list));
1168
1169 list_del_init(&ctx->active_ctx_list);
9e35ad38 1170}
9e35ad38 1171
cdd6c482 1172static void get_ctx(struct perf_event_context *ctx)
a63eaf34 1173{
e5289d4a 1174 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
1175}
1176
4af57ef2
YZ
1177static void free_ctx(struct rcu_head *head)
1178{
1179 struct perf_event_context *ctx;
1180
1181 ctx = container_of(head, struct perf_event_context, rcu_head);
1182 kfree(ctx->task_ctx_data);
1183 kfree(ctx);
1184}
1185
cdd6c482 1186static void put_ctx(struct perf_event_context *ctx)
a63eaf34 1187{
564c2b21
PM
1188 if (atomic_dec_and_test(&ctx->refcount)) {
1189 if (ctx->parent_ctx)
1190 put_ctx(ctx->parent_ctx);
63b6da39 1191 if (ctx->task && ctx->task != TASK_TOMBSTONE)
c93f7669 1192 put_task_struct(ctx->task);
4af57ef2 1193 call_rcu(&ctx->rcu_head, free_ctx);
564c2b21 1194 }
a63eaf34
PM
1195}
1196
f63a8daa
PZ
/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two -- this is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However, because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    cred_guard_mutex
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 *
 *    cpu_hotplug_lock
 *      pmus_lock
 *	  cpuctx->mutex / perf_event_context::mutex
 */
a83fe28e
PZ
1262static struct perf_event_context *
1263perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
f63a8daa
PZ
1264{
1265 struct perf_event_context *ctx;
1266
1267again:
1268 rcu_read_lock();
6aa7de05 1269 ctx = READ_ONCE(event->ctx);
f63a8daa
PZ
1270 if (!atomic_inc_not_zero(&ctx->refcount)) {
1271 rcu_read_unlock();
1272 goto again;
1273 }
1274 rcu_read_unlock();
1275
a83fe28e 1276 mutex_lock_nested(&ctx->mutex, nesting);
f63a8daa
PZ
1277 if (event->ctx != ctx) {
1278 mutex_unlock(&ctx->mutex);
1279 put_ctx(ctx);
1280 goto again;
1281 }
1282
1283 return ctx;
1284}
1285
a83fe28e
PZ
1286static inline struct perf_event_context *
1287perf_event_ctx_lock(struct perf_event *event)
1288{
1289 return perf_event_ctx_lock_nested(event, 0);
1290}
1291
f63a8daa
PZ
1292static void perf_event_ctx_unlock(struct perf_event *event,
1293 struct perf_event_context *ctx)
1294{
1295 mutex_unlock(&ctx->mutex);
1296 put_ctx(ctx);
1297}
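
/*
 * Illustrative usage (hypothetical, not part of the original source):
 * external entry points that need a stable event->ctx typically wrap
 * their work like this:
 *
 *	struct perf_event_context *ctx;
 *
 *	ctx = perf_event_ctx_lock(event);
 *	// ... operate on the event with ctx->mutex held ...
 *	perf_event_ctx_unlock(event, ctx);
 *
 * The lock helper re-checks event->ctx after taking ctx->mutex, which is
 * what makes it safe against a concurrent ctx change.
 */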
1298
211de6eb
PZ
1299/*
1300 * This must be done under the ctx->lock, such as to serialize against
1301 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1302 * calling scheduler related locks and ctx->lock nests inside those.
1303 */
1304static __must_check struct perf_event_context *
1305unclone_ctx(struct perf_event_context *ctx)
71a851b4 1306{
211de6eb
PZ
1307 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1308
1309 lockdep_assert_held(&ctx->lock);
1310
1311 if (parent_ctx)
71a851b4 1312 ctx->parent_ctx = NULL;
5a3126d4 1313 ctx->generation++;
211de6eb
PZ
1314
1315 return parent_ctx;
71a851b4
PZ
1316}
1317
1d953111
ON
1318static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1319 enum pid_type type)
6844c09d 1320{
1d953111 1321 u32 nr;
6844c09d
ACM
1322 /*
1323 * only top level events have the pid namespace they were created in
1324 */
1325 if (event->parent)
1326 event = event->parent;
1327
1d953111
ON
1328 nr = __task_pid_nr_ns(p, type, event->ns);
1329 /* avoid -1 if it is idle thread or runs in another ns */
1330 if (!nr && !pid_alive(p))
1331 nr = -1;
1332 return nr;
6844c09d
ACM
1333}
1334
1d953111 1335static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
6844c09d 1336{
6883f81a 1337 return perf_event_pid_type(event, p, PIDTYPE_TGID);
1d953111 1338}
6844c09d 1339
1d953111
ON
1340static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1341{
1342 return perf_event_pid_type(event, p, PIDTYPE_PID);
6844c09d
ACM
1343}
1344
7f453c24 1345/*
cdd6c482 1346 * If we inherit events we want to return the parent event id
7f453c24
PZ
1347 * to userspace.
1348 */
cdd6c482 1349static u64 primary_event_id(struct perf_event *event)
7f453c24 1350{
cdd6c482 1351 u64 id = event->id;
7f453c24 1352
cdd6c482
IM
1353 if (event->parent)
1354 id = event->parent->id;
7f453c24
PZ
1355
1356 return id;
1357}
1358
/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
cdd6c482 1365static struct perf_event_context *
8dc85d54 1366perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 1367{
cdd6c482 1368 struct perf_event_context *ctx;
25346b93 1369
9ed6060d 1370retry:
058ebd0e
PZ
1371 /*
1372 * One of the few rules of preemptible RCU is that one cannot do
1373 * rcu_read_unlock() while holding a scheduler (or nested) lock when
2fd59077 1374 * part of the read side critical section was irqs-enabled -- see
058ebd0e
PZ
1375 * rcu_read_unlock_special().
1376 *
1377 * Since ctx->lock nests under rq->lock we must ensure the entire read
2fd59077 1378 * side critical section has interrupts disabled.
058ebd0e 1379 */
2fd59077 1380 local_irq_save(*flags);
058ebd0e 1381 rcu_read_lock();
8dc85d54 1382 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
1383 if (ctx) {
1384 /*
1385 * If this context is a clone of another, it might
1386 * get swapped for another underneath us by
cdd6c482 1387 * perf_event_task_sched_out, though the
25346b93
PM
1388 * rcu_read_lock() protects us from any context
1389 * getting freed. Lock the context and check if it
1390 * got swapped before we could get the lock, and retry
1391 * if so. If we locked the right context, then it
1392 * can't get swapped on us any more.
1393 */
2fd59077 1394 raw_spin_lock(&ctx->lock);
8dc85d54 1395 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
2fd59077 1396 raw_spin_unlock(&ctx->lock);
058ebd0e 1397 rcu_read_unlock();
2fd59077 1398 local_irq_restore(*flags);
25346b93
PM
1399 goto retry;
1400 }
b49a9e7e 1401
63b6da39
PZ
1402 if (ctx->task == TASK_TOMBSTONE ||
1403 !atomic_inc_not_zero(&ctx->refcount)) {
2fd59077 1404 raw_spin_unlock(&ctx->lock);
b49a9e7e 1405 ctx = NULL;
828b6f0e
PZ
1406 } else {
1407 WARN_ON_ONCE(ctx->task != task);
b49a9e7e 1408 }
25346b93
PM
1409 }
1410 rcu_read_unlock();
2fd59077
PM
1411 if (!ctx)
1412 local_irq_restore(*flags);
25346b93
PM
1413 return ctx;
1414}
1415
1416/*
1417 * Get the context for a task and increment its pin_count so it
1418 * can't get swapped to another task. This also increments its
1419 * reference count so that the context can't get freed.
1420 */
8dc85d54
PZ
1421static struct perf_event_context *
1422perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1423{
cdd6c482 1424 struct perf_event_context *ctx;
25346b93
PM
1425 unsigned long flags;
1426
8dc85d54 1427 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1428 if (ctx) {
1429 ++ctx->pin_count;
e625cce1 1430 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1431 }
1432 return ctx;
1433}
1434
cdd6c482 1435static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1436{
1437 unsigned long flags;
1438
e625cce1 1439 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1440 --ctx->pin_count;
e625cce1 1441 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1442}
1443
f67218c3
PZ
1444/*
1445 * Update the record of the current time in a context.
1446 */
1447static void update_context_time(struct perf_event_context *ctx)
1448{
1449 u64 now = perf_clock();
1450
1451 ctx->time += now - ctx->timestamp;
1452 ctx->timestamp = now;
1453}
1454
4158755d
SE
1455static u64 perf_event_time(struct perf_event *event)
1456{
1457 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1458
1459 if (is_cgroup_event(event))
1460 return perf_cgroup_event_time(event);
1461
4158755d
SE
1462 return ctx ? ctx->time : 0;
1463}
1464
487f05e1
AS
1465static enum event_type_t get_event_type(struct perf_event *event)
1466{
1467 struct perf_event_context *ctx = event->ctx;
1468 enum event_type_t event_type;
1469
1470 lockdep_assert_held(&ctx->lock);
1471
3bda69c1
AS
1472 /*
1473 * It's 'group type', really, because if our group leader is
1474 * pinned, so are we.
1475 */
1476 if (event->group_leader != event)
1477 event = event->group_leader;
1478
487f05e1
AS
1479 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1480 if (!ctx->task)
1481 event_type |= EVENT_CPU;
1482
1483 return event_type;
1484}
1485
8e1a2031 1486/*
161c85fa 1487 * Helper function to initialize event group nodes.
8e1a2031 1488 */
161c85fa 1489static void init_event_group(struct perf_event *event)
8e1a2031
AB
1490{
1491 RB_CLEAR_NODE(&event->group_node);
1492 event->group_index = 0;
1493}
1494
1495/*
1496 * Extract pinned or flexible groups from the context
161c85fa 1497 * based on event attrs bits.
8e1a2031
AB
1498 */
1499static struct perf_event_groups *
1500get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
889ff015
FW
1501{
1502 if (event->attr.pinned)
1503 return &ctx->pinned_groups;
1504 else
1505 return &ctx->flexible_groups;
1506}
1507
/*
 * Helper function to initialize perf_event_groups trees.
 */
161c85fa 1511static void perf_event_groups_init(struct perf_event_groups *groups)
8e1a2031
AB
1512{
1513 groups->tree = RB_ROOT;
1514 groups->index = 0;
1515}
1516
1517/*
1518 * Compare function for event groups;
161c85fa
PZ
1519 *
1520 * Implements complex key that first sorts by CPU and then by virtual index
1521 * which provides ordering when rotating groups for the same CPU.
8e1a2031 1522 */
161c85fa
PZ
1523static bool
1524perf_event_groups_less(struct perf_event *left, struct perf_event *right)
8e1a2031 1525{
161c85fa
PZ
1526 if (left->cpu < right->cpu)
1527 return true;
1528 if (left->cpu > right->cpu)
1529 return false;
1530
1531 if (left->group_index < right->group_index)
1532 return true;
1533 if (left->group_index > right->group_index)
1534 return false;
1535
1536 return false;
8e1a2031
AB
1537}
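
/*
 * Illustrative example (not part of the original source): the sort key is
 * (cpu, group_index), so events end up ordered like:
 *
 *	{cpu=0, idx=1} < {cpu=0, idx=7} < {cpu=1, idx=2} < {cpu=1, idx=5}
 *
 * All events for one CPU form a contiguous subtree, and within that
 * subtree the monotonically growing group_index preserves insertion
 * order, which is what the rotation code relies on.
 */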
1538
1539/*
161c85fa
PZ
1540 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1541 * key (see perf_event_groups_less). This places it last inside the CPU
1542 * subtree.
8e1a2031
AB
1543 */
1544static void
1545perf_event_groups_insert(struct perf_event_groups *groups,
161c85fa 1546 struct perf_event *event)
8e1a2031
AB
1547{
1548 struct perf_event *node_event;
1549 struct rb_node *parent;
1550 struct rb_node **node;
1551
1552 event->group_index = ++groups->index;
1553
1554 node = &groups->tree.rb_node;
1555 parent = *node;
1556
1557 while (*node) {
1558 parent = *node;
161c85fa 1559 node_event = container_of(*node, struct perf_event, group_node);
8e1a2031
AB
1560
1561 if (perf_event_groups_less(event, node_event))
1562 node = &parent->rb_left;
1563 else
1564 node = &parent->rb_right;
1565 }
1566
1567 rb_link_node(&event->group_node, parent, node);
1568 rb_insert_color(&event->group_node, &groups->tree);
1569}
1570
1571/*
161c85fa 1572 * Helper function to insert event into the pinned or flexible groups.
8e1a2031
AB
1573 */
1574static void
1575add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1576{
1577 struct perf_event_groups *groups;
1578
1579 groups = get_event_groups(event, ctx);
1580 perf_event_groups_insert(groups, event);
1581}
1582
1583/*
161c85fa 1584 * Delete a group from a tree.
8e1a2031
AB
1585 */
1586static void
1587perf_event_groups_delete(struct perf_event_groups *groups,
161c85fa 1588 struct perf_event *event)
8e1a2031 1589{
161c85fa
PZ
1590 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1591 RB_EMPTY_ROOT(&groups->tree));
8e1a2031 1592
161c85fa 1593 rb_erase(&event->group_node, &groups->tree);
8e1a2031
AB
1594 init_event_group(event);
1595}
1596
1597/*
161c85fa 1598 * Helper function to delete event from its groups.
8e1a2031
AB
1599 */
1600static void
1601del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1602{
1603 struct perf_event_groups *groups;
1604
1605 groups = get_event_groups(event, ctx);
1606 perf_event_groups_delete(groups, event);
1607}
1608
1609/*
161c85fa 1610 * Get the leftmost event in the @cpu subtree.
8e1a2031
AB
1611 */
1612static struct perf_event *
1613perf_event_groups_first(struct perf_event_groups *groups, int cpu)
1614{
1615 struct perf_event *node_event = NULL, *match = NULL;
1616 struct rb_node *node = groups->tree.rb_node;
1617
1618 while (node) {
161c85fa 1619 node_event = container_of(node, struct perf_event, group_node);
8e1a2031
AB
1620
1621 if (cpu < node_event->cpu) {
1622 node = node->rb_left;
1623 } else if (cpu > node_event->cpu) {
1624 node = node->rb_right;
1625 } else {
1626 match = node_event;
1627 node = node->rb_left;
1628 }
1629 }
1630
1631 return match;
1632}
1633
1cac7b1a
PZ
1634/*
1635 * Like rb_entry_next_safe() for the @cpu subtree.
1636 */
1637static struct perf_event *
1638perf_event_groups_next(struct perf_event *event)
1639{
1640 struct perf_event *next;
1641
1642 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
1643 if (next && next->cpu == event->cpu)
1644 return next;
1645
1646 return NULL;
1647}
1648
8e1a2031 1649/*
161c85fa 1650 * Iterate through the whole groups tree.
8e1a2031 1651 */
6e6804d2
PZ
1652#define perf_event_groups_for_each(event, groups) \
1653 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1654 typeof(*event), group_node); event; \
1655 event = rb_entry_safe(rb_next(&event->group_node), \
1656 typeof(*event), group_node))
8e1a2031 1657
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
04289bb9 1662static void
cdd6c482 1663list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1664{
c994d613
PZ
1665 lockdep_assert_held(&ctx->lock);
1666
8a49542c
PZ
1667 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1668 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9 1669
0d3d73aa
PZ
1670 event->tstamp = perf_event_time(event);
1671
04289bb9 1672 /*
8a49542c
PZ
1673 * If we're a stand alone event or group leader, we go to the context
1674 * list, group events are kept attached to the group so that
1675 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1676 */
8a49542c 1677 if (event->group_leader == event) {
4ff6a8de 1678 event->group_caps = event->event_caps;
8e1a2031 1679 add_event_to_groups(event, ctx);
5c148194 1680 }
592903cd 1681
db4a8356 1682 list_update_cgroup_event(event, ctx, true);
e5d1367f 1683
cdd6c482
IM
1684 list_add_rcu(&event->event_entry, &ctx->event_list);
1685 ctx->nr_events++;
1686 if (event->attr.inherit_stat)
bfbd3381 1687 ctx->nr_stat++;
5a3126d4
PZ
1688
1689 ctx->generation++;
04289bb9
IM
1690}
1691
0231bb53
JO
1692/*
1693 * Initialize event state based on the perf_event_attr::disabled.
1694 */
1695static inline void perf_event__state_init(struct perf_event *event)
1696{
1697 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1698 PERF_EVENT_STATE_INACTIVE;
1699}
1700
a723968c 1701static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
c320c7b7
ACM
1702{
1703 int entry = sizeof(u64); /* value */
1704 int size = 0;
1705 int nr = 1;
1706
1707 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1708 size += sizeof(u64);
1709
1710 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1711 size += sizeof(u64);
1712
1713 if (event->attr.read_format & PERF_FORMAT_ID)
1714 entry += sizeof(u64);
1715
1716 if (event->attr.read_format & PERF_FORMAT_GROUP) {
a723968c 1717 nr += nr_siblings;
c320c7b7
ACM
1718 size += sizeof(u64);
1719 }
1720
1721 size += entry * nr;
1722 event->read_size = size;
1723}
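
/*
 * Worked example (illustrative only): for read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * and a leader with two siblings (nr_siblings == 2):
 *
 *	size  = 8 (time_enabled) + 8 (nr field for GROUP) = 16
 *	entry = 8 (value) + 8 (id) = 16
 *	nr    = 1 + 2 = 3
 *
 * so read_size = 16 + 16 * 3 = 64 bytes.
 */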
1724
a723968c 1725static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
c320c7b7
ACM
1726{
1727 struct perf_sample_data *data;
c320c7b7
ACM
1728 u16 size = 0;
1729
c320c7b7
ACM
1730 if (sample_type & PERF_SAMPLE_IP)
1731 size += sizeof(data->ip);
1732
6844c09d
ACM
1733 if (sample_type & PERF_SAMPLE_ADDR)
1734 size += sizeof(data->addr);
1735
1736 if (sample_type & PERF_SAMPLE_PERIOD)
1737 size += sizeof(data->period);
1738
c3feedf2
AK
1739 if (sample_type & PERF_SAMPLE_WEIGHT)
1740 size += sizeof(data->weight);
1741
6844c09d
ACM
1742 if (sample_type & PERF_SAMPLE_READ)
1743 size += event->read_size;
1744
d6be9ad6
SE
1745 if (sample_type & PERF_SAMPLE_DATA_SRC)
1746 size += sizeof(data->data_src.val);
1747
fdfbbd07
AK
1748 if (sample_type & PERF_SAMPLE_TRANSACTION)
1749 size += sizeof(data->txn);
1750
fc7ce9c7
KL
1751 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1752 size += sizeof(data->phys_addr);
1753
6844c09d
ACM
1754 event->header_size = size;
1755}
1756
a723968c
PZ
1757/*
1758 * Called at perf_event creation and when events are attached/detached from a
1759 * group.
1760 */
1761static void perf_event__header_size(struct perf_event *event)
1762{
1763 __perf_event_read_size(event,
1764 event->group_leader->nr_siblings);
1765 __perf_event_header_size(event, event->attr.sample_type);
1766}
1767
6844c09d
ACM
1768static void perf_event__id_header_size(struct perf_event *event)
1769{
1770 struct perf_sample_data *data;
1771 u64 sample_type = event->attr.sample_type;
1772 u16 size = 0;
1773
c320c7b7
ACM
1774 if (sample_type & PERF_SAMPLE_TID)
1775 size += sizeof(data->tid_entry);
1776
1777 if (sample_type & PERF_SAMPLE_TIME)
1778 size += sizeof(data->time);
1779
ff3d527c
AH
1780 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1781 size += sizeof(data->id);
1782
c320c7b7
ACM
1783 if (sample_type & PERF_SAMPLE_ID)
1784 size += sizeof(data->id);
1785
1786 if (sample_type & PERF_SAMPLE_STREAM_ID)
1787 size += sizeof(data->stream_id);
1788
1789 if (sample_type & PERF_SAMPLE_CPU)
1790 size += sizeof(data->cpu_entry);
1791
6844c09d 1792 event->id_header_size = size;
c320c7b7
ACM
1793}
1794
a723968c
PZ
1795static bool perf_event_validate_size(struct perf_event *event)
1796{
1797 /*
1798 * The values computed here will be overwritten when we actually
1799 * attach the event.
1800 */
1801 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1802 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1803 perf_event__id_header_size(event);
1804
1805 /*
1806 * Sum the lot; should not exceed the 64k limit we have on records.
1807 * Conservative limit to allow for callchains and other variable fields.
1808 */
1809 if (event->read_size + event->header_size +
1810 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1811 return false;
1812
1813 return true;
1814}
1815
8a49542c
PZ
1816static void perf_group_attach(struct perf_event *event)
1817{
c320c7b7 1818 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1819
a76a82a3
PZ
1820 lockdep_assert_held(&event->ctx->lock);
1821
74c3337c
PZ
1822 /*
1823 * We can have double attach due to group movement in perf_event_open.
1824 */
1825 if (event->attach_state & PERF_ATTACH_GROUP)
1826 return;
1827
8a49542c
PZ
1828 event->attach_state |= PERF_ATTACH_GROUP;
1829
1830 if (group_leader == event)
1831 return;
1832
652884fe
PZ
1833 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1834
4ff6a8de 1835 group_leader->group_caps &= event->event_caps;
8a49542c 1836
8343aae6 1837 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
8a49542c 1838 group_leader->nr_siblings++;
c320c7b7
ACM
1839
1840 perf_event__header_size(group_leader);
1841
edb39592 1842 for_each_sibling_event(pos, group_leader)
c320c7b7 1843 perf_event__header_size(pos);
8a49542c
PZ
1844}
1845
a63eaf34 1846/*
788faab7 1847 * Remove an event from the lists for its context.
fccc714b 1848 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1849 */
04289bb9 1850static void
cdd6c482 1851list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1852{
652884fe
PZ
1853 WARN_ON_ONCE(event->ctx != ctx);
1854 lockdep_assert_held(&ctx->lock);
1855
8a49542c
PZ
1856 /*
1857 * We can have double detach due to exit/hot-unplug + close.
1858 */
1859 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1860 return;
8a49542c
PZ
1861
1862 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1863
db4a8356 1864 list_update_cgroup_event(event, ctx, false);
e5d1367f 1865
cdd6c482
IM
1866 ctx->nr_events--;
1867 if (event->attr.inherit_stat)
bfbd3381 1868 ctx->nr_stat--;
8bc20959 1869
cdd6c482 1870 list_del_rcu(&event->event_entry);
04289bb9 1871
8a49542c 1872 if (event->group_leader == event)
8e1a2031 1873 del_event_from_groups(event, ctx);
5c148194 1874
b2e74a26
SE
1875 /*
1876 * If event was in error state, then keep it
1877 * that way, otherwise bogus counts will be
1878 * returned on read(). The only way to get out
1879 * of error state is by explicit re-enabling
1880 * of the event
1881 */
1882 if (event->state > PERF_EVENT_STATE_OFF)
0d3d73aa 1883 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
5a3126d4
PZ
1884
1885 ctx->generation++;
050735b0
PZ
1886}
1887
8a49542c 1888static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1889{
1890 struct perf_event *sibling, *tmp;
6668128a 1891 struct perf_event_context *ctx = event->ctx;
8a49542c 1892
6668128a 1893 lockdep_assert_held(&ctx->lock);
a76a82a3 1894
8a49542c
PZ
1895 /*
1896 * We can have double detach due to exit/hot-unplug + close.
1897 */
1898 if (!(event->attach_state & PERF_ATTACH_GROUP))
1899 return;
1900
1901 event->attach_state &= ~PERF_ATTACH_GROUP;
1902
1903 /*
1904 * If this is a sibling, remove it from its group.
1905 */
1906 if (event->group_leader != event) {
8343aae6 1907 list_del_init(&event->sibling_list);
8a49542c 1908 event->group_leader->nr_siblings--;
c320c7b7 1909 goto out;
8a49542c
PZ
1910 }
1911
04289bb9 1912 /*
cdd6c482
IM
1913 * If this was a group event with sibling events then
1914 * upgrade the siblings to singleton events by adding them
8a49542c 1915 * to whatever list we are on.
04289bb9 1916 */
8343aae6 1917 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
8e1a2031 1918
04289bb9 1919 sibling->group_leader = sibling;
24868367 1920 list_del_init(&sibling->sibling_list);
d6f962b5
FW
1921
1922 /* Inherit group flags from the previous leader */
4ff6a8de 1923 sibling->group_caps = event->group_caps;
652884fe 1924
8e1a2031 1925 if (!RB_EMPTY_NODE(&event->group_node)) {
8e1a2031 1926 add_event_to_groups(sibling, event->ctx);
6668128a
PZ
1927
1928 if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
1929 struct list_head *list = sibling->attr.pinned ?
1930 &ctx->pinned_active : &ctx->flexible_active;
1931
1932 list_add_tail(&sibling->active_list, list);
1933 }
8e1a2031
AB
1934 }
1935
652884fe 1936 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 1937 }
c320c7b7
ACM
1938
1939out:
1940 perf_event__header_size(event->group_leader);
1941
edb39592 1942 for_each_sibling_event(tmp, event->group_leader)
c320c7b7 1943 perf_event__header_size(tmp);
04289bb9
IM
1944}
1945
fadfe7be
JO
1946static bool is_orphaned_event(struct perf_event *event)
1947{
a69b0ca4 1948 return event->state == PERF_EVENT_STATE_DEAD;
fadfe7be
JO
1949}
1950
2c81a647 1951static inline int __pmu_filter_match(struct perf_event *event)
66eb579e
MR
1952{
1953 struct pmu *pmu = event->pmu;
1954 return pmu->filter_match ? pmu->filter_match(event) : 1;
1955}
1956
2c81a647
MR
1957/*
1958 * Check whether we should attempt to schedule an event group based on
1959 * PMU-specific filtering. An event group can consist of HW and SW events,
 1960 * potentially with a SW leader, so we must check all the filters to
 1961 * determine whether a group is schedulable:
1962 */
1963static inline int pmu_filter_match(struct perf_event *event)
1964{
edb39592 1965 struct perf_event *sibling;
2c81a647
MR
1966
1967 if (!__pmu_filter_match(event))
1968 return 0;
1969
edb39592
PZ
1970 for_each_sibling_event(sibling, event) {
1971 if (!__pmu_filter_match(sibling))
2c81a647
MR
1972 return 0;
1973 }
1974
1975 return 1;
1976}
1977
fa66f07a
SE
1978static inline int
1979event_filter_match(struct perf_event *event)
1980{
0b8f1e2e
PZ
1981 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1982 perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
1983}
1984
9ffcfa6f
SE
1985static void
1986event_sched_out(struct perf_event *event,
3b6f9e5c 1987 struct perf_cpu_context *cpuctx,
cdd6c482 1988 struct perf_event_context *ctx)
3b6f9e5c 1989{
0d3d73aa 1990 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
652884fe
PZ
1991
1992 WARN_ON_ONCE(event->ctx != ctx);
1993 lockdep_assert_held(&ctx->lock);
1994
cdd6c482 1995 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1996 return;
3b6f9e5c 1997
6668128a
PZ
1998 /*
1999 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2000 * we can schedule events _OUT_ individually through things like
2001 * __perf_remove_from_context().
2002 */
2003 list_del_init(&event->active_list);
2004
44377277
AS
2005 perf_pmu_disable(event->pmu);
2006
28a967c3
PZ
2007 event->pmu->del(event, 0);
2008 event->oncpu = -1;
0d3d73aa 2009
cdd6c482
IM
2010 if (event->pending_disable) {
2011 event->pending_disable = 0;
0d3d73aa 2012 state = PERF_EVENT_STATE_OFF;
970892a9 2013 }
0d3d73aa 2014 perf_event_set_state(event, state);
3b6f9e5c 2015
cdd6c482 2016 if (!is_software_event(event))
3b6f9e5c 2017 cpuctx->active_oncpu--;
2fde4f94
MR
2018 if (!--ctx->nr_active)
2019 perf_event_ctx_deactivate(ctx);
0f5a2601
PZ
2020 if (event->attr.freq && event->attr.sample_freq)
2021 ctx->nr_freq--;
cdd6c482 2022 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 2023 cpuctx->exclusive = 0;
44377277
AS
2024
2025 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
2026}
2027
d859e29f 2028static void
cdd6c482 2029group_sched_out(struct perf_event *group_event,
d859e29f 2030 struct perf_cpu_context *cpuctx,
cdd6c482 2031 struct perf_event_context *ctx)
d859e29f 2032{
cdd6c482 2033 struct perf_event *event;
0d3d73aa
PZ
2034
2035 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2036 return;
d859e29f 2037
3f005e7d
MR
2038 perf_pmu_disable(ctx->pmu);
2039
cdd6c482 2040 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
2041
2042 /*
2043 * Schedule out siblings (if any):
2044 */
edb39592 2045 for_each_sibling_event(event, group_event)
cdd6c482 2046 event_sched_out(event, cpuctx, ctx);
d859e29f 2047
3f005e7d
MR
2048 perf_pmu_enable(ctx->pmu);
2049
0d3d73aa 2050 if (group_event->attr.exclusive)
d859e29f
PM
2051 cpuctx->exclusive = 0;
2052}
2053
45a0e07a 2054#define DETACH_GROUP 0x01UL
0017960f 2055
0793a61d 2056/*
cdd6c482 2057 * Cross CPU call to remove a performance event
0793a61d 2058 *
cdd6c482 2059 * We disable the event on the hardware level first. After that we
0793a61d
TG
2060 * remove it from the context list.
2061 */
fae3fde6
PZ
2062static void
2063__perf_remove_from_context(struct perf_event *event,
2064 struct perf_cpu_context *cpuctx,
2065 struct perf_event_context *ctx,
2066 void *info)
0793a61d 2067{
45a0e07a 2068 unsigned long flags = (unsigned long)info;
0793a61d 2069
3c5c8711
PZ
2070 if (ctx->is_active & EVENT_TIME) {
2071 update_context_time(ctx);
2072 update_cgrp_time_from_cpuctx(cpuctx);
2073 }
2074
cdd6c482 2075 event_sched_out(event, cpuctx, ctx);
45a0e07a 2076 if (flags & DETACH_GROUP)
46ce0fe9 2077 perf_group_detach(event);
cdd6c482 2078 list_del_event(event, ctx);
39a43640
PZ
2079
2080 if (!ctx->nr_events && ctx->is_active) {
64ce3126 2081 ctx->is_active = 0;
39a43640
PZ
2082 if (ctx->task) {
2083 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2084 cpuctx->task_ctx = NULL;
2085 }
64ce3126 2086 }
0793a61d
TG
2087}
2088
0793a61d 2089/*
cdd6c482 2090 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 2091 *
cdd6c482
IM
2092 * If event->ctx is a cloned context, callers must make sure that
2093 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
2094 * remains valid. This is OK when called from perf_release since
2095 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 2096 * When called from perf_event_exit_task, it's OK because the
c93f7669 2097 * context has been detached from its task.
0793a61d 2098 */
45a0e07a 2099static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
0793a61d 2100{
a76a82a3
PZ
2101 struct perf_event_context *ctx = event->ctx;
2102
2103 lockdep_assert_held(&ctx->mutex);
0793a61d 2104
45a0e07a 2105 event_function_call(event, __perf_remove_from_context, (void *)flags);
a76a82a3
PZ
2106
2107 /*
2108 * The above event_function_call() can NO-OP when it hits
2109 * TASK_TOMBSTONE. In that case we must already have been detached
2110 * from the context (by perf_event_exit_event()) but the grouping
 2111 * might still be intact.
2112 */
2113 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
2114 if ((flags & DETACH_GROUP) &&
2115 (event->attach_state & PERF_ATTACH_GROUP)) {
2116 /*
2117 * Since in that case we cannot possibly be scheduled, simply
2118 * detach now.
2119 */
2120 raw_spin_lock_irq(&ctx->lock);
2121 perf_group_detach(event);
2122 raw_spin_unlock_irq(&ctx->lock);
2123 }
0793a61d
TG
2124}
2125
d859e29f 2126/*
cdd6c482 2127 * Cross CPU call to disable a performance event
d859e29f 2128 */
fae3fde6
PZ
2129static void __perf_event_disable(struct perf_event *event,
2130 struct perf_cpu_context *cpuctx,
2131 struct perf_event_context *ctx,
2132 void *info)
7b648018 2133{
fae3fde6
PZ
2134 if (event->state < PERF_EVENT_STATE_INACTIVE)
2135 return;
7b648018 2136
3c5c8711
PZ
2137 if (ctx->is_active & EVENT_TIME) {
2138 update_context_time(ctx);
2139 update_cgrp_time_from_event(event);
2140 }
2141
fae3fde6
PZ
2142 if (event == event->group_leader)
2143 group_sched_out(event, cpuctx, ctx);
2144 else
2145 event_sched_out(event, cpuctx, ctx);
0d3d73aa
PZ
2146
2147 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
7b648018
PZ
2148}
2149
d859e29f 2150/*
788faab7 2151 * Disable an event.
c93f7669 2152 *
cdd6c482
IM
2153 * If event->ctx is a cloned context, callers must make sure that
2154 * every task struct that event->ctx->task could possibly point to
c93f7669 2155 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2156 * perf_event_for_each_child or perf_event_for_each because they
2157 * hold the top-level event's child_mutex, so any descendant that
8ba289b8
PZ
2158 * goes to exit will block in perf_event_exit_event().
2159 *
cdd6c482 2160 * When called from perf_pending_event it's OK because event->ctx
c93f7669 2161 * is the current context on this CPU and preemption is disabled,
cdd6c482 2162 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 2163 */
f63a8daa 2164static void _perf_event_disable(struct perf_event *event)
d859e29f 2165{
cdd6c482 2166 struct perf_event_context *ctx = event->ctx;
d859e29f 2167
e625cce1 2168 raw_spin_lock_irq(&ctx->lock);
7b648018 2169 if (event->state <= PERF_EVENT_STATE_OFF) {
e625cce1 2170 raw_spin_unlock_irq(&ctx->lock);
7b648018 2171 return;
53cfbf59 2172 }
e625cce1 2173 raw_spin_unlock_irq(&ctx->lock);
7b648018 2174
fae3fde6
PZ
2175 event_function_call(event, __perf_event_disable, NULL);
2176}
2177
2178void perf_event_disable_local(struct perf_event *event)
2179{
2180 event_function_local(event, __perf_event_disable, NULL);
d859e29f 2181}
f63a8daa
PZ
2182
2183/*
2184 * Strictly speaking kernel users cannot create groups and therefore this
2185 * interface does not need the perf_event_ctx_lock() magic.
2186 */
2187void perf_event_disable(struct perf_event *event)
2188{
2189 struct perf_event_context *ctx;
2190
2191 ctx = perf_event_ctx_lock(event);
2192 _perf_event_disable(event);
2193 perf_event_ctx_unlock(event, ctx);
2194}
dcfce4a0 2195EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 2196
5aab90ce
JO
2197void perf_event_disable_inatomic(struct perf_event *event)
2198{
2199 event->pending_disable = 1;
2200 irq_work_queue(&event->pending);
2201}
2202
e5d1367f 2203static void perf_set_shadow_time(struct perf_event *event,
0d3d73aa 2204 struct perf_event_context *ctx)
e5d1367f
SE
2205{
2206 /*
2207 * use the correct time source for the time snapshot
2208 *
2209 * We could get by without this by leveraging the
2210 * fact that to get to this function, the caller
2211 * has most likely already called update_context_time()
 2212 * and update_cgrp_time_xx() and thus both timestamps
 2213 * are identical (or very close). Given that tstamp is
2214 * already adjusted for cgroup, we could say that:
2215 * tstamp - ctx->timestamp
2216 * is equivalent to
2217 * tstamp - cgrp->timestamp.
2218 *
2219 * Then, in perf_output_read(), the calculation would
2220 * work with no changes because:
2221 * - event is guaranteed scheduled in
2222 * - no scheduled out in between
2223 * - thus the timestamp would be the same
2224 *
2225 * But this is a bit hairy.
2226 *
2227 * So instead, we have an explicit cgroup call to remain
 2228 * within the same time source all along. We believe it
2229 * is cleaner and simpler to understand.
2230 */
2231 if (is_cgroup_event(event))
0d3d73aa 2232 perf_cgroup_set_shadow_time(event, event->tstamp);
e5d1367f 2233 else
0d3d73aa 2234 event->shadow_ctx_time = event->tstamp - ctx->timestamp;
e5d1367f
SE
2235}
2236
4fe757dd
PZ
2237#define MAX_INTERRUPTS (~0ULL)
2238
2239static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 2240static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 2241
235c7fc7 2242static int
9ffcfa6f 2243event_sched_in(struct perf_event *event,
235c7fc7 2244 struct perf_cpu_context *cpuctx,
6e37738a 2245 struct perf_event_context *ctx)
235c7fc7 2246{
44377277 2247 int ret = 0;
4158755d 2248
63342411
PZ
2249 lockdep_assert_held(&ctx->lock);
2250
cdd6c482 2251 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
2252 return 0;
2253
95ff4ca2
AS
2254 WRITE_ONCE(event->oncpu, smp_processor_id());
2255 /*
0c1cbc18
PZ
2256 * Order event::oncpu write to happen before the ACTIVE state is
2257 * visible. This allows perf_event_{stop,read}() to observe the correct
2258 * ->oncpu if it sees ACTIVE.
95ff4ca2
AS
2259 */
2260 smp_wmb();
0d3d73aa 2261 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
4fe757dd
PZ
2262
2263 /*
2264 * Unthrottle events, since we scheduled we might have missed several
2265 * ticks already, also for a heavily scheduling task there is little
2266 * guarantee it'll get a tick in a timely manner.
2267 */
2268 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2269 perf_log_throttle(event, 1);
2270 event->hw.interrupts = 0;
2271 }
2272
44377277
AS
2273 perf_pmu_disable(event->pmu);
2274
0d3d73aa 2275 perf_set_shadow_time(event, ctx);
72f669c0 2276
ec0d7729
AS
2277 perf_log_itrace_start(event);
2278
a4eaf7f1 2279 if (event->pmu->add(event, PERF_EF_START)) {
0d3d73aa 2280 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
cdd6c482 2281 event->oncpu = -1;
44377277
AS
2282 ret = -EAGAIN;
2283 goto out;
235c7fc7
IM
2284 }
2285
cdd6c482 2286 if (!is_software_event(event))
3b6f9e5c 2287 cpuctx->active_oncpu++;
2fde4f94
MR
2288 if (!ctx->nr_active++)
2289 perf_event_ctx_activate(ctx);
0f5a2601
PZ
2290 if (event->attr.freq && event->attr.sample_freq)
2291 ctx->nr_freq++;
235c7fc7 2292
cdd6c482 2293 if (event->attr.exclusive)
3b6f9e5c
PM
2294 cpuctx->exclusive = 1;
2295
44377277
AS
2296out:
2297 perf_pmu_enable(event->pmu);
2298
2299 return ret;
235c7fc7
IM
2300}
2301
6751b71e 2302static int
cdd6c482 2303group_sched_in(struct perf_event *group_event,
6751b71e 2304 struct perf_cpu_context *cpuctx,
6e37738a 2305 struct perf_event_context *ctx)
6751b71e 2306{
6bde9b6c 2307 struct perf_event *event, *partial_group = NULL;
4a234593 2308 struct pmu *pmu = ctx->pmu;
6751b71e 2309
cdd6c482 2310 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
2311 return 0;
2312
fbbe0701 2313 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 2314
9ffcfa6f 2315 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 2316 pmu->cancel_txn(pmu);
272325c4 2317 perf_mux_hrtimer_restart(cpuctx);
6751b71e 2318 return -EAGAIN;
90151c35 2319 }
6751b71e
PM
2320
2321 /*
2322 * Schedule in siblings as one group (if any):
2323 */
edb39592 2324 for_each_sibling_event(event, group_event) {
9ffcfa6f 2325 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 2326 partial_group = event;
6751b71e
PM
2327 goto group_error;
2328 }
2329 }
2330
9ffcfa6f 2331 if (!pmu->commit_txn(pmu))
6e85158c 2332 return 0;
9ffcfa6f 2333
6751b71e
PM
2334group_error:
2335 /*
2336 * Groups can be scheduled in as one unit only, so undo any
2337 * partial group before returning:
0d3d73aa 2338 * The events up to the failed event are scheduled out normally.
6751b71e 2339 */
edb39592 2340 for_each_sibling_event(event, group_event) {
cdd6c482 2341 if (event == partial_group)
0d3d73aa 2342 break;
d7842da4 2343
0d3d73aa 2344 event_sched_out(event, cpuctx, ctx);
6751b71e 2345 }
9ffcfa6f 2346 event_sched_out(group_event, cpuctx, ctx);
6751b71e 2347
ad5133b7 2348 pmu->cancel_txn(pmu);
90151c35 2349
272325c4 2350 perf_mux_hrtimer_restart(cpuctx);
9e630205 2351
6751b71e
PM
2352 return -EAGAIN;
2353}
2354
3b6f9e5c 2355/*
cdd6c482 2356 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 2357 */
cdd6c482 2358static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
2359 struct perf_cpu_context *cpuctx,
2360 int can_add_hw)
2361{
2362 /*
cdd6c482 2363 * Groups consisting entirely of software events can always go on.
3b6f9e5c 2364 */
4ff6a8de 2365 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
3b6f9e5c
PM
2366 return 1;
2367 /*
2368 * If an exclusive group is already on, no other hardware
cdd6c482 2369 * events can go on.
3b6f9e5c
PM
2370 */
2371 if (cpuctx->exclusive)
2372 return 0;
2373 /*
2374 * If this group is exclusive and there are already
cdd6c482 2375 * events on the CPU, it can't go on.
3b6f9e5c 2376 */
cdd6c482 2377 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
2378 return 0;
2379 /*
2380 * Otherwise, try to add it if all previous groups were able
2381 * to go on.
2382 */
2383 return can_add_hw;
2384}
2385
cdd6c482
IM
2386static void add_event_to_ctx(struct perf_event *event,
2387 struct perf_event_context *ctx)
53cfbf59 2388{
cdd6c482 2389 list_add_event(event, ctx);
8a49542c 2390 perf_group_attach(event);
53cfbf59
PM
2391}
2392
bd2afa49
PZ
2393static void ctx_sched_out(struct perf_event_context *ctx,
2394 struct perf_cpu_context *cpuctx,
2395 enum event_type_t event_type);
2c29ef0f
PZ
2396static void
2397ctx_sched_in(struct perf_event_context *ctx,
2398 struct perf_cpu_context *cpuctx,
2399 enum event_type_t event_type,
2400 struct task_struct *task);
fe4b04fa 2401
bd2afa49 2402static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
487f05e1
AS
2403 struct perf_event_context *ctx,
2404 enum event_type_t event_type)
bd2afa49
PZ
2405{
2406 if (!cpuctx->task_ctx)
2407 return;
2408
2409 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2410 return;
2411
487f05e1 2412 ctx_sched_out(ctx, cpuctx, event_type);
bd2afa49
PZ
2413}
2414
dce5855b
PZ
2415static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2416 struct perf_event_context *ctx,
2417 struct task_struct *task)
2418{
2419 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2420 if (ctx)
2421 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2422 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2423 if (ctx)
2424 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2425}
2426
487f05e1
AS
2427/*
2428 * We want to maintain the following priority of scheduling:
2429 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2430 * - task pinned (EVENT_PINNED)
2431 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2432 * - task flexible (EVENT_FLEXIBLE).
2433 *
2434 * In order to avoid unscheduling and scheduling back in everything every
2435 * time an event is added, only do it for the groups of equal priority and
2436 * below.
2437 *
2438 * This can be called after a batch operation on task events, in which case
2439 * event_type is a bit mask of the types of events involved. For CPU events,
2440 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2441 */
3e349507 2442static void ctx_resched(struct perf_cpu_context *cpuctx,
487f05e1
AS
2443 struct perf_event_context *task_ctx,
2444 enum event_type_t event_type)
0017960f 2445{
bd903afe 2446 enum event_type_t ctx_event_type;
487f05e1
AS
2447 bool cpu_event = !!(event_type & EVENT_CPU);
2448
2449 /*
2450 * If pinned groups are involved, flexible groups also need to be
2451 * scheduled out.
2452 */
2453 if (event_type & EVENT_PINNED)
2454 event_type |= EVENT_FLEXIBLE;
2455
bd903afe
SL
2456 ctx_event_type = event_type & EVENT_ALL;
2457
3e349507
PZ
2458 perf_pmu_disable(cpuctx->ctx.pmu);
2459 if (task_ctx)
487f05e1
AS
2460 task_ctx_sched_out(cpuctx, task_ctx, event_type);
2461
2462 /*
2463 * Decide which cpu ctx groups to schedule out based on the types
2464 * of events that caused rescheduling:
2465 * - EVENT_CPU: schedule out corresponding groups;
2466 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2467 * - otherwise, do nothing more.
2468 */
2469 if (cpu_event)
2470 cpu_ctx_sched_out(cpuctx, ctx_event_type);
2471 else if (ctx_event_type & EVENT_PINNED)
2472 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2473
3e349507
PZ
2474 perf_event_sched_in(cpuctx, task_ctx, current);
2475 perf_pmu_enable(cpuctx->ctx.pmu);
0017960f
PZ
2476}
2477
0793a61d 2478/*
cdd6c482 2479 * Cross CPU call to install and enable a performance event
682076ae 2480 *
a096309b
PZ
2481 * Very similar to remote_function() + event_function() but cannot assume that
2482 * things like ctx->is_active and cpuctx->task_ctx are set.
0793a61d 2483 */
fe4b04fa 2484static int __perf_install_in_context(void *info)
0793a61d 2485{
a096309b
PZ
2486 struct perf_event *event = info;
2487 struct perf_event_context *ctx = event->ctx;
108b02cf 2488 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f 2489 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63cae12b 2490 bool reprogram = true;
a096309b 2491 int ret = 0;
0793a61d 2492
63b6da39 2493 raw_spin_lock(&cpuctx->ctx.lock);
39a43640 2494 if (ctx->task) {
b58f6b0d
PZ
2495 raw_spin_lock(&ctx->lock);
2496 task_ctx = ctx;
a096309b 2497
63cae12b 2498 reprogram = (ctx->task == current);
b58f6b0d 2499
39a43640 2500 /*
63cae12b
PZ
2501 * If the task is running, it must be running on this CPU,
2502 * otherwise we cannot reprogram things.
2503 *
 2504 * If it's not running, we don't care, ctx->lock will
2505 * serialize against it becoming runnable.
39a43640 2506 */
63cae12b
PZ
2507 if (task_curr(ctx->task) && !reprogram) {
2508 ret = -ESRCH;
2509 goto unlock;
2510 }
a096309b 2511
63cae12b 2512 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
63b6da39
PZ
2513 } else if (task_ctx) {
2514 raw_spin_lock(&task_ctx->lock);
2c29ef0f 2515 }
b58f6b0d 2516
33801b94 2517#ifdef CONFIG_CGROUP_PERF
2518 if (is_cgroup_event(event)) {
2519 /*
2520 * If the current cgroup doesn't match the event's
2521 * cgroup, we should not try to schedule it.
2522 */
2523 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2524 reprogram = cgroup_is_descendant(cgrp->css.cgroup,
2525 event->cgrp->css.cgroup);
2526 }
2527#endif
2528
63cae12b 2529 if (reprogram) {
a096309b
PZ
2530 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2531 add_event_to_ctx(event, ctx);
487f05e1 2532 ctx_resched(cpuctx, task_ctx, get_event_type(event));
a096309b
PZ
2533 } else {
2534 add_event_to_ctx(event, ctx);
2535 }
2536
63b6da39 2537unlock:
2c29ef0f 2538 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa 2539
a096309b 2540 return ret;
0793a61d
TG
2541}
2542
2543/*
a096309b
PZ
2544 * Attach a performance event to a context.
2545 *
2546 * Very similar to event_function_call, see comment there.
0793a61d
TG
2547 */
2548static void
cdd6c482
IM
2549perf_install_in_context(struct perf_event_context *ctx,
2550 struct perf_event *event,
0793a61d
TG
2551 int cpu)
2552{
a096309b 2553 struct task_struct *task = READ_ONCE(ctx->task);
39a43640 2554
fe4b04fa
PZ
2555 lockdep_assert_held(&ctx->mutex);
2556
0cda4c02
YZ
2557 if (event->cpu != -1)
2558 event->cpu = cpu;
c3f00c70 2559
0b8f1e2e
PZ
2560 /*
2561 * Ensures that if we can observe event->ctx, both the event and ctx
2562 * will be 'complete'. See perf_iterate_sb_cpu().
2563 */
2564 smp_store_release(&event->ctx, ctx);
2565
a096309b
PZ
2566 if (!task) {
2567 cpu_function_call(cpu, __perf_install_in_context, event);
2568 return;
2569 }
2570
2571 /*
2572 * Should not happen, we validate the ctx is still alive before calling.
2573 */
2574 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2575 return;
2576
39a43640
PZ
2577 /*
2578 * Installing events is tricky because we cannot rely on ctx->is_active
2579 * to be set in case this is the nr_events 0 -> 1 transition.
63cae12b
PZ
2580 *
2581 * Instead we use task_curr(), which tells us if the task is running.
2582 * However, since we use task_curr() outside of rq::lock, we can race
2583 * against the actual state. This means the result can be wrong.
2584 *
2585 * If we get a false positive, we retry, this is harmless.
2586 *
2587 * If we get a false negative, things are complicated. If we are after
2588 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2589 * value must be correct. If we're before, it doesn't matter since
2590 * perf_event_context_sched_in() will program the counter.
2591 *
2592 * However, this hinges on the remote context switch having observed
2593 * our task->perf_event_ctxp[] store, such that it will in fact take
2594 * ctx::lock in perf_event_context_sched_in().
2595 *
2596 * We do this by task_function_call(), if the IPI fails to hit the task
2597 * we know any future context switch of task must see the
 2598 * perf_event_ctxp[] store.
39a43640 2599 */
63cae12b 2600
63b6da39 2601 /*
63cae12b
PZ
2602 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2603 * task_cpu() load, such that if the IPI then does not find the task
2604 * running, a future context switch of that task must observe the
2605 * store.
63b6da39 2606 */
63cae12b
PZ
2607 smp_mb();
2608again:
2609 if (!task_function_call(task, __perf_install_in_context, event))
a096309b
PZ
2610 return;
2611
2612 raw_spin_lock_irq(&ctx->lock);
2613 task = ctx->task;
84c4e620 2614 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
a096309b
PZ
2615 /*
2616 * Cannot happen because we already checked above (which also
2617 * cannot happen), and we hold ctx->mutex, which serializes us
2618 * against perf_event_exit_task_context().
2619 */
63b6da39
PZ
2620 raw_spin_unlock_irq(&ctx->lock);
2621 return;
2622 }
39a43640 2623 /*
63cae12b
PZ
2624 * If the task is not running, ctx->lock will avoid it becoming so,
2625 * thus we can safely install the event.
39a43640 2626 */
63cae12b
PZ
2627 if (task_curr(task)) {
2628 raw_spin_unlock_irq(&ctx->lock);
2629 goto again;
2630 }
2631 add_event_to_ctx(event, ctx);
2632 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
2633}
2634
d859e29f 2635/*
cdd6c482 2636 * Cross CPU call to enable a performance event
d859e29f 2637 */
fae3fde6
PZ
2638static void __perf_event_enable(struct perf_event *event,
2639 struct perf_cpu_context *cpuctx,
2640 struct perf_event_context *ctx,
2641 void *info)
04289bb9 2642{
cdd6c482 2643 struct perf_event *leader = event->group_leader;
fae3fde6 2644 struct perf_event_context *task_ctx;
04289bb9 2645
6e801e01
PZ
2646 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2647 event->state <= PERF_EVENT_STATE_ERROR)
fae3fde6 2648 return;
3cbed429 2649
bd2afa49
PZ
2650 if (ctx->is_active)
2651 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2652
0d3d73aa 2653 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
04289bb9 2654
fae3fde6
PZ
2655 if (!ctx->is_active)
2656 return;
2657
e5d1367f 2658 if (!event_filter_match(event)) {
bd2afa49 2659 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2660 return;
e5d1367f 2661 }
f4c4176f 2662
04289bb9 2663 /*
cdd6c482 2664 * If the event is in a group and isn't the group leader,
d859e29f 2665 * then don't put it on unless the group is on.
04289bb9 2666 */
bd2afa49
PZ
2667 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2668 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2669 return;
bd2afa49 2670 }
fe4b04fa 2671
fae3fde6
PZ
2672 task_ctx = cpuctx->task_ctx;
2673 if (ctx->task)
2674 WARN_ON_ONCE(task_ctx != ctx);
d859e29f 2675
487f05e1 2676 ctx_resched(cpuctx, task_ctx, get_event_type(event));
7b648018
PZ
2677}
2678
d859e29f 2679/*
788faab7 2680 * Enable an event.
c93f7669 2681 *
cdd6c482
IM
2682 * If event->ctx is a cloned context, callers must make sure that
2683 * every task struct that event->ctx->task could possibly point to
c93f7669 2684 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2685 * perf_event_for_each_child or perf_event_for_each as described
2686 * for perf_event_disable.
d859e29f 2687 */
f63a8daa 2688static void _perf_event_enable(struct perf_event *event)
d859e29f 2689{
cdd6c482 2690 struct perf_event_context *ctx = event->ctx;
d859e29f 2691
7b648018 2692 raw_spin_lock_irq(&ctx->lock);
6e801e01
PZ
2693 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2694 event->state < PERF_EVENT_STATE_ERROR) {
7b648018 2695 raw_spin_unlock_irq(&ctx->lock);
d859e29f
PM
2696 return;
2697 }
2698
d859e29f 2699 /*
cdd6c482 2700 * If the event is in error state, clear that first.
7b648018
PZ
2701 *
2702 * That way, if we see the event in error state below, we know that it
2703 * has gone back into error state, as distinct from the task having
2704 * been scheduled away before the cross-call arrived.
d859e29f 2705 */
cdd6c482
IM
2706 if (event->state == PERF_EVENT_STATE_ERROR)
2707 event->state = PERF_EVENT_STATE_OFF;
e625cce1 2708 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa 2709
fae3fde6 2710 event_function_call(event, __perf_event_enable, NULL);
d859e29f 2711}
f63a8daa
PZ
2712
2713/*
2714 * See perf_event_disable();
2715 */
2716void perf_event_enable(struct perf_event *event)
2717{
2718 struct perf_event_context *ctx;
2719
2720 ctx = perf_event_ctx_lock(event);
2721 _perf_event_enable(event);
2722 perf_event_ctx_unlock(event, ctx);
2723}
dcfce4a0 2724EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2725
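Both perf_event_enable() and perf_event_disable() are also the entry points for in-kernel users of events created with perf_event_create_kernel_counter(). A minimal sketch of that pairing, assuming a CPU-bound cycle counter and a trivial overflow handler (the names and attr values here are illustrative, not taken from this file):

	#include <linux/err.h>
	#include <linux/perf_event.h>
	#include <linux/printk.h>
	#include <linux/smp.h>

	static struct perf_event *watch_event;

	static void watch_overflow(struct perf_event *event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
	{
		/* runs in NMI/IRQ context; keep it short */
		pr_info_ratelimited("cycle threshold hit on CPU %d\n",
				    smp_processor_id());
	}

	static int watch_start(void)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 1000000,
			.disabled	= 1,	/* start OFF, enable explicitly below */
		};

		watch_event = perf_event_create_kernel_counter(&attr, /*cpu=*/0,
							       /*task=*/NULL,
							       watch_overflow, NULL);
		if (IS_ERR(watch_event))
			return PTR_ERR(watch_event);

		perf_event_enable(watch_event);	/* takes ctx lock, IPIs the target CPU */
		return 0;
	}

	static void watch_stop(void)
	{
		perf_event_disable(watch_event);
		perf_event_release_kernel(watch_event);
	}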
375637bc
AS
2726struct stop_event_data {
2727 struct perf_event *event;
2728 unsigned int restart;
2729};
2730
95ff4ca2
AS
2731static int __perf_event_stop(void *info)
2732{
375637bc
AS
2733 struct stop_event_data *sd = info;
2734 struct perf_event *event = sd->event;
95ff4ca2 2735
375637bc 2736 /* if it's already INACTIVE, do nothing */
95ff4ca2
AS
2737 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2738 return 0;
2739
2740 /* matches smp_wmb() in event_sched_in() */
2741 smp_rmb();
2742
2743 /*
2744 * There is a window with interrupts enabled before we get here,
2745 * so we need to check again lest we try to stop another CPU's event.
2746 */
2747 if (READ_ONCE(event->oncpu) != smp_processor_id())
2748 return -EAGAIN;
2749
2750 event->pmu->stop(event, PERF_EF_UPDATE);
2751
375637bc
AS
2752 /*
2753 * May race with the actual stop (through perf_pmu_output_stop()),
 2754 * but it is only used for events with an AUX ring buffer, and such
2755 * events will refuse to restart because of rb::aux_mmap_count==0,
2756 * see comments in perf_aux_output_begin().
2757 *
788faab7 2758 * Since this is happening on an event-local CPU, no trace is lost
375637bc
AS
2759 * while restarting.
2760 */
2761 if (sd->restart)
c9bbdd48 2762 event->pmu->start(event, 0);
375637bc 2763
95ff4ca2
AS
2764 return 0;
2765}
2766
767ae086 2767static int perf_event_stop(struct perf_event *event, int restart)
375637bc
AS
2768{
2769 struct stop_event_data sd = {
2770 .event = event,
767ae086 2771 .restart = restart,
375637bc
AS
2772 };
2773 int ret = 0;
2774
2775 do {
2776 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2777 return 0;
2778
2779 /* matches smp_wmb() in event_sched_in() */
2780 smp_rmb();
2781
2782 /*
2783 * We only want to restart ACTIVE events, so if the event goes
2784 * inactive here (event->oncpu==-1), there's nothing more to do;
2785 * fall through with ret==-ENXIO.
2786 */
2787 ret = cpu_function_call(READ_ONCE(event->oncpu),
2788 __perf_event_stop, &sd);
2789 } while (ret == -EAGAIN);
2790
2791 return ret;
2792}
2793
2794/*
 2795 * In order to contain the amount of raciness and trickiness in the address
 2796 * filter configuration management, it is a two-part process:
2797 *
2798 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2799 * we update the addresses of corresponding vmas in
2800 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2801 * (p2) when an event is scheduled in (pmu::add), it calls
2802 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2803 * if the generation has changed since the previous call.
2804 *
2805 * If (p1) happens while the event is active, we restart it to force (p2).
2806 *
2807 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2808 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2809 * ioctl;
2810 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2811 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2812 * for reading;
2813 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2814 * of exec.
2815 */
2816void perf_event_addr_filters_sync(struct perf_event *event)
2817{
2818 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2819
2820 if (!has_addr_filter(event))
2821 return;
2822
2823 raw_spin_lock(&ifh->lock);
2824 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2825 event->pmu->addr_filters_sync(event);
2826 event->hw.addr_filters_gen = event->addr_filters_gen;
2827 }
2828 raw_spin_unlock(&ifh->lock);
2829}
2830EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
2831
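The (p1) update described above is normally kicked off from userspace through the PERF_EVENT_IOC_SET_FILTER ioctl, on an event whose PMU actually supports address filters (Intel PT, for instance). A hedged userspace sketch follows; the filter-string syntax shown ("filter <start>/<size>@<object>") and the object path are assumptions for illustration only:

	/* Userspace sketch: install an address filter on an already-open
	 * perf event fd.  If the PMU has no address-filter support the
	 * ioctl simply fails.
	 */
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	static int set_text_filter(int perf_fd)
	{
		/* trace only 0x1000 bytes starting at offset 0x4000 of the object */
		const char *filter = "filter 0x4000/0x1000@/usr/bin/foo";

		if (ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, filter) < 0) {
			perror("PERF_EVENT_IOC_SET_FILTER");
			return -1;
		}
		return 0;
	}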
f63a8daa 2832static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2833{
2023b359 2834 /*
cdd6c482 2835 * not supported on inherited events
2023b359 2836 */
2e939d1d 2837 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2838 return -EINVAL;
2839
cdd6c482 2840 atomic_add(refresh, &event->event_limit);
f63a8daa 2841 _perf_event_enable(event);
2023b359
PZ
2842
2843 return 0;
79f14641 2844}
f63a8daa
PZ
2845
2846/*
2847 * See perf_event_disable()
2848 */
2849int perf_event_refresh(struct perf_event *event, int refresh)
2850{
2851 struct perf_event_context *ctx;
2852 int ret;
2853
2854 ctx = perf_event_ctx_lock(event);
2855 ret = _perf_event_refresh(event, refresh);
2856 perf_event_ctx_unlock(event, ctx);
2857
2858 return ret;
2859}
26ca5c11 2860EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 2861
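perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH ioctl: the argument is added to event_limit and the event runs until that many overflows have been delivered, after which it disables itself. A sketch of the usual userspace pairing with signal-driven I/O; the fcntl() setup is an assumption about how callers typically use it, not something defined in this file:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* Arm an event for exactly `n` overflows, delivering SIGIO for each. */
	static int arm_for_n_overflows(int perf_fd, int n)
	{
		if (fcntl(perf_fd, F_SETFL, O_ASYNC) < 0 ||
		    fcntl(perf_fd, F_SETOWN, getpid()) < 0)
			return -1;

		/* counts down by one per overflow; the event is disabled at 0 */
		return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n);
	}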
32ff77e8
MC
2862static int perf_event_modify_breakpoint(struct perf_event *bp,
2863 struct perf_event_attr *attr)
2864{
2865 int err;
2866
2867 _perf_event_disable(bp);
2868
2869 err = modify_user_hw_breakpoint_check(bp, attr, true);
32ff77e8 2870
bf06278c 2871 if (!bp->attr.disabled)
32ff77e8 2872 _perf_event_enable(bp);
bf06278c
JO
2873
2874 return err;
32ff77e8
MC
2875}
2876
2877static int perf_event_modify_attr(struct perf_event *event,
2878 struct perf_event_attr *attr)
2879{
2880 if (event->attr.type != attr->type)
2881 return -EINVAL;
2882
2883 switch (event->attr.type) {
2884 case PERF_TYPE_BREAKPOINT:
2885 return perf_event_modify_breakpoint(event, attr);
2886 default:
2887 /* Place holder for future additions. */
2888 return -EOPNOTSUPP;
2889 }
2890}
2891
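perf_event_modify_attr() is reached from the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl and, as the switch above shows, currently only handles breakpoint events. A hedged userspace sketch of re-pointing an existing hardware breakpoint; the breakpoint type, length and address are placeholder assumptions:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>
	#include <linux/hw_breakpoint.h>

	/* Re-point an existing hardware breakpoint fd at a new address. */
	static int move_breakpoint(int bp_fd, unsigned long new_addr)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type	= PERF_TYPE_BREAKPOINT;	/* must match the event's type */
		attr.size	= sizeof(attr);
		attr.bp_type	= HW_BREAKPOINT_W;	/* watch for writes */
		attr.bp_addr	= new_addr;
		attr.bp_len	= HW_BREAKPOINT_LEN_8;

		return ioctl(bp_fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
	}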
5b0311e1
FW
2892static void ctx_sched_out(struct perf_event_context *ctx,
2893 struct perf_cpu_context *cpuctx,
2894 enum event_type_t event_type)
235c7fc7 2895{
6668128a 2896 struct perf_event *event, *tmp;
db24d33e 2897 int is_active = ctx->is_active;
235c7fc7 2898
c994d613 2899 lockdep_assert_held(&ctx->lock);
235c7fc7 2900
39a43640
PZ
2901 if (likely(!ctx->nr_events)) {
2902 /*
2903 * See __perf_remove_from_context().
2904 */
2905 WARN_ON_ONCE(ctx->is_active);
2906 if (ctx->task)
2907 WARN_ON_ONCE(cpuctx->task_ctx);
facc4307 2908 return;
39a43640
PZ
2909 }
2910
db24d33e 2911 ctx->is_active &= ~event_type;
3cbaa590
PZ
2912 if (!(ctx->is_active & EVENT_ALL))
2913 ctx->is_active = 0;
2914
63e30d3e
PZ
2915 if (ctx->task) {
2916 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2917 if (!ctx->is_active)
2918 cpuctx->task_ctx = NULL;
2919 }
facc4307 2920
8fdc6539
PZ
2921 /*
2922 * Always update time if it was set; not only when it changes.
2923 * Otherwise we can 'forget' to update time for any but the last
2924 * context we sched out. For example:
2925 *
2926 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2927 * ctx_sched_out(.event_type = EVENT_PINNED)
2928 *
2929 * would only update time for the pinned events.
2930 */
3cbaa590
PZ
2931 if (is_active & EVENT_TIME) {
2932 /* update (and stop) ctx time */
2933 update_context_time(ctx);
2934 update_cgrp_time_from_cpuctx(cpuctx);
2935 }
2936
8fdc6539
PZ
2937 is_active ^= ctx->is_active; /* changed bits */
2938
3cbaa590 2939 if (!ctx->nr_active || !(is_active & EVENT_ALL))
facc4307 2940 return;
5b0311e1 2941
075e0b00 2942 perf_pmu_disable(ctx->pmu);
3cbaa590 2943 if (is_active & EVENT_PINNED) {
6668128a 2944 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
889ff015 2945 group_sched_out(event, cpuctx, ctx);
9ed6060d 2946 }
889ff015 2947
3cbaa590 2948 if (is_active & EVENT_FLEXIBLE) {
6668128a 2949 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
8c9ed8e1 2950 group_sched_out(event, cpuctx, ctx);
9ed6060d 2951 }
1b9a644f 2952 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2953}
2954
564c2b21 2955/*
5a3126d4
PZ
2956 * Test whether two contexts are equivalent, i.e. whether they have both been
2957 * cloned from the same version of the same context.
2958 *
2959 * Equivalence is measured using a generation number in the context that is
2960 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2961 * and list_del_event().
564c2b21 2962 */
cdd6c482
IM
2963static int context_equiv(struct perf_event_context *ctx1,
2964 struct perf_event_context *ctx2)
564c2b21 2965{
211de6eb
PZ
2966 lockdep_assert_held(&ctx1->lock);
2967 lockdep_assert_held(&ctx2->lock);
2968
5a3126d4
PZ
2969 /* Pinning disables the swap optimization */
2970 if (ctx1->pin_count || ctx2->pin_count)
2971 return 0;
2972
2973 /* If ctx1 is the parent of ctx2 */
2974 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2975 return 1;
2976
2977 /* If ctx2 is the parent of ctx1 */
2978 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2979 return 1;
2980
2981 /*
2982 * If ctx1 and ctx2 have the same parent; we flatten the parent
2983 * hierarchy, see perf_event_init_context().
2984 */
2985 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2986 ctx1->parent_gen == ctx2->parent_gen)
2987 return 1;
2988
2989 /* Unmatched */
2990 return 0;
564c2b21
PM
2991}
2992
cdd6c482
IM
2993static void __perf_event_sync_stat(struct perf_event *event,
2994 struct perf_event *next_event)
bfbd3381
PZ
2995{
2996 u64 value;
2997
cdd6c482 2998 if (!event->attr.inherit_stat)
bfbd3381
PZ
2999 return;
3000
3001 /*
cdd6c482 3002 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
3003 * because we're in the middle of a context switch and have IRQs
3004 * disabled, which upsets smp_call_function_single(), however
cdd6c482 3005 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
3006 * don't need to use it.
3007 */
0d3d73aa 3008 if (event->state == PERF_EVENT_STATE_ACTIVE)
3dbebf15 3009 event->pmu->read(event);
bfbd3381 3010
0d3d73aa 3011 perf_event_update_time(event);
bfbd3381
PZ
3012
3013 /*
cdd6c482 3014 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
3015 * values when we flip the contexts.
3016 */
e7850595
PZ
3017 value = local64_read(&next_event->count);
3018 value = local64_xchg(&event->count, value);
3019 local64_set(&next_event->count, value);
bfbd3381 3020
cdd6c482
IM
3021 swap(event->total_time_enabled, next_event->total_time_enabled);
3022 swap(event->total_time_running, next_event->total_time_running);
19d2e755 3023
bfbd3381 3024 /*
19d2e755 3025 * Since we swizzled the values, update the user visible data too.
bfbd3381 3026 */
cdd6c482
IM
3027 perf_event_update_userpage(event);
3028 perf_event_update_userpage(next_event);
bfbd3381
PZ
3029}
3030
cdd6c482
IM
3031static void perf_event_sync_stat(struct perf_event_context *ctx,
3032 struct perf_event_context *next_ctx)
bfbd3381 3033{
cdd6c482 3034 struct perf_event *event, *next_event;
bfbd3381
PZ
3035
3036 if (!ctx->nr_stat)
3037 return;
3038
02ffdbc8
PZ
3039 update_context_time(ctx);
3040
cdd6c482
IM
3041 event = list_first_entry(&ctx->event_list,
3042 struct perf_event, event_entry);
bfbd3381 3043
cdd6c482
IM
3044 next_event = list_first_entry(&next_ctx->event_list,
3045 struct perf_event, event_entry);
bfbd3381 3046
cdd6c482
IM
3047 while (&event->event_entry != &ctx->event_list &&
3048 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 3049
cdd6c482 3050 __perf_event_sync_stat(event, next_event);
bfbd3381 3051
cdd6c482
IM
3052 event = list_next_entry(event, event_entry);
3053 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
3054 }
3055}
3056
fe4b04fa
PZ
3057static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
3058 struct task_struct *next)
0793a61d 3059{
8dc85d54 3060 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 3061 struct perf_event_context *next_ctx;
5a3126d4 3062 struct perf_event_context *parent, *next_parent;
108b02cf 3063 struct perf_cpu_context *cpuctx;
c93f7669 3064 int do_switch = 1;
0793a61d 3065
108b02cf
PZ
3066 if (likely(!ctx))
3067 return;
10989fb2 3068
108b02cf
PZ
3069 cpuctx = __get_cpu_context(ctx);
3070 if (!cpuctx->task_ctx)
0793a61d
TG
3071 return;
3072
c93f7669 3073 rcu_read_lock();
8dc85d54 3074 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
3075 if (!next_ctx)
3076 goto unlock;
3077
3078 parent = rcu_dereference(ctx->parent_ctx);
3079 next_parent = rcu_dereference(next_ctx->parent_ctx);
3080
3081 /* If neither context have a parent context; they cannot be clones. */
802c8a61 3082 if (!parent && !next_parent)
5a3126d4
PZ
3083 goto unlock;
3084
3085 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
3086 /*
3087 * Looks like the two contexts are clones, so we might be
3088 * able to optimize the context switch. We lock both
3089 * contexts and check that they are clones under the
3090 * lock (including re-checking that neither has been
3091 * uncloned in the meantime). It doesn't matter which
3092 * order we take the locks because no other cpu could
3093 * be trying to lock both of these tasks.
3094 */
e625cce1
TG
3095 raw_spin_lock(&ctx->lock);
3096 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 3097 if (context_equiv(ctx, next_ctx)) {
63b6da39
PZ
3098 WRITE_ONCE(ctx->task, next);
3099 WRITE_ONCE(next_ctx->task, task);
5a158c3c
YZ
3100
3101 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
3102
63b6da39
PZ
3103 /*
3104 * RCU_INIT_POINTER here is safe because we've not
3105 * modified the ctx and the above modification of
3106 * ctx->task and ctx->task_ctx_data are immaterial
3107 * since those values are always verified under
3108 * ctx->lock which we're now holding.
3109 */
3110 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
3111 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
3112
c93f7669 3113 do_switch = 0;
bfbd3381 3114
cdd6c482 3115 perf_event_sync_stat(ctx, next_ctx);
c93f7669 3116 }
e625cce1
TG
3117 raw_spin_unlock(&next_ctx->lock);
3118 raw_spin_unlock(&ctx->lock);
564c2b21 3119 }
5a3126d4 3120unlock:
c93f7669 3121 rcu_read_unlock();
564c2b21 3122
c93f7669 3123 if (do_switch) {
facc4307 3124 raw_spin_lock(&ctx->lock);
487f05e1 3125 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
facc4307 3126 raw_spin_unlock(&ctx->lock);
c93f7669 3127 }
0793a61d
TG
3128}
3129
e48c1788
PZ
3130static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3131
ba532500
YZ
3132void perf_sched_cb_dec(struct pmu *pmu)
3133{
e48c1788
PZ
3134 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3135
ba532500 3136 this_cpu_dec(perf_sched_cb_usages);
e48c1788
PZ
3137
3138 if (!--cpuctx->sched_cb_usage)
3139 list_del(&cpuctx->sched_cb_entry);
ba532500
YZ
3140}
3141
e48c1788 3142
ba532500
YZ
3143void perf_sched_cb_inc(struct pmu *pmu)
3144{
e48c1788
PZ
3145 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3146
3147 if (!cpuctx->sched_cb_usage++)
3148 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3149
ba532500
YZ
3150 this_cpu_inc(perf_sched_cb_usages);
3151}
3152
3153/*
3154 * This function provides the context switch callback to the lower code
3155 * layer. It is invoked ONLY when the context switch callback is enabled.
09e61b4f
PZ
3156 *
 3157 * This callback is relevant even to per-cpu events; for example multi-event
3158 * PEBS requires this to provide PID/TID information. This requires we flush
3159 * all queued PEBS records before we context switch to a new task.
ba532500
YZ
3160 */
3161static void perf_pmu_sched_task(struct task_struct *prev,
3162 struct task_struct *next,
3163 bool sched_in)
3164{
3165 struct perf_cpu_context *cpuctx;
3166 struct pmu *pmu;
ba532500
YZ
3167
3168 if (prev == next)
3169 return;
3170
e48c1788 3171 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
1fd7e416 3172 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
ba532500 3173
e48c1788
PZ
3174 if (WARN_ON_ONCE(!pmu->sched_task))
3175 continue;
ba532500 3176
e48c1788
PZ
3177 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3178 perf_pmu_disable(pmu);
ba532500 3179
e48c1788 3180 pmu->sched_task(cpuctx->task_ctx, sched_in);
ba532500 3181
e48c1788
PZ
3182 perf_pmu_enable(pmu);
3183 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
ba532500 3184 }
ba532500
YZ
3185}
3186
45ac1403
AH
3187static void perf_event_switch(struct task_struct *task,
3188 struct task_struct *next_prev, bool sched_in);
3189
8dc85d54
PZ
3190#define for_each_task_context_nr(ctxn) \
3191 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
3192
3193/*
3194 * Called from scheduler to remove the events of the current task,
3195 * with interrupts disabled.
3196 *
3197 * We stop each event and update the event value in event->count.
3198 *
3199 * This does not protect us against NMI, but disable()
3200 * sets the disabled bit in the control field of event _before_
 3201 * accessing the event control register. If an NMI hits, then it will
3202 * not restart the event.
3203 */
ab0cce56
JO
3204void __perf_event_task_sched_out(struct task_struct *task,
3205 struct task_struct *next)
8dc85d54
PZ
3206{
3207 int ctxn;
3208
ba532500
YZ
3209 if (__this_cpu_read(perf_sched_cb_usages))
3210 perf_pmu_sched_task(task, next, false);
3211
45ac1403
AH
3212 if (atomic_read(&nr_switch_events))
3213 perf_event_switch(task, next, false);
3214
8dc85d54
PZ
3215 for_each_task_context_nr(ctxn)
3216 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
3217
3218 /*
 3219 * If cgroup events exist on this CPU, then we need
 3220 * to check if we have to switch out PMU state.
 3221 * Cgroup events are system-wide mode only.
3222 */
4a32fea9 3223 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 3224 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
3225}
3226
5b0311e1
FW
3227/*
3228 * Called with IRQs disabled
3229 */
3230static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
3231 enum event_type_t event_type)
3232{
3233 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
3234}
3235
1cac7b1a
PZ
3236static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
3237 int (*func)(struct perf_event *, void *), void *data)
0793a61d 3238{
1cac7b1a
PZ
3239 struct perf_event **evt, *evt1, *evt2;
3240 int ret;
8e1a2031 3241
1cac7b1a
PZ
3242 evt1 = perf_event_groups_first(groups, -1);
3243 evt2 = perf_event_groups_first(groups, cpu);
3244
3245 while (evt1 || evt2) {
3246 if (evt1 && evt2) {
3247 if (evt1->group_index < evt2->group_index)
3248 evt = &evt1;
3249 else
3250 evt = &evt2;
3251 } else if (evt1) {
3252 evt = &evt1;
3253 } else {
3254 evt = &evt2;
8e1a2031 3255 }
1cac7b1a
PZ
3256
3257 ret = func(*evt, data);
3258 if (ret)
3259 return ret;
3260
3261 *evt = perf_event_groups_next(*evt);
8e1a2031 3262 }
0793a61d 3263
1cac7b1a
PZ
3264 return 0;
3265}
3266
3267struct sched_in_data {
3268 struct perf_event_context *ctx;
3269 struct perf_cpu_context *cpuctx;
3270 int can_add_hw;
3271};
3272
3273static int pinned_sched_in(struct perf_event *event, void *data)
3274{
3275 struct sched_in_data *sid = data;
3276
3277 if (event->state <= PERF_EVENT_STATE_OFF)
3278 return 0;
3279
3280 if (!event_filter_match(event))
3281 return 0;
3282
6668128a
PZ
3283 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
3284 if (!group_sched_in(event, sid->cpuctx, sid->ctx))
3285 list_add_tail(&event->active_list, &sid->ctx->pinned_active);
3286 }
1cac7b1a
PZ
3287
3288 /*
3289 * If this pinned group hasn't been scheduled,
3290 * put it in error state.
3291 */
3292 if (event->state == PERF_EVENT_STATE_INACTIVE)
3293 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
3294
3295 return 0;
3296}
3297
3298static int flexible_sched_in(struct perf_event *event, void *data)
3299{
3300 struct sched_in_data *sid = data;
3301
3302 if (event->state <= PERF_EVENT_STATE_OFF)
3303 return 0;
3304
3305 if (!event_filter_match(event))
3306 return 0;
3307
3308 if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
6668128a
PZ
3309 if (!group_sched_in(event, sid->cpuctx, sid->ctx))
3310 list_add_tail(&event->active_list, &sid->ctx->flexible_active);
3311 else
1cac7b1a 3312 sid->can_add_hw = 0;
3b6f9e5c 3313 }
1cac7b1a
PZ
3314
3315 return 0;
5b0311e1
FW
3316}
3317
3318static void
1cac7b1a
PZ
3319ctx_pinned_sched_in(struct perf_event_context *ctx,
3320 struct perf_cpu_context *cpuctx)
5b0311e1 3321{
1cac7b1a
PZ
3322 struct sched_in_data sid = {
3323 .ctx = ctx,
3324 .cpuctx = cpuctx,
3325 .can_add_hw = 1,
3326 };
3b6f9e5c 3327
1cac7b1a
PZ
3328 visit_groups_merge(&ctx->pinned_groups,
3329 smp_processor_id(),
3330 pinned_sched_in, &sid);
3331}
8e1a2031 3332
1cac7b1a
PZ
3333static void
3334ctx_flexible_sched_in(struct perf_event_context *ctx,
3335 struct perf_cpu_context *cpuctx)
3336{
3337 struct sched_in_data sid = {
3338 .ctx = ctx,
3339 .cpuctx = cpuctx,
3340 .can_add_hw = 1,
3341 };
0793a61d 3342
1cac7b1a
PZ
3343 visit_groups_merge(&ctx->flexible_groups,
3344 smp_processor_id(),
3345 flexible_sched_in, &sid);
5b0311e1
FW
3346}
3347
3348static void
3349ctx_sched_in(struct perf_event_context *ctx,
3350 struct perf_cpu_context *cpuctx,
e5d1367f
SE
3351 enum event_type_t event_type,
3352 struct task_struct *task)
5b0311e1 3353{
db24d33e 3354 int is_active = ctx->is_active;
c994d613
PZ
3355 u64 now;
3356
3357 lockdep_assert_held(&ctx->lock);
e5d1367f 3358
5b0311e1 3359 if (likely(!ctx->nr_events))
facc4307 3360 return;
5b0311e1 3361
3cbaa590 3362 ctx->is_active |= (event_type | EVENT_TIME);
63e30d3e
PZ
3363 if (ctx->task) {
3364 if (!is_active)
3365 cpuctx->task_ctx = ctx;
3366 else
3367 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3368 }
3369
3cbaa590
PZ
3370 is_active ^= ctx->is_active; /* changed bits */
3371
3372 if (is_active & EVENT_TIME) {
3373 /* start ctx time */
3374 now = perf_clock();
3375 ctx->timestamp = now;
3376 perf_cgroup_set_timestamp(task, ctx);
3377 }
3378
5b0311e1
FW
3379 /*
3380 * First go through the list and put on any pinned groups
3381 * in order to give them the best chance of going on.
3382 */
3cbaa590 3383 if (is_active & EVENT_PINNED)
6e37738a 3384 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
3385
3386 /* Then walk through the lower prio flexible groups */
3cbaa590 3387 if (is_active & EVENT_FLEXIBLE)
6e37738a 3388 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
3389}
3390
329c0e01 3391static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
3392 enum event_type_t event_type,
3393 struct task_struct *task)
329c0e01
FW
3394{
3395 struct perf_event_context *ctx = &cpuctx->ctx;
3396
e5d1367f 3397 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
3398}
3399
e5d1367f
SE
3400static void perf_event_context_sched_in(struct perf_event_context *ctx,
3401 struct task_struct *task)
235c7fc7 3402{
108b02cf 3403 struct perf_cpu_context *cpuctx;
235c7fc7 3404
108b02cf 3405 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
3406 if (cpuctx->task_ctx == ctx)
3407 return;
3408
facc4307 3409 perf_ctx_lock(cpuctx, ctx);
fdccc3fb 3410 /*
3411 * We must check ctx->nr_events while holding ctx->lock, such
3412 * that we serialize against perf_install_in_context().
3413 */
3414 if (!ctx->nr_events)
3415 goto unlock;
3416
1b9a644f 3417 perf_pmu_disable(ctx->pmu);
329c0e01
FW
3418 /*
3419 * We want to keep the following priority order:
3420 * cpu pinned (that don't need to move), task pinned,
3421 * cpu flexible, task flexible.
fe45bafb
AS
3422 *
3423 * However, if task's ctx is not carrying any pinned
3424 * events, no need to flip the cpuctx's events around.
329c0e01 3425 */
8e1a2031 3426 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
fe45bafb 3427 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
63e30d3e 3428 perf_event_sched_in(cpuctx, ctx, task);
facc4307 3429 perf_pmu_enable(ctx->pmu);
fdccc3fb 3430
3431unlock:
facc4307 3432 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
3433}
3434
8dc85d54
PZ
3435/*
3436 * Called from scheduler to add the events of the current task
3437 * with interrupts disabled.
3438 *
3439 * We restore the event value and then enable it.
3440 *
3441 * This does not protect us against NMI, but enable()
3442 * sets the enabled bit in the control field of event _before_
 3443 * accessing the event control register. If an NMI hits, then it will
3444 * keep the event running.
3445 */
ab0cce56
JO
3446void __perf_event_task_sched_in(struct task_struct *prev,
3447 struct task_struct *task)
8dc85d54
PZ
3448{
3449 struct perf_event_context *ctx;
3450 int ctxn;
3451
7e41d177
PZ
3452 /*
3453 * If cgroup events exist on this CPU, then we need to check if we have
 3454 * to switch in PMU state; cgroup events are system-wide mode only.
3455 *
3456 * Since cgroup events are CPU events, we must schedule these in before
3457 * we schedule in the task events.
3458 */
3459 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3460 perf_cgroup_sched_in(prev, task);
3461
8dc85d54
PZ
3462 for_each_task_context_nr(ctxn) {
3463 ctx = task->perf_event_ctxp[ctxn];
3464 if (likely(!ctx))
3465 continue;
3466
e5d1367f 3467 perf_event_context_sched_in(ctx, task);
8dc85d54 3468 }
d010b332 3469
45ac1403
AH
3470 if (atomic_read(&nr_switch_events))
3471 perf_event_switch(task, prev, true);
3472
ba532500
YZ
3473 if (__this_cpu_read(perf_sched_cb_usages))
3474 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
3475}
3476
abd50713
PZ
3477static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3478{
3479 u64 frequency = event->attr.sample_freq;
3480 u64 sec = NSEC_PER_SEC;
3481 u64 divisor, dividend;
3482
3483 int count_fls, nsec_fls, frequency_fls, sec_fls;
3484
3485 count_fls = fls64(count);
3486 nsec_fls = fls64(nsec);
3487 frequency_fls = fls64(frequency);
3488 sec_fls = 30;
3489
3490 /*
3491 * We got @count in @nsec, with a target of sample_freq HZ
3492 * the target period becomes:
3493 *
3494 * @count * 10^9
3495 * period = -------------------
3496 * @nsec * sample_freq
3497 *
3498 */
3499
3500 /*
3501 * Reduce accuracy by one bit such that @a and @b converge
3502 * to a similar magnitude.
3503 */
fe4b04fa 3504#define REDUCE_FLS(a, b) \
abd50713
PZ
3505do { \
3506 if (a##_fls > b##_fls) { \
3507 a >>= 1; \
3508 a##_fls--; \
3509 } else { \
3510 b >>= 1; \
3511 b##_fls--; \
3512 } \
3513} while (0)
3514
3515 /*
3516 * Reduce accuracy until either term fits in a u64, then proceed with
3517 * the other, so that finally we can do a u64/u64 division.
3518 */
3519 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3520 REDUCE_FLS(nsec, frequency);
3521 REDUCE_FLS(sec, count);
3522 }
3523
3524 if (count_fls + sec_fls > 64) {
3525 divisor = nsec * frequency;
3526
3527 while (count_fls + sec_fls > 64) {
3528 REDUCE_FLS(count, sec);
3529 divisor >>= 1;
3530 }
3531
3532 dividend = count * sec;
3533 } else {
3534 dividend = count * sec;
3535
3536 while (nsec_fls + frequency_fls > 64) {
3537 REDUCE_FLS(nsec, frequency);
3538 dividend >>= 1;
3539 }
3540
3541 divisor = nsec * frequency;
3542 }
3543
f6ab91ad
PZ
3544 if (!divisor)
3545 return dividend;
3546
abd50713
PZ
3547 return div64_u64(dividend, divisor);
3548}
3549
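/*
 * Illustrative sketch, not part of this file: when the inputs are small
 * enough that the 64-bit multiplications cannot overflow, the REDUCE_FLS()
 * dance above is a no-op and the target period can be computed directly.
 * E.g. a hypothetical 1,000,000 events counted over 10ms at
 * sample_freq = 1000Hz gives 1e6 * 1e9 / (1e7 * 1e3) = 100000 events/sample.
 */
static u64 naive_calculate_period(u64 count, u64 nsec, u64 frequency)
{
	/* Overflows for large @count or @nsec; that is what the reduction above avoids. */
	return div64_u64(count * NSEC_PER_SEC, nsec * frequency);
}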
e050e3f0
SE
3550static DEFINE_PER_CPU(int, perf_throttled_count);
3551static DEFINE_PER_CPU(u64, perf_throttled_seq);
3552
f39d47ff 3553static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 3554{
cdd6c482 3555 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 3556 s64 period, sample_period;
bd2b5b12
PZ
3557 s64 delta;
3558
abd50713 3559 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
3560
3561 delta = (s64)(period - hwc->sample_period);
3562 delta = (delta + 7) / 8; /* low pass filter */
3563
3564 sample_period = hwc->sample_period + delta;
3565
3566 if (!sample_period)
3567 sample_period = 1;
3568
bd2b5b12 3569 hwc->sample_period = sample_period;
abd50713 3570
e7850595 3571 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
3572 if (disable)
3573 event->pmu->stop(event, PERF_EF_UPDATE);
3574
e7850595 3575 local64_set(&hwc->period_left, 0);
f39d47ff
SE
3576
3577 if (disable)
3578 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 3579 }
bd2b5b12
PZ
3580}
3581
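/*
 * Illustrative worked example, not part of this file: the "(delta + 7) / 8"
 * step above is a simple low-pass filter; only an eighth of the distance to
 * the newly computed period is applied per adjustment, so the period
 * converges over several ticks instead of jumping. With a hypothetical
 * hwc->sample_period of 10000 and a computed target period of 18000:
 *
 *	delta         = 18000 - 10000  = 8000
 *	delta         = (8000 + 7) / 8 = 1000
 *	sample_period = 10000 + 1000   = 11000
 */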
e050e3f0
SE
3582/*
3583 * combine freq adjustment with unthrottling to avoid two passes over the
3584 * events. At the same time, make sure that having freq events does not change
3585 * the rate of unthrottling as that would introduce bias.
3586 */
3587static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3588 int needs_unthr)
60db5e09 3589{
cdd6c482
IM
3590 struct perf_event *event;
3591 struct hw_perf_event *hwc;
e050e3f0 3592 u64 now, period = TICK_NSEC;
abd50713 3593 s64 delta;
60db5e09 3594
e050e3f0
SE
3595 /*
3596 * only need to iterate over all events iff:
3597 * - the context has events in frequency mode (needs freq adjust)
3598 * - there are events to unthrottle on this cpu
3599 */
3600 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
3601 return;
3602
e050e3f0 3603 raw_spin_lock(&ctx->lock);
f39d47ff 3604 perf_pmu_disable(ctx->pmu);
e050e3f0 3605
03541f8b 3606 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 3607 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
3608 continue;
3609
5632ab12 3610 if (!event_filter_match(event))
5d27c23d
PZ
3611 continue;
3612
44377277
AS
3613 perf_pmu_disable(event->pmu);
3614
cdd6c482 3615 hwc = &event->hw;
6a24ed6c 3616
ae23bff1 3617 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 3618 hwc->interrupts = 0;
cdd6c482 3619 perf_log_throttle(event, 1);
a4eaf7f1 3620 event->pmu->start(event, 0);
a78ac325
PZ
3621 }
3622
cdd6c482 3623 if (!event->attr.freq || !event->attr.sample_freq)
44377277 3624 goto next;
60db5e09 3625
e050e3f0
SE
3626 /*
3627 * stop the event and update event->count
3628 */
3629 event->pmu->stop(event, PERF_EF_UPDATE);
3630
e7850595 3631 now = local64_read(&event->count);
abd50713
PZ
3632 delta = now - hwc->freq_count_stamp;
3633 hwc->freq_count_stamp = now;
60db5e09 3634
e050e3f0
SE
3635 /*
3636 * restart the event
3637 * reload only if value has changed
f39d47ff
SE
3638 * we have stopped the event so tell that
3639 * to perf_adjust_period() to avoid stopping it
3640 * twice.
e050e3f0 3641 */
abd50713 3642 if (delta > 0)
f39d47ff 3643 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3644
3645 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3646 next:
3647 perf_pmu_enable(event->pmu);
60db5e09 3648 }
e050e3f0 3649
f39d47ff 3650 perf_pmu_enable(ctx->pmu);
e050e3f0 3651 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3652}
3653
235c7fc7 3654/*
8703a7cf 3655 * Move @event to the tail of the @ctx's eligible events.
235c7fc7 3656 */
8703a7cf 3657static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
0793a61d 3658{
dddd3379
TG
3659 /*
3660 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
3661 * disabled by the inheritance code.
3662 */
8703a7cf
PZ
3663 if (ctx->rotate_disable)
3664 return;
8e1a2031 3665
8703a7cf
PZ
3666 perf_event_groups_delete(&ctx->flexible_groups, event);
3667 perf_event_groups_insert(&ctx->flexible_groups, event);
235c7fc7
IM
3668}
3669
8d5bce0c
PZ
3670static inline struct perf_event *
3671ctx_first_active(struct perf_event_context *ctx)
235c7fc7 3672{
8d5bce0c
PZ
3673 return list_first_entry_or_null(&ctx->flexible_active,
3674 struct perf_event, active_list);
3675}
3676
3677static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
3678{
3679 struct perf_event *cpu_event = NULL, *task_event = NULL;
3680 bool cpu_rotate = false, task_rotate = false;
8dc85d54 3681 struct perf_event_context *ctx = NULL;
8d5bce0c
PZ
3682
3683 /*
3684 * Since we run this from IRQ context, nobody can install new
3685 * events, thus the event count values are stable.
3686 */
7fc23a53 3687
b5ab4cd5 3688 if (cpuctx->ctx.nr_events) {
b5ab4cd5 3689 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
8d5bce0c 3690 cpu_rotate = true;
b5ab4cd5 3691 }
235c7fc7 3692
8dc85d54 3693 ctx = cpuctx->task_ctx;
b5ab4cd5 3694 if (ctx && ctx->nr_events) {
b5ab4cd5 3695 if (ctx->nr_events != ctx->nr_active)
8d5bce0c 3696 task_rotate = true;
b5ab4cd5 3697 }
9717e6cd 3698
8d5bce0c
PZ
3699 if (!(cpu_rotate || task_rotate))
3700 return false;
0f5a2601 3701
facc4307 3702 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 3703 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 3704
8d5bce0c
PZ
3705 if (task_rotate)
3706 task_event = ctx_first_active(ctx);
3707 if (cpu_rotate)
3708 cpu_event = ctx_first_active(&cpuctx->ctx);
8703a7cf 3709
8d5bce0c
PZ
3710 /*
3711 * As per the order given at ctx_resched(), first 'pop' task flexible
3712 * and then, if needed, CPU flexible.
3713 */
3714 if (task_event || (ctx && cpu_event))
e050e3f0 3715 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
8d5bce0c
PZ
3716 if (cpu_event)
3717 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
0793a61d 3718
8d5bce0c
PZ
3719 if (task_event)
3720 rotate_ctx(ctx, task_event);
3721 if (cpu_event)
3722 rotate_ctx(&cpuctx->ctx, cpu_event);
235c7fc7 3723
e050e3f0 3724 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 3725
0f5a2601
PZ
3726 perf_pmu_enable(cpuctx->ctx.pmu);
3727 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
9e630205 3728
8d5bce0c 3729 return true;
e9d2b064
PZ
3730}
3731
3732void perf_event_task_tick(void)
3733{
2fde4f94
MR
3734 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3735 struct perf_event_context *ctx, *tmp;
e050e3f0 3736 int throttled;
b5ab4cd5 3737
16444645 3738 lockdep_assert_irqs_disabled();
e9d2b064 3739
e050e3f0
SE
3740 __this_cpu_inc(perf_throttled_seq);
3741 throttled = __this_cpu_xchg(perf_throttled_count, 0);
555e0c1e 3742 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
e050e3f0 3743
2fde4f94 3744 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 3745 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
3746}
3747
889ff015
FW
3748static int event_enable_on_exec(struct perf_event *event,
3749 struct perf_event_context *ctx)
3750{
3751 if (!event->attr.enable_on_exec)
3752 return 0;
3753
3754 event->attr.enable_on_exec = 0;
3755 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3756 return 0;
3757
0d3d73aa 3758 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
889ff015
FW
3759
3760 return 1;
3761}
3762
57e7986e 3763/*
cdd6c482 3764 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
3765 * This expects task == current.
3766 */
c1274499 3767static void perf_event_enable_on_exec(int ctxn)
57e7986e 3768{
c1274499 3769 struct perf_event_context *ctx, *clone_ctx = NULL;
487f05e1 3770 enum event_type_t event_type = 0;
3e349507 3771 struct perf_cpu_context *cpuctx;
cdd6c482 3772 struct perf_event *event;
57e7986e
PM
3773 unsigned long flags;
3774 int enabled = 0;
3775
3776 local_irq_save(flags);
c1274499 3777 ctx = current->perf_event_ctxp[ctxn];
cdd6c482 3778 if (!ctx || !ctx->nr_events)
57e7986e
PM
3779 goto out;
3780
3e349507
PZ
3781 cpuctx = __get_cpu_context(ctx);
3782 perf_ctx_lock(cpuctx, ctx);
7fce2509 3783 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
487f05e1 3784 list_for_each_entry(event, &ctx->event_list, event_entry) {
3e349507 3785 enabled |= event_enable_on_exec(event, ctx);
487f05e1
AS
3786 event_type |= get_event_type(event);
3787 }
57e7986e
PM
3788
3789 /*
3e349507 3790 * Unclone and reschedule this context if we enabled any event.
57e7986e 3791 */
3e349507 3792 if (enabled) {
211de6eb 3793 clone_ctx = unclone_ctx(ctx);
487f05e1 3794 ctx_resched(cpuctx, ctx, event_type);
7bbba0eb
PZ
3795 } else {
3796 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
3e349507
PZ
3797 }
3798 perf_ctx_unlock(cpuctx, ctx);
57e7986e 3799
9ed6060d 3800out:
57e7986e 3801 local_irq_restore(flags);
211de6eb
PZ
3802
3803 if (clone_ctx)
3804 put_ctx(clone_ctx);
57e7986e
PM
3805}
3806
0492d4c5
PZ
3807struct perf_read_data {
3808 struct perf_event *event;
3809 bool group;
7d88962e 3810 int ret;
0492d4c5
PZ
3811};
3812
451d24d1 3813static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
d6a2f903 3814{
d6a2f903
DCC
3815 u16 local_pkg, event_pkg;
3816
3817 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
451d24d1
PZ
3818 int local_cpu = smp_processor_id();
3819
3820 event_pkg = topology_physical_package_id(event_cpu);
3821 local_pkg = topology_physical_package_id(local_cpu);
d6a2f903
DCC
3822
3823 if (event_pkg == local_pkg)
3824 return local_cpu;
3825 }
3826
3827 return event_cpu;
3828}
3829
0793a61d 3830/*
cdd6c482 3831 * Cross CPU call to read the hardware event
0793a61d 3832 */
cdd6c482 3833static void __perf_event_read(void *info)
0793a61d 3834{
0492d4c5
PZ
3835 struct perf_read_data *data = info;
3836 struct perf_event *sub, *event = data->event;
cdd6c482 3837 struct perf_event_context *ctx = event->ctx;
108b02cf 3838 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4a00c16e 3839 struct pmu *pmu = event->pmu;
621a01ea 3840
e1ac3614
PM
3841 /*
3842 * If this is a task context, we need to check whether it is
3843 * the current task context of this cpu. If not, it has been
3844 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3845 * event->count would have been updated to a recent sample
3846 * when the event was scheduled out.
e1ac3614
PM
3847 */
3848 if (ctx->task && cpuctx->task_ctx != ctx)
3849 return;
3850
e625cce1 3851 raw_spin_lock(&ctx->lock);
0c1cbc18 3852 if (ctx->is_active & EVENT_TIME) {
542e72fc 3853 update_context_time(ctx);
e5d1367f
SE
3854 update_cgrp_time_from_event(event);
3855 }
0492d4c5 3856
0d3d73aa
PZ
3857 perf_event_update_time(event);
3858 if (data->group)
3859 perf_event_update_sibling_time(event);
0c1cbc18 3860
4a00c16e
SB
3861 if (event->state != PERF_EVENT_STATE_ACTIVE)
3862 goto unlock;
0492d4c5 3863
4a00c16e
SB
3864 if (!data->group) {
3865 pmu->read(event);
3866 data->ret = 0;
0492d4c5 3867 goto unlock;
4a00c16e
SB
3868 }
3869
3870 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3871
3872 pmu->read(event);
0492d4c5 3873
edb39592 3874 for_each_sibling_event(sub, event) {
4a00c16e
SB
3875 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3876 /*
3877 * Use sibling's PMU rather than @event's since
3878 * sibling could be on different (eg: software) PMU.
3879 */
0492d4c5 3880 sub->pmu->read(sub);
4a00c16e 3881 }
0492d4c5 3882 }
4a00c16e
SB
3883
3884 data->ret = pmu->commit_txn(pmu);
0492d4c5
PZ
3885
3886unlock:
e625cce1 3887 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3888}
3889
b5e58793
PZ
3890static inline u64 perf_event_count(struct perf_event *event)
3891{
c39a0e2c 3892 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
3893}
3894
ffe8690c
KX
3895/*
3896 * NMI-safe method to read a local event, that is an event that
3897 * is:
3898 * - either for the current task, or for this CPU
3899 * - does not have inherit set, because inherited task events
3900 * will not be local and we cannot read them atomically
3901 * - must not have a pmu::count method
3902 */
7d9285e8
YS
3903int perf_event_read_local(struct perf_event *event, u64 *value,
3904 u64 *enabled, u64 *running)
ffe8690c
KX
3905{
3906 unsigned long flags;
f91840a3 3907 int ret = 0;
ffe8690c
KX
3908
3909 /*
3910 * Disabling interrupts avoids all counter scheduling (context
3911 * switches, timer based rotation and IPIs).
3912 */
3913 local_irq_save(flags);
3914
ffe8690c
KX
3915 /*
3916 * It must not be an event with inherit set, we cannot read
3917 * all child counters from atomic context.
3918 */
f91840a3
AS
3919 if (event->attr.inherit) {
3920 ret = -EOPNOTSUPP;
3921 goto out;
3922 }
ffe8690c 3923
f91840a3
AS
3924 /* If this is a per-task event, it must be for current */
3925 if ((event->attach_state & PERF_ATTACH_TASK) &&
3926 event->hw.target != current) {
3927 ret = -EINVAL;
3928 goto out;
3929 }
3930
3931 /* If this is a per-CPU event, it must be for this CPU */
3932 if (!(event->attach_state & PERF_ATTACH_TASK) &&
3933 event->cpu != smp_processor_id()) {
3934 ret = -EINVAL;
3935 goto out;
3936 }
ffe8690c 3937
befb1b3c
RC
3938 /* If this is a pinned event it must be running on this CPU */
3939 if (event->attr.pinned && event->oncpu != smp_processor_id()) {
3940 ret = -EBUSY;
3941 goto out;
3942 }
3943
ffe8690c
KX
3944 /*
3945 * If the event is currently on this CPU, it's either a per-task event,
3946 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
3947 * oncpu == -1).
3948 */
3949 if (event->oncpu == smp_processor_id())
3950 event->pmu->read(event);
3951
f91840a3 3952 *value = local64_read(&event->count);
0d3d73aa
PZ
3953 if (enabled || running) {
3954 u64 now = event->shadow_ctx_time + perf_clock();
3955 u64 __enabled, __running;
3956
3957 __perf_update_times(event, now, &__enabled, &__running);
3958 if (enabled)
3959 *enabled = __enabled;
3960 if (running)
3961 *running = __running;
3962 }
f91840a3 3963out:
ffe8690c
KX
3964 local_irq_restore(flags);
3965
f91840a3 3966 return ret;
ffe8690c
KX
3967}
3968
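/*
 * Illustrative sketch, not part of this file: a hypothetical NMI-safe caller
 * (e.g. a BPF helper reading a task-local or CPU-local event) only needs the
 * count; the error codes map to the checks above.
 */
static int read_local_count(struct perf_event *event, u64 *value)
{
	u64 enabled, running;

	/* Returns -EOPNOTSUPP, -EINVAL or -EBUSY if the event is not local. */
	return perf_event_read_local(event, value, &enabled, &running);
}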
7d88962e 3969static int perf_event_read(struct perf_event *event, bool group)
0793a61d 3970{
0c1cbc18 3971 enum perf_event_state state = READ_ONCE(event->state);
451d24d1 3972 int event_cpu, ret = 0;
7d88962e 3973
0793a61d 3974 /*
cdd6c482
IM
3975 * If event is enabled and currently active on a CPU, update the
3976 * value in the event structure:
0793a61d 3977 */
0c1cbc18
PZ
3978again:
3979 if (state == PERF_EVENT_STATE_ACTIVE) {
3980 struct perf_read_data data;
3981
3982 /*
3983 * Orders the ->state and ->oncpu loads such that if we see
3984 * ACTIVE we must also see the right ->oncpu.
3985 *
3986 * Matches the smp_wmb() from event_sched_in().
3987 */
3988 smp_rmb();
d6a2f903 3989
451d24d1
PZ
3990 event_cpu = READ_ONCE(event->oncpu);
3991 if ((unsigned)event_cpu >= nr_cpu_ids)
3992 return 0;
3993
0c1cbc18
PZ
3994 data = (struct perf_read_data){
3995 .event = event,
3996 .group = group,
3997 .ret = 0,
3998 };
3999
451d24d1
PZ
4000 preempt_disable();
4001 event_cpu = __perf_event_read_cpu(event, event_cpu);
d6a2f903 4002
58763148
PZ
4003 /*
4004 * Purposely ignore the smp_call_function_single() return
4005 * value.
4006 *
451d24d1 4007 * If event_cpu isn't a valid CPU it means the event got
58763148
PZ
4008 * scheduled out and that will have updated the event count.
4009 *
4010 * Therefore, either way, we'll have an up-to-date event count
4011 * after this.
4012 */
451d24d1
PZ
4013 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
4014 preempt_enable();
58763148 4015 ret = data.ret;
0c1cbc18
PZ
4016
4017 } else if (state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
4018 struct perf_event_context *ctx = event->ctx;
4019 unsigned long flags;
4020
e625cce1 4021 raw_spin_lock_irqsave(&ctx->lock, flags);
0c1cbc18
PZ
4022 state = event->state;
4023 if (state != PERF_EVENT_STATE_INACTIVE) {
4024 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4025 goto again;
4026 }
4027
c530ccd9 4028 /*
0c1cbc18
PZ
4029 * May read while context is not active (e.g., thread is
4030 * blocked), in that case we cannot update context time
c530ccd9 4031 */
0c1cbc18 4032 if (ctx->is_active & EVENT_TIME) {
c530ccd9 4033 update_context_time(ctx);
e5d1367f
SE
4034 update_cgrp_time_from_event(event);
4035 }
0c1cbc18 4036
0d3d73aa 4037 perf_event_update_time(event);
0492d4c5 4038 if (group)
0d3d73aa 4039 perf_event_update_sibling_time(event);
e625cce1 4040 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 4041 }
7d88962e
SB
4042
4043 return ret;
0793a61d
TG
4044}
4045
a63eaf34 4046/*
cdd6c482 4047 * Initialize the perf_event context in a task_struct:
a63eaf34 4048 */
eb184479 4049static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 4050{
e625cce1 4051 raw_spin_lock_init(&ctx->lock);
a63eaf34 4052 mutex_init(&ctx->mutex);
2fde4f94 4053 INIT_LIST_HEAD(&ctx->active_ctx_list);
8e1a2031
AB
4054 perf_event_groups_init(&ctx->pinned_groups);
4055 perf_event_groups_init(&ctx->flexible_groups);
a63eaf34 4056 INIT_LIST_HEAD(&ctx->event_list);
6668128a
PZ
4057 INIT_LIST_HEAD(&ctx->pinned_active);
4058 INIT_LIST_HEAD(&ctx->flexible_active);
a63eaf34 4059 atomic_set(&ctx->refcount, 1);
eb184479
PZ
4060}
4061
4062static struct perf_event_context *
4063alloc_perf_context(struct pmu *pmu, struct task_struct *task)
4064{
4065 struct perf_event_context *ctx;
4066
4067 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4068 if (!ctx)
4069 return NULL;
4070
4071 __perf_event_init_context(ctx);
4072 if (task) {
4073 ctx->task = task;
4074 get_task_struct(task);
0793a61d 4075 }
eb184479
PZ
4076 ctx->pmu = pmu;
4077
4078 return ctx;
a63eaf34
PM
4079}
4080
2ebd4ffb
MH
4081static struct task_struct *
4082find_lively_task_by_vpid(pid_t vpid)
4083{
4084 struct task_struct *task;
0793a61d
TG
4085
4086 rcu_read_lock();
2ebd4ffb 4087 if (!vpid)
0793a61d
TG
4088 task = current;
4089 else
2ebd4ffb 4090 task = find_task_by_vpid(vpid);
0793a61d
TG
4091 if (task)
4092 get_task_struct(task);
4093 rcu_read_unlock();
4094
4095 if (!task)
4096 return ERR_PTR(-ESRCH);
4097
2ebd4ffb 4098 return task;
2ebd4ffb
MH
4099}
4100
fe4b04fa
PZ
4101/*
4102 * Returns a matching context with refcount and pincount.
4103 */
108b02cf 4104static struct perf_event_context *
4af57ef2
YZ
4105find_get_context(struct pmu *pmu, struct task_struct *task,
4106 struct perf_event *event)
0793a61d 4107{
211de6eb 4108 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 4109 struct perf_cpu_context *cpuctx;
4af57ef2 4110 void *task_ctx_data = NULL;
25346b93 4111 unsigned long flags;
8dc85d54 4112 int ctxn, err;
4af57ef2 4113 int cpu = event->cpu;
0793a61d 4114
22a4ec72 4115 if (!task) {
cdd6c482 4116 /* Must be root to operate on a CPU event: */
0764771d 4117 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
4118 return ERR_PTR(-EACCES);
4119
108b02cf 4120 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 4121 ctx = &cpuctx->ctx;
c93f7669 4122 get_ctx(ctx);
fe4b04fa 4123 ++ctx->pin_count;
0793a61d 4124
0793a61d
TG
4125 return ctx;
4126 }
4127
8dc85d54
PZ
4128 err = -EINVAL;
4129 ctxn = pmu->task_ctx_nr;
4130 if (ctxn < 0)
4131 goto errout;
4132
4af57ef2
YZ
4133 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
4134 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
4135 if (!task_ctx_data) {
4136 err = -ENOMEM;
4137 goto errout;
4138 }
4139 }
4140
9ed6060d 4141retry:
8dc85d54 4142 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 4143 if (ctx) {
211de6eb 4144 clone_ctx = unclone_ctx(ctx);
fe4b04fa 4145 ++ctx->pin_count;
4af57ef2
YZ
4146
4147 if (task_ctx_data && !ctx->task_ctx_data) {
4148 ctx->task_ctx_data = task_ctx_data;
4149 task_ctx_data = NULL;
4150 }
e625cce1 4151 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
4152
4153 if (clone_ctx)
4154 put_ctx(clone_ctx);
9137fb28 4155 } else {
eb184479 4156 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
4157 err = -ENOMEM;
4158 if (!ctx)
4159 goto errout;
eb184479 4160
4af57ef2
YZ
4161 if (task_ctx_data) {
4162 ctx->task_ctx_data = task_ctx_data;
4163 task_ctx_data = NULL;
4164 }
4165
dbe08d82
ON
4166 err = 0;
4167 mutex_lock(&task->perf_event_mutex);
4168 /*
4169 * If it has already passed perf_event_exit_task(),
4170 * we must see PF_EXITING; it takes this mutex too.
4171 */
4172 if (task->flags & PF_EXITING)
4173 err = -ESRCH;
4174 else if (task->perf_event_ctxp[ctxn])
4175 err = -EAGAIN;
fe4b04fa 4176 else {
9137fb28 4177 get_ctx(ctx);
fe4b04fa 4178 ++ctx->pin_count;
dbe08d82 4179 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 4180 }
dbe08d82
ON
4181 mutex_unlock(&task->perf_event_mutex);
4182
4183 if (unlikely(err)) {
9137fb28 4184 put_ctx(ctx);
dbe08d82
ON
4185
4186 if (err == -EAGAIN)
4187 goto retry;
4188 goto errout;
a63eaf34
PM
4189 }
4190 }
4191
4af57ef2 4192 kfree(task_ctx_data);
0793a61d 4193 return ctx;
c93f7669 4194
9ed6060d 4195errout:
4af57ef2 4196 kfree(task_ctx_data);
c93f7669 4197 return ERR_PTR(err);
0793a61d
TG
4198}
4199
6fb2915d 4200static void perf_event_free_filter(struct perf_event *event);
2541517c 4201static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 4202
cdd6c482 4203static void free_event_rcu(struct rcu_head *head)
592903cd 4204{
cdd6c482 4205 struct perf_event *event;
592903cd 4206
cdd6c482
IM
4207 event = container_of(head, struct perf_event, rcu_head);
4208 if (event->ns)
4209 put_pid_ns(event->ns);
6fb2915d 4210 perf_event_free_filter(event);
cdd6c482 4211 kfree(event);
592903cd
PZ
4212}
4213
b69cf536
PZ
4214static void ring_buffer_attach(struct perf_event *event,
4215 struct ring_buffer *rb);
925d519a 4216
f2fb6bef
KL
4217static void detach_sb_event(struct perf_event *event)
4218{
4219 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
4220
4221 raw_spin_lock(&pel->lock);
4222 list_del_rcu(&event->sb_list);
4223 raw_spin_unlock(&pel->lock);
4224}
4225
a4f144eb 4226static bool is_sb_event(struct perf_event *event)
f2fb6bef 4227{
a4f144eb
DCC
4228 struct perf_event_attr *attr = &event->attr;
4229
f2fb6bef 4230 if (event->parent)
a4f144eb 4231 return false;
f2fb6bef
KL
4232
4233 if (event->attach_state & PERF_ATTACH_TASK)
a4f144eb 4234 return false;
f2fb6bef 4235
a4f144eb
DCC
4236 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
4237 attr->comm || attr->comm_exec ||
4238 attr->task ||
4239 attr->context_switch)
4240 return true;
4241 return false;
4242}
4243
4244static void unaccount_pmu_sb_event(struct perf_event *event)
4245{
4246 if (is_sb_event(event))
4247 detach_sb_event(event);
f2fb6bef
KL
4248}
4249
4beb31f3 4250static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 4251{
4beb31f3
FW
4252 if (event->parent)
4253 return;
4254
4beb31f3
FW
4255 if (is_cgroup_event(event))
4256 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
4257}
925d519a 4258
555e0c1e
FW
4259#ifdef CONFIG_NO_HZ_FULL
4260static DEFINE_SPINLOCK(nr_freq_lock);
4261#endif
4262
4263static void unaccount_freq_event_nohz(void)
4264{
4265#ifdef CONFIG_NO_HZ_FULL
4266 spin_lock(&nr_freq_lock);
4267 if (atomic_dec_and_test(&nr_freq_events))
4268 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
4269 spin_unlock(&nr_freq_lock);
4270#endif
4271}
4272
4273static void unaccount_freq_event(void)
4274{
4275 if (tick_nohz_full_enabled())
4276 unaccount_freq_event_nohz();
4277 else
4278 atomic_dec(&nr_freq_events);
4279}
4280
4beb31f3
FW
4281static void unaccount_event(struct perf_event *event)
4282{
25432ae9
PZ
4283 bool dec = false;
4284
4beb31f3
FW
4285 if (event->parent)
4286 return;
4287
4288 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 4289 dec = true;
4beb31f3
FW
4290 if (event->attr.mmap || event->attr.mmap_data)
4291 atomic_dec(&nr_mmap_events);
4292 if (event->attr.comm)
4293 atomic_dec(&nr_comm_events);
e4222673
HB
4294 if (event->attr.namespaces)
4295 atomic_dec(&nr_namespaces_events);
4beb31f3
FW
4296 if (event->attr.task)
4297 atomic_dec(&nr_task_events);
948b26b6 4298 if (event->attr.freq)
555e0c1e 4299 unaccount_freq_event();
45ac1403 4300 if (event->attr.context_switch) {
25432ae9 4301 dec = true;
45ac1403
AH
4302 atomic_dec(&nr_switch_events);
4303 }
4beb31f3 4304 if (is_cgroup_event(event))
25432ae9 4305 dec = true;
4beb31f3 4306 if (has_branch_stack(event))
25432ae9
PZ
4307 dec = true;
4308
9107c89e
PZ
4309 if (dec) {
4310 if (!atomic_add_unless(&perf_sched_count, -1, 1))
4311 schedule_delayed_work(&perf_sched_work, HZ);
4312 }
4beb31f3
FW
4313
4314 unaccount_event_cpu(event, event->cpu);
f2fb6bef
KL
4315
4316 unaccount_pmu_sb_event(event);
4beb31f3 4317}
925d519a 4318
9107c89e
PZ
4319static void perf_sched_delayed(struct work_struct *work)
4320{
4321 mutex_lock(&perf_sched_mutex);
4322 if (atomic_dec_and_test(&perf_sched_count))
4323 static_branch_disable(&perf_sched_events);
4324 mutex_unlock(&perf_sched_mutex);
4325}
4326
bed5b25a
AS
4327/*
4328 * The following implement mutual exclusion of events on "exclusive" pmus
4329 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4330 * at a time, so we disallow creating events that might conflict, namely:
4331 *
4332 * 1) cpu-wide events in the presence of per-task events,
4333 * 2) per-task events in the presence of cpu-wide events,
4334 * 3) two matching events on the same context.
4335 *
4336 * The former two cases are handled in the allocation path (perf_event_alloc(),
a0733e69 4337 * _free_event()), the latter -- before the first perf_install_in_context().
bed5b25a
AS
4338 */
4339static int exclusive_event_init(struct perf_event *event)
4340{
4341 struct pmu *pmu = event->pmu;
4342
4343 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4344 return 0;
4345
4346 /*
4347 * Prevent co-existence of per-task and cpu-wide events on the
4348 * same exclusive pmu.
4349 *
4350 * Negative pmu::exclusive_cnt means there are cpu-wide
4351 * events on this "exclusive" pmu, positive means there are
4352 * per-task events.
4353 *
4354 * Since this is called in perf_event_alloc() path, event::ctx
4355 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
4356 * to mean "per-task event", because unlike other attach states it
4357 * never gets cleared.
4358 */
4359 if (event->attach_state & PERF_ATTACH_TASK) {
4360 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
4361 return -EBUSY;
4362 } else {
4363 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
4364 return -EBUSY;
4365 }
4366
4367 return 0;
4368}
4369
4370static void exclusive_event_destroy(struct perf_event *event)
4371{
4372 struct pmu *pmu = event->pmu;
4373
4374 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4375 return;
4376
4377 /* see comment in exclusive_event_init() */
4378 if (event->attach_state & PERF_ATTACH_TASK)
4379 atomic_dec(&pmu->exclusive_cnt);
4380 else
4381 atomic_inc(&pmu->exclusive_cnt);
4382}
4383
4384static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
4385{
3bf6215a 4386 if ((e1->pmu == e2->pmu) &&
bed5b25a
AS
4387 (e1->cpu == e2->cpu ||
4388 e1->cpu == -1 ||
4389 e2->cpu == -1))
4390 return true;
4391 return false;
4392}
4393
4394/* Called under the same ctx::mutex as perf_install_in_context() */
4395static bool exclusive_event_installable(struct perf_event *event,
4396 struct perf_event_context *ctx)
4397{
4398 struct perf_event *iter_event;
4399 struct pmu *pmu = event->pmu;
4400
4401 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4402 return true;
4403
4404 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
4405 if (exclusive_event_match(iter_event, event))
4406 return false;
4407 }
4408
4409 return true;
4410}
4411
375637bc
AS
4412static void perf_addr_filters_splice(struct perf_event *event,
4413 struct list_head *head);
4414
683ede43 4415static void _free_event(struct perf_event *event)
f1600952 4416{
e360adbe 4417 irq_work_sync(&event->pending);
925d519a 4418
4beb31f3 4419 unaccount_event(event);
9ee318a7 4420
76369139 4421 if (event->rb) {
9bb5d40c
PZ
4422 /*
4423 * Can happen when we close an event with re-directed output.
4424 *
4425 * Since we have a 0 refcount, perf_mmap_close() will skip
4426 * over us; possibly making our ring_buffer_put() the last.
4427 */
4428 mutex_lock(&event->mmap_mutex);
b69cf536 4429 ring_buffer_attach(event, NULL);
9bb5d40c 4430 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
4431 }
4432
e5d1367f
SE
4433 if (is_cgroup_event(event))
4434 perf_detach_cgroup(event);
4435
a0733e69
PZ
4436 if (!event->parent) {
4437 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4438 put_callchain_buffers();
4439 }
4440
4441 perf_event_free_bpf_prog(event);
375637bc
AS
4442 perf_addr_filters_splice(event, NULL);
4443 kfree(event->addr_filters_offs);
a0733e69
PZ
4444
4445 if (event->destroy)
4446 event->destroy(event);
4447
4448 if (event->ctx)
4449 put_ctx(event->ctx);
4450
621b6d2e
PB
4451 if (event->hw.target)
4452 put_task_struct(event->hw.target);
4453
62a92c8f
AS
4454 exclusive_event_destroy(event);
4455 module_put(event->pmu->module);
a0733e69
PZ
4456
4457 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
4458}
4459
683ede43
PZ
4460/*
4461 * Used to free events which have a known refcount of 1, such as in error paths
4462 * where the event isn't exposed yet and inherited events.
4463 */
4464static void free_event(struct perf_event *event)
0793a61d 4465{
683ede43
PZ
4466 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4467 "unexpected event refcount: %ld; ptr=%p\n",
4468 atomic_long_read(&event->refcount), event)) {
4469 /* leak to avoid use-after-free */
4470 return;
4471 }
0793a61d 4472
683ede43 4473 _free_event(event);
0793a61d
TG
4474}
4475
a66a3052 4476/*
f8697762 4477 * Remove user event from the owner task.
a66a3052 4478 */
f8697762 4479static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 4480{
8882135b 4481 struct task_struct *owner;
fb0459d7 4482
8882135b 4483 rcu_read_lock();
8882135b 4484 /*
f47c02c0
PZ
4485 * Matches the smp_store_release() in perf_event_exit_task(). If we
4486 * observe !owner it means the list deletion is complete and we can
4487 * indeed free this event, otherwise we need to serialize on
8882135b
PZ
4488 * owner->perf_event_mutex.
4489 */
506458ef 4490 owner = READ_ONCE(event->owner);
8882135b
PZ
4491 if (owner) {
4492 /*
4493 * Since delayed_put_task_struct() also drops the last
4494 * task reference we can safely take a new reference
4495 * while holding the rcu_read_lock().
4496 */
4497 get_task_struct(owner);
4498 }
4499 rcu_read_unlock();
4500
4501 if (owner) {
f63a8daa
PZ
4502 /*
4503 * If we're here through perf_event_exit_task() we're already
4504 * holding ctx->mutex which would be an inversion wrt. the
4505 * normal lock order.
4506 *
4507 * However we can safely take this lock because its the child
4508 * ctx->mutex.
4509 */
4510 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4511
8882135b
PZ
4512 /*
4513 * We have to re-check the event->owner field, if it is cleared
4514 * we raced with perf_event_exit_task(), acquiring the mutex
4515 * ensured they're done, and we can proceed with freeing the
4516 * event.
4517 */
f47c02c0 4518 if (event->owner) {
8882135b 4519 list_del_init(&event->owner_entry);
f47c02c0
PZ
4520 smp_store_release(&event->owner, NULL);
4521 }
8882135b
PZ
4522 mutex_unlock(&owner->perf_event_mutex);
4523 put_task_struct(owner);
4524 }
f8697762
JO
4525}
4526
f8697762
JO
4527static void put_event(struct perf_event *event)
4528{
f8697762
JO
4529 if (!atomic_long_dec_and_test(&event->refcount))
4530 return;
4531
c6e5b732
PZ
4532 _free_event(event);
4533}
4534
4535/*
4536 * Kill an event dead; while event:refcount will preserve the event
4537 * object, it will not preserve its functionality. Once the last 'user'
4538 * gives up the object, we'll destroy the thing.
4539 */
4540int perf_event_release_kernel(struct perf_event *event)
4541{
a4f4bb6d 4542 struct perf_event_context *ctx = event->ctx;
c6e5b732 4543 struct perf_event *child, *tmp;
82d94856 4544 LIST_HEAD(free_list);
c6e5b732 4545
a4f4bb6d
PZ
4546 /*
4547 * If we got here through err_file: fput(event_file); we will not have
4548 * attached to a context yet.
4549 */
4550 if (!ctx) {
4551 WARN_ON_ONCE(event->attach_state &
4552 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4553 goto no_ctx;
4554 }
4555
f8697762
JO
4556 if (!is_kernel_event(event))
4557 perf_remove_from_owner(event);
8882135b 4558
5fa7c8ec 4559 ctx = perf_event_ctx_lock(event);
a83fe28e 4560 WARN_ON_ONCE(ctx->parent_ctx);
a69b0ca4 4561 perf_remove_from_context(event, DETACH_GROUP);
683ede43 4562
a69b0ca4 4563 raw_spin_lock_irq(&ctx->lock);
683ede43 4564 /*
d8a8cfc7 4565 * Mark this event as STATE_DEAD, there is no external reference to it
a69b0ca4 4566 * anymore.
683ede43 4567 *
a69b0ca4
PZ
4568 * Anybody acquiring event->child_mutex after the below loop _must_
4569 * also see this, most importantly inherit_event() which will avoid
4570 * placing more children on the list.
683ede43 4571 *
c6e5b732
PZ
4572 * Thus this guarantees that we will in fact observe and kill _ALL_
4573 * child events.
683ede43 4574 */
a69b0ca4
PZ
4575 event->state = PERF_EVENT_STATE_DEAD;
4576 raw_spin_unlock_irq(&ctx->lock);
4577
4578 perf_event_ctx_unlock(event, ctx);
683ede43 4579
c6e5b732
PZ
4580again:
4581 mutex_lock(&event->child_mutex);
4582 list_for_each_entry(child, &event->child_list, child_list) {
a6fa941d 4583
c6e5b732
PZ
4584 /*
4585 * Cannot change, child events are not migrated, see the
4586 * comment with perf_event_ctx_lock_nested().
4587 */
506458ef 4588 ctx = READ_ONCE(child->ctx);
c6e5b732
PZ
4589 /*
4590 * Since child_mutex nests inside ctx::mutex, we must jump
4591 * through hoops. We start by grabbing a reference on the ctx.
4592 *
4593 * Since the event cannot get freed while we hold the
4594 * child_mutex, the context must also exist and have a !0
4595 * reference count.
4596 */
4597 get_ctx(ctx);
4598
4599 /*
4600 * Now that we have a ctx ref, we can drop child_mutex, and
4601 * acquire ctx::mutex without fear of it going away. Then we
4602 * can re-acquire child_mutex.
4603 */
4604 mutex_unlock(&event->child_mutex);
4605 mutex_lock(&ctx->mutex);
4606 mutex_lock(&event->child_mutex);
4607
4608 /*
4609 * Now that we hold ctx::mutex and child_mutex, revalidate our
4610 * state, if child is still the first entry, it didn't get freed
4611 * and we can continue doing so.
4612 */
4613 tmp = list_first_entry_or_null(&event->child_list,
4614 struct perf_event, child_list);
4615 if (tmp == child) {
4616 perf_remove_from_context(child, DETACH_GROUP);
82d94856 4617 list_move(&child->child_list, &free_list);
c6e5b732
PZ
4618 /*
4619 * This matches the refcount bump in inherit_event();
4620 * this can't be the last reference.
4621 */
4622 put_event(event);
4623 }
4624
4625 mutex_unlock(&event->child_mutex);
4626 mutex_unlock(&ctx->mutex);
4627 put_ctx(ctx);
4628 goto again;
4629 }
4630 mutex_unlock(&event->child_mutex);
4631
82d94856
PZ
4632 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
4633 list_del(&child->child_list);
4634 free_event(child);
4635 }
4636
a4f4bb6d
PZ
4637no_ctx:
4638 put_event(event); /* Must be the 'last' reference */
683ede43
PZ
4639 return 0;
4640}
4641EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4642
8b10c5e2
PZ
4643/*
4644 * Called when the last reference to the file is gone.
4645 */
a6fa941d
AV
4646static int perf_release(struct inode *inode, struct file *file)
4647{
c6e5b732 4648 perf_event_release_kernel(file->private_data);
a6fa941d 4649 return 0;
fb0459d7 4650}
fb0459d7 4651
ca0dd44c 4652static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 4653{
cdd6c482 4654 struct perf_event *child;
e53c0994
PZ
4655 u64 total = 0;
4656
59ed446f
PZ
4657 *enabled = 0;
4658 *running = 0;
4659
6f10581a 4660 mutex_lock(&event->child_mutex);
01add3ea 4661
7d88962e 4662 (void)perf_event_read(event, false);
01add3ea
SB
4663 total += perf_event_count(event);
4664
59ed446f
PZ
4665 *enabled += event->total_time_enabled +
4666 atomic64_read(&event->child_total_time_enabled);
4667 *running += event->total_time_running +
4668 atomic64_read(&event->child_total_time_running);
4669
4670 list_for_each_entry(child, &event->child_list, child_list) {
7d88962e 4671 (void)perf_event_read(child, false);
01add3ea 4672 total += perf_event_count(child);
59ed446f
PZ
4673 *enabled += child->total_time_enabled;
4674 *running += child->total_time_running;
4675 }
6f10581a 4676 mutex_unlock(&event->child_mutex);
e53c0994
PZ
4677
4678 return total;
4679}
ca0dd44c
PZ
4680
4681u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
4682{
4683 struct perf_event_context *ctx;
4684 u64 count;
4685
4686 ctx = perf_event_ctx_lock(event);
4687 count = __perf_event_read_value(event, enabled, running);
4688 perf_event_ctx_unlock(event, ctx);
4689
4690 return count;
4691}
fb0459d7 4692EXPORT_SYMBOL_GPL(perf_event_read_value);
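/*
 * Illustrative sketch, not part of this file: a hypothetical in-kernel user
 * pairs perf_event_read_value() with perf_event_create_kernel_counter() and
 * perf_event_release_kernel() to sample a hardware counter on one CPU.
 */
static u64 sample_cpu_cycles(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};
	struct perf_event *event;
	u64 enabled, running;
	u64 count;

	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return 0;

	count = perf_event_read_value(event, &enabled, &running);
	perf_event_release_kernel(event);

	return count;
}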
e53c0994 4693
7d88962e 4694static int __perf_read_group_add(struct perf_event *leader,
fa8c2693 4695 u64 read_format, u64 *values)
3dab77fb 4696{
2aeb1883 4697 struct perf_event_context *ctx = leader->ctx;
fa8c2693 4698 struct perf_event *sub;
2aeb1883 4699 unsigned long flags;
fa8c2693 4700 int n = 1; /* skip @nr */
7d88962e 4701 int ret;
f63a8daa 4702
7d88962e
SB
4703 ret = perf_event_read(leader, true);
4704 if (ret)
4705 return ret;
abf4868b 4706
a9cd8194
PZ
4707 raw_spin_lock_irqsave(&ctx->lock, flags);
4708
fa8c2693
PZ
4709 /*
4710 * Since we co-schedule groups, {enabled,running} times of siblings
4711 * will be identical to those of the leader, so we only publish one
4712 * set.
4713 */
4714 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4715 values[n++] += leader->total_time_enabled +
4716 atomic64_read(&leader->child_total_time_enabled);
4717 }
3dab77fb 4718
fa8c2693
PZ
4719 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4720 values[n++] += leader->total_time_running +
4721 atomic64_read(&leader->child_total_time_running);
4722 }
4723
4724 /*
4725 * Write {count,id} tuples for every sibling.
4726 */
4727 values[n++] += perf_event_count(leader);
abf4868b
PZ
4728 if (read_format & PERF_FORMAT_ID)
4729 values[n++] = primary_event_id(leader);
3dab77fb 4730
edb39592 4731 for_each_sibling_event(sub, leader) {
fa8c2693
PZ
4732 values[n++] += perf_event_count(sub);
4733 if (read_format & PERF_FORMAT_ID)
4734 values[n++] = primary_event_id(sub);
4735 }
7d88962e 4736
2aeb1883 4737 raw_spin_unlock_irqrestore(&ctx->lock, flags);
7d88962e 4738 return 0;
fa8c2693 4739}
3dab77fb 4740
fa8c2693
PZ
4741static int perf_read_group(struct perf_event *event,
4742 u64 read_format, char __user *buf)
4743{
4744 struct perf_event *leader = event->group_leader, *child;
4745 struct perf_event_context *ctx = leader->ctx;
7d88962e 4746 int ret;
fa8c2693 4747 u64 *values;
3dab77fb 4748
fa8c2693 4749 lockdep_assert_held(&ctx->mutex);
3dab77fb 4750
fa8c2693
PZ
4751 values = kzalloc(event->read_size, GFP_KERNEL);
4752 if (!values)
4753 return -ENOMEM;
3dab77fb 4754
fa8c2693
PZ
4755 values[0] = 1 + leader->nr_siblings;
4756
4757 /*
4758 * By locking the child_mutex of the leader we effectively
4759 * lock the child list of all siblings.. XXX explain how.
4760 */
4761 mutex_lock(&leader->child_mutex);
abf4868b 4762
7d88962e
SB
4763 ret = __perf_read_group_add(leader, read_format, values);
4764 if (ret)
4765 goto unlock;
4766
4767 list_for_each_entry(child, &leader->child_list, child_list) {
4768 ret = __perf_read_group_add(child, read_format, values);
4769 if (ret)
4770 goto unlock;
4771 }
abf4868b 4772
fa8c2693 4773 mutex_unlock(&leader->child_mutex);
abf4868b 4774
7d88962e 4775 ret = event->read_size;
fa8c2693
PZ
4776 if (copy_to_user(buf, values, event->read_size))
4777 ret = -EFAULT;
7d88962e 4778 goto out;
fa8c2693 4779
7d88962e
SB
4780unlock:
4781 mutex_unlock(&leader->child_mutex);
4782out:
fa8c2693 4783 kfree(values);
abf4868b 4784 return ret;
3dab77fb
PZ
4785}
4786
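/*
 * Illustrative sketch, not part of this file: the buffer filled in above for
 * PERF_FORMAT_GROUP reads corresponds to this user-space layout, with the
 * time fields present only when the matching PERF_FORMAT_TOTAL_TIME_* bits
 * are set and 'id' only with PERF_FORMAT_ID:
 *
 *	struct group_read_format {
 *		__u64 nr;		// 1 + nr_siblings
 *		__u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		__u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			__u64 value;
 *			__u64 id;	// PERF_FORMAT_ID
 *		} values[];		// leader first, then each sibling
 *	};
 */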
b15f495b 4787static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
4788 u64 read_format, char __user *buf)
4789{
59ed446f 4790 u64 enabled, running;
3dab77fb
PZ
4791 u64 values[4];
4792 int n = 0;
4793
ca0dd44c 4794 values[n++] = __perf_event_read_value(event, &enabled, &running);
59ed446f
PZ
4795 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4796 values[n++] = enabled;
4797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4798 values[n++] = running;
3dab77fb 4799 if (read_format & PERF_FORMAT_ID)
cdd6c482 4800 values[n++] = primary_event_id(event);
3dab77fb
PZ
4801
4802 if (copy_to_user(buf, values, n * sizeof(u64)))
4803 return -EFAULT;
4804
4805 return n * sizeof(u64);
4806}
4807
dc633982
JO
4808static bool is_event_hup(struct perf_event *event)
4809{
4810 bool no_children;
4811
a69b0ca4 4812 if (event->state > PERF_EVENT_STATE_EXIT)
dc633982
JO
4813 return false;
4814
4815 mutex_lock(&event->child_mutex);
4816 no_children = list_empty(&event->child_list);
4817 mutex_unlock(&event->child_mutex);
4818 return no_children;
4819}
4820
0793a61d 4821/*
cdd6c482 4822 * Read the performance event - simple non blocking version for now
0793a61d
TG
4823 */
4824static ssize_t
b15f495b 4825__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 4826{
cdd6c482 4827 u64 read_format = event->attr.read_format;
3dab77fb 4828 int ret;
0793a61d 4829
3b6f9e5c 4830 /*
788faab7 4831 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
4832 * error state (i.e. because it was pinned but it couldn't be
4833 * scheduled on to the CPU at some point).
4834 */
cdd6c482 4835 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
4836 return 0;
4837
c320c7b7 4838 if (count < event->read_size)
3dab77fb
PZ
4839 return -ENOSPC;
4840
cdd6c482 4841 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 4842 if (read_format & PERF_FORMAT_GROUP)
b15f495b 4843 ret = perf_read_group(event, read_format, buf);
3dab77fb 4844 else
b15f495b 4845 ret = perf_read_one(event, read_format, buf);
0793a61d 4846
3dab77fb 4847 return ret;
0793a61d
TG
4848}
4849
0793a61d
TG
4850static ssize_t
4851perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4852{
cdd6c482 4853 struct perf_event *event = file->private_data;
f63a8daa
PZ
4854 struct perf_event_context *ctx;
4855 int ret;
0793a61d 4856
f63a8daa 4857 ctx = perf_event_ctx_lock(event);
b15f495b 4858 ret = __perf_read(event, buf, count);
f63a8daa
PZ
4859 perf_event_ctx_unlock(event, ctx);
4860
4861 return ret;
0793a61d
TG
4862}
4863
9dd95748 4864static __poll_t perf_poll(struct file *file, poll_table *wait)
0793a61d 4865{
cdd6c482 4866 struct perf_event *event = file->private_data;
76369139 4867 struct ring_buffer *rb;
a9a08845 4868 __poll_t events = EPOLLHUP;
c7138f37 4869
e708d7ad 4870 poll_wait(file, &event->waitq, wait);
179033b3 4871
dc633982 4872 if (is_event_hup(event))
179033b3 4873 return events;
c7138f37 4874
10c6db11 4875 /*
9bb5d40c
PZ
4876 * Pin the event->rb by taking event->mmap_mutex; otherwise
4877 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
4878 */
4879 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
4880 rb = event->rb;
4881 if (rb)
76369139 4882 events = atomic_xchg(&rb->poll, 0);
10c6db11 4883 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
4884 return events;
4885}
4886
f63a8daa 4887static void _perf_event_reset(struct perf_event *event)
6de6a7b9 4888{
7d88962e 4889 (void)perf_event_read(event, false);
e7850595 4890 local64_set(&event->count, 0);
cdd6c482 4891 perf_event_update_userpage(event);
3df5edad
PZ
4892}
4893
c93f7669 4894/*
cdd6c482
IM
4895 * Holding the top-level event's child_mutex means that any
4896 * descendant process that has inherited this event will block
8ba289b8 4897 * in perf_event_exit_event() if it goes to exit, thus satisfying the
cdd6c482 4898 * task existence requirements of perf_event_enable/disable.
c93f7669 4899 */
cdd6c482
IM
4900static void perf_event_for_each_child(struct perf_event *event,
4901 void (*func)(struct perf_event *))
3df5edad 4902{
cdd6c482 4903 struct perf_event *child;
3df5edad 4904
cdd6c482 4905 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 4906
cdd6c482
IM
4907 mutex_lock(&event->child_mutex);
4908 func(event);
4909 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 4910 func(child);
cdd6c482 4911 mutex_unlock(&event->child_mutex);
3df5edad
PZ
4912}
4913
cdd6c482
IM
4914static void perf_event_for_each(struct perf_event *event,
4915 void (*func)(struct perf_event *))
3df5edad 4916{
cdd6c482
IM
4917 struct perf_event_context *ctx = event->ctx;
4918 struct perf_event *sibling;
3df5edad 4919
f63a8daa
PZ
4920 lockdep_assert_held(&ctx->mutex);
4921
cdd6c482 4922 event = event->group_leader;
75f937f2 4923
cdd6c482 4924 perf_event_for_each_child(event, func);
edb39592 4925 for_each_sibling_event(sibling, event)
724b6daa 4926 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
4927}
4928
fae3fde6
PZ
4929static void __perf_event_period(struct perf_event *event,
4930 struct perf_cpu_context *cpuctx,
4931 struct perf_event_context *ctx,
4932 void *info)
c7999c6f 4933{
fae3fde6 4934 u64 value = *((u64 *)info);
c7999c6f 4935 bool active;
08247e31 4936
cdd6c482 4937 if (event->attr.freq) {
cdd6c482 4938 event->attr.sample_freq = value;
08247e31 4939 } else {
cdd6c482
IM
4940 event->attr.sample_period = value;
4941 event->hw.sample_period = value;
08247e31 4942 }
bad7192b
PZ
4943
4944 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4945 if (active) {
4946 perf_pmu_disable(ctx->pmu);
1e02cd40
PZ
4947 /*
4948 * We could be throttled; unthrottle now to avoid the tick
4949 * trying to unthrottle while we already re-started the event.
4950 */
4951 if (event->hw.interrupts == MAX_INTERRUPTS) {
4952 event->hw.interrupts = 0;
4953 perf_log_throttle(event, 1);
4954 }
bad7192b
PZ
4955 event->pmu->stop(event, PERF_EF_UPDATE);
4956 }
4957
4958 local64_set(&event->hw.period_left, 0);
4959
4960 if (active) {
4961 event->pmu->start(event, PERF_EF_RELOAD);
4962 perf_pmu_enable(ctx->pmu);
4963 }
c7999c6f
PZ
4964}
4965
4966static int perf_event_period(struct perf_event *event, u64 __user *arg)
4967{
c7999c6f
PZ
4968 u64 value;
4969
4970 if (!is_sampling_event(event))
4971 return -EINVAL;
4972
4973 if (copy_from_user(&value, arg, sizeof(value)))
4974 return -EFAULT;
4975
4976 if (!value)
4977 return -EINVAL;
4978
4979 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4980 return -EINVAL;
4981
fae3fde6 4982 event_function_call(event, __perf_event_period, &value);
08247e31 4983
c7999c6f 4984 return 0;
08247e31
PZ
4985}
4986
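/*
 * Illustrative sketch, not part of this file: user space reaches the path
 * above through the PERF_EVENT_IOC_PERIOD ioctl on a perf_event_open() fd.
 * The argument is a pointer to a u64, interpreted as sample_freq for
 * freq-mode events and as sample_period otherwise:
 *
 *	__u64 period = 200000;		// hypothetical new period
 *	int err = ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 */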
ac9721f3
PZ
4987static const struct file_operations perf_fops;
4988
2903ff01 4989static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 4990{
2903ff01
AV
4991 struct fd f = fdget(fd);
4992 if (!f.file)
4993 return -EBADF;
ac9721f3 4994
2903ff01
AV
4995 if (f.file->f_op != &perf_fops) {
4996 fdput(f);
4997 return -EBADF;
ac9721f3 4998 }
2903ff01
AV
4999 *p = f;
5000 return 0;
ac9721f3
PZ
5001}
5002
5003static int perf_event_set_output(struct perf_event *event,
5004 struct perf_event *output_event);
6fb2915d 5005static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 5006static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
32ff77e8
MC
5007static int perf_copy_attr(struct perf_event_attr __user *uattr,
5008 struct perf_event_attr *attr);
a4be7c27 5009
f63a8daa 5010static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 5011{
cdd6c482 5012 void (*func)(struct perf_event *);
3df5edad 5013 u32 flags = arg;
d859e29f
PM
5014
5015 switch (cmd) {
cdd6c482 5016 case PERF_EVENT_IOC_ENABLE:
f63a8daa 5017 func = _perf_event_enable;
d859e29f 5018 break;
cdd6c482 5019 case PERF_EVENT_IOC_DISABLE:
f63a8daa 5020 func = _perf_event_disable;
79f14641 5021 break;
cdd6c482 5022 case PERF_EVENT_IOC_RESET:
f63a8daa 5023 func = _perf_event_reset;
6de6a7b9 5024 break;
3df5edad 5025
cdd6c482 5026 case PERF_EVENT_IOC_REFRESH:
f63a8daa 5027 return _perf_event_refresh(event, arg);
08247e31 5028
cdd6c482
IM
5029 case PERF_EVENT_IOC_PERIOD:
5030 return perf_event_period(event, (u64 __user *)arg);
08247e31 5031
cf4957f1
JO
5032 case PERF_EVENT_IOC_ID:
5033 {
5034 u64 id = primary_event_id(event);
5035
5036 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
5037 return -EFAULT;
5038 return 0;
5039 }
5040
cdd6c482 5041 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 5042 {
ac9721f3 5043 int ret;
ac9721f3 5044 if (arg != -1) {
2903ff01
AV
5045 struct perf_event *output_event;
5046 struct fd output;
5047 ret = perf_fget_light(arg, &output);
5048 if (ret)
5049 return ret;
5050 output_event = output.file->private_data;
5051 ret = perf_event_set_output(event, output_event);
5052 fdput(output);
5053 } else {
5054 ret = perf_event_set_output(event, NULL);
ac9721f3 5055 }
ac9721f3
PZ
5056 return ret;
5057 }
a4be7c27 5058
6fb2915d
LZ
5059 case PERF_EVENT_IOC_SET_FILTER:
5060 return perf_event_set_filter(event, (void __user *)arg);
5061
2541517c
AS
5062 case PERF_EVENT_IOC_SET_BPF:
5063 return perf_event_set_bpf_prog(event, arg);
5064
86e7972f
WN
5065 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
5066 struct ring_buffer *rb;
5067
5068 rcu_read_lock();
5069 rb = rcu_dereference(event->rb);
5070 if (!rb || !rb->nr_pages) {
5071 rcu_read_unlock();
5072 return -EINVAL;
5073 }
5074 rb_toggle_paused(rb, !!arg);
5075 rcu_read_unlock();
5076 return 0;
5077 }
f371b304
YS
5078
5079 case PERF_EVENT_IOC_QUERY_BPF:
f4e2298e 5080 return perf_event_query_prog_array(event, (void __user *)arg);
32ff77e8
MC
5081
5082 case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
5083 struct perf_event_attr new_attr;
5084 int err = perf_copy_attr((struct perf_event_attr __user *)arg,
5085 &new_attr);
5086
5087 if (err)
5088 return err;
5089
5090 return perf_event_modify_attr(event, &new_attr);
5091 }
d859e29f 5092 default:
3df5edad 5093 return -ENOTTY;
d859e29f 5094 }
3df5edad
PZ
5095
5096 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 5097 perf_event_for_each(event, func);
3df5edad 5098 else
cdd6c482 5099 perf_event_for_each_child(event, func);
3df5edad
PZ
5100
5101 return 0;
d859e29f
PM
5102}
5103
f63a8daa
PZ
5104static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5105{
5106 struct perf_event *event = file->private_data;
5107 struct perf_event_context *ctx;
5108 long ret;
5109
5110 ctx = perf_event_ctx_lock(event);
5111 ret = _perf_ioctl(event, cmd, arg);
5112 perf_event_ctx_unlock(event, ctx);
5113
5114 return ret;
5115}
5116
b3f20785
PM
5117#ifdef CONFIG_COMPAT
5118static long perf_compat_ioctl(struct file *file, unsigned int cmd,
5119 unsigned long arg)
5120{
5121 switch (_IOC_NR(cmd)) {
5122 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
5123 case _IOC_NR(PERF_EVENT_IOC_ID):
82489c5f
ES
5124 case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
5125 case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
b3f20785
PM
5126 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
5127 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
5128 cmd &= ~IOCSIZE_MASK;
5129 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
5130 }
5131 break;
5132 }
5133 return perf_ioctl(file, cmd, arg);
5134}
5135#else
5136# define perf_compat_ioctl NULL
5137#endif
5138
cdd6c482 5139int perf_event_task_enable(void)
771d7cde 5140{
f63a8daa 5141 struct perf_event_context *ctx;
cdd6c482 5142 struct perf_event *event;
771d7cde 5143
cdd6c482 5144 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
5145 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
5146 ctx = perf_event_ctx_lock(event);
5147 perf_event_for_each_child(event, _perf_event_enable);
5148 perf_event_ctx_unlock(event, ctx);
5149 }
cdd6c482 5150 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
5151
5152 return 0;
5153}
5154
cdd6c482 5155int perf_event_task_disable(void)
771d7cde 5156{
f63a8daa 5157 struct perf_event_context *ctx;
cdd6c482 5158 struct perf_event *event;
771d7cde 5159
cdd6c482 5160 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
5161 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
5162 ctx = perf_event_ctx_lock(event);
5163 perf_event_for_each_child(event, _perf_event_disable);
5164 perf_event_ctx_unlock(event, ctx);
5165 }
cdd6c482 5166 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
5167
5168 return 0;
5169}
5170
cdd6c482 5171static int perf_event_index(struct perf_event *event)
194002b2 5172{
a4eaf7f1
PZ
5173 if (event->hw.state & PERF_HES_STOPPED)
5174 return 0;
5175
cdd6c482 5176 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
5177 return 0;
5178
35edc2a5 5179 return event->pmu->event_idx(event);
194002b2
PZ
5180}
5181
c4794295 5182static void calc_timer_values(struct perf_event *event,
e3f3541c 5183 u64 *now,
7f310a5d
EM
5184 u64 *enabled,
5185 u64 *running)
c4794295 5186{
e3f3541c 5187 u64 ctx_time;
c4794295 5188
e3f3541c
PZ
5189 *now = perf_clock();
5190 ctx_time = event->shadow_ctx_time + *now;
0d3d73aa 5191 __perf_update_times(event, ctx_time, enabled, running);
c4794295
EM
5192}
5193
fa731587
PZ
5194static void perf_event_init_userpage(struct perf_event *event)
5195{
5196 struct perf_event_mmap_page *userpg;
5197 struct ring_buffer *rb;
5198
5199 rcu_read_lock();
5200 rb = rcu_dereference(event->rb);
5201 if (!rb)
5202 goto unlock;
5203
5204 userpg = rb->user_page;
5205
5206 /* Allow new userspace to detect that bit 0 is deprecated */
5207 userpg->cap_bit0_is_deprecated = 1;
5208 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
5209 userpg->data_offset = PAGE_SIZE;
5210 userpg->data_size = perf_data_size(rb);
fa731587
PZ
5211
5212unlock:
5213 rcu_read_unlock();
5214}
5215
c1317ec2
AL
5216void __weak arch_perf_update_userpage(
5217 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
5218{
5219}
5220
38ff667b
PZ
5221/*
5222 * Callers need to ensure there can be no nesting of this function, otherwise
5223 * the seqlock logic goes bad. We can not serialize this because the arch
5224 * code calls this from NMI context.
5225 */
cdd6c482 5226void perf_event_update_userpage(struct perf_event *event)
37d81828 5227{
cdd6c482 5228 struct perf_event_mmap_page *userpg;
76369139 5229 struct ring_buffer *rb;
e3f3541c 5230 u64 enabled, running, now;
38ff667b
PZ
5231
5232 rcu_read_lock();
5ec4c599
PZ
5233 rb = rcu_dereference(event->rb);
5234 if (!rb)
5235 goto unlock;
5236
0d641208
EM
5237 /*
5238 * compute total_time_enabled, total_time_running
5239 * based on snapshot values taken when the event
5240 * was last scheduled in.
5241 *
5242 * we cannot simply call update_context_time()
5243 * because of locking issues, as we can be called in
5244 * NMI context
5245 */
e3f3541c 5246 calc_timer_values(event, &now, &enabled, &running);
38ff667b 5247
76369139 5248 userpg = rb->user_page;
7b732a75 5249 /*
9d2dcc8f
MF
5250 * Disable preemption to guarantee consistent time stamps are stored to
5251 * the user page.
7b732a75
PZ
5252 */
5253 preempt_disable();
37d81828 5254 ++userpg->lock;
92f22a38 5255 barrier();
cdd6c482 5256 userpg->index = perf_event_index(event);
b5e58793 5257 userpg->offset = perf_event_count(event);
365a4038 5258 if (userpg->index)
e7850595 5259 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 5260
0d641208 5261 userpg->time_enabled = enabled +
cdd6c482 5262 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 5263
0d641208 5264 userpg->time_running = running +
cdd6c482 5265 atomic64_read(&event->child_total_time_running);
7f8b4e4e 5266
c1317ec2 5267 arch_perf_update_userpage(event, userpg, now);
e3f3541c 5268
92f22a38 5269 barrier();
37d81828 5270 ++userpg->lock;
7b732a75 5271 preempt_enable();
38ff667b 5272unlock:
7b732a75 5273 rcu_read_unlock();
37d81828 5274}
82975c46 5275EXPORT_SYMBOL_GPL(perf_event_update_userpage);
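/*
 * Editor's sketch, not part of this file: the userspace side of the
 * ->lock/barrier protocol used above. The names below (pc, the mapped
 * struct perf_event_mmap_page) are illustrative only. A reader of the
 * mmap'ed user page retries whenever the sequence count changed under
 * it, or was odd (an update was in flight):
 *
 *	u32 seq;
 *	u64 idx, offset, enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();			// compiler barrier
 *
 *		idx     = pc->index;
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *
 *		barrier();
 *	} while (pc->lock != seq || (seq & 1));
 */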
37d81828 5276
9e3ed2d7 5277static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
906010b2 5278{
11bac800 5279 struct perf_event *event = vmf->vma->vm_file->private_data;
76369139 5280 struct ring_buffer *rb;
9e3ed2d7 5281 vm_fault_t ret = VM_FAULT_SIGBUS;
906010b2
PZ
5282
5283 if (vmf->flags & FAULT_FLAG_MKWRITE) {
5284 if (vmf->pgoff == 0)
5285 ret = 0;
5286 return ret;
5287 }
5288
5289 rcu_read_lock();
76369139
FW
5290 rb = rcu_dereference(event->rb);
5291 if (!rb)
906010b2
PZ
5292 goto unlock;
5293
5294 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
5295 goto unlock;
5296
76369139 5297 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
5298 if (!vmf->page)
5299 goto unlock;
5300
5301 get_page(vmf->page);
11bac800 5302 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
906010b2
PZ
5303 vmf->page->index = vmf->pgoff;
5304
5305 ret = 0;
5306unlock:
5307 rcu_read_unlock();
5308
5309 return ret;
5310}
5311
10c6db11
PZ
5312static void ring_buffer_attach(struct perf_event *event,
5313 struct ring_buffer *rb)
5314{
b69cf536 5315 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
5316 unsigned long flags;
5317
b69cf536
PZ
5318 if (event->rb) {
5319 /*
5320 * Should be impossible, we set this when removing
5321 * event->rb_entry and wait/clear when adding event->rb_entry.
5322 */
5323 WARN_ON_ONCE(event->rcu_pending);
10c6db11 5324
b69cf536 5325 old_rb = event->rb;
b69cf536
PZ
5326 spin_lock_irqsave(&old_rb->event_lock, flags);
5327 list_del_rcu(&event->rb_entry);
5328 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 5329
2f993cf0
ON
5330 event->rcu_batches = get_state_synchronize_rcu();
5331 event->rcu_pending = 1;
b69cf536 5332 }
10c6db11 5333
b69cf536 5334 if (rb) {
2f993cf0
ON
5335 if (event->rcu_pending) {
5336 cond_synchronize_rcu(event->rcu_batches);
5337 event->rcu_pending = 0;
5338 }
5339
b69cf536
PZ
5340 spin_lock_irqsave(&rb->event_lock, flags);
5341 list_add_rcu(&event->rb_entry, &rb->event_list);
5342 spin_unlock_irqrestore(&rb->event_lock, flags);
5343 }
5344
767ae086
AS
5345 /*
5346 * Avoid racing with perf_mmap_close(AUX): stop the event
5347 * before swizzling the event::rb pointer; if it's getting
5348 * unmapped, its aux_mmap_count will be 0 and it won't
5349 * restart. See the comment in __perf_pmu_output_stop().
5350 *
5351 * Data will inevitably be lost when set_output is done in
5352 * mid-air, but then again, whoever does it like this is
5353 * not in for the data anyway.
5354 */
5355 if (has_aux(event))
5356 perf_event_stop(event, 0);
5357
b69cf536
PZ
5358 rcu_assign_pointer(event->rb, rb);
5359
5360 if (old_rb) {
5361 ring_buffer_put(old_rb);
5362 /*
5363 * Because we detached before setting the new rb (so that we
5364 * could attach the new rb), we could have missed a wakeup.
5365 * Provide it now.
5366 */
5367 wake_up_all(&event->waitq);
5368 }
10c6db11
PZ
5369}
5370
5371static void ring_buffer_wakeup(struct perf_event *event)
5372{
5373 struct ring_buffer *rb;
5374
5375 rcu_read_lock();
5376 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
5377 if (rb) {
5378 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
5379 wake_up_all(&event->waitq);
5380 }
10c6db11
PZ
5381 rcu_read_unlock();
5382}
5383
fdc26706 5384struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 5385{
76369139 5386 struct ring_buffer *rb;
7b732a75 5387
ac9721f3 5388 rcu_read_lock();
76369139
FW
5389 rb = rcu_dereference(event->rb);
5390 if (rb) {
5391 if (!atomic_inc_not_zero(&rb->refcount))
5392 rb = NULL;
ac9721f3
PZ
5393 }
5394 rcu_read_unlock();
5395
76369139 5396 return rb;
ac9721f3
PZ
5397}
5398
fdc26706 5399void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 5400{
76369139 5401 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 5402 return;
7b732a75 5403
9bb5d40c 5404 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 5405
76369139 5406 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
5407}
5408
5409static void perf_mmap_open(struct vm_area_struct *vma)
5410{
cdd6c482 5411 struct perf_event *event = vma->vm_file->private_data;
7b732a75 5412
cdd6c482 5413 atomic_inc(&event->mmap_count);
9bb5d40c 5414 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 5415
45bfb2e5
PZ
5416 if (vma->vm_pgoff)
5417 atomic_inc(&event->rb->aux_mmap_count);
5418
1e0fb9ec 5419 if (event->pmu->event_mapped)
bfe33492 5420 event->pmu->event_mapped(event, vma->vm_mm);
7b732a75
PZ
5421}
5422
95ff4ca2
AS
5423static void perf_pmu_output_stop(struct perf_event *event);
5424
9bb5d40c
PZ
5425/*
5426 * A buffer can be mmap()ed multiple times; either directly through the same
5427 * event, or through other events by use of perf_event_set_output().
5428 *
5429 * In order to undo the VM accounting done by perf_mmap() we need to destroy
5430 * the buffer here, where we still have a VM context. This means we need
5431 * to detach all events redirecting to us.
5432 */
7b732a75
PZ
5433static void perf_mmap_close(struct vm_area_struct *vma)
5434{
cdd6c482 5435 struct perf_event *event = vma->vm_file->private_data;
7b732a75 5436
b69cf536 5437 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
5438 struct user_struct *mmap_user = rb->mmap_user;
5439 int mmap_locked = rb->mmap_locked;
5440 unsigned long size = perf_data_size(rb);
789f90fc 5441
1e0fb9ec 5442 if (event->pmu->event_unmapped)
bfe33492 5443 event->pmu->event_unmapped(event, vma->vm_mm);
1e0fb9ec 5444
45bfb2e5
PZ
5445 /*
5446 * rb->aux_mmap_count will always drop before rb->mmap_count and
5447 * event->mmap_count, so it is ok to use event->mmap_mutex to
5448 * serialize with perf_mmap here.
5449 */
5450 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5451 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
95ff4ca2
AS
5452 /*
5453 * Stop all AUX events that are writing to this buffer,
5454 * so that we can free its AUX pages and corresponding PMU
5455 * data. Note that after rb::aux_mmap_count dropped to zero,
5456 * they won't start any more (see perf_aux_output_begin()).
5457 */
5458 perf_pmu_output_stop(event);
5459
5460 /* now it's safe to free the pages */
45bfb2e5
PZ
5461 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
5462 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
5463
95ff4ca2 5464 /* this has to be the last one */
45bfb2e5 5465 rb_free_aux(rb);
95ff4ca2
AS
5466 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
5467
45bfb2e5
PZ
5468 mutex_unlock(&event->mmap_mutex);
5469 }
5470
9bb5d40c
PZ
5471 atomic_dec(&rb->mmap_count);
5472
5473 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 5474 goto out_put;
9bb5d40c 5475
b69cf536 5476 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
5477 mutex_unlock(&event->mmap_mutex);
5478
5479 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
5480 if (atomic_read(&rb->mmap_count))
5481 goto out_put;
ac9721f3 5482
9bb5d40c
PZ
5483 /*
5484 * No other mmap()s, detach from all other events that might redirect
5485 * into the now unreachable buffer. Somewhat complicated by the
5486 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5487 */
5488again:
5489 rcu_read_lock();
5490 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5491 if (!atomic_long_inc_not_zero(&event->refcount)) {
5492 /*
5493 * This event is en-route to free_event() which will
5494 * detach it and remove it from the list.
5495 */
5496 continue;
5497 }
5498 rcu_read_unlock();
789f90fc 5499
9bb5d40c
PZ
5500 mutex_lock(&event->mmap_mutex);
5501 /*
5502 * Check we didn't race with perf_event_set_output() which can
5503 * swizzle the rb from under us while we were waiting to
5504 * acquire mmap_mutex.
5505 *
5506 * If we find a different rb, ignore this event; the next
5507 * iteration will no longer find it on the list. We still have
5508 * to restart the iteration to make sure we're not now
5509 * iterating the wrong list.
5510 */
b69cf536
PZ
5511 if (event->rb == rb)
5512 ring_buffer_attach(event, NULL);
5513
cdd6c482 5514 mutex_unlock(&event->mmap_mutex);
9bb5d40c 5515 put_event(event);
ac9721f3 5516
9bb5d40c
PZ
5517 /*
5518 * Restart the iteration; either we're on the wrong list or
5519 * we destroyed its integrity by doing a deletion.
5520 */
5521 goto again;
7b732a75 5522 }
9bb5d40c
PZ
5523 rcu_read_unlock();
5524
5525 /*
5526 * It could be there's still a few 0-ref events on the list; they'll
5527 * get cleaned up by free_event() -- they'll also still have their
5528 * ref on the rb and will free it whenever they are done with it.
5529 *
5530 * Aside from that, this buffer is 'fully' detached and unmapped,
5531 * undo the VM accounting.
5532 */
5533
5534 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
5535 vma->vm_mm->pinned_vm -= mmap_locked;
5536 free_uid(mmap_user);
5537
b69cf536 5538out_put:
9bb5d40c 5539 ring_buffer_put(rb); /* could be last */
37d81828
PM
5540}
5541
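/*
 * Editor's note, a hedged sketch rather than anything defined in this
 * file: the "other events" case handled by perf_mmap_close() above is
 * created from userspace with the PERF_EVENT_IOC_SET_OUTPUT ioctl,
 * e.g. (fd1/fd2 are hypothetical perf_event_open() fds):
 *
 *	// make event fd2 emit its records into fd1's ring buffer
 *	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);
 *
 * That path ends up in ring_buffer_attach(), which is why the close
 * path has to walk rb->event_list and detach every redirected event.
 */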
f0f37e2f 5542static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 5543 .open = perf_mmap_open,
fca0c116 5544 .close = perf_mmap_close, /* non mergeable */
43a21ea8
PZ
5545 .fault = perf_mmap_fault,
5546 .page_mkwrite = perf_mmap_fault,
37d81828
PM
5547};
5548
5549static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5550{
cdd6c482 5551 struct perf_event *event = file->private_data;
22a4f650 5552 unsigned long user_locked, user_lock_limit;
789f90fc 5553 struct user_struct *user = current_user();
22a4f650 5554 unsigned long locked, lock_limit;
45bfb2e5 5555 struct ring_buffer *rb = NULL;
7b732a75
PZ
5556 unsigned long vma_size;
5557 unsigned long nr_pages;
45bfb2e5 5558 long user_extra = 0, extra = 0;
d57e34fd 5559 int ret = 0, flags = 0;
37d81828 5560
c7920614
PZ
5561 /*
5562 * Don't allow mmap() of inherited per-task counters. This would
5563 * create a performance issue due to all children writing to the
76369139 5564 * same rb.
c7920614
PZ
5565 */
5566 if (event->cpu == -1 && event->attr.inherit)
5567 return -EINVAL;
5568
43a21ea8 5569 if (!(vma->vm_flags & VM_SHARED))
37d81828 5570 return -EINVAL;
7b732a75
PZ
5571
5572 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
5573
5574 if (vma->vm_pgoff == 0) {
5575 nr_pages = (vma_size / PAGE_SIZE) - 1;
5576 } else {
5577 /*
5578 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5579 * mapped; all subsequent mappings should have the same size
5580 * and offset. Must be above the normal perf buffer.
5581 */
5582 u64 aux_offset, aux_size;
5583
5584 if (!event->rb)
5585 return -EINVAL;
5586
5587 nr_pages = vma_size / PAGE_SIZE;
5588
5589 mutex_lock(&event->mmap_mutex);
5590 ret = -EINVAL;
5591
5592 rb = event->rb;
5593 if (!rb)
5594 goto aux_unlock;
5595
6aa7de05
MR
5596 aux_offset = READ_ONCE(rb->user_page->aux_offset);
5597 aux_size = READ_ONCE(rb->user_page->aux_size);
45bfb2e5
PZ
5598
5599 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5600 goto aux_unlock;
5601
5602 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5603 goto aux_unlock;
5604
5605 /* already mapped with a different offset */
5606 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5607 goto aux_unlock;
5608
5609 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5610 goto aux_unlock;
5611
5612 /* already mapped with a different size */
5613 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5614 goto aux_unlock;
5615
5616 if (!is_power_of_2(nr_pages))
5617 goto aux_unlock;
5618
5619 if (!atomic_inc_not_zero(&rb->mmap_count))
5620 goto aux_unlock;
5621
5622 if (rb_has_aux(rb)) {
5623 atomic_inc(&rb->aux_mmap_count);
5624 ret = 0;
5625 goto unlock;
5626 }
5627
5628 atomic_set(&rb->aux_mmap_count, 1);
5629 user_extra = nr_pages;
5630
5631 goto accounting;
5632 }
7b732a75 5633
7730d865 5634 /*
76369139 5635 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
5636 * can do bitmasks instead of modulo.
5637 */
2ed11312 5638 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
5639 return -EINVAL;
5640
7b732a75 5641 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
5642 return -EINVAL;
5643
cdd6c482 5644 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 5645again:
cdd6c482 5646 mutex_lock(&event->mmap_mutex);
76369139 5647 if (event->rb) {
9bb5d40c 5648 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 5649 ret = -EINVAL;
9bb5d40c
PZ
5650 goto unlock;
5651 }
5652
5653 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5654 /*
5655 * Raced against perf_mmap_close() through
5656 * perf_event_set_output(). Try again, hope for better
5657 * luck.
5658 */
5659 mutex_unlock(&event->mmap_mutex);
5660 goto again;
5661 }
5662
ebb3c4c4
PZ
5663 goto unlock;
5664 }
5665
789f90fc 5666 user_extra = nr_pages + 1;
45bfb2e5
PZ
5667
5668accounting:
cdd6c482 5669 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
5670
5671 /*
5672 * Increase the limit linearly with more CPUs:
5673 */
5674 user_lock_limit *= num_online_cpus();
5675
789f90fc 5676 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 5677
789f90fc
PZ
5678 if (user_locked > user_lock_limit)
5679 extra = user_locked - user_lock_limit;
7b732a75 5680
78d7d407 5681 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 5682 lock_limit >>= PAGE_SHIFT;
bc3e53f6 5683 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 5684
459ec28a
IM
5685 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5686 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
5687 ret = -EPERM;
5688 goto unlock;
5689 }
7b732a75 5690
45bfb2e5 5691 WARN_ON(!rb && event->rb);
906010b2 5692
d57e34fd 5693 if (vma->vm_flags & VM_WRITE)
76369139 5694 flags |= RING_BUFFER_WRITABLE;
d57e34fd 5695
76369139 5696 if (!rb) {
45bfb2e5
PZ
5697 rb = rb_alloc(nr_pages,
5698 event->attr.watermark ? event->attr.wakeup_watermark : 0,
5699 event->cpu, flags);
26cb63ad 5700
45bfb2e5
PZ
5701 if (!rb) {
5702 ret = -ENOMEM;
5703 goto unlock;
5704 }
43a21ea8 5705
45bfb2e5
PZ
5706 atomic_set(&rb->mmap_count, 1);
5707 rb->mmap_user = get_current_user();
5708 rb->mmap_locked = extra;
26cb63ad 5709
45bfb2e5 5710 ring_buffer_attach(event, rb);
ac9721f3 5711
45bfb2e5
PZ
5712 perf_event_init_userpage(event);
5713 perf_event_update_userpage(event);
5714 } else {
1a594131
AS
5715 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5716 event->attr.aux_watermark, flags);
45bfb2e5
PZ
5717 if (!ret)
5718 rb->aux_mmap_locked = extra;
5719 }
9a0f05cb 5720
ebb3c4c4 5721unlock:
45bfb2e5
PZ
5722 if (!ret) {
5723 atomic_long_add(user_extra, &user->locked_vm);
5724 vma->vm_mm->pinned_vm += extra;
5725
ac9721f3 5726 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
5727 } else if (rb) {
5728 atomic_dec(&rb->mmap_count);
5729 }
5730aux_unlock:
cdd6c482 5731 mutex_unlock(&event->mmap_mutex);
37d81828 5732
9bb5d40c
PZ
5733 /*
5734 * Since pinned accounting is per vm we cannot allow fork() to copy our
5735 * vma.
5736 */
26cb63ad 5737 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 5738 vma->vm_ops = &perf_mmap_vmops;
7b732a75 5739
1e0fb9ec 5740 if (event->pmu->event_mapped)
bfe33492 5741 event->pmu->event_mapped(event, vma->vm_mm);
1e0fb9ec 5742
7b732a75 5743 return ret;
37d81828
PM
5744}
5745
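/*
 * Editor's sketch (assumption, not part of this file) of the userspace
 * side of the layout perf_mmap() enforces: the data buffer is mapped
 * MAP_SHARED at offset 0 and must be 1 + 2^n pages (user page plus a
 * power-of-two data area); the optional AUX area is a second mmap() at
 * the offset/size the tool publishes through the user page. perf_fd,
 * pc, aux and n are illustrative names only.
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	int  n    = 8;				// 2^8 data pages
 *	struct perf_event_mmap_page *pc;
 *	void *aux;
 *
 *	pc = mmap(NULL, (1 + (1 << n)) * page, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, perf_fd, 0);
 *
 *	// AUX (e.g. Intel PT) area: advertise offset/size, then map it
 *	pc->aux_offset = (1 + (1 << n)) * page;
 *	pc->aux_size   = (1 << n) * page;
 *	aux = mmap(NULL, pc->aux_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, perf_fd, pc->aux_offset);
 */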
3c446b3d
PZ
5746static int perf_fasync(int fd, struct file *filp, int on)
5747{
496ad9aa 5748 struct inode *inode = file_inode(filp);
cdd6c482 5749 struct perf_event *event = filp->private_data;
3c446b3d
PZ
5750 int retval;
5751
5955102c 5752 inode_lock(inode);
cdd6c482 5753 retval = fasync_helper(fd, filp, on, &event->fasync);
5955102c 5754 inode_unlock(inode);
3c446b3d
PZ
5755
5756 if (retval < 0)
5757 return retval;
5758
5759 return 0;
5760}
5761
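/*
 * Editor's sketch, not part of this file: how a tool arms the fasync
 * path above so that ring-buffer wakeups arrive as a signal instead of
 * (or in addition to) poll(); perf_fd is an assumed perf_event_open()
 * file descriptor:
 *
 *	fcntl(perf_fd, F_SETOWN, getpid());
 *	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
 *
 * kill_fasync() in perf_event_wakeup() below then delivers SIGIO, with
 * the poll code taken from event->pending_kill, to the owning task.
 */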
0793a61d 5762static const struct file_operations perf_fops = {
3326c1ce 5763 .llseek = no_llseek,
0793a61d
TG
5764 .release = perf_release,
5765 .read = perf_read,
5766 .poll = perf_poll,
d859e29f 5767 .unlocked_ioctl = perf_ioctl,
b3f20785 5768 .compat_ioctl = perf_compat_ioctl,
37d81828 5769 .mmap = perf_mmap,
3c446b3d 5770 .fasync = perf_fasync,
0793a61d
TG
5771};
5772
925d519a 5773/*
cdd6c482 5774 * Perf event wakeup
925d519a
PZ
5775 *
5776 * If there's data, ensure we set the poll() state and publish everything
5777 * to user-space before waking everybody up.
5778 */
5779
fed66e2c
PZ
5780static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5781{
5782 /* only the parent has fasync state */
5783 if (event->parent)
5784 event = event->parent;
5785 return &event->fasync;
5786}
5787
cdd6c482 5788void perf_event_wakeup(struct perf_event *event)
925d519a 5789{
10c6db11 5790 ring_buffer_wakeup(event);
4c9e2542 5791
cdd6c482 5792 if (event->pending_kill) {
fed66e2c 5793 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 5794 event->pending_kill = 0;
4c9e2542 5795 }
925d519a
PZ
5796}
5797
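/*
 * Editor's sketch, not part of this file: the consumer side that the
 * publish-before-wakeup ordering above protects. After poll() reports
 * readable data, userspace consumes up to data_head and then stores
 * data_tail so the kernel may reuse the consumed space (pc and perf_fd
 * as in the earlier sketches):
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *	u64 head;
 *
 *	poll(&pfd, 1, -1);
 *	head = pc->data_head;
 *	rmb();				// pairs with the kernel's publish
 *	// ... parse records in [pc->data_tail, head) ...
 *	pc->data_tail = head;
 */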
e360adbe 5798static void perf_pending_event(struct irq_work *entry)
79f14641 5799{
cdd6c482
IM
5800 struct perf_event *event = container_of(entry,
5801 struct perf_event, pending);
d525211f
PZ
5802 int rctx;
5803
5804 rctx = perf_swevent_get_recursion_context();
5805 /*
5806 * If we 'fail' here, that's OK, it means recursion is already disabled
5807 * and we won't recurse 'further'.
5808 */
79f14641 5809
cdd6c482
IM
5810 if (event->pending_disable) {
5811 event->pending_disable = 0;
fae3fde6 5812 perf_event_disable_local(event);
79f14641
PZ
5813 }
5814
cdd6c482
IM
5815 if (event->pending_wakeup) {
5816 event->pending_wakeup = 0;
5817 perf_event_wakeup(event);
79f14641 5818 }
d525211f
PZ
5819
5820 if (rctx >= 0)
5821 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
5822}
5823
39447b38
ZY
5824/*
5825 * We assume there is only KVM supporting the callbacks.
5826 * Later on, we might change it to a list if there is
5827 * another virtualization implementation supporting the callbacks.
5828 */
5829struct perf_guest_info_callbacks *perf_guest_cbs;
5830
5831int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5832{
5833 perf_guest_cbs = cbs;
5834 return 0;
5835}
5836EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5837
5838int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5839{
5840 perf_guest_cbs = NULL;
5841 return 0;
5842}
5843EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
5844
4018994f
JO
5845static void
5846perf_output_sample_regs(struct perf_output_handle *handle,
5847 struct pt_regs *regs, u64 mask)
5848{
5849 int bit;
29dd3288 5850 DECLARE_BITMAP(_mask, 64);
4018994f 5851
29dd3288
MS
5852 bitmap_from_u64(_mask, mask);
5853 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
4018994f
JO
5854 u64 val;
5855
5856 val = perf_reg_value(regs, bit);
5857 perf_output_put(handle, val);
5858 }
5859}
5860
60e2364e 5861static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
5862 struct pt_regs *regs,
5863 struct pt_regs *regs_user_copy)
4018994f 5864{
88a7c26a
AL
5865 if (user_mode(regs)) {
5866 regs_user->abi = perf_reg_abi(current);
2565711f 5867 regs_user->regs = regs;
88a7c26a
AL
5868 } else if (current->mm) {
5869 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
5870 } else {
5871 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5872 regs_user->regs = NULL;
4018994f
JO
5873 }
5874}
5875
60e2364e
SE
5876static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5877 struct pt_regs *regs)
5878{
5879 regs_intr->regs = regs;
5880 regs_intr->abi = perf_reg_abi(current);
5881}
5882
5883
c5ebcedb
JO
5884/*
5885 * Get remaining task size from user stack pointer.
5886 *
5887 * It'd be better to take the stack vma map and limit this more
5888 * precisely, but there's no way to get it safely under interrupt,
5889 * so using TASK_SIZE as limit.
5890 */
5891static u64 perf_ustack_task_size(struct pt_regs *regs)
5892{
5893 unsigned long addr = perf_user_stack_pointer(regs);
5894
5895 if (!addr || addr >= TASK_SIZE)
5896 return 0;
5897
5898 return TASK_SIZE - addr;
5899}
5900
5901static u16
5902perf_sample_ustack_size(u16 stack_size, u16 header_size,
5903 struct pt_regs *regs)
5904{
5905 u64 task_size;
5906
5907 /* No regs, no stack pointer, no dump. */
5908 if (!regs)
5909 return 0;
5910
5911 /*
5912 * Check if the requested stack size fits into the:
5913 * - TASK_SIZE
5914 * If it doesn't, we limit the size to TASK_SIZE.
5915 *
5916 * - remaining sample size
5917 * If it doesn't, we shrink the stack size to
5918 * fit into the remaining sample size.
5919 */
5920
5921 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5922 stack_size = min(stack_size, (u16) task_size);
5923
5924 /* Current header size plus static size and dynamic size. */
5925 header_size += 2 * sizeof(u64);
5926
5927 /* Do we fit in with the current stack dump size? */
5928 if ((u16) (header_size + stack_size) < header_size) {
5929 /*
5930 * If we overflow the maximum size for the sample,
5931 * we customize the stack dump size to fit in.
5932 */
5933 stack_size = USHRT_MAX - header_size - sizeof(u64);
5934 stack_size = round_up(stack_size, sizeof(u64));
5935 }
5936
5937 return stack_size;
5938}
5939
5940static void
5941perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5942 struct pt_regs *regs)
5943{
5944 /* Case of a kernel thread, nothing to dump */
5945 if (!regs) {
5946 u64 size = 0;
5947 perf_output_put(handle, size);
5948 } else {
5949 unsigned long sp;
5950 unsigned int rem;
5951 u64 dyn_size;
02e18447 5952 mm_segment_t fs;
c5ebcedb
JO
5953
5954 /*
5955 * We dump:
5956 * static size
5957 * - the size requested by the user, or the best one we can fit
5958 * into the sample max size
5959 * data
5960 * - user stack dump data
5961 * dynamic size
5962 * - the actual dumped size
5963 */
5964
5965 /* Static size. */
5966 perf_output_put(handle, dump_size);
5967
5968 /* Data. */
5969 sp = perf_user_stack_pointer(regs);
02e18447
YC
5970 fs = get_fs();
5971 set_fs(USER_DS);
c5ebcedb 5972 rem = __output_copy_user(handle, (void *) sp, dump_size);
02e18447 5973 set_fs(fs);
c5ebcedb
JO
5974 dyn_size = dump_size - rem;
5975
5976 perf_output_skip(handle, rem);
5977
5978 /* Dynamic size. */
5979 perf_output_put(handle, dyn_size);
5980 }
5981}
5982
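/*
 * Editor's note, a sketch of the record layout the two helpers above
 * produce for PERF_SAMPLE_STACK_USER (matching the uapi description):
 *
 *	u64	size;		// static size requested/clamped
 *	char	data[size];	// raw user stack copy, zero-padded
 *	u64	dyn_size;	// bytes actually dumped
 *
 * For the kernel-thread (!regs) case only a zero "size" is emitted and
 * dyn_size is omitted.
 */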
c980d109
ACM
5983static void __perf_event_header__init_id(struct perf_event_header *header,
5984 struct perf_sample_data *data,
5985 struct perf_event *event)
6844c09d
ACM
5986{
5987 u64 sample_type = event->attr.sample_type;
5988
5989 data->type = sample_type;
5990 header->size += event->id_header_size;
5991
5992 if (sample_type & PERF_SAMPLE_TID) {
5993 /* namespace issues */
5994 data->tid_entry.pid = perf_event_pid(event, current);
5995 data->tid_entry.tid = perf_event_tid(event, current);
5996 }
5997
5998 if (sample_type & PERF_SAMPLE_TIME)
34f43927 5999 data->time = perf_event_clock(event);
6844c09d 6000
ff3d527c 6001 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
6002 data->id = primary_event_id(event);
6003
6004 if (sample_type & PERF_SAMPLE_STREAM_ID)
6005 data->stream_id = event->id;
6006
6007 if (sample_type & PERF_SAMPLE_CPU) {
6008 data->cpu_entry.cpu = raw_smp_processor_id();
6009 data->cpu_entry.reserved = 0;
6010 }
6011}
6012
76369139
FW
6013void perf_event_header__init_id(struct perf_event_header *header,
6014 struct perf_sample_data *data,
6015 struct perf_event *event)
c980d109
ACM
6016{
6017 if (event->attr.sample_id_all)
6018 __perf_event_header__init_id(header, data, event);
6019}
6020
6021static void __perf_event__output_id_sample(struct perf_output_handle *handle,
6022 struct perf_sample_data *data)
6023{
6024 u64 sample_type = data->type;
6025
6026 if (sample_type & PERF_SAMPLE_TID)
6027 perf_output_put(handle, data->tid_entry);
6028
6029 if (sample_type & PERF_SAMPLE_TIME)
6030 perf_output_put(handle, data->time);
6031
6032 if (sample_type & PERF_SAMPLE_ID)
6033 perf_output_put(handle, data->id);
6034
6035 if (sample_type & PERF_SAMPLE_STREAM_ID)
6036 perf_output_put(handle, data->stream_id);
6037
6038 if (sample_type & PERF_SAMPLE_CPU)
6039 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
6040
6041 if (sample_type & PERF_SAMPLE_IDENTIFIER)
6042 perf_output_put(handle, data->id);
c980d109
ACM
6043}
6044
76369139
FW
6045void perf_event__output_id_sample(struct perf_event *event,
6046 struct perf_output_handle *handle,
6047 struct perf_sample_data *sample)
c980d109
ACM
6048{
6049 if (event->attr.sample_id_all)
6050 __perf_event__output_id_sample(handle, sample);
6051}
6052
3dab77fb 6053static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
6054 struct perf_event *event,
6055 u64 enabled, u64 running)
3dab77fb 6056{
cdd6c482 6057 u64 read_format = event->attr.read_format;
3dab77fb
PZ
6058 u64 values[4];
6059 int n = 0;
6060
b5e58793 6061 values[n++] = perf_event_count(event);
3dab77fb 6062 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 6063 values[n++] = enabled +
cdd6c482 6064 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
6065 }
6066 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 6067 values[n++] = running +
cdd6c482 6068 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
6069 }
6070 if (read_format & PERF_FORMAT_ID)
cdd6c482 6071 values[n++] = primary_event_id(event);
3dab77fb 6072
76369139 6073 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
6074}
6075
3dab77fb 6076static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
6077 struct perf_event *event,
6078 u64 enabled, u64 running)
3dab77fb 6079{
cdd6c482
IM
6080 struct perf_event *leader = event->group_leader, *sub;
6081 u64 read_format = event->attr.read_format;
3dab77fb
PZ
6082 u64 values[5];
6083 int n = 0;
6084
6085 values[n++] = 1 + leader->nr_siblings;
6086
6087 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 6088 values[n++] = enabled;
3dab77fb
PZ
6089
6090 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 6091 values[n++] = running;
3dab77fb 6092
9e5b127d
PZ
6093 if ((leader != event) &&
6094 (leader->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
6095 leader->pmu->read(leader);
6096
b5e58793 6097 values[n++] = perf_event_count(leader);
3dab77fb 6098 if (read_format & PERF_FORMAT_ID)
cdd6c482 6099 values[n++] = primary_event_id(leader);
3dab77fb 6100
76369139 6101 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 6102
edb39592 6103 for_each_sibling_event(sub, leader) {
3dab77fb
PZ
6104 n = 0;
6105
6f5ab001
JO
6106 if ((sub != event) &&
6107 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
6108 sub->pmu->read(sub);
6109
b5e58793 6110 values[n++] = perf_event_count(sub);
3dab77fb 6111 if (read_format & PERF_FORMAT_ID)
cdd6c482 6112 values[n++] = primary_event_id(sub);
3dab77fb 6113
76369139 6114 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
6115 }
6116}
6117
eed01528
SE
6118#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
6119 PERF_FORMAT_TOTAL_TIME_RUNNING)
6120
ba5213ae
PZ
6121/*
6122 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
6123 *
6124 * The problem is that it's both hard and excessively expensive to iterate the
6125 * child list, not to mention that it's impossible to IPI the children running
6126 * on another CPU from interrupt/NMI context.
6127 */
3dab77fb 6128static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 6129 struct perf_event *event)
3dab77fb 6130{
e3f3541c 6131 u64 enabled = 0, running = 0, now;
eed01528
SE
6132 u64 read_format = event->attr.read_format;
6133
6134 /*
6135 * compute total_time_enabled, total_time_running
6136 * based on snapshot values taken when the event
6137 * was last scheduled in.
6138 *
6139 * we cannot simply call update_context_time()
6140 * because of locking issues, as we are called in
6141 * NMI context
6142 */
c4794295 6143 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 6144 calc_timer_values(event, &now, &enabled, &running);
eed01528 6145
cdd6c482 6146 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 6147 perf_output_read_group(handle, event, enabled, running);
3dab77fb 6148 else
eed01528 6149 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
6150}
6151
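/*
 * Editor's note, a sketch of the resulting read value layout (matching
 * the two helpers above and the uapi read_format documentation):
 *
 * without PERF_FORMAT_GROUP:
 *	{ u64 value;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           }	&& PERF_FORMAT_ID
 *	}
 *
 * with PERF_FORMAT_GROUP:
 *	{ u64 nr;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id; }		&& PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 */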
5622f295
MM
6152void perf_output_sample(struct perf_output_handle *handle,
6153 struct perf_event_header *header,
6154 struct perf_sample_data *data,
cdd6c482 6155 struct perf_event *event)
5622f295
MM
6156{
6157 u64 sample_type = data->type;
6158
6159 perf_output_put(handle, *header);
6160
ff3d527c
AH
6161 if (sample_type & PERF_SAMPLE_IDENTIFIER)
6162 perf_output_put(handle, data->id);
6163
5622f295
MM
6164 if (sample_type & PERF_SAMPLE_IP)
6165 perf_output_put(handle, data->ip);
6166
6167 if (sample_type & PERF_SAMPLE_TID)
6168 perf_output_put(handle, data->tid_entry);
6169
6170 if (sample_type & PERF_SAMPLE_TIME)
6171 perf_output_put(handle, data->time);
6172
6173 if (sample_type & PERF_SAMPLE_ADDR)
6174 perf_output_put(handle, data->addr);
6175
6176 if (sample_type & PERF_SAMPLE_ID)
6177 perf_output_put(handle, data->id);
6178
6179 if (sample_type & PERF_SAMPLE_STREAM_ID)
6180 perf_output_put(handle, data->stream_id);
6181
6182 if (sample_type & PERF_SAMPLE_CPU)
6183 perf_output_put(handle, data->cpu_entry);
6184
6185 if (sample_type & PERF_SAMPLE_PERIOD)
6186 perf_output_put(handle, data->period);
6187
6188 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 6189 perf_output_read(handle, event);
5622f295
MM
6190
6191 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
99e818cc 6192 int size = 1;
5622f295 6193
99e818cc
JO
6194 size += data->callchain->nr;
6195 size *= sizeof(u64);
6196 __output_copy(handle, data->callchain, size);
5622f295
MM
6197 }
6198
6199 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
6200 struct perf_raw_record *raw = data->raw;
6201
6202 if (raw) {
6203 struct perf_raw_frag *frag = &raw->frag;
6204
6205 perf_output_put(handle, raw->size);
6206 do {
6207 if (frag->copy) {
6208 __output_custom(handle, frag->copy,
6209 frag->data, frag->size);
6210 } else {
6211 __output_copy(handle, frag->data,
6212 frag->size);
6213 }
6214 if (perf_raw_frag_last(frag))
6215 break;
6216 frag = frag->next;
6217 } while (1);
6218 if (frag->pad)
6219 __output_skip(handle, NULL, frag->pad);
5622f295
MM
6220 } else {
6221 struct {
6222 u32 size;
6223 u32 data;
6224 } raw = {
6225 .size = sizeof(u32),
6226 .data = 0,
6227 };
6228 perf_output_put(handle, raw);
6229 }
6230 }
a7ac67ea 6231
bce38cd5
SE
6232 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
6233 if (data->br_stack) {
6234 size_t size;
6235
6236 size = data->br_stack->nr
6237 * sizeof(struct perf_branch_entry);
6238
6239 perf_output_put(handle, data->br_stack->nr);
6240 perf_output_copy(handle, data->br_stack->entries, size);
6241 } else {
6242 /*
6243 * we always store at least the value of nr
6244 */
6245 u64 nr = 0;
6246 perf_output_put(handle, nr);
6247 }
6248 }
4018994f
JO
6249
6250 if (sample_type & PERF_SAMPLE_REGS_USER) {
6251 u64 abi = data->regs_user.abi;
6252
6253 /*
6254 * If there are no regs to dump, notice it through
6255 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
6256 */
6257 perf_output_put(handle, abi);
6258
6259 if (abi) {
6260 u64 mask = event->attr.sample_regs_user;
6261 perf_output_sample_regs(handle,
6262 data->regs_user.regs,
6263 mask);
6264 }
6265 }
c5ebcedb 6266
a5cdd40c 6267 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
6268 perf_output_sample_ustack(handle,
6269 data->stack_user_size,
6270 data->regs_user.regs);
a5cdd40c 6271 }
c3feedf2
AK
6272
6273 if (sample_type & PERF_SAMPLE_WEIGHT)
6274 perf_output_put(handle, data->weight);
d6be9ad6
SE
6275
6276 if (sample_type & PERF_SAMPLE_DATA_SRC)
6277 perf_output_put(handle, data->data_src.val);
a5cdd40c 6278
fdfbbd07
AK
6279 if (sample_type & PERF_SAMPLE_TRANSACTION)
6280 perf_output_put(handle, data->txn);
6281
60e2364e
SE
6282 if (sample_type & PERF_SAMPLE_REGS_INTR) {
6283 u64 abi = data->regs_intr.abi;
6284 /*
6285 * If there are no regs to dump, notice it through
6286 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
6287 */
6288 perf_output_put(handle, abi);
6289
6290 if (abi) {
6291 u64 mask = event->attr.sample_regs_intr;
6292
6293 perf_output_sample_regs(handle,
6294 data->regs_intr.regs,
6295 mask);
6296 }
6297 }
6298
fc7ce9c7
KL
6299 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
6300 perf_output_put(handle, data->phys_addr);
6301
a5cdd40c
PZ
6302 if (!event->attr.watermark) {
6303 int wakeup_events = event->attr.wakeup_events;
6304
6305 if (wakeup_events) {
6306 struct ring_buffer *rb = handle->rb;
6307 int events = local_inc_return(&rb->events);
6308
6309 if (events >= wakeup_events) {
6310 local_sub(wakeup_events, &rb->events);
6311 local_inc(&rb->wakeup);
6312 }
6313 }
6314 }
5622f295
MM
6315}
6316
fc7ce9c7
KL
6317static u64 perf_virt_to_phys(u64 virt)
6318{
6319 u64 phys_addr = 0;
6320 struct page *p = NULL;
6321
6322 if (!virt)
6323 return 0;
6324
6325 if (virt >= TASK_SIZE) {
6326 /* If it's vmalloc()d memory, leave phys_addr as 0 */
6327 if (virt_addr_valid((void *)(uintptr_t)virt) &&
6328 !(virt >= VMALLOC_START && virt < VMALLOC_END))
6329 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
6330 } else {
6331 /*
6332 * Walking the page tables for a user address.
6333 * Interrupts are disabled, which prevents any teardown
6334 * of the page tables.
6335 * Try the IRQ-safe __get_user_pages_fast() first.
6336 * If that fails, leave phys_addr as 0.
6337 */
6338 if ((current->mm != NULL) &&
6339 (__get_user_pages_fast(virt, 1, 0, &p) == 1))
6340 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
6341
6342 if (p)
6343 put_page(p);
6344 }
6345
6346 return phys_addr;
6347}
6348
99e818cc
JO
6349static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
6350
6cbc304f 6351struct perf_callchain_entry *
8cf7e0e2
JO
6352perf_callchain(struct perf_event *event, struct pt_regs *regs)
6353{
6354 bool kernel = !event->attr.exclude_callchain_kernel;
6355 bool user = !event->attr.exclude_callchain_user;
6356 /* Disallow cross-task user callchains. */
6357 bool crosstask = event->ctx->task && event->ctx->task != current;
6358 const u32 max_stack = event->attr.sample_max_stack;
99e818cc 6359 struct perf_callchain_entry *callchain;
8cf7e0e2
JO
6360
6361 if (!kernel && !user)
99e818cc 6362 return &__empty_callchain;
8cf7e0e2 6363
99e818cc
JO
6364 callchain = get_perf_callchain(regs, 0, kernel, user,
6365 max_stack, crosstask, true);
6366 return callchain ?: &__empty_callchain;
8cf7e0e2
JO
6367}
6368
5622f295
MM
6369void perf_prepare_sample(struct perf_event_header *header,
6370 struct perf_sample_data *data,
cdd6c482 6371 struct perf_event *event,
5622f295 6372 struct pt_regs *regs)
7b732a75 6373{
cdd6c482 6374 u64 sample_type = event->attr.sample_type;
7b732a75 6375
cdd6c482 6376 header->type = PERF_RECORD_SAMPLE;
c320c7b7 6377 header->size = sizeof(*header) + event->header_size;
5622f295
MM
6378
6379 header->misc = 0;
6380 header->misc |= perf_misc_flags(regs);
6fab0192 6381
c980d109 6382 __perf_event_header__init_id(header, data, event);
6844c09d 6383
c320c7b7 6384 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
6385 data->ip = perf_instruction_pointer(regs);
6386
b23f3325 6387 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 6388 int size = 1;
394ee076 6389
6cbc304f
PZ
6390 if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
6391 data->callchain = perf_callchain(event, regs);
6392
99e818cc 6393 size += data->callchain->nr;
5622f295
MM
6394
6395 header->size += size * sizeof(u64);
394ee076
PZ
6396 }
6397
3a43ce68 6398 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
6399 struct perf_raw_record *raw = data->raw;
6400 int size;
6401
6402 if (raw) {
6403 struct perf_raw_frag *frag = &raw->frag;
6404 u32 sum = 0;
6405
6406 do {
6407 sum += frag->size;
6408 if (perf_raw_frag_last(frag))
6409 break;
6410 frag = frag->next;
6411 } while (1);
6412
6413 size = round_up(sum + sizeof(u32), sizeof(u64));
6414 raw->size = size - sizeof(u32);
6415 frag->pad = raw->size - sum;
6416 } else {
6417 size = sizeof(u64);
6418 }
a044560c 6419
7e3f977e 6420 header->size += size;
7f453c24 6421 }
bce38cd5
SE
6422
6423 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
6424 int size = sizeof(u64); /* nr */
6425 if (data->br_stack) {
6426 size += data->br_stack->nr
6427 * sizeof(struct perf_branch_entry);
6428 }
6429 header->size += size;
6430 }
4018994f 6431
2565711f 6432 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
6433 perf_sample_regs_user(&data->regs_user, regs,
6434 &data->regs_user_copy);
2565711f 6435
4018994f
JO
6436 if (sample_type & PERF_SAMPLE_REGS_USER) {
6437 /* regs dump ABI info */
6438 int size = sizeof(u64);
6439
4018994f
JO
6440 if (data->regs_user.regs) {
6441 u64 mask = event->attr.sample_regs_user;
6442 size += hweight64(mask) * sizeof(u64);
6443 }
6444
6445 header->size += size;
6446 }
c5ebcedb
JO
6447
6448 if (sample_type & PERF_SAMPLE_STACK_USER) {
6449 /*
6450 * Either we need the PERF_SAMPLE_STACK_USER bit to always be
6451 * processed as the last one, or an additional check added
6452 * in case a new sample type is added, because we could eat
6453 * up the rest of the sample size.
6454 */
c5ebcedb
JO
6455 u16 stack_size = event->attr.sample_stack_user;
6456 u16 size = sizeof(u64);
6457
c5ebcedb 6458 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 6459 data->regs_user.regs);
c5ebcedb
JO
6460
6461 /*
6462 * If there is something to dump, add space for the dump
6463 * itself and for the field that tells the dynamic size,
6464 * which is how many bytes were actually dumped.
6465 */
6466 if (stack_size)
6467 size += sizeof(u64) + stack_size;
6468
6469 data->stack_user_size = stack_size;
6470 header->size += size;
6471 }
60e2364e
SE
6472
6473 if (sample_type & PERF_SAMPLE_REGS_INTR) {
6474 /* regs dump ABI info */
6475 int size = sizeof(u64);
6476
6477 perf_sample_regs_intr(&data->regs_intr, regs);
6478
6479 if (data->regs_intr.regs) {
6480 u64 mask = event->attr.sample_regs_intr;
6481
6482 size += hweight64(mask) * sizeof(u64);
6483 }
6484
6485 header->size += size;
6486 }
fc7ce9c7
KL
6487
6488 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
6489 data->phys_addr = perf_virt_to_phys(data->addr);
5622f295 6490}
7f453c24 6491
93315101 6492static __always_inline void
9ecda41a
WN
6493__perf_event_output(struct perf_event *event,
6494 struct perf_sample_data *data,
6495 struct pt_regs *regs,
6496 int (*output_begin)(struct perf_output_handle *,
6497 struct perf_event *,
6498 unsigned int))
5622f295
MM
6499{
6500 struct perf_output_handle handle;
6501 struct perf_event_header header;
689802b2 6502
927c7a9e
FW
6503 /* protect the callchain buffers */
6504 rcu_read_lock();
6505
cdd6c482 6506 perf_prepare_sample(&header, data, event, regs);
5c148194 6507
9ecda41a 6508 if (output_begin(&handle, event, header.size))
927c7a9e 6509 goto exit;
0322cd6e 6510
cdd6c482 6511 perf_output_sample(&handle, &header, data, event);
f413cdb8 6512
8a057d84 6513 perf_output_end(&handle);
927c7a9e
FW
6514
6515exit:
6516 rcu_read_unlock();
0322cd6e
PZ
6517}
6518
9ecda41a
WN
6519void
6520perf_event_output_forward(struct perf_event *event,
6521 struct perf_sample_data *data,
6522 struct pt_regs *regs)
6523{
6524 __perf_event_output(event, data, regs, perf_output_begin_forward);
6525}
6526
6527void
6528perf_event_output_backward(struct perf_event *event,
6529 struct perf_sample_data *data,
6530 struct pt_regs *regs)
6531{
6532 __perf_event_output(event, data, regs, perf_output_begin_backward);
6533}
6534
6535void
6536perf_event_output(struct perf_event *event,
6537 struct perf_sample_data *data,
6538 struct pt_regs *regs)
6539{
6540 __perf_event_output(event, data, regs, perf_output_begin);
6541}
6542
38b200d6 6543/*
cdd6c482 6544 * read event_id
38b200d6
PZ
6545 */
6546
6547struct perf_read_event {
6548 struct perf_event_header header;
6549
6550 u32 pid;
6551 u32 tid;
38b200d6
PZ
6552};
6553
6554static void
cdd6c482 6555perf_event_read_event(struct perf_event *event,
38b200d6
PZ
6556 struct task_struct *task)
6557{
6558 struct perf_output_handle handle;
c980d109 6559 struct perf_sample_data sample;
dfc65094 6560 struct perf_read_event read_event = {
38b200d6 6561 .header = {
cdd6c482 6562 .type = PERF_RECORD_READ,
38b200d6 6563 .misc = 0,
c320c7b7 6564 .size = sizeof(read_event) + event->read_size,
38b200d6 6565 },
cdd6c482
IM
6566 .pid = perf_event_pid(event, task),
6567 .tid = perf_event_tid(event, task),
38b200d6 6568 };
3dab77fb 6569 int ret;
38b200d6 6570
c980d109 6571 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 6572 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
6573 if (ret)
6574 return;
6575
dfc65094 6576 perf_output_put(&handle, read_event);
cdd6c482 6577 perf_output_read(&handle, event);
c980d109 6578 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 6579
38b200d6
PZ
6580 perf_output_end(&handle);
6581}
6582
aab5b71e 6583typedef void (perf_iterate_f)(struct perf_event *event, void *data);
52d857a8
JO
6584
6585static void
aab5b71e
PZ
6586perf_iterate_ctx(struct perf_event_context *ctx,
6587 perf_iterate_f output,
b73e4fef 6588 void *data, bool all)
52d857a8
JO
6589{
6590 struct perf_event *event;
6591
6592 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
b73e4fef
AS
6593 if (!all) {
6594 if (event->state < PERF_EVENT_STATE_INACTIVE)
6595 continue;
6596 if (!event_filter_match(event))
6597 continue;
6598 }
6599
67516844 6600 output(event, data);
52d857a8
JO
6601 }
6602}
6603
aab5b71e 6604static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
f2fb6bef
KL
6605{
6606 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
6607 struct perf_event *event;
6608
6609 list_for_each_entry_rcu(event, &pel->list, sb_list) {
0b8f1e2e
PZ
6610 /*
6611 * Skip events that are not fully formed yet; ensure that
6612 * if we observe event->ctx, both event and ctx will be
6613 * complete enough. See perf_install_in_context().
6614 */
6615 if (!smp_load_acquire(&event->ctx))
6616 continue;
6617
f2fb6bef
KL
6618 if (event->state < PERF_EVENT_STATE_INACTIVE)
6619 continue;
6620 if (!event_filter_match(event))
6621 continue;
6622 output(event, data);
6623 }
6624}
6625
aab5b71e
PZ
6626/*
6627 * Iterate all events that need to receive side-band events.
6628 *
6629 * For new callers: ensure that account_pmu_sb_event() includes
6630 * your event, otherwise it might not get delivered.
6631 */
52d857a8 6632static void
aab5b71e 6633perf_iterate_sb(perf_iterate_f output, void *data,
52d857a8
JO
6634 struct perf_event_context *task_ctx)
6635{
52d857a8 6636 struct perf_event_context *ctx;
52d857a8
JO
6637 int ctxn;
6638
aab5b71e
PZ
6639 rcu_read_lock();
6640 preempt_disable();
6641
4e93ad60 6642 /*
aab5b71e
PZ
6643 * If we have task_ctx != NULL we only notify the task context itself.
6644 * The task_ctx is set only for EXIT events before releasing task
4e93ad60
JO
6645 * context.
6646 */
6647 if (task_ctx) {
aab5b71e
PZ
6648 perf_iterate_ctx(task_ctx, output, data, false);
6649 goto done;
4e93ad60
JO
6650 }
6651
aab5b71e 6652 perf_iterate_sb_cpu(output, data);
f2fb6bef
KL
6653
6654 for_each_task_context_nr(ctxn) {
52d857a8
JO
6655 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6656 if (ctx)
aab5b71e 6657 perf_iterate_ctx(ctx, output, data, false);
52d857a8 6658 }
aab5b71e 6659done:
f2fb6bef 6660 preempt_enable();
52d857a8 6661 rcu_read_unlock();
95ff4ca2
AS
6662}
6663
375637bc
AS
6664/*
6665 * Clear all file-based filters at exec; they'll have to be
6666 * reinstated when/if these objects are mmapped again.
6667 */
6668static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6669{
6670 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6671 struct perf_addr_filter *filter;
6672 unsigned int restart = 0, count = 0;
6673 unsigned long flags;
6674
6675 if (!has_addr_filter(event))
6676 return;
6677
6678 raw_spin_lock_irqsave(&ifh->lock, flags);
6679 list_for_each_entry(filter, &ifh->list, entry) {
9511bce9 6680 if (filter->path.dentry) {
375637bc
AS
6681 event->addr_filters_offs[count] = 0;
6682 restart++;
6683 }
6684
6685 count++;
6686 }
6687
6688 if (restart)
6689 event->addr_filters_gen++;
6690 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6691
6692 if (restart)
767ae086 6693 perf_event_stop(event, 1);
375637bc
AS
6694}
6695
6696void perf_event_exec(void)
6697{
6698 struct perf_event_context *ctx;
6699 int ctxn;
6700
6701 rcu_read_lock();
6702 for_each_task_context_nr(ctxn) {
6703 ctx = current->perf_event_ctxp[ctxn];
6704 if (!ctx)
6705 continue;
6706
6707 perf_event_enable_on_exec(ctxn);
6708
aab5b71e 6709 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
375637bc
AS
6710 true);
6711 }
6712 rcu_read_unlock();
6713}
6714
95ff4ca2
AS
6715struct remote_output {
6716 struct ring_buffer *rb;
6717 int err;
6718};
6719
6720static void __perf_event_output_stop(struct perf_event *event, void *data)
6721{
6722 struct perf_event *parent = event->parent;
6723 struct remote_output *ro = data;
6724 struct ring_buffer *rb = ro->rb;
375637bc
AS
6725 struct stop_event_data sd = {
6726 .event = event,
6727 };
95ff4ca2
AS
6728
6729 if (!has_aux(event))
6730 return;
6731
6732 if (!parent)
6733 parent = event;
6734
6735 /*
6736 * In case of inheritance, it will be the parent that links to the
767ae086
AS
6737 * ring-buffer, but it will be the child that's actually using it.
6738 *
6739 * We are using event::rb to determine if the event should be stopped;
6740 * however, this may race with ring_buffer_attach() (through set_output),
6741 * which will make us skip the event that actually needs to be stopped.
6742 * So ring_buffer_attach() has to stop an aux event before re-assigning
6743 * its rb pointer.
95ff4ca2
AS
6744 */
6745 if (rcu_dereference(parent->rb) == rb)
375637bc 6746 ro->err = __perf_event_stop(&sd);
95ff4ca2
AS
6747}
6748
6749static int __perf_pmu_output_stop(void *info)
6750{
6751 struct perf_event *event = info;
6752 struct pmu *pmu = event->pmu;
8b6a3fe8 6753 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95ff4ca2
AS
6754 struct remote_output ro = {
6755 .rb = event->rb,
6756 };
6757
6758 rcu_read_lock();
aab5b71e 6759 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
95ff4ca2 6760 if (cpuctx->task_ctx)
aab5b71e 6761 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
b73e4fef 6762 &ro, false);
95ff4ca2
AS
6763 rcu_read_unlock();
6764
6765 return ro.err;
6766}
6767
6768static void perf_pmu_output_stop(struct perf_event *event)
6769{
6770 struct perf_event *iter;
6771 int err, cpu;
6772
6773restart:
6774 rcu_read_lock();
6775 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6776 /*
6777 * For per-CPU events, we need to make sure that neither they
6778 * nor their children are running; for cpu==-1 events it's
6779 * sufficient to stop the event itself if it's active, since
6780 * it can't have children.
6781 */
6782 cpu = iter->cpu;
6783 if (cpu == -1)
6784 cpu = READ_ONCE(iter->oncpu);
6785
6786 if (cpu == -1)
6787 continue;
6788
6789 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6790 if (err == -EAGAIN) {
6791 rcu_read_unlock();
6792 goto restart;
6793 }
6794 }
6795 rcu_read_unlock();
52d857a8
JO
6796}
6797
60313ebe 6798/*
9f498cc5
PZ
6799 * task tracking -- fork/exit
6800 *
13d7a241 6801 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
6802 */
6803
9f498cc5 6804struct perf_task_event {
3a80b4a3 6805 struct task_struct *task;
cdd6c482 6806 struct perf_event_context *task_ctx;
60313ebe
PZ
6807
6808 struct {
6809 struct perf_event_header header;
6810
6811 u32 pid;
6812 u32 ppid;
9f498cc5
PZ
6813 u32 tid;
6814 u32 ptid;
393b2ad8 6815 u64 time;
cdd6c482 6816 } event_id;
60313ebe
PZ
6817};
6818
67516844
JO
6819static int perf_event_task_match(struct perf_event *event)
6820{
13d7a241
SE
6821 return event->attr.comm || event->attr.mmap ||
6822 event->attr.mmap2 || event->attr.mmap_data ||
6823 event->attr.task;
67516844
JO
6824}
6825
cdd6c482 6826static void perf_event_task_output(struct perf_event *event,
52d857a8 6827 void *data)
60313ebe 6828{
52d857a8 6829 struct perf_task_event *task_event = data;
60313ebe 6830 struct perf_output_handle handle;
c980d109 6831 struct perf_sample_data sample;
9f498cc5 6832 struct task_struct *task = task_event->task;
c980d109 6833 int ret, size = task_event->event_id.header.size;
8bb39f9a 6834
67516844
JO
6835 if (!perf_event_task_match(event))
6836 return;
6837
c980d109 6838 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 6839
c980d109 6840 ret = perf_output_begin(&handle, event,
a7ac67ea 6841 task_event->event_id.header.size);
ef60777c 6842 if (ret)
c980d109 6843 goto out;
60313ebe 6844
cdd6c482
IM
6845 task_event->event_id.pid = perf_event_pid(event, task);
6846 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 6847
cdd6c482
IM
6848 task_event->event_id.tid = perf_event_tid(event, task);
6849 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 6850
34f43927
PZ
6851 task_event->event_id.time = perf_event_clock(event);
6852
cdd6c482 6853 perf_output_put(&handle, task_event->event_id);
393b2ad8 6854
c980d109
ACM
6855 perf_event__output_id_sample(event, &handle, &sample);
6856
60313ebe 6857 perf_output_end(&handle);
c980d109
ACM
6858out:
6859 task_event->event_id.header.size = size;
60313ebe
PZ
6860}
6861
cdd6c482
IM
6862static void perf_event_task(struct task_struct *task,
6863 struct perf_event_context *task_ctx,
3a80b4a3 6864 int new)
60313ebe 6865{
9f498cc5 6866 struct perf_task_event task_event;
60313ebe 6867
cdd6c482
IM
6868 if (!atomic_read(&nr_comm_events) &&
6869 !atomic_read(&nr_mmap_events) &&
6870 !atomic_read(&nr_task_events))
60313ebe
PZ
6871 return;
6872
9f498cc5 6873 task_event = (struct perf_task_event){
3a80b4a3
PZ
6874 .task = task,
6875 .task_ctx = task_ctx,
cdd6c482 6876 .event_id = {
60313ebe 6877 .header = {
cdd6c482 6878 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 6879 .misc = 0,
cdd6c482 6880 .size = sizeof(task_event.event_id),
60313ebe 6881 },
573402db
PZ
6882 /* .pid */
6883 /* .ppid */
9f498cc5
PZ
6884 /* .tid */
6885 /* .ptid */
34f43927 6886 /* .time */
60313ebe
PZ
6887 },
6888 };
6889
aab5b71e 6890 perf_iterate_sb(perf_event_task_output,
52d857a8
JO
6891 &task_event,
6892 task_ctx);
9f498cc5
PZ
6893}
6894
cdd6c482 6895void perf_event_fork(struct task_struct *task)
9f498cc5 6896{
cdd6c482 6897 perf_event_task(task, NULL, 1);
e4222673 6898 perf_event_namespaces(task);
60313ebe
PZ
6899}
6900
8d1b2d93
PZ
6901/*
6902 * comm tracking
6903 */
6904
6905struct perf_comm_event {
22a4f650
IM
6906 struct task_struct *task;
6907 char *comm;
8d1b2d93
PZ
6908 int comm_size;
6909
6910 struct {
6911 struct perf_event_header header;
6912
6913 u32 pid;
6914 u32 tid;
cdd6c482 6915 } event_id;
8d1b2d93
PZ
6916};
6917
67516844
JO
6918static int perf_event_comm_match(struct perf_event *event)
6919{
6920 return event->attr.comm;
6921}
6922
cdd6c482 6923static void perf_event_comm_output(struct perf_event *event,
52d857a8 6924 void *data)
8d1b2d93 6925{
52d857a8 6926 struct perf_comm_event *comm_event = data;
8d1b2d93 6927 struct perf_output_handle handle;
c980d109 6928 struct perf_sample_data sample;
cdd6c482 6929 int size = comm_event->event_id.header.size;
c980d109
ACM
6930 int ret;
6931
67516844
JO
6932 if (!perf_event_comm_match(event))
6933 return;
6934
c980d109
ACM
6935 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6936 ret = perf_output_begin(&handle, event,
a7ac67ea 6937 comm_event->event_id.header.size);
8d1b2d93
PZ
6938
6939 if (ret)
c980d109 6940 goto out;
8d1b2d93 6941
cdd6c482
IM
6942 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6943 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 6944
cdd6c482 6945 perf_output_put(&handle, comm_event->event_id);
76369139 6946 __output_copy(&handle, comm_event->comm,
8d1b2d93 6947 comm_event->comm_size);
c980d109
ACM
6948
6949 perf_event__output_id_sample(event, &handle, &sample);
6950
8d1b2d93 6951 perf_output_end(&handle);
c980d109
ACM
6952out:
6953 comm_event->event_id.header.size = size;
8d1b2d93
PZ
6954}
6955
cdd6c482 6956static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 6957{
413ee3b4 6958 char comm[TASK_COMM_LEN];
8d1b2d93 6959 unsigned int size;
8d1b2d93 6960
413ee3b4 6961 memset(comm, 0, sizeof(comm));
96b02d78 6962 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 6963 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
6964
6965 comm_event->comm = comm;
6966 comm_event->comm_size = size;
6967
cdd6c482 6968 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 6969
aab5b71e 6970 perf_iterate_sb(perf_event_comm_output,
52d857a8
JO
6971 comm_event,
6972 NULL);
8d1b2d93
PZ
6973}
6974
82b89778 6975void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 6976{
9ee318a7
PZ
6977 struct perf_comm_event comm_event;
6978
cdd6c482 6979 if (!atomic_read(&nr_comm_events))
9ee318a7 6980 return;
a63eaf34 6981
9ee318a7 6982 comm_event = (struct perf_comm_event){
8d1b2d93 6983 .task = task,
573402db
PZ
6984 /* .comm */
6985 /* .comm_size */
cdd6c482 6986 .event_id = {
573402db 6987 .header = {
cdd6c482 6988 .type = PERF_RECORD_COMM,
82b89778 6989 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
6990 /* .size */
6991 },
6992 /* .pid */
6993 /* .tid */
8d1b2d93
PZ
6994 },
6995 };
6996
cdd6c482 6997 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
6998}
6999
e4222673
HB
7000/*
7001 * namespaces tracking
7002 */
7003
7004struct perf_namespaces_event {
7005 struct task_struct *task;
7006
7007 struct {
7008 struct perf_event_header header;
7009
7010 u32 pid;
7011 u32 tid;
7012 u64 nr_namespaces;
7013 struct perf_ns_link_info link_info[NR_NAMESPACES];
7014 } event_id;
7015};
7016
7017static int perf_event_namespaces_match(struct perf_event *event)
7018{
7019 return event->attr.namespaces;
7020}
7021
7022static void perf_event_namespaces_output(struct perf_event *event,
7023 void *data)
7024{
7025 struct perf_namespaces_event *namespaces_event = data;
7026 struct perf_output_handle handle;
7027 struct perf_sample_data sample;
34900ec5 7028 u16 header_size = namespaces_event->event_id.header.size;
e4222673
HB
7029 int ret;
7030
7031 if (!perf_event_namespaces_match(event))
7032 return;
7033
7034 perf_event_header__init_id(&namespaces_event->event_id.header,
7035 &sample, event);
7036 ret = perf_output_begin(&handle, event,
7037 namespaces_event->event_id.header.size);
7038 if (ret)
34900ec5 7039 goto out;
e4222673
HB
7040
7041 namespaces_event->event_id.pid = perf_event_pid(event,
7042 namespaces_event->task);
7043 namespaces_event->event_id.tid = perf_event_tid(event,
7044 namespaces_event->task);
7045
7046 perf_output_put(&handle, namespaces_event->event_id);
7047
7048 perf_event__output_id_sample(event, &handle, &sample);
7049
7050 perf_output_end(&handle);
34900ec5
JO
7051out:
7052 namespaces_event->event_id.header.size = header_size;
e4222673
HB
7053}
7054
7055static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
7056 struct task_struct *task,
7057 const struct proc_ns_operations *ns_ops)
7058{
7059 struct path ns_path;
7060 struct inode *ns_inode;
7061 void *error;
7062
7063 error = ns_get_path(&ns_path, task, ns_ops);
7064 if (!error) {
7065 ns_inode = ns_path.dentry->d_inode;
7066 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
7067 ns_link_info->ino = ns_inode->i_ino;
0e18dd12 7068 path_put(&ns_path);
e4222673
HB
7069 }
7070}
7071
7072void perf_event_namespaces(struct task_struct *task)
7073{
7074 struct perf_namespaces_event namespaces_event;
7075 struct perf_ns_link_info *ns_link_info;
7076
7077 if (!atomic_read(&nr_namespaces_events))
7078 return;
7079
7080 namespaces_event = (struct perf_namespaces_event){
7081 .task = task,
7082 .event_id = {
7083 .header = {
7084 .type = PERF_RECORD_NAMESPACES,
7085 .misc = 0,
7086 .size = sizeof(namespaces_event.event_id),
7087 },
7088 /* .pid */
7089 /* .tid */
7090 .nr_namespaces = NR_NAMESPACES,
7091 /* .link_info[NR_NAMESPACES] */
7092 },
7093 };
7094
7095 ns_link_info = namespaces_event.event_id.link_info;
7096
7097 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
7098 task, &mntns_operations);
7099
7100#ifdef CONFIG_USER_NS
7101 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
7102 task, &userns_operations);
7103#endif
7104#ifdef CONFIG_NET_NS
7105 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
7106 task, &netns_operations);
7107#endif
7108#ifdef CONFIG_UTS_NS
7109 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
7110 task, &utsns_operations);
7111#endif
7112#ifdef CONFIG_IPC_NS
7113 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
7114 task, &ipcns_operations);
7115#endif
7116#ifdef CONFIG_PID_NS
7117 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
7118 task, &pidns_operations);
7119#endif
7120#ifdef CONFIG_CGROUPS
7121 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
7122 task, &cgroupns_operations);
7123#endif
7124
7125 perf_iterate_sb(perf_event_namespaces_output,
7126 &namespaces_event,
7127 NULL);
7128}
7129
0a4a9391
PZ
7130/*
7131 * mmap tracking
7132 */
7133
7134struct perf_mmap_event {
089dd79d
PZ
7135 struct vm_area_struct *vma;
7136
7137 const char *file_name;
7138 int file_size;
13d7a241
SE
7139 int maj, min;
7140 u64 ino;
7141 u64 ino_generation;
f972eb63 7142 u32 prot, flags;
0a4a9391
PZ
7143
7144 struct {
7145 struct perf_event_header header;
7146
7147 u32 pid;
7148 u32 tid;
7149 u64 start;
7150 u64 len;
7151 u64 pgoff;
cdd6c482 7152 } event_id;
0a4a9391
PZ
7153};
7154
67516844
JO
7155static int perf_event_mmap_match(struct perf_event *event,
7156 void *data)
7157{
7158 struct perf_mmap_event *mmap_event = data;
7159 struct vm_area_struct *vma = mmap_event->vma;
7160 int executable = vma->vm_flags & VM_EXEC;
7161
7162 return (!executable && event->attr.mmap_data) ||
13d7a241 7163 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
7164}
7165
cdd6c482 7166static void perf_event_mmap_output(struct perf_event *event,
52d857a8 7167 void *data)
0a4a9391 7168{
52d857a8 7169 struct perf_mmap_event *mmap_event = data;
0a4a9391 7170 struct perf_output_handle handle;
c980d109 7171 struct perf_sample_data sample;
cdd6c482 7172 int size = mmap_event->event_id.header.size;
c980d109 7173 int ret;
0a4a9391 7174
67516844
JO
7175 if (!perf_event_mmap_match(event, data))
7176 return;
7177
13d7a241
SE
7178 if (event->attr.mmap2) {
7179 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
7180 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
7181 mmap_event->event_id.header.size += sizeof(mmap_event->min);
7182 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 7183 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
7184 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
7185 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
7186 }
7187
c980d109
ACM
7188 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
7189 ret = perf_output_begin(&handle, event,
a7ac67ea 7190 mmap_event->event_id.header.size);
0a4a9391 7191 if (ret)
c980d109 7192 goto out;
0a4a9391 7193
cdd6c482
IM
7194 mmap_event->event_id.pid = perf_event_pid(event, current);
7195 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 7196
cdd6c482 7197 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
7198
7199 if (event->attr.mmap2) {
7200 perf_output_put(&handle, mmap_event->maj);
7201 perf_output_put(&handle, mmap_event->min);
7202 perf_output_put(&handle, mmap_event->ino);
7203 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
7204 perf_output_put(&handle, mmap_event->prot);
7205 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
7206 }
7207
76369139 7208 __output_copy(&handle, mmap_event->file_name,
0a4a9391 7209 mmap_event->file_size);
c980d109
ACM
7210
7211 perf_event__output_id_sample(event, &handle, &sample);
7212
78d613eb 7213 perf_output_end(&handle);
c980d109
ACM
7214out:
7215 mmap_event->event_id.header.size = size;
0a4a9391
PZ
7216}
7217
cdd6c482 7218static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 7219{
089dd79d
PZ
7220 struct vm_area_struct *vma = mmap_event->vma;
7221 struct file *file = vma->vm_file;
13d7a241
SE
7222 int maj = 0, min = 0;
7223 u64 ino = 0, gen = 0;
f972eb63 7224 u32 prot = 0, flags = 0;
0a4a9391
PZ
7225 unsigned int size;
7226 char tmp[16];
7227 char *buf = NULL;
2c42cfbf 7228 char *name;
413ee3b4 7229
0b3589be
PZ
7230 if (vma->vm_flags & VM_READ)
7231 prot |= PROT_READ;
7232 if (vma->vm_flags & VM_WRITE)
7233 prot |= PROT_WRITE;
7234 if (vma->vm_flags & VM_EXEC)
7235 prot |= PROT_EXEC;
7236
7237 if (vma->vm_flags & VM_MAYSHARE)
7238 flags = MAP_SHARED;
7239 else
7240 flags = MAP_PRIVATE;
7241
7242 if (vma->vm_flags & VM_DENYWRITE)
7243 flags |= MAP_DENYWRITE;
7244 if (vma->vm_flags & VM_MAYEXEC)
7245 flags |= MAP_EXECUTABLE;
7246 if (vma->vm_flags & VM_LOCKED)
7247 flags |= MAP_LOCKED;
7248 if (vma->vm_flags & VM_HUGETLB)
7249 flags |= MAP_HUGETLB;
7250
0a4a9391 7251 if (file) {
13d7a241
SE
7252 struct inode *inode;
7253 dev_t dev;
3ea2f2b9 7254
2c42cfbf 7255 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 7256 if (!buf) {
c7e548b4
ON
7257 name = "//enomem";
7258 goto cpy_name;
0a4a9391 7259 }
413ee3b4 7260 /*
3ea2f2b9 7261 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
7262 * need to add enough zero bytes after the string to handle
7263 * the 64bit alignment we do later.
7264 */
9bf39ab2 7265 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 7266 if (IS_ERR(name)) {
c7e548b4
ON
7267 name = "//toolong";
7268 goto cpy_name;
0a4a9391 7269 }
13d7a241
SE
7270 inode = file_inode(vma->vm_file);
7271 dev = inode->i_sb->s_dev;
7272 ino = inode->i_ino;
7273 gen = inode->i_generation;
7274 maj = MAJOR(dev);
7275 min = MINOR(dev);
f972eb63 7276
c7e548b4 7277 goto got_name;
0a4a9391 7278 } else {
fbe26abe
JO
7279 if (vma->vm_ops && vma->vm_ops->name) {
7280 name = (char *) vma->vm_ops->name(vma);
7281 if (name)
7282 goto cpy_name;
7283 }
7284
2c42cfbf 7285 name = (char *)arch_vma_name(vma);
c7e548b4
ON
7286 if (name)
7287 goto cpy_name;
089dd79d 7288
32c5fb7e 7289 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 7290 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
7291 name = "[heap]";
7292 goto cpy_name;
32c5fb7e
ON
7293 }
7294 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 7295 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
7296 name = "[stack]";
7297 goto cpy_name;
089dd79d
PZ
7298 }
7299
c7e548b4
ON
7300 name = "//anon";
7301 goto cpy_name;
0a4a9391
PZ
7302 }
7303
c7e548b4
ON
7304cpy_name:
7305 strlcpy(tmp, name, sizeof(tmp));
7306 name = tmp;
0a4a9391 7307got_name:
2c42cfbf
PZ
7308 /*
7309 * Since our buffer works in 8 byte units we need to align our string
7310 * size to a multiple of 8. However, we must guarantee the tail end is
7311 * zero'd out to avoid leaking random bits to userspace.
7312 */
7313 size = strlen(name)+1;
7314 while (!IS_ALIGNED(size, sizeof(u64)))
7315 name[size++] = '\0';
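	/*
	 * Illustrative example (hypothetical value, not from the source):
	 * for name = "[heap]", strlen(name) + 1 == 7, so one extra NUL is
	 * appended and size becomes 8, a single fully zero-padded u64.
	 */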
0a4a9391
PZ
7316
7317 mmap_event->file_name = name;
7318 mmap_event->file_size = size;
13d7a241
SE
7319 mmap_event->maj = maj;
7320 mmap_event->min = min;
7321 mmap_event->ino = ino;
7322 mmap_event->ino_generation = gen;
f972eb63
PZ
7323 mmap_event->prot = prot;
7324 mmap_event->flags = flags;
0a4a9391 7325
2fe85427
SE
7326 if (!(vma->vm_flags & VM_EXEC))
7327 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
7328
cdd6c482 7329 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 7330
aab5b71e 7331 perf_iterate_sb(perf_event_mmap_output,
52d857a8
JO
7332 mmap_event,
7333 NULL);
665c2142 7334
0a4a9391
PZ
7335 kfree(buf);
7336}
7337
375637bc
AS
7338/*
7339 * Check whether inode and address range match filter criteria.
7340 */
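/*
 * Illustrative example (hypothetical numbers): assuming the filter's path
 * resolves to the mapped file's inode, a filter covering file offsets
 * 0x1000..0x1fff matches a mapping of file offsets 0x0..0x17ff, since
 * neither range lies entirely before or entirely after the other.
 */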
7341static bool perf_addr_filter_match(struct perf_addr_filter *filter,
7342 struct file *file, unsigned long offset,
7343 unsigned long size)
7344{
7f635ff1
MP
7345 /* d_inode(NULL) won't be equal to any mapped user-space file */
7346 if (!filter->path.dentry)
7347 return false;
7348
9511bce9 7349 if (d_inode(filter->path.dentry) != file_inode(file))
375637bc
AS
7350 return false;
7351
7352 if (filter->offset > offset + size)
7353 return false;
7354
7355 if (filter->offset + filter->size < offset)
7356 return false;
7357
7358 return true;
7359}
7360
7361static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
7362{
7363 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7364 struct vm_area_struct *vma = data;
7365 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
7366 struct file *file = vma->vm_file;
7367 struct perf_addr_filter *filter;
7368 unsigned int restart = 0, count = 0;
7369
7370 if (!has_addr_filter(event))
7371 return;
7372
7373 if (!file)
7374 return;
7375
7376 raw_spin_lock_irqsave(&ifh->lock, flags);
7377 list_for_each_entry(filter, &ifh->list, entry) {
7378 if (perf_addr_filter_match(filter, file, off,
7379 vma->vm_end - vma->vm_start)) {
7380 event->addr_filters_offs[count] = vma->vm_start;
7381 restart++;
7382 }
7383
7384 count++;
7385 }
7386
7387 if (restart)
7388 event->addr_filters_gen++;
7389 raw_spin_unlock_irqrestore(&ifh->lock, flags);
7390
7391 if (restart)
767ae086 7392 perf_event_stop(event, 1);
375637bc
AS
7393}
7394
7395/*
7396 * Adjust all task's events' filters to the new vma
7397 */
7398static void perf_addr_filters_adjust(struct vm_area_struct *vma)
7399{
7400 struct perf_event_context *ctx;
7401 int ctxn;
7402
12b40a23
MP
7403 /*
7404 * Data tracing isn't supported yet and as such there is no need
7405 * to keep track of anything that isn't related to executable code:
7406 */
7407 if (!(vma->vm_flags & VM_EXEC))
7408 return;
7409
375637bc
AS
7410 rcu_read_lock();
7411 for_each_task_context_nr(ctxn) {
7412 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
7413 if (!ctx)
7414 continue;
7415
aab5b71e 7416 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
375637bc
AS
7417 }
7418 rcu_read_unlock();
7419}
7420
3af9e859 7421void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 7422{
9ee318a7
PZ
7423 struct perf_mmap_event mmap_event;
7424
cdd6c482 7425 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
7426 return;
7427
7428 mmap_event = (struct perf_mmap_event){
089dd79d 7429 .vma = vma,
573402db
PZ
7430 /* .file_name */
7431 /* .file_size */
cdd6c482 7432 .event_id = {
573402db 7433 .header = {
cdd6c482 7434 .type = PERF_RECORD_MMAP,
39447b38 7435 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
7436 /* .size */
7437 },
7438 /* .pid */
7439 /* .tid */
089dd79d
PZ
7440 .start = vma->vm_start,
7441 .len = vma->vm_end - vma->vm_start,
3a0304e9 7442 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 7443 },
13d7a241
SE
7444 /* .maj (attr_mmap2 only) */
7445 /* .min (attr_mmap2 only) */
7446 /* .ino (attr_mmap2 only) */
7447 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
7448 /* .prot (attr_mmap2 only) */
7449 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
7450 };
7451
375637bc 7452 perf_addr_filters_adjust(vma);
cdd6c482 7453 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
7454}
7455
68db7e98
AS
7456void perf_event_aux_event(struct perf_event *event, unsigned long head,
7457 unsigned long size, u64 flags)
7458{
7459 struct perf_output_handle handle;
7460 struct perf_sample_data sample;
7461 struct perf_aux_event {
7462 struct perf_event_header header;
7463 u64 offset;
7464 u64 size;
7465 u64 flags;
7466 } rec = {
7467 .header = {
7468 .type = PERF_RECORD_AUX,
7469 .misc = 0,
7470 .size = sizeof(rec),
7471 },
7472 .offset = head,
7473 .size = size,
7474 .flags = flags,
7475 };
7476 int ret;
7477
7478 perf_event_header__init_id(&rec.header, &sample, event);
7479 ret = perf_output_begin(&handle, event, rec.header.size);
7480
7481 if (ret)
7482 return;
7483
7484 perf_output_put(&handle, rec);
7485 perf_event__output_id_sample(event, &handle, &sample);
7486
7487 perf_output_end(&handle);
7488}
7489
f38b0dbb
KL
7490/*
7491 * Lost/dropped samples logging
7492 */
7493void perf_log_lost_samples(struct perf_event *event, u64 lost)
7494{
7495 struct perf_output_handle handle;
7496 struct perf_sample_data sample;
7497 int ret;
7498
7499 struct {
7500 struct perf_event_header header;
7501 u64 lost;
7502 } lost_samples_event = {
7503 .header = {
7504 .type = PERF_RECORD_LOST_SAMPLES,
7505 .misc = 0,
7506 .size = sizeof(lost_samples_event),
7507 },
7508 .lost = lost,
7509 };
7510
7511 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
7512
7513 ret = perf_output_begin(&handle, event,
7514 lost_samples_event.header.size);
7515 if (ret)
7516 return;
7517
7518 perf_output_put(&handle, lost_samples_event);
7519 perf_event__output_id_sample(event, &handle, &sample);
7520 perf_output_end(&handle);
7521}
7522
45ac1403
AH
7523/*
7524 * context_switch tracking
7525 */
7526
7527struct perf_switch_event {
7528 struct task_struct *task;
7529 struct task_struct *next_prev;
7530
7531 struct {
7532 struct perf_event_header header;
7533 u32 next_prev_pid;
7534 u32 next_prev_tid;
7535 } event_id;
7536};
7537
7538static int perf_event_switch_match(struct perf_event *event)
7539{
7540 return event->attr.context_switch;
7541}
7542
7543static void perf_event_switch_output(struct perf_event *event, void *data)
7544{
7545 struct perf_switch_event *se = data;
7546 struct perf_output_handle handle;
7547 struct perf_sample_data sample;
7548 int ret;
7549
7550 if (!perf_event_switch_match(event))
7551 return;
7552
7553 /* Only CPU-wide events are allowed to see next/prev pid/tid */
7554 if (event->ctx->task) {
7555 se->event_id.header.type = PERF_RECORD_SWITCH;
7556 se->event_id.header.size = sizeof(se->event_id.header);
7557 } else {
7558 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
7559 se->event_id.header.size = sizeof(se->event_id);
7560 se->event_id.next_prev_pid =
7561 perf_event_pid(event, se->next_prev);
7562 se->event_id.next_prev_tid =
7563 perf_event_tid(event, se->next_prev);
7564 }
7565
7566 perf_event_header__init_id(&se->event_id.header, &sample, event);
7567
7568 ret = perf_output_begin(&handle, event, se->event_id.header.size);
7569 if (ret)
7570 return;
7571
7572 if (event->ctx->task)
7573 perf_output_put(&handle, se->event_id.header);
7574 else
7575 perf_output_put(&handle, se->event_id);
7576
7577 perf_event__output_id_sample(event, &handle, &sample);
7578
7579 perf_output_end(&handle);
7580}
7581
7582static void perf_event_switch(struct task_struct *task,
7583 struct task_struct *next_prev, bool sched_in)
7584{
7585 struct perf_switch_event switch_event;
7586
7587 /* N.B. caller checks nr_switch_events != 0 */
7588
7589 switch_event = (struct perf_switch_event){
7590 .task = task,
7591 .next_prev = next_prev,
7592 .event_id = {
7593 .header = {
7594 /* .type */
7595 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
7596 /* .size */
7597 },
7598 /* .next_prev_pid */
7599 /* .next_prev_tid */
7600 },
7601 };
7602
101592b4
AB
7603 if (!sched_in && task->state == TASK_RUNNING)
7604 switch_event.event_id.header.misc |=
7605 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
7606
aab5b71e 7607 perf_iterate_sb(perf_event_switch_output,
45ac1403
AH
7608 &switch_event,
7609 NULL);
7610}
7611
a78ac325
PZ
7612/*
7613 * IRQ throttle logging
7614 */
7615
cdd6c482 7616static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
7617{
7618 struct perf_output_handle handle;
c980d109 7619 struct perf_sample_data sample;
a78ac325
PZ
7620 int ret;
7621
7622 struct {
7623 struct perf_event_header header;
7624 u64 time;
cca3f454 7625 u64 id;
7f453c24 7626 u64 stream_id;
a78ac325
PZ
7627 } throttle_event = {
7628 .header = {
cdd6c482 7629 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
7630 .misc = 0,
7631 .size = sizeof(throttle_event),
7632 },
34f43927 7633 .time = perf_event_clock(event),
cdd6c482
IM
7634 .id = primary_event_id(event),
7635 .stream_id = event->id,
a78ac325
PZ
7636 };
7637
966ee4d6 7638 if (enable)
cdd6c482 7639 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 7640
c980d109
ACM
7641 perf_event_header__init_id(&throttle_event.header, &sample, event);
7642
7643 ret = perf_output_begin(&handle, event,
a7ac67ea 7644 throttle_event.header.size);
a78ac325
PZ
7645 if (ret)
7646 return;
7647
7648 perf_output_put(&handle, throttle_event);
c980d109 7649 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
7650 perf_output_end(&handle);
7651}
7652
8d4e6c4c
AS
7653void perf_event_itrace_started(struct perf_event *event)
7654{
7655 event->attach_state |= PERF_ATTACH_ITRACE;
7656}
7657
ec0d7729
AS
7658static void perf_log_itrace_start(struct perf_event *event)
7659{
7660 struct perf_output_handle handle;
7661 struct perf_sample_data sample;
7662 struct perf_aux_event {
7663 struct perf_event_header header;
7664 u32 pid;
7665 u32 tid;
7666 } rec;
7667 int ret;
7668
7669 if (event->parent)
7670 event = event->parent;
7671
7672 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
8d4e6c4c 7673 event->attach_state & PERF_ATTACH_ITRACE)
ec0d7729
AS
7674 return;
7675
ec0d7729
AS
7676 rec.header.type = PERF_RECORD_ITRACE_START;
7677 rec.header.misc = 0;
7678 rec.header.size = sizeof(rec);
7679 rec.pid = perf_event_pid(event, current);
7680 rec.tid = perf_event_tid(event, current);
7681
7682 perf_event_header__init_id(&rec.header, &sample, event);
7683 ret = perf_output_begin(&handle, event, rec.header.size);
7684
7685 if (ret)
7686 return;
7687
7688 perf_output_put(&handle, rec);
7689 perf_event__output_id_sample(event, &handle, &sample);
7690
7691 perf_output_end(&handle);
7692}
7693
475113d9
JO
7694static int
7695__perf_event_account_interrupt(struct perf_event *event, int throttle)
f6c7d5fe 7696{
cdd6c482 7697 struct hw_perf_event *hwc = &event->hw;
79f14641 7698 int ret = 0;
475113d9 7699 u64 seq;
96398826 7700
e050e3f0
SE
7701 seq = __this_cpu_read(perf_throttled_seq);
7702 if (seq != hwc->interrupts_seq) {
7703 hwc->interrupts_seq = seq;
7704 hwc->interrupts = 1;
7705 } else {
7706 hwc->interrupts++;
7707 if (unlikely(throttle
7708 && hwc->interrupts >= max_samples_per_tick)) {
7709 __this_cpu_inc(perf_throttled_count);
555e0c1e 7710 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
163ec435
PZ
7711 hwc->interrupts = MAX_INTERRUPTS;
7712 perf_log_throttle(event, 0);
a78ac325
PZ
7713 ret = 1;
7714 }
e050e3f0 7715 }
60db5e09 7716
cdd6c482 7717 if (event->attr.freq) {
def0a9b2 7718 u64 now = perf_clock();
abd50713 7719 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 7720
abd50713 7721 hwc->freq_time_stamp = now;
bd2b5b12 7722
abd50713 7723 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 7724 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
7725 }
7726
475113d9
JO
7727 return ret;
7728}
7729
7730int perf_event_account_interrupt(struct perf_event *event)
7731{
7732 return __perf_event_account_interrupt(event, 1);
7733}
7734
7735/*
7736 * Generic event overflow handling, sampling.
7737 */
7738
7739static int __perf_event_overflow(struct perf_event *event,
7740 int throttle, struct perf_sample_data *data,
7741 struct pt_regs *regs)
7742{
7743 int events = atomic_read(&event->event_limit);
7744 int ret = 0;
7745
7746 /*
7747 * Non-sampling counters might still use the PMI to fold short
7748 * hardware counters, ignore those.
7749 */
7750 if (unlikely(!is_sampling_event(event)))
7751 return 0;
7752
7753 ret = __perf_event_account_interrupt(event, throttle);
cc1582c2 7754
2023b359
PZ
7755 /*
7756 * XXX event_limit might not quite work as expected on inherited
cdd6c482 7757 * events
2023b359
PZ
7758 */
7759
cdd6c482
IM
7760 event->pending_kill = POLL_IN;
7761 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 7762 ret = 1;
cdd6c482 7763 event->pending_kill = POLL_HUP;
5aab90ce
JO
7764
7765 perf_event_disable_inatomic(event);
79f14641
PZ
7766 }
7767
aa6a5f3c 7768 READ_ONCE(event->overflow_handler)(event, data, regs);
453f19ee 7769
fed66e2c 7770 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
7771 event->pending_wakeup = 1;
7772 irq_work_queue(&event->pending);
f506b3dc
PZ
7773 }
7774
79f14641 7775 return ret;
f6c7d5fe
PZ
7776}
7777
a8b0ca17 7778int perf_event_overflow(struct perf_event *event,
5622f295
MM
7779 struct perf_sample_data *data,
7780 struct pt_regs *regs)
850bc73f 7781{
a8b0ca17 7782 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
7783}
7784
15dbf27c 7785/*
cdd6c482 7786 * Generic software event infrastructure
15dbf27c
PZ
7787 */
7788
b28ab83c
PZ
7789struct swevent_htable {
7790 struct swevent_hlist *swevent_hlist;
7791 struct mutex hlist_mutex;
7792 int hlist_refcount;
7793
7794 /* Recursion avoidance in each contexts */
7795 int recursion[PERF_NR_CONTEXTS];
7796};
7797
7798static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7799
7b4b6658 7800/*
cdd6c482
IM
7801 * We directly increment event->count and keep a second value in
7802 * event->hw.period_left to count intervals. This period event
7b4b6658
PZ
7803 * is kept in the range [-sample_period, 0] so that we can use the
7804 * sign as trigger.
7805 */
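/*
 * Illustrative example (hypothetical numbers): with a steady sample_period
 * of 100 and period_left == 250, perf_swevent_set_period() below computes
 * nr = (100 + 250) / 100 = 3 elapsed periods and rewinds period_left to
 * 250 - 3 * 100 = -50, so the next overflow fires after 50 more events.
 */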
7806
ab573844 7807u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 7808{
cdd6c482 7809 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
7810 u64 period = hwc->last_period;
7811 u64 nr, offset;
7812 s64 old, val;
7813
7814 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
7815
7816again:
e7850595 7817 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
7818 if (val < 0)
7819 return 0;
15dbf27c 7820
7b4b6658
PZ
7821 nr = div64_u64(period + val, period);
7822 offset = nr * period;
7823 val -= offset;
e7850595 7824 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 7825 goto again;
15dbf27c 7826
7b4b6658 7827 return nr;
15dbf27c
PZ
7828}
7829
0cff784a 7830static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 7831 struct perf_sample_data *data,
5622f295 7832 struct pt_regs *regs)
15dbf27c 7833{
cdd6c482 7834 struct hw_perf_event *hwc = &event->hw;
850bc73f 7835 int throttle = 0;
15dbf27c 7836
0cff784a
PZ
7837 if (!overflow)
7838 overflow = perf_swevent_set_period(event);
15dbf27c 7839
7b4b6658
PZ
7840 if (hwc->interrupts == MAX_INTERRUPTS)
7841 return;
15dbf27c 7842
7b4b6658 7843 for (; overflow; overflow--) {
a8b0ca17 7844 if (__perf_event_overflow(event, throttle,
5622f295 7845 data, regs)) {
7b4b6658
PZ
7846 /*
7847 * We inhibit the overflow from happening when
7848 * hwc->interrupts == MAX_INTERRUPTS.
7849 */
7850 break;
7851 }
cf450a73 7852 throttle = 1;
7b4b6658 7853 }
15dbf27c
PZ
7854}
7855
a4eaf7f1 7856static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 7857 struct perf_sample_data *data,
5622f295 7858 struct pt_regs *regs)
7b4b6658 7859{
cdd6c482 7860 struct hw_perf_event *hwc = &event->hw;
d6d020e9 7861
e7850595 7862 local64_add(nr, &event->count);
d6d020e9 7863
0cff784a
PZ
7864 if (!regs)
7865 return;
7866
6c7e550f 7867 if (!is_sampling_event(event))
7b4b6658 7868 return;
d6d020e9 7869
5d81e5cf
AV
7870 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7871 data->period = nr;
7872 return perf_swevent_overflow(event, 1, data, regs);
7873 } else
7874 data->period = event->hw.last_period;
7875
0cff784a 7876 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 7877 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 7878
e7850595 7879 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 7880 return;
df1a132b 7881
a8b0ca17 7882 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
7883}
7884
f5ffe02e
FW
7885static int perf_exclude_event(struct perf_event *event,
7886 struct pt_regs *regs)
7887{
a4eaf7f1 7888 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 7889 return 1;
a4eaf7f1 7890
f5ffe02e
FW
7891 if (regs) {
7892 if (event->attr.exclude_user && user_mode(regs))
7893 return 1;
7894
7895 if (event->attr.exclude_kernel && !user_mode(regs))
7896 return 1;
7897 }
7898
7899 return 0;
7900}
7901
cdd6c482 7902static int perf_swevent_match(struct perf_event *event,
1c432d89 7903 enum perf_type_id type,
6fb2915d
LZ
7904 u32 event_id,
7905 struct perf_sample_data *data,
7906 struct pt_regs *regs)
15dbf27c 7907{
cdd6c482 7908 if (event->attr.type != type)
a21ca2ca 7909 return 0;
f5ffe02e 7910
cdd6c482 7911 if (event->attr.config != event_id)
15dbf27c
PZ
7912 return 0;
7913
f5ffe02e
FW
7914 if (perf_exclude_event(event, regs))
7915 return 0;
15dbf27c
PZ
7916
7917 return 1;
7918}
7919
76e1d904
FW
7920static inline u64 swevent_hash(u64 type, u32 event_id)
7921{
7922 u64 val = event_id | (type << 32);
7923
7924 return hash_64(val, SWEVENT_HLIST_BITS);
7925}
7926
49f135ed
FW
7927static inline struct hlist_head *
7928__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 7929{
49f135ed
FW
7930 u64 hash = swevent_hash(type, event_id);
7931
7932 return &hlist->heads[hash];
7933}
76e1d904 7934
49f135ed
FW
7935/* For the read side: events when they trigger */
7936static inline struct hlist_head *
b28ab83c 7937find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
7938{
7939 struct swevent_hlist *hlist;
76e1d904 7940
b28ab83c 7941 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
7942 if (!hlist)
7943 return NULL;
7944
49f135ed
FW
7945 return __find_swevent_head(hlist, type, event_id);
7946}
7947
7948/* For the event head insertion and removal in the hlist */
7949static inline struct hlist_head *
b28ab83c 7950find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
7951{
7952 struct swevent_hlist *hlist;
7953 u32 event_id = event->attr.config;
7954 u64 type = event->attr.type;
7955
7956 /*
7957 * Event scheduling is always serialized against hlist allocation
7958 * and release. Which makes the protected version suitable here.
7959 * The context lock guarantees that.
7960 */
b28ab83c 7961 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
7962 lockdep_is_held(&event->ctx->lock));
7963 if (!hlist)
7964 return NULL;
7965
7966 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
7967}
7968
7969static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 7970 u64 nr,
76e1d904
FW
7971 struct perf_sample_data *data,
7972 struct pt_regs *regs)
15dbf27c 7973{
4a32fea9 7974 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 7975 struct perf_event *event;
76e1d904 7976 struct hlist_head *head;
15dbf27c 7977
76e1d904 7978 rcu_read_lock();
b28ab83c 7979 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
7980 if (!head)
7981 goto end;
7982
b67bfe0d 7983 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 7984 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 7985 perf_swevent_event(event, nr, data, regs);
15dbf27c 7986 }
76e1d904
FW
7987end:
7988 rcu_read_unlock();
15dbf27c
PZ
7989}
7990
86038c5e
PZI
7991DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7992
4ed7c92d 7993int perf_swevent_get_recursion_context(void)
96f6d444 7994{
4a32fea9 7995 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 7996
b28ab83c 7997 return get_recursion_context(swhash->recursion);
96f6d444 7998}
645e8cc0 7999EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 8000
98b5c2c6 8001void perf_swevent_put_recursion_context(int rctx)
15dbf27c 8002{
4a32fea9 8003 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 8004
b28ab83c 8005 put_recursion_context(swhash->recursion, rctx);
ce71b9df 8006}
15dbf27c 8007
86038c5e 8008void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 8009{
a4234bfc 8010 struct perf_sample_data data;
4ed7c92d 8011
86038c5e 8012 if (WARN_ON_ONCE(!regs))
4ed7c92d 8013 return;
a4234bfc 8014
fd0d000b 8015 perf_sample_data_init(&data, addr, 0);
a8b0ca17 8016 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
8017}
8018
8019void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
8020{
8021 int rctx;
8022
8023 preempt_disable_notrace();
8024 rctx = perf_swevent_get_recursion_context();
8025 if (unlikely(rctx < 0))
8026 goto fail;
8027
8028 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
8029
8030 perf_swevent_put_recursion_context(rctx);
86038c5e 8031fail:
1c024eca 8032 preempt_enable_notrace();
b8e83514
PZ
8033}
8034
cdd6c482 8035static void perf_swevent_read(struct perf_event *event)
15dbf27c 8036{
15dbf27c
PZ
8037}
8038
a4eaf7f1 8039static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 8040{
4a32fea9 8041 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 8042 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
8043 struct hlist_head *head;
8044
6c7e550f 8045 if (is_sampling_event(event)) {
7b4b6658 8046 hwc->last_period = hwc->sample_period;
cdd6c482 8047 perf_swevent_set_period(event);
7b4b6658 8048 }
76e1d904 8049
a4eaf7f1
PZ
8050 hwc->state = !(flags & PERF_EF_START);
8051
b28ab83c 8052 head = find_swevent_head(swhash, event);
12ca6ad2 8053 if (WARN_ON_ONCE(!head))
76e1d904
FW
8054 return -EINVAL;
8055
8056 hlist_add_head_rcu(&event->hlist_entry, head);
6a694a60 8057 perf_event_update_userpage(event);
76e1d904 8058
15dbf27c
PZ
8059 return 0;
8060}
8061
a4eaf7f1 8062static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 8063{
76e1d904 8064 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
8065}
8066
a4eaf7f1 8067static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 8068{
a4eaf7f1 8069 event->hw.state = 0;
d6d020e9 8070}
aa9c4c0f 8071
a4eaf7f1 8072static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 8073{
a4eaf7f1 8074 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
8075}
8076
49f135ed
FW
8077/* Deref the hlist from the update side */
8078static inline struct swevent_hlist *
b28ab83c 8079swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 8080{
b28ab83c
PZ
8081 return rcu_dereference_protected(swhash->swevent_hlist,
8082 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
8083}
8084
b28ab83c 8085static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 8086{
b28ab83c 8087 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 8088
49f135ed 8089 if (!hlist)
76e1d904
FW
8090 return;
8091
70691d4a 8092 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
fa4bbc4c 8093 kfree_rcu(hlist, rcu_head);
76e1d904
FW
8094}
8095
3b364d7b 8096static void swevent_hlist_put_cpu(int cpu)
76e1d904 8097{
b28ab83c 8098 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 8099
b28ab83c 8100 mutex_lock(&swhash->hlist_mutex);
76e1d904 8101
b28ab83c
PZ
8102 if (!--swhash->hlist_refcount)
8103 swevent_hlist_release(swhash);
76e1d904 8104
b28ab83c 8105 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
8106}
8107
3b364d7b 8108static void swevent_hlist_put(void)
76e1d904
FW
8109{
8110 int cpu;
8111
76e1d904 8112 for_each_possible_cpu(cpu)
3b364d7b 8113 swevent_hlist_put_cpu(cpu);
76e1d904
FW
8114}
8115
3b364d7b 8116static int swevent_hlist_get_cpu(int cpu)
76e1d904 8117{
b28ab83c 8118 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
8119 int err = 0;
8120
b28ab83c 8121 mutex_lock(&swhash->hlist_mutex);
a63fbed7
TG
8122 if (!swevent_hlist_deref(swhash) &&
8123 cpumask_test_cpu(cpu, perf_online_mask)) {
76e1d904
FW
8124 struct swevent_hlist *hlist;
8125
8126 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
8127 if (!hlist) {
8128 err = -ENOMEM;
8129 goto exit;
8130 }
b28ab83c 8131 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 8132 }
b28ab83c 8133 swhash->hlist_refcount++;
9ed6060d 8134exit:
b28ab83c 8135 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
8136
8137 return err;
8138}
8139
3b364d7b 8140static int swevent_hlist_get(void)
76e1d904 8141{
3b364d7b 8142 int err, cpu, failed_cpu;
76e1d904 8143
a63fbed7 8144 mutex_lock(&pmus_lock);
76e1d904 8145 for_each_possible_cpu(cpu) {
3b364d7b 8146 err = swevent_hlist_get_cpu(cpu);
76e1d904
FW
8147 if (err) {
8148 failed_cpu = cpu;
8149 goto fail;
8150 }
8151 }
a63fbed7 8152 mutex_unlock(&pmus_lock);
76e1d904 8153 return 0;
9ed6060d 8154fail:
76e1d904
FW
8155 for_each_possible_cpu(cpu) {
8156 if (cpu == failed_cpu)
8157 break;
3b364d7b 8158 swevent_hlist_put_cpu(cpu);
76e1d904 8159 }
a63fbed7 8160 mutex_unlock(&pmus_lock);
76e1d904
FW
8161 return err;
8162}
8163
c5905afb 8164struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 8165
b0a873eb
PZ
8166static void sw_perf_event_destroy(struct perf_event *event)
8167{
8168 u64 event_id = event->attr.config;
95476b64 8169
b0a873eb
PZ
8170 WARN_ON(event->parent);
8171
c5905afb 8172 static_key_slow_dec(&perf_swevent_enabled[event_id]);
3b364d7b 8173 swevent_hlist_put();
b0a873eb
PZ
8174}
8175
8176static int perf_swevent_init(struct perf_event *event)
8177{
8176cced 8178 u64 event_id = event->attr.config;
b0a873eb
PZ
8179
8180 if (event->attr.type != PERF_TYPE_SOFTWARE)
8181 return -ENOENT;
8182
2481c5fa
SE
8183 /*
8184 * no branch sampling for software events
8185 */
8186 if (has_branch_stack(event))
8187 return -EOPNOTSUPP;
8188
b0a873eb
PZ
8189 switch (event_id) {
8190 case PERF_COUNT_SW_CPU_CLOCK:
8191 case PERF_COUNT_SW_TASK_CLOCK:
8192 return -ENOENT;
8193
8194 default:
8195 break;
8196 }
8197
ce677831 8198 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
8199 return -ENOENT;
8200
8201 if (!event->parent) {
8202 int err;
8203
3b364d7b 8204 err = swevent_hlist_get();
b0a873eb
PZ
8205 if (err)
8206 return err;
8207
c5905afb 8208 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
8209 event->destroy = sw_perf_event_destroy;
8210 }
8211
8212 return 0;
8213}
8214
8215static struct pmu perf_swevent = {
89a1e187 8216 .task_ctx_nr = perf_sw_context,
95476b64 8217
34f43927
PZ
8218 .capabilities = PERF_PMU_CAP_NO_NMI,
8219
b0a873eb 8220 .event_init = perf_swevent_init,
a4eaf7f1
PZ
8221 .add = perf_swevent_add,
8222 .del = perf_swevent_del,
8223 .start = perf_swevent_start,
8224 .stop = perf_swevent_stop,
1c024eca 8225 .read = perf_swevent_read,
1c024eca
PZ
8226};
8227
b0a873eb
PZ
8228#ifdef CONFIG_EVENT_TRACING
8229
1c024eca
PZ
8230static int perf_tp_filter_match(struct perf_event *event,
8231 struct perf_sample_data *data)
8232{
7e3f977e 8233 void *record = data->raw->frag.data;
1c024eca 8234
b71b437e
PZ
8235 /* only top level events have filters set */
8236 if (event->parent)
8237 event = event->parent;
8238
1c024eca
PZ
8239 if (likely(!event->filter) || filter_match_preds(event->filter, record))
8240 return 1;
8241 return 0;
8242}
8243
8244static int perf_tp_event_match(struct perf_event *event,
8245 struct perf_sample_data *data,
8246 struct pt_regs *regs)
8247{
a0f7d0f7
FW
8248 if (event->hw.state & PERF_HES_STOPPED)
8249 return 0;
580d607c
PZ
8250 /*
8251 * All tracepoints are from kernel-space.
8252 */
8253 if (event->attr.exclude_kernel)
1c024eca
PZ
8254 return 0;
8255
8256 if (!perf_tp_filter_match(event, data))
8257 return 0;
8258
8259 return 1;
8260}
8261
85b67bcb
AS
8262void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
8263 struct trace_event_call *call, u64 count,
8264 struct pt_regs *regs, struct hlist_head *head,
8265 struct task_struct *task)
8266{
e87c6bc3 8267 if (bpf_prog_array_valid(call)) {
85b67bcb 8268 *(struct pt_regs **)raw_data = regs;
e87c6bc3 8269 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
85b67bcb
AS
8270 perf_swevent_put_recursion_context(rctx);
8271 return;
8272 }
8273 }
8274 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
8fd0fbbe 8275 rctx, task);
85b67bcb
AS
8276}
8277EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
8278
1e1dcd93 8279void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
e6dab5ff 8280 struct pt_regs *regs, struct hlist_head *head, int rctx,
8fd0fbbe 8281 struct task_struct *task)
95476b64
FW
8282{
8283 struct perf_sample_data data;
8fd0fbbe 8284 struct perf_event *event;
1c024eca 8285
95476b64 8286 struct perf_raw_record raw = {
7e3f977e
DB
8287 .frag = {
8288 .size = entry_size,
8289 .data = record,
8290 },
95476b64
FW
8291 };
8292
1e1dcd93 8293 perf_sample_data_init(&data, 0, 0);
95476b64
FW
8294 data.raw = &raw;
8295
1e1dcd93
AS
8296 perf_trace_buf_update(record, event_type);
8297
8fd0fbbe 8298 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 8299 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 8300 perf_swevent_event(event, count, &data, regs);
4f41c013 8301 }
ecc55f84 8302
e6dab5ff
AV
8303 /*
8304 * If we got specified a target task, also iterate its context and
8305 * deliver this event there too.
8306 */
8307 if (task && task != current) {
8308 struct perf_event_context *ctx;
8309 struct trace_entry *entry = record;
8310
8311 rcu_read_lock();
8312 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
8313 if (!ctx)
8314 goto unlock;
8315
8316 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cd6fb677
JO
8317 if (event->cpu != smp_processor_id())
8318 continue;
e6dab5ff
AV
8319 if (event->attr.type != PERF_TYPE_TRACEPOINT)
8320 continue;
8321 if (event->attr.config != entry->type)
8322 continue;
8323 if (perf_tp_event_match(event, &data, regs))
8324 perf_swevent_event(event, count, &data, regs);
8325 }
8326unlock:
8327 rcu_read_unlock();
8328 }
8329
ecc55f84 8330 perf_swevent_put_recursion_context(rctx);
95476b64
FW
8331}
8332EXPORT_SYMBOL_GPL(perf_tp_event);
8333
cdd6c482 8334static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 8335{
1c024eca 8336 perf_trace_destroy(event);
e077df4f
PZ
8337}
8338
b0a873eb 8339static int perf_tp_event_init(struct perf_event *event)
e077df4f 8340{
76e1d904
FW
8341 int err;
8342
b0a873eb
PZ
8343 if (event->attr.type != PERF_TYPE_TRACEPOINT)
8344 return -ENOENT;
8345
2481c5fa
SE
8346 /*
8347 * no branch sampling for tracepoint events
8348 */
8349 if (has_branch_stack(event))
8350 return -EOPNOTSUPP;
8351
1c024eca
PZ
8352 err = perf_trace_init(event);
8353 if (err)
b0a873eb 8354 return err;
e077df4f 8355
cdd6c482 8356 event->destroy = tp_perf_event_destroy;
e077df4f 8357
b0a873eb
PZ
8358 return 0;
8359}
8360
8361static struct pmu perf_tracepoint = {
89a1e187
PZ
8362 .task_ctx_nr = perf_sw_context,
8363
b0a873eb 8364 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
8365 .add = perf_trace_add,
8366 .del = perf_trace_del,
8367 .start = perf_swevent_start,
8368 .stop = perf_swevent_stop,
b0a873eb 8369 .read = perf_swevent_read,
b0a873eb
PZ
8370};
8371
33ea4b24 8372#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
e12f03d7
SL
8373/*
8374 * Flags in config, used by dynamic PMU kprobe and uprobe
8375 * The flags should match following PMU_FORMAT_ATTR().
8376 *
8377 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
8378 * if not set, create kprobe/uprobe
a6ca88b2
SL
8379 *
8380 * The following values specify a reference counter (or semaphore in the
 8381 * terminology of tools like dtrace, systemtap, etc.) for Userspace Statically
8382 * Defined Tracepoints (USDT). Currently, we use 40 bit for the offset.
8383 *
 8384 * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config as the offset
8385 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left
e12f03d7
SL
8386 */
8387enum perf_probe_config {
8388 PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */
a6ca88b2
SL
8389 PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
8390 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
e12f03d7
SL
8391};
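/*
 * Illustrative encoding (hypothetical values): a uretprobe whose USDT
 * reference counter lives at offset 0x1c in the object could be opened with
 * attr.config = (0x1c << PERF_UPROBE_REF_CTR_OFFSET_SHIFT) |
 * PERF_PROBE_CONFIG_IS_RETPROBE; perf_uprobe_event_init() below unpacks
 * both fields from config.
 */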
8392
8393PMU_FORMAT_ATTR(retprobe, "config:0");
a6ca88b2 8394#endif
e12f03d7 8395
a6ca88b2
SL
8396#ifdef CONFIG_KPROBE_EVENTS
8397static struct attribute *kprobe_attrs[] = {
e12f03d7
SL
8398 &format_attr_retprobe.attr,
8399 NULL,
8400};
8401
a6ca88b2 8402static struct attribute_group kprobe_format_group = {
e12f03d7 8403 .name = "format",
a6ca88b2 8404 .attrs = kprobe_attrs,
e12f03d7
SL
8405};
8406
a6ca88b2
SL
8407static const struct attribute_group *kprobe_attr_groups[] = {
8408 &kprobe_format_group,
e12f03d7
SL
8409 NULL,
8410};
8411
8412static int perf_kprobe_event_init(struct perf_event *event);
8413static struct pmu perf_kprobe = {
8414 .task_ctx_nr = perf_sw_context,
8415 .event_init = perf_kprobe_event_init,
8416 .add = perf_trace_add,
8417 .del = perf_trace_del,
8418 .start = perf_swevent_start,
8419 .stop = perf_swevent_stop,
8420 .read = perf_swevent_read,
a6ca88b2 8421 .attr_groups = kprobe_attr_groups,
e12f03d7
SL
8422};
8423
8424static int perf_kprobe_event_init(struct perf_event *event)
8425{
8426 int err;
8427 bool is_retprobe;
8428
8429 if (event->attr.type != perf_kprobe.type)
8430 return -ENOENT;
32e6e967
SL
8431
8432 if (!capable(CAP_SYS_ADMIN))
8433 return -EACCES;
8434
e12f03d7
SL
8435 /*
8436 * no branch sampling for probe events
8437 */
8438 if (has_branch_stack(event))
8439 return -EOPNOTSUPP;
8440
8441 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
8442 err = perf_kprobe_init(event, is_retprobe);
8443 if (err)
8444 return err;
8445
8446 event->destroy = perf_kprobe_destroy;
8447
8448 return 0;
8449}
8450#endif /* CONFIG_KPROBE_EVENTS */
8451
33ea4b24 8452#ifdef CONFIG_UPROBE_EVENTS
a6ca88b2
SL
8453PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
8454
8455static struct attribute *uprobe_attrs[] = {
8456 &format_attr_retprobe.attr,
8457 &format_attr_ref_ctr_offset.attr,
8458 NULL,
8459};
8460
8461static struct attribute_group uprobe_format_group = {
8462 .name = "format",
8463 .attrs = uprobe_attrs,
8464};
8465
8466static const struct attribute_group *uprobe_attr_groups[] = {
8467 &uprobe_format_group,
8468 NULL,
8469};
8470
33ea4b24
SL
8471static int perf_uprobe_event_init(struct perf_event *event);
8472static struct pmu perf_uprobe = {
8473 .task_ctx_nr = perf_sw_context,
8474 .event_init = perf_uprobe_event_init,
8475 .add = perf_trace_add,
8476 .del = perf_trace_del,
8477 .start = perf_swevent_start,
8478 .stop = perf_swevent_stop,
8479 .read = perf_swevent_read,
a6ca88b2 8480 .attr_groups = uprobe_attr_groups,
33ea4b24
SL
8481};
8482
8483static int perf_uprobe_event_init(struct perf_event *event)
8484{
8485 int err;
a6ca88b2 8486 unsigned long ref_ctr_offset;
33ea4b24
SL
8487 bool is_retprobe;
8488
8489 if (event->attr.type != perf_uprobe.type)
8490 return -ENOENT;
32e6e967
SL
8491
8492 if (!capable(CAP_SYS_ADMIN))
8493 return -EACCES;
8494
33ea4b24
SL
8495 /*
8496 * no branch sampling for probe events
8497 */
8498 if (has_branch_stack(event))
8499 return -EOPNOTSUPP;
8500
8501 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
a6ca88b2
SL
8502 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
8503 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
33ea4b24
SL
8504 if (err)
8505 return err;
8506
8507 event->destroy = perf_uprobe_destroy;
8508
8509 return 0;
8510}
8511#endif /* CONFIG_UPROBE_EVENTS */
8512
b0a873eb
PZ
8513static inline void perf_tp_register(void)
8514{
2e80a82a 8515 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e12f03d7
SL
8516#ifdef CONFIG_KPROBE_EVENTS
8517 perf_pmu_register(&perf_kprobe, "kprobe", -1);
8518#endif
33ea4b24
SL
8519#ifdef CONFIG_UPROBE_EVENTS
8520 perf_pmu_register(&perf_uprobe, "uprobe", -1);
8521#endif
e077df4f 8522}
6fb2915d 8523
6fb2915d
LZ
8524static void perf_event_free_filter(struct perf_event *event)
8525{
8526 ftrace_profile_free_filter(event);
8527}
8528
aa6a5f3c
AS
8529#ifdef CONFIG_BPF_SYSCALL
8530static void bpf_overflow_handler(struct perf_event *event,
8531 struct perf_sample_data *data,
8532 struct pt_regs *regs)
8533{
8534 struct bpf_perf_event_data_kern ctx = {
8535 .data = data,
7d9285e8 8536 .event = event,
aa6a5f3c
AS
8537 };
8538 int ret = 0;
8539
c895f6f7 8540 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
aa6a5f3c
AS
8541 preempt_disable();
8542 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
8543 goto out;
8544 rcu_read_lock();
88575199 8545 ret = BPF_PROG_RUN(event->prog, &ctx);
aa6a5f3c
AS
8546 rcu_read_unlock();
8547out:
8548 __this_cpu_dec(bpf_prog_active);
8549 preempt_enable();
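	/*
	 * A zero return from the BPF program suppresses the default
	 * overflow handling (the original overflow handler is skipped).
	 */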
8550 if (!ret)
8551 return;
8552
8553 event->orig_overflow_handler(event, data, regs);
8554}
8555
8556static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8557{
8558 struct bpf_prog *prog;
8559
8560 if (event->overflow_handler_context)
8561 /* hw breakpoint or kernel counter */
8562 return -EINVAL;
8563
8564 if (event->prog)
8565 return -EEXIST;
8566
8567 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
8568 if (IS_ERR(prog))
8569 return PTR_ERR(prog);
8570
8571 event->prog = prog;
8572 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
8573 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
8574 return 0;
8575}
8576
8577static void perf_event_free_bpf_handler(struct perf_event *event)
8578{
8579 struct bpf_prog *prog = event->prog;
8580
8581 if (!prog)
8582 return;
8583
8584 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
8585 event->prog = NULL;
8586 bpf_prog_put(prog);
8587}
8588#else
8589static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8590{
8591 return -EOPNOTSUPP;
8592}
8593static void perf_event_free_bpf_handler(struct perf_event *event)
8594{
8595}
8596#endif
8597
e12f03d7
SL
8598/*
 8599 * returns true if the event is a tracepoint, or a kprobe/uprobe created
8600 * with perf_event_open()
8601 */
8602static inline bool perf_event_is_tracing(struct perf_event *event)
8603{
8604 if (event->pmu == &perf_tracepoint)
8605 return true;
8606#ifdef CONFIG_KPROBE_EVENTS
8607 if (event->pmu == &perf_kprobe)
8608 return true;
33ea4b24
SL
8609#endif
8610#ifdef CONFIG_UPROBE_EVENTS
8611 if (event->pmu == &perf_uprobe)
8612 return true;
e12f03d7
SL
8613#endif
8614 return false;
8615}
8616
2541517c
AS
8617static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8618{
cf5f5cea 8619 bool is_kprobe, is_tracepoint, is_syscall_tp;
2541517c 8620 struct bpf_prog *prog;
e87c6bc3 8621 int ret;
2541517c 8622
e12f03d7 8623 if (!perf_event_is_tracing(event))
f91840a3 8624 return perf_event_set_bpf_handler(event, prog_fd);
2541517c 8625
98b5c2c6
AS
8626 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
8627 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
cf5f5cea
YS
8628 is_syscall_tp = is_syscall_trace_event(event->tp_event);
8629 if (!is_kprobe && !is_tracepoint && !is_syscall_tp)
98b5c2c6 8630 /* bpf programs can only be attached to u/kprobe or tracepoint */
2541517c
AS
8631 return -EINVAL;
8632
8633 prog = bpf_prog_get(prog_fd);
8634 if (IS_ERR(prog))
8635 return PTR_ERR(prog);
8636
98b5c2c6 8637 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
cf5f5cea
YS
8638 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
8639 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
2541517c
AS
8640 /* valid fd, but invalid bpf program type */
8641 bpf_prog_put(prog);
8642 return -EINVAL;
8643 }
8644
9802d865
JB
8645 /* Kprobe override only works for kprobes, not uprobes. */
8646 if (prog->kprobe_override &&
8647 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
8648 bpf_prog_put(prog);
8649 return -EINVAL;
8650 }
8651
cf5f5cea 8652 if (is_tracepoint || is_syscall_tp) {
32bbe007
AS
8653 int off = trace_event_get_offsets(event->tp_event);
8654
8655 if (prog->aux->max_ctx_offset > off) {
8656 bpf_prog_put(prog);
8657 return -EACCES;
8658 }
8659 }
2541517c 8660
e87c6bc3
YS
8661 ret = perf_event_attach_bpf_prog(event, prog);
8662 if (ret)
8663 bpf_prog_put(prog);
8664 return ret;
2541517c
AS
8665}
8666
8667static void perf_event_free_bpf_prog(struct perf_event *event)
8668{
e12f03d7 8669 if (!perf_event_is_tracing(event)) {
0b4c6841 8670 perf_event_free_bpf_handler(event);
2541517c 8671 return;
2541517c 8672 }
e87c6bc3 8673 perf_event_detach_bpf_prog(event);
2541517c
AS
8674}
8675
e077df4f 8676#else
6fb2915d 8677
b0a873eb 8678static inline void perf_tp_register(void)
e077df4f 8679{
e077df4f 8680}
6fb2915d 8681
6fb2915d
LZ
8682static void perf_event_free_filter(struct perf_event *event)
8683{
8684}
8685
2541517c
AS
8686static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8687{
8688 return -ENOENT;
8689}
8690
8691static void perf_event_free_bpf_prog(struct perf_event *event)
8692{
8693}
07b139c8 8694#endif /* CONFIG_EVENT_TRACING */
e077df4f 8695
24f1e32c 8696#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 8697void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 8698{
f5ffe02e
FW
8699 struct perf_sample_data sample;
8700 struct pt_regs *regs = data;
8701
fd0d000b 8702 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 8703
a4eaf7f1 8704 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 8705 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
8706}
8707#endif
8708
375637bc
AS
8709/*
8710 * Allocate a new address filter
8711 */
8712static struct perf_addr_filter *
8713perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
8714{
8715 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
8716 struct perf_addr_filter *filter;
8717
8718 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
8719 if (!filter)
8720 return NULL;
8721
8722 INIT_LIST_HEAD(&filter->entry);
8723 list_add_tail(&filter->entry, filters);
8724
8725 return filter;
8726}
8727
8728static void free_filters_list(struct list_head *filters)
8729{
8730 struct perf_addr_filter *filter, *iter;
8731
8732 list_for_each_entry_safe(filter, iter, filters, entry) {
9511bce9 8733 path_put(&filter->path);
375637bc
AS
8734 list_del(&filter->entry);
8735 kfree(filter);
8736 }
8737}
8738
8739/*
8740 * Free existing address filters and optionally install new ones
8741 */
8742static void perf_addr_filters_splice(struct perf_event *event,
8743 struct list_head *head)
8744{
8745 unsigned long flags;
8746 LIST_HEAD(list);
8747
8748 if (!has_addr_filter(event))
8749 return;
8750
8751 /* don't bother with children, they don't have their own filters */
8752 if (event->parent)
8753 return;
8754
8755 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
8756
8757 list_splice_init(&event->addr_filters.list, &list);
8758 if (head)
8759 list_splice(head, &event->addr_filters.list);
8760
8761 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
8762
8763 free_filters_list(&list);
8764}
8765
8766/*
8767 * Scan through mm's vmas and see if one of them matches the
8768 * @filter; if so, adjust filter's address range.
8769 * Called with mm::mmap_sem down for reading.
8770 */
8771static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
8772 struct mm_struct *mm)
8773{
8774 struct vm_area_struct *vma;
8775
8776 for (vma = mm->mmap; vma; vma = vma->vm_next) {
8777 struct file *file = vma->vm_file;
8778 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8779 unsigned long vma_size = vma->vm_end - vma->vm_start;
8780
8781 if (!file)
8782 continue;
8783
8784 if (!perf_addr_filter_match(filter, file, off, vma_size))
8785 continue;
8786
8787 return vma->vm_start;
8788 }
8789
8790 return 0;
8791}
8792
8793/*
8794 * Update event's address range filters based on the
8795 * task's existing mappings, if any.
8796 */
8797static void perf_event_addr_filters_apply(struct perf_event *event)
8798{
8799 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8800 struct task_struct *task = READ_ONCE(event->ctx->task);
8801 struct perf_addr_filter *filter;
8802 struct mm_struct *mm = NULL;
8803 unsigned int count = 0;
8804 unsigned long flags;
8805
8806 /*
8807 * We may observe TASK_TOMBSTONE, which means that the event tear-down
8808 * will stop on the parent's child_mutex that our caller is also holding
8809 */
8810 if (task == TASK_TOMBSTONE)
8811 return;
8812
6ce77bfd
AS
8813 if (!ifh->nr_file_filters)
8814 return;
8815
375637bc
AS
8816 mm = get_task_mm(event->ctx->task);
8817 if (!mm)
8818 goto restart;
8819
8820 down_read(&mm->mmap_sem);
8821
8822 raw_spin_lock_irqsave(&ifh->lock, flags);
8823 list_for_each_entry(filter, &ifh->list, entry) {
8824 event->addr_filters_offs[count] = 0;
8825
99f5bc9b
MP
8826 /*
8827 * Adjust base offset if the filter is associated to a binary
8828 * that needs to be mapped:
8829 */
9511bce9 8830 if (filter->path.dentry)
375637bc
AS
8831 event->addr_filters_offs[count] =
8832 perf_addr_filter_apply(filter, mm);
8833
8834 count++;
8835 }
8836
8837 event->addr_filters_gen++;
8838 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8839
8840 up_read(&mm->mmap_sem);
8841
8842 mmput(mm);
8843
8844restart:
767ae086 8845 perf_event_stop(event, 1);
375637bc
AS
8846}
8847
8848/*
8849 * Address range filtering: limiting the data to certain
8850 * instruction address ranges. Filters are ioctl()ed to us from
8851 * userspace as ascii strings.
8852 *
8853 * Filter string format:
8854 *
8855 * ACTION RANGE_SPEC
8856 * where ACTION is one of the
8857 * * "filter": limit the trace to this region
8858 * * "start": start tracing from this address
8859 * * "stop": stop tracing at this address/region;
8860 * RANGE_SPEC is
8861 * * for kernel addresses: <start address>[/<size>]
8862 * * for object files: <start address>[/<size>]@</path/to/object/file>
8863 *
6ed70cf3
AS
8864 * if <size> is not specified or is zero, the range is treated as a single
8865 * address; not valid for ACTION=="filter".
375637bc
AS
8866 */
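/*
 * Illustrative filter strings (hypothetical paths and addresses):
 *
 *   "filter 0x1000/0x2000@/usr/bin/app"   trace only that range of the binary
 *   "stop 0xffffffff81234567"             stop tracing at this kernel address
 */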
8867enum {
e96271f3 8868 IF_ACT_NONE = -1,
375637bc
AS
8869 IF_ACT_FILTER,
8870 IF_ACT_START,
8871 IF_ACT_STOP,
8872 IF_SRC_FILE,
8873 IF_SRC_KERNEL,
8874 IF_SRC_FILEADDR,
8875 IF_SRC_KERNELADDR,
8876};
8877
8878enum {
8879 IF_STATE_ACTION = 0,
8880 IF_STATE_SOURCE,
8881 IF_STATE_END,
8882};
8883
8884static const match_table_t if_tokens = {
8885 { IF_ACT_FILTER, "filter" },
8886 { IF_ACT_START, "start" },
8887 { IF_ACT_STOP, "stop" },
8888 { IF_SRC_FILE, "%u/%u@%s" },
8889 { IF_SRC_KERNEL, "%u/%u" },
8890 { IF_SRC_FILEADDR, "%u@%s" },
8891 { IF_SRC_KERNELADDR, "%u" },
e96271f3 8892 { IF_ACT_NONE, NULL },
375637bc
AS
8893};
8894
8895/*
8896 * Address filter string parser
8897 */
8898static int
8899perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
8900 struct list_head *filters)
8901{
8902 struct perf_addr_filter *filter = NULL;
8903 char *start, *orig, *filename = NULL;
375637bc
AS
8904 substring_t args[MAX_OPT_ARGS];
8905 int state = IF_STATE_ACTION, token;
8906 unsigned int kernel = 0;
8907 int ret = -EINVAL;
8908
8909 orig = fstr = kstrdup(fstr, GFP_KERNEL);
8910 if (!fstr)
8911 return -ENOMEM;
8912
8913 while ((start = strsep(&fstr, " ,\n")) != NULL) {
6ed70cf3
AS
8914 static const enum perf_addr_filter_action_t actions[] = {
8915 [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER,
8916 [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START,
8917 [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP,
8918 };
375637bc
AS
8919 ret = -EINVAL;
8920
8921 if (!*start)
8922 continue;
8923
8924 /* filter definition begins */
8925 if (state == IF_STATE_ACTION) {
8926 filter = perf_addr_filter_new(event, filters);
8927 if (!filter)
8928 goto fail;
8929 }
8930
8931 token = match_token(start, if_tokens, args);
8932 switch (token) {
8933 case IF_ACT_FILTER:
8934 case IF_ACT_START:
375637bc
AS
8935 case IF_ACT_STOP:
8936 if (state != IF_STATE_ACTION)
8937 goto fail;
8938
6ed70cf3 8939 filter->action = actions[token];
375637bc
AS
8940 state = IF_STATE_SOURCE;
8941 break;
8942
8943 case IF_SRC_KERNELADDR:
8944 case IF_SRC_KERNEL:
8945 kernel = 1;
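			/* fall through: kernel specs reuse the offset/size parsing below */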
8946
8947 case IF_SRC_FILEADDR:
8948 case IF_SRC_FILE:
8949 if (state != IF_STATE_SOURCE)
8950 goto fail;
8951
375637bc
AS
8952 *args[0].to = 0;
8953 ret = kstrtoul(args[0].from, 0, &filter->offset);
8954 if (ret)
8955 goto fail;
8956
6ed70cf3 8957 if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) {
375637bc
AS
8958 *args[1].to = 0;
8959 ret = kstrtoul(args[1].from, 0, &filter->size);
8960 if (ret)
8961 goto fail;
8962 }
8963
4059ffd0 8964 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
6ed70cf3 8965 int fpos = token == IF_SRC_FILE ? 2 : 1;
4059ffd0
MP
8966
8967 filename = match_strdup(&args[fpos]);
375637bc
AS
8968 if (!filename) {
8969 ret = -ENOMEM;
8970 goto fail;
8971 }
8972 }
8973
8974 state = IF_STATE_END;
8975 break;
8976
8977 default:
8978 goto fail;
8979 }
8980
8981 /*
8982 * Filter definition is fully parsed, validate and install it.
8983 * Make sure that it doesn't contradict itself or the event's
8984 * attribute.
8985 */
8986 if (state == IF_STATE_END) {
9ccbfbb1 8987 ret = -EINVAL;
375637bc
AS
8988 if (kernel && event->attr.exclude_kernel)
8989 goto fail;
8990
6ed70cf3
AS
8991 /*
8992 * ACTION "filter" must have a non-zero length region
8993 * specified.
8994 */
8995 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER &&
8996 !filter->size)
8997 goto fail;
8998
375637bc
AS
8999 if (!kernel) {
9000 if (!filename)
9001 goto fail;
9002
6ce77bfd
AS
9003 /*
9004 * For now, we only support file-based filters
9005 * in per-task events; doing so for CPU-wide
9006 * events requires additional context switching
9007 * trickery, since same object code will be
9008 * mapped at different virtual addresses in
9009 * different processes.
9010 */
9011 ret = -EOPNOTSUPP;
9012 if (!event->ctx->task)
9013 goto fail_free_name;
9014
375637bc 9015 /* look up the path and grab its inode */
9511bce9
SL
9016 ret = kern_path(filename, LOOKUP_FOLLOW,
9017 &filter->path);
375637bc
AS
9018 if (ret)
9019 goto fail_free_name;
9020
375637bc
AS
9021 kfree(filename);
9022 filename = NULL;
9023
9024 ret = -EINVAL;
9511bce9
SL
9025 if (!filter->path.dentry ||
9026 !S_ISREG(d_inode(filter->path.dentry)
9027 ->i_mode))
375637bc 9028 goto fail;
6ce77bfd
AS
9029
9030 event->addr_filters.nr_file_filters++;
375637bc
AS
9031 }
9032
9033 /* ready to consume more filters */
9034 state = IF_STATE_ACTION;
9035 filter = NULL;
9036 }
9037 }
9038
9039 if (state != IF_STATE_ACTION)
9040 goto fail;
9041
9042 kfree(orig);
9043
9044 return 0;
9045
9046fail_free_name:
9047 kfree(filename);
9048fail:
9049 free_filters_list(filters);
9050 kfree(orig);
9051
9052 return ret;
9053}
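
For reference, each filter consumed by the state machine above is one action token followed by one source token, with individual filters separated by spaces, commas or newlines. Assuming the usual token spellings from the if_tokens table (defined earlier in this file, not visible here), accepted strings look roughly like this sketch; the addresses and the binary path are purely illustrative:

	filter 0x1000/0x2000@/usr/bin/foo                  (trace one range of foo's text)
	start 0x400000@/usr/bin/foo,stop 0x401000@/usr/bin/foo
	filter 0xffffffff81000000/0x4000                   (kernel range; rejected if exclude_kernel is set)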
9054
9055static int
9056perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
9057{
9058 LIST_HEAD(filters);
9059 int ret;
9060
9061 /*
9062 * Since this is called in perf_ioctl() path, we're already holding
9063 * ctx::mutex.
9064 */
9065 lockdep_assert_held(&event->ctx->mutex);
9066
9067 if (WARN_ON_ONCE(event->parent))
9068 return -EINVAL;
9069
375637bc
AS
9070 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
9071 if (ret)
6ce77bfd 9072 goto fail_clear_files;
375637bc
AS
9073
9074 ret = event->pmu->addr_filters_validate(&filters);
6ce77bfd
AS
9075 if (ret)
9076 goto fail_free_filters;
375637bc
AS
9077
9078 /* remove existing filters, if any */
9079 perf_addr_filters_splice(event, &filters);
9080
9081 /* install new filters */
9082 perf_event_for_each_child(event, perf_event_addr_filters_apply);
9083
6ce77bfd
AS
9084 return ret;
9085
9086fail_free_filters:
9087 free_filters_list(&filters);
9088
9089fail_clear_files:
9090 event->addr_filters.nr_file_filters = 0;
9091
375637bc
AS
9092 return ret;
9093}
9094
c796bbbe
AS
9095static int perf_event_set_filter(struct perf_event *event, void __user *arg)
9096{
c796bbbe 9097 int ret = -EINVAL;
e12f03d7 9098 char *filter_str;
c796bbbe
AS
9099
9100 filter_str = strndup_user(arg, PAGE_SIZE);
9101 if (IS_ERR(filter_str))
9102 return PTR_ERR(filter_str);
9103
e12f03d7
SL
9104#ifdef CONFIG_EVENT_TRACING
9105 if (perf_event_is_tracing(event)) {
9106 struct perf_event_context *ctx = event->ctx;
9107
9108 /*
9109 * Beware, here be dragons!!
9110 *
9111 * the tracepoint muck will deadlock against ctx->mutex, but
9112 * the tracepoint stuff does not actually need it. So
9113 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
9114 * already have a reference on ctx.
9115 *
9116 * This can result in event getting moved to a different ctx,
9117 * but that does not affect the tracepoint state.
9118 */
9119 mutex_unlock(&ctx->mutex);
9120 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
9121 mutex_lock(&ctx->mutex);
9122 } else
9123#endif
9124 if (has_addr_filter(event))
375637bc 9125 ret = perf_event_set_addr_filter(event, filter_str);
c796bbbe
AS
9126
9127 kfree(filter_str);
9128 return ret;
9129}
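
This path is normally reached from userspace via the PERF_EVENT_IOC_SET_FILTER ioctl on an already-open event fd, which is what lands in perf_event_set_filter() above. A minimal, purely illustrative userspace sketch (the fd and the filter string are assumptions):

	if (ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER,
		  "filter 0x1000/0x2000@/usr/bin/foo") < 0)
		perror("PERF_EVENT_IOC_SET_FILTER");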
9130
b0a873eb
PZ
9131/*
9132 * hrtimer based swevent callback
9133 */
f29ac756 9134
b0a873eb 9135static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 9136{
b0a873eb
PZ
9137 enum hrtimer_restart ret = HRTIMER_RESTART;
9138 struct perf_sample_data data;
9139 struct pt_regs *regs;
9140 struct perf_event *event;
9141 u64 period;
f29ac756 9142
b0a873eb 9143 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
9144
9145 if (event->state != PERF_EVENT_STATE_ACTIVE)
9146 return HRTIMER_NORESTART;
9147
b0a873eb 9148 event->pmu->read(event);
f344011c 9149
fd0d000b 9150 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
9151 regs = get_irq_regs();
9152
9153 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 9154 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 9155 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
9156 ret = HRTIMER_NORESTART;
9157 }
24f1e32c 9158
b0a873eb
PZ
9159 period = max_t(u64, 10000, event->hw.sample_period);
9160 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 9161
b0a873eb 9162 return ret;
f29ac756
PZ
9163}
9164
b0a873eb 9165static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 9166{
b0a873eb 9167 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
9168 s64 period;
9169
9170 if (!is_sampling_event(event))
9171 return;
f5ffe02e 9172
5d508e82
FBH
9173 period = local64_read(&hwc->period_left);
9174 if (period) {
9175 if (period < 0)
9176 period = 10000;
fa407f35 9177
5d508e82
FBH
9178 local64_set(&hwc->period_left, 0);
9179 } else {
9180 period = max_t(u64, 10000, hwc->sample_period);
9181 }
3497d206
TG
9182 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
9183 HRTIMER_MODE_REL_PINNED);
24f1e32c 9184}
b0a873eb
PZ
9185
9186static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 9187{
b0a873eb
PZ
9188 struct hw_perf_event *hwc = &event->hw;
9189
6c7e550f 9190 if (is_sampling_event(event)) {
b0a873eb 9191 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 9192 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
9193
9194 hrtimer_cancel(&hwc->hrtimer);
9195 }
24f1e32c
FW
9196}
9197
ba3dd36c
PZ
9198static void perf_swevent_init_hrtimer(struct perf_event *event)
9199{
9200 struct hw_perf_event *hwc = &event->hw;
9201
9202 if (!is_sampling_event(event))
9203 return;
9204
9205 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
9206 hwc->hrtimer.function = perf_swevent_hrtimer;
9207
9208 /*
9209 * Since hrtimers have a fixed rate, we can do a static freq->period
9210 * mapping and avoid the whole period adjust feedback stuff.
9211 */
9212 if (event->attr.freq) {
9213 long freq = event->attr.sample_freq;
9214
9215 event->attr.sample_period = NSEC_PER_SEC / freq;
9216 hwc->sample_period = event->attr.sample_period;
9217 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 9218 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
9219 event->attr.freq = 0;
9220 }
9221}
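
A quick worked example of the static mapping above, with illustrative numbers: attr.freq = 1 and attr.sample_freq = 4000 (4 kHz) are rewritten at init time into

	sample_period = NSEC_PER_SEC / 4000 = 1,000,000,000 / 4000 = 250,000 ns

after which attr.freq is cleared, so the usual period-adjustment feedback loop never runs for hrtimer-based software events.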
9222
b0a873eb
PZ
9223/*
9224 * Software event: cpu wall time clock
9225 */
9226
9227static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 9228{
b0a873eb
PZ
9229 s64 prev;
9230 u64 now;
9231
a4eaf7f1 9232 now = local_clock();
b0a873eb
PZ
9233 prev = local64_xchg(&event->hw.prev_count, now);
9234 local64_add(now - prev, &event->count);
24f1e32c 9235}
24f1e32c 9236
a4eaf7f1 9237static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 9238{
a4eaf7f1 9239 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 9240 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
9241}
9242
a4eaf7f1 9243static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 9244{
b0a873eb
PZ
9245 perf_swevent_cancel_hrtimer(event);
9246 cpu_clock_event_update(event);
9247}
f29ac756 9248
a4eaf7f1
PZ
9249static int cpu_clock_event_add(struct perf_event *event, int flags)
9250{
9251 if (flags & PERF_EF_START)
9252 cpu_clock_event_start(event, flags);
6a694a60 9253 perf_event_update_userpage(event);
a4eaf7f1
PZ
9254
9255 return 0;
9256}
9257
9258static void cpu_clock_event_del(struct perf_event *event, int flags)
9259{
9260 cpu_clock_event_stop(event, flags);
9261}
9262
b0a873eb
PZ
9263static void cpu_clock_event_read(struct perf_event *event)
9264{
9265 cpu_clock_event_update(event);
9266}
f344011c 9267
b0a873eb
PZ
9268static int cpu_clock_event_init(struct perf_event *event)
9269{
9270 if (event->attr.type != PERF_TYPE_SOFTWARE)
9271 return -ENOENT;
9272
9273 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
9274 return -ENOENT;
9275
2481c5fa
SE
9276 /*
9277 * no branch sampling for software events
9278 */
9279 if (has_branch_stack(event))
9280 return -EOPNOTSUPP;
9281
ba3dd36c
PZ
9282 perf_swevent_init_hrtimer(event);
9283
b0a873eb 9284 return 0;
f29ac756
PZ
9285}
9286
b0a873eb 9287static struct pmu perf_cpu_clock = {
89a1e187
PZ
9288 .task_ctx_nr = perf_sw_context,
9289
34f43927
PZ
9290 .capabilities = PERF_PMU_CAP_NO_NMI,
9291
b0a873eb 9292 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
9293 .add = cpu_clock_event_add,
9294 .del = cpu_clock_event_del,
9295 .start = cpu_clock_event_start,
9296 .stop = cpu_clock_event_stop,
b0a873eb
PZ
9297 .read = cpu_clock_event_read,
9298};
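
An illustrative userspace sketch (not part of this file) of opening the cpu-clock software event served by the PMU above and reading the accumulated time; assumes <linux/perf_event.h>, <sys/syscall.h> and <unistd.h>:

	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_CPU_CLOCK,
		.size	= sizeof(attr),
	};
	int fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
			 -1 /* any cpu */, -1 /* no group */, 0);
	__u64 ns;

	read(fd, &ns, sizeof(ns));	/* cpu time consumed, in nanoseconds */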
9299
9300/*
9301 * Software event: task time clock
9302 */
9303
9304static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 9305{
b0a873eb
PZ
9306 u64 prev;
9307 s64 delta;
5c92d124 9308
b0a873eb
PZ
9309 prev = local64_xchg(&event->hw.prev_count, now);
9310 delta = now - prev;
9311 local64_add(delta, &event->count);
9312}
5c92d124 9313
a4eaf7f1 9314static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 9315{
a4eaf7f1 9316 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 9317 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
9318}
9319
a4eaf7f1 9320static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
9321{
9322 perf_swevent_cancel_hrtimer(event);
9323 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
9324}
9325
9326static int task_clock_event_add(struct perf_event *event, int flags)
9327{
9328 if (flags & PERF_EF_START)
9329 task_clock_event_start(event, flags);
6a694a60 9330 perf_event_update_userpage(event);
b0a873eb 9331
a4eaf7f1
PZ
9332 return 0;
9333}
9334
9335static void task_clock_event_del(struct perf_event *event, int flags)
9336{
9337 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
9338}
9339
9340static void task_clock_event_read(struct perf_event *event)
9341{
768a06e2
PZ
9342 u64 now = perf_clock();
9343 u64 delta = now - event->ctx->timestamp;
9344 u64 time = event->ctx->time + delta;
b0a873eb
PZ
9345
9346 task_clock_event_update(event, time);
9347}
9348
9349static int task_clock_event_init(struct perf_event *event)
6fb2915d 9350{
b0a873eb
PZ
9351 if (event->attr.type != PERF_TYPE_SOFTWARE)
9352 return -ENOENT;
9353
9354 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
9355 return -ENOENT;
9356
2481c5fa
SE
9357 /*
9358 * no branch sampling for software events
9359 */
9360 if (has_branch_stack(event))
9361 return -EOPNOTSUPP;
9362
ba3dd36c
PZ
9363 perf_swevent_init_hrtimer(event);
9364
b0a873eb 9365 return 0;
6fb2915d
LZ
9366}
9367
b0a873eb 9368static struct pmu perf_task_clock = {
89a1e187
PZ
9369 .task_ctx_nr = perf_sw_context,
9370
34f43927
PZ
9371 .capabilities = PERF_PMU_CAP_NO_NMI,
9372
b0a873eb 9373 .event_init = task_clock_event_init,
a4eaf7f1
PZ
9374 .add = task_clock_event_add,
9375 .del = task_clock_event_del,
9376 .start = task_clock_event_start,
9377 .stop = task_clock_event_stop,
b0a873eb
PZ
9378 .read = task_clock_event_read,
9379};
6fb2915d 9380
ad5133b7 9381static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 9382{
e077df4f 9383}
6fb2915d 9384
fbbe0701
SB
9385static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
9386{
9387}
9388
ad5133b7 9389static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 9390{
ad5133b7 9391 return 0;
6fb2915d
LZ
9392}
9393
18ab2cd3 9394static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
fbbe0701
SB
9395
9396static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 9397{
fbbe0701
SB
9398 __this_cpu_write(nop_txn_flags, flags);
9399
9400 if (flags & ~PERF_PMU_TXN_ADD)
9401 return;
9402
ad5133b7 9403 perf_pmu_disable(pmu);
6fb2915d
LZ
9404}
9405
ad5133b7
PZ
9406static int perf_pmu_commit_txn(struct pmu *pmu)
9407{
fbbe0701
SB
9408 unsigned int flags = __this_cpu_read(nop_txn_flags);
9409
9410 __this_cpu_write(nop_txn_flags, 0);
9411
9412 if (flags & ~PERF_PMU_TXN_ADD)
9413 return 0;
9414
ad5133b7
PZ
9415 perf_pmu_enable(pmu);
9416 return 0;
9417}
e077df4f 9418
ad5133b7 9419static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 9420{
fbbe0701
SB
9421 unsigned int flags = __this_cpu_read(nop_txn_flags);
9422
9423 __this_cpu_write(nop_txn_flags, 0);
9424
9425 if (flags & ~PERF_PMU_TXN_ADD)
9426 return;
9427
ad5133b7 9428 perf_pmu_enable(pmu);
24f1e32c
FW
9429}
9430
35edc2a5
PZ
9431static int perf_event_idx_default(struct perf_event *event)
9432{
c719f560 9433 return 0;
35edc2a5
PZ
9434}
9435
8dc85d54
PZ
9436/*
9437 * Ensures all contexts with the same task_ctx_nr have the same
9438 * pmu_cpu_context too.
9439 */
9e317041 9440static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 9441{
8dc85d54 9442 struct pmu *pmu;
b326e956 9443
8dc85d54
PZ
9444 if (ctxn < 0)
9445 return NULL;
24f1e32c 9446
8dc85d54
PZ
9447 list_for_each_entry(pmu, &pmus, entry) {
9448 if (pmu->task_ctx_nr == ctxn)
9449 return pmu->pmu_cpu_context;
9450 }
24f1e32c 9451
8dc85d54 9452 return NULL;
24f1e32c
FW
9453}
9454
51676957
PZ
9455static void free_pmu_context(struct pmu *pmu)
9456{
df0062b2
WD
9457 /*
9458 * Static contexts such as perf_sw_context have a global lifetime
9459 * and may be shared between different PMUs. Avoid freeing them
9460 * when a single PMU is going away.
9461 */
9462 if (pmu->task_ctx_nr > perf_invalid_context)
9463 return;
9464
51676957 9465 free_percpu(pmu->pmu_cpu_context);
24f1e32c 9466}
6e855cd4
AS
9467
9468/*
9469 * Let userspace know that this PMU supports address range filtering:
9470 */
9471static ssize_t nr_addr_filters_show(struct device *dev,
9472 struct device_attribute *attr,
9473 char *page)
9474{
9475 struct pmu *pmu = dev_get_drvdata(dev);
9476
9477 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
9478}
9479DEVICE_ATTR_RO(nr_addr_filters);
9480
2e80a82a 9481static struct idr pmu_idr;
d6d020e9 9482
abe43400
PZ
9483static ssize_t
9484type_show(struct device *dev, struct device_attribute *attr, char *page)
9485{
9486 struct pmu *pmu = dev_get_drvdata(dev);
9487
9488 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
9489}
90826ca7 9490static DEVICE_ATTR_RO(type);
abe43400 9491
62b85639
SE
9492static ssize_t
9493perf_event_mux_interval_ms_show(struct device *dev,
9494 struct device_attribute *attr,
9495 char *page)
9496{
9497 struct pmu *pmu = dev_get_drvdata(dev);
9498
9499 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
9500}
9501
272325c4
PZ
9502static DEFINE_MUTEX(mux_interval_mutex);
9503
62b85639
SE
9504static ssize_t
9505perf_event_mux_interval_ms_store(struct device *dev,
9506 struct device_attribute *attr,
9507 const char *buf, size_t count)
9508{
9509 struct pmu *pmu = dev_get_drvdata(dev);
9510 int timer, cpu, ret;
9511
9512 ret = kstrtoint(buf, 0, &timer);
9513 if (ret)
9514 return ret;
9515
9516 if (timer < 1)
9517 return -EINVAL;
9518
 9519 /* same value, nothing to do */
9520 if (timer == pmu->hrtimer_interval_ms)
9521 return count;
9522
272325c4 9523 mutex_lock(&mux_interval_mutex);
62b85639
SE
9524 pmu->hrtimer_interval_ms = timer;
9525
9526 /* update all cpuctx for this PMU */
a63fbed7 9527 cpus_read_lock();
272325c4 9528 for_each_online_cpu(cpu) {
62b85639
SE
9529 struct perf_cpu_context *cpuctx;
9530 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
9531 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
9532
272325c4
PZ
9533 cpu_function_call(cpu,
9534 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 9535 }
a63fbed7 9536 cpus_read_unlock();
272325c4 9537 mutex_unlock(&mux_interval_mutex);
62b85639
SE
9538
9539 return count;
9540}
90826ca7 9541static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
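
Userspace reaches the store routine above through sysfs: once the PMU device is registered (see pmu_dev_alloc() below), the knob appears as /sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms. A hypothetical sketch, assuming a PMU directory named "cpu":

	int fd = open("/sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms",
		      O_WRONLY);
	if (fd >= 0) {
		write(fd, "2", 1);	/* request a 2ms multiplexing interval */
		close(fd);
	}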
62b85639 9542
90826ca7
GKH
9543static struct attribute *pmu_dev_attrs[] = {
9544 &dev_attr_type.attr,
9545 &dev_attr_perf_event_mux_interval_ms.attr,
9546 NULL,
abe43400 9547};
90826ca7 9548ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
9549
9550static int pmu_bus_running;
9551static struct bus_type pmu_bus = {
9552 .name = "event_source",
90826ca7 9553 .dev_groups = pmu_dev_groups,
abe43400
PZ
9554};
9555
9556static void pmu_dev_release(struct device *dev)
9557{
9558 kfree(dev);
9559}
9560
9561static int pmu_dev_alloc(struct pmu *pmu)
9562{
9563 int ret = -ENOMEM;
9564
9565 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
9566 if (!pmu->dev)
9567 goto out;
9568
0c9d42ed 9569 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
9570 device_initialize(pmu->dev);
9571 ret = dev_set_name(pmu->dev, "%s", pmu->name);
9572 if (ret)
9573 goto free_dev;
9574
9575 dev_set_drvdata(pmu->dev, pmu);
9576 pmu->dev->bus = &pmu_bus;
9577 pmu->dev->release = pmu_dev_release;
9578 ret = device_add(pmu->dev);
9579 if (ret)
9580 goto free_dev;
9581
6e855cd4
AS
9582 /* For PMUs with address filters, throw in an extra attribute: */
9583 if (pmu->nr_addr_filters)
9584 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
9585
9586 if (ret)
9587 goto del_dev;
9588
abe43400
PZ
9589out:
9590 return ret;
9591
6e855cd4
AS
9592del_dev:
9593 device_del(pmu->dev);
9594
abe43400
PZ
9595free_dev:
9596 put_device(pmu->dev);
9597 goto out;
9598}
9599
547e9fd7 9600static struct lock_class_key cpuctx_mutex;
facc4307 9601static struct lock_class_key cpuctx_lock;
547e9fd7 9602
03d8e80b 9603int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 9604{
108b02cf 9605 int cpu, ret;
24f1e32c 9606
b0a873eb 9607 mutex_lock(&pmus_lock);
33696fc0
PZ
9608 ret = -ENOMEM;
9609 pmu->pmu_disable_count = alloc_percpu(int);
9610 if (!pmu->pmu_disable_count)
9611 goto unlock;
f29ac756 9612
2e80a82a
PZ
9613 pmu->type = -1;
9614 if (!name)
9615 goto skip_type;
9616 pmu->name = name;
9617
9618 if (type < 0) {
0e9c3be2
TH
9619 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
9620 if (type < 0) {
9621 ret = type;
2e80a82a
PZ
9622 goto free_pdc;
9623 }
9624 }
9625 pmu->type = type;
9626
abe43400
PZ
9627 if (pmu_bus_running) {
9628 ret = pmu_dev_alloc(pmu);
9629 if (ret)
9630 goto free_idr;
9631 }
9632
2e80a82a 9633skip_type:
26657848
PZ
9634 if (pmu->task_ctx_nr == perf_hw_context) {
9635 static int hw_context_taken = 0;
9636
5101ef20
MR
9637 /*
9638 * Other than systems with heterogeneous CPUs, it never makes
9639 * sense for two PMUs to share perf_hw_context. PMUs which are
9640 * uncore must use perf_invalid_context.
9641 */
9642 if (WARN_ON_ONCE(hw_context_taken &&
9643 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
26657848
PZ
9644 pmu->task_ctx_nr = perf_invalid_context;
9645
9646 hw_context_taken = 1;
9647 }
9648
8dc85d54
PZ
9649 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
9650 if (pmu->pmu_cpu_context)
9651 goto got_cpu_context;
f29ac756 9652
c4814202 9653 ret = -ENOMEM;
108b02cf
PZ
9654 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
9655 if (!pmu->pmu_cpu_context)
abe43400 9656 goto free_dev;
f344011c 9657
108b02cf
PZ
9658 for_each_possible_cpu(cpu) {
9659 struct perf_cpu_context *cpuctx;
9660
9661 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 9662 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 9663 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 9664 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 9665 cpuctx->ctx.pmu = pmu;
a63fbed7 9666 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
9e630205 9667
272325c4 9668 __perf_mux_hrtimer_init(cpuctx, cpu);
108b02cf 9669 }
76e1d904 9670
8dc85d54 9671got_cpu_context:
ad5133b7
PZ
9672 if (!pmu->start_txn) {
9673 if (pmu->pmu_enable) {
9674 /*
9675 * If we have pmu_enable/pmu_disable calls, install
 9676 * transaction stubs that use them to try to batch
9677 * hardware accesses.
9678 */
9679 pmu->start_txn = perf_pmu_start_txn;
9680 pmu->commit_txn = perf_pmu_commit_txn;
9681 pmu->cancel_txn = perf_pmu_cancel_txn;
9682 } else {
fbbe0701 9683 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
9684 pmu->commit_txn = perf_pmu_nop_int;
9685 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 9686 }
5c92d124 9687 }
15dbf27c 9688
ad5133b7
PZ
9689 if (!pmu->pmu_enable) {
9690 pmu->pmu_enable = perf_pmu_nop_void;
9691 pmu->pmu_disable = perf_pmu_nop_void;
9692 }
9693
35edc2a5
PZ
9694 if (!pmu->event_idx)
9695 pmu->event_idx = perf_event_idx_default;
9696
b0a873eb 9697 list_add_rcu(&pmu->entry, &pmus);
bed5b25a 9698 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
9699 ret = 0;
9700unlock:
b0a873eb
PZ
9701 mutex_unlock(&pmus_lock);
9702
33696fc0 9703 return ret;
108b02cf 9704
abe43400
PZ
9705free_dev:
9706 device_del(pmu->dev);
9707 put_device(pmu->dev);
9708
2e80a82a
PZ
9709free_idr:
9710 if (pmu->type >= PERF_TYPE_MAX)
9711 idr_remove(&pmu_idr, pmu->type);
9712
108b02cf
PZ
9713free_pdc:
9714 free_percpu(pmu->pmu_disable_count);
9715 goto unlock;
f29ac756 9716}
c464c76e 9717EXPORT_SYMBOL_GPL(perf_pmu_register);
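
For orientation, a skeletal driver-side registration against the interface above might look like the sketch below; every name is made up and the callbacks are stubs, so treat it as an illustration of the required hooks rather than a working PMU:

	static int my_event_init(struct perf_event *event)
	{
		/* only claim events of our dynamically allocated type */
		if (event->attr.type != event->pmu->type)
			return -ENOENT;
		return 0;
	}
	static int  my_add(struct perf_event *event, int flags)   { return 0; }
	static void my_del(struct perf_event *event, int flags)   { }
	static void my_start(struct perf_event *event, int flags) { }
	static void my_stop(struct perf_event *event, int flags)  { }
	static void my_read(struct perf_event *event)             { }

	static struct pmu my_pmu = {
		.task_ctx_nr	= perf_invalid_context,	/* uncore-style PMU */
		.event_init	= my_event_init,
		.add		= my_add,
		.del		= my_del,
		.start		= my_start,
		.stop		= my_stop,
		.read		= my_read,
	};

	static int __init my_pmu_driver_init(void)
	{
		/* type == -1: let the core allocate a dynamic type via the idr */
		return perf_pmu_register(&my_pmu, "my_pmu", -1);
	}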
f29ac756 9718
b0a873eb 9719void perf_pmu_unregister(struct pmu *pmu)
5c92d124 9720{
b0a873eb
PZ
9721 mutex_lock(&pmus_lock);
9722 list_del_rcu(&pmu->entry);
5c92d124 9723
0475f9ea 9724 /*
cde8e884
PZ
9725 * We dereference the pmu list under both SRCU and regular RCU, so
9726 * synchronize against both of those.
0475f9ea 9727 */
b0a873eb 9728 synchronize_srcu(&pmus_srcu);
cde8e884 9729 synchronize_rcu();
d6d020e9 9730
33696fc0 9731 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
9732 if (pmu->type >= PERF_TYPE_MAX)
9733 idr_remove(&pmu_idr, pmu->type);
a9f97721 9734 if (pmu_bus_running) {
0933840a
JO
9735 if (pmu->nr_addr_filters)
9736 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
9737 device_del(pmu->dev);
9738 put_device(pmu->dev);
9739 }
51676957 9740 free_pmu_context(pmu);
a9f97721 9741 mutex_unlock(&pmus_lock);
b0a873eb 9742}
c464c76e 9743EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 9744
cc34b98b
MR
9745static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
9746{
ccd41c86 9747 struct perf_event_context *ctx = NULL;
cc34b98b
MR
9748 int ret;
9749
9750 if (!try_module_get(pmu->module))
9751 return -ENODEV;
ccd41c86 9752
0c7296ca
PZ
9753 /*
9754 * A number of pmu->event_init() methods iterate the sibling_list to,
9755 * for example, validate if the group fits on the PMU. Therefore,
9756 * if this is a sibling event, acquire the ctx->mutex to protect
9757 * the sibling_list.
9758 */
9759 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
8b10c5e2
PZ
9760 /*
9761 * This ctx->mutex can nest when we're called through
9762 * inheritance. See the perf_event_ctx_lock_nested() comment.
9763 */
9764 ctx = perf_event_ctx_lock_nested(event->group_leader,
9765 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
9766 BUG_ON(!ctx);
9767 }
9768
cc34b98b
MR
9769 event->pmu = pmu;
9770 ret = pmu->event_init(event);
ccd41c86
PZ
9771
9772 if (ctx)
9773 perf_event_ctx_unlock(event->group_leader, ctx);
9774
cc34b98b
MR
9775 if (ret)
9776 module_put(pmu->module);
9777
9778 return ret;
9779}
9780
18ab2cd3 9781static struct pmu *perf_init_event(struct perf_event *event)
b0a873eb 9782{
85c617ab 9783 struct pmu *pmu;
b0a873eb 9784 int idx;
940c5b29 9785 int ret;
b0a873eb
PZ
9786
9787 idx = srcu_read_lock(&pmus_srcu);
2e80a82a 9788
40999312
KL
9789 /* Try parent's PMU first: */
9790 if (event->parent && event->parent->pmu) {
9791 pmu = event->parent->pmu;
9792 ret = perf_try_init_event(pmu, event);
9793 if (!ret)
9794 goto unlock;
9795 }
9796
2e80a82a
PZ
9797 rcu_read_lock();
9798 pmu = idr_find(&pmu_idr, event->attr.type);
9799 rcu_read_unlock();
940c5b29 9800 if (pmu) {
cc34b98b 9801 ret = perf_try_init_event(pmu, event);
940c5b29
LM
9802 if (ret)
9803 pmu = ERR_PTR(ret);
2e80a82a 9804 goto unlock;
940c5b29 9805 }
2e80a82a 9806
b0a873eb 9807 list_for_each_entry_rcu(pmu, &pmus, entry) {
cc34b98b 9808 ret = perf_try_init_event(pmu, event);
b0a873eb 9809 if (!ret)
e5f4d339 9810 goto unlock;
76e1d904 9811
b0a873eb
PZ
9812 if (ret != -ENOENT) {
9813 pmu = ERR_PTR(ret);
e5f4d339 9814 goto unlock;
f344011c 9815 }
5c92d124 9816 }
e5f4d339
PZ
9817 pmu = ERR_PTR(-ENOENT);
9818unlock:
b0a873eb 9819 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 9820
4aeb0b42 9821 return pmu;
5c92d124
IM
9822}
9823
f2fb6bef
KL
9824static void attach_sb_event(struct perf_event *event)
9825{
9826 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
9827
9828 raw_spin_lock(&pel->lock);
9829 list_add_rcu(&event->sb_list, &pel->list);
9830 raw_spin_unlock(&pel->lock);
9831}
9832
aab5b71e
PZ
9833/*
9834 * We keep a list of all !task (and therefore per-cpu) events
9835 * that need to receive side-band records.
9836 *
9837 * This avoids having to scan all the various PMU per-cpu contexts
9838 * looking for them.
9839 */
f2fb6bef
KL
9840static void account_pmu_sb_event(struct perf_event *event)
9841{
a4f144eb 9842 if (is_sb_event(event))
f2fb6bef
KL
9843 attach_sb_event(event);
9844}
9845
4beb31f3
FW
9846static void account_event_cpu(struct perf_event *event, int cpu)
9847{
9848 if (event->parent)
9849 return;
9850
4beb31f3
FW
9851 if (is_cgroup_event(event))
9852 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
9853}
9854
555e0c1e
FW
9855/* Freq events need the tick to stay alive (see perf_event_task_tick). */
9856static void account_freq_event_nohz(void)
9857{
9858#ifdef CONFIG_NO_HZ_FULL
9859 /* Lock so we don't race with concurrent unaccount */
9860 spin_lock(&nr_freq_lock);
9861 if (atomic_inc_return(&nr_freq_events) == 1)
9862 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
9863 spin_unlock(&nr_freq_lock);
9864#endif
9865}
9866
9867static void account_freq_event(void)
9868{
9869 if (tick_nohz_full_enabled())
9870 account_freq_event_nohz();
9871 else
9872 atomic_inc(&nr_freq_events);
9873}
9874
9875
766d6c07
FW
9876static void account_event(struct perf_event *event)
9877{
25432ae9
PZ
9878 bool inc = false;
9879
4beb31f3
FW
9880 if (event->parent)
9881 return;
9882
766d6c07 9883 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 9884 inc = true;
766d6c07
FW
9885 if (event->attr.mmap || event->attr.mmap_data)
9886 atomic_inc(&nr_mmap_events);
9887 if (event->attr.comm)
9888 atomic_inc(&nr_comm_events);
e4222673
HB
9889 if (event->attr.namespaces)
9890 atomic_inc(&nr_namespaces_events);
766d6c07
FW
9891 if (event->attr.task)
9892 atomic_inc(&nr_task_events);
555e0c1e
FW
9893 if (event->attr.freq)
9894 account_freq_event();
45ac1403
AH
9895 if (event->attr.context_switch) {
9896 atomic_inc(&nr_switch_events);
25432ae9 9897 inc = true;
45ac1403 9898 }
4beb31f3 9899 if (has_branch_stack(event))
25432ae9 9900 inc = true;
4beb31f3 9901 if (is_cgroup_event(event))
25432ae9
PZ
9902 inc = true;
9903
9107c89e 9904 if (inc) {
5bce9db1
AS
9905 /*
9906 * We need the mutex here because static_branch_enable()
9907 * must complete *before* the perf_sched_count increment
9908 * becomes visible.
9909 */
9107c89e
PZ
9910 if (atomic_inc_not_zero(&perf_sched_count))
9911 goto enabled;
9912
9913 mutex_lock(&perf_sched_mutex);
9914 if (!atomic_read(&perf_sched_count)) {
9915 static_branch_enable(&perf_sched_events);
9916 /*
 9917 * Guarantee that all CPUs observe the key change and
9918 * call the perf scheduling hooks before proceeding to
9919 * install events that need them.
9920 */
0809d954 9921 synchronize_rcu();
9107c89e
PZ
9922 }
9923 /*
 9924 * Now that we have waited for the RCU synchronization above, allow
 9925 * further increments to bypass the mutex.
9926 */
9927 atomic_inc(&perf_sched_count);
9928 mutex_unlock(&perf_sched_mutex);
9929 }
9930enabled:
4beb31f3
FW
9931
9932 account_event_cpu(event, event->cpu);
f2fb6bef
KL
9933
9934 account_pmu_sb_event(event);
766d6c07
FW
9935}
9936
0793a61d 9937/*
788faab7 9938 * Allocate and initialize an event structure
0793a61d 9939 */
cdd6c482 9940static struct perf_event *
c3f00c70 9941perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
9942 struct task_struct *task,
9943 struct perf_event *group_leader,
9944 struct perf_event *parent_event,
4dc0da86 9945 perf_overflow_handler_t overflow_handler,
79dff51e 9946 void *context, int cgroup_fd)
0793a61d 9947{
51b0fe39 9948 struct pmu *pmu;
cdd6c482
IM
9949 struct perf_event *event;
9950 struct hw_perf_event *hwc;
90983b16 9951 long err = -EINVAL;
0793a61d 9952
66832eb4
ON
9953 if ((unsigned)cpu >= nr_cpu_ids) {
9954 if (!task || cpu != -1)
9955 return ERR_PTR(-EINVAL);
9956 }
9957
c3f00c70 9958 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 9959 if (!event)
d5d2bc0d 9960 return ERR_PTR(-ENOMEM);
0793a61d 9961
04289bb9 9962 /*
cdd6c482 9963 * Single events are their own group leaders, with an
04289bb9
IM
9964 * empty sibling list:
9965 */
9966 if (!group_leader)
cdd6c482 9967 group_leader = event;
04289bb9 9968
cdd6c482
IM
9969 mutex_init(&event->child_mutex);
9970 INIT_LIST_HEAD(&event->child_list);
fccc714b 9971
cdd6c482
IM
9972 INIT_LIST_HEAD(&event->event_entry);
9973 INIT_LIST_HEAD(&event->sibling_list);
6668128a 9974 INIT_LIST_HEAD(&event->active_list);
8e1a2031 9975 init_event_group(event);
10c6db11 9976 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 9977 INIT_LIST_HEAD(&event->active_entry);
375637bc 9978 INIT_LIST_HEAD(&event->addr_filters.list);
f3ae75de
SE
9979 INIT_HLIST_NODE(&event->hlist_entry);
9980
10c6db11 9981
cdd6c482 9982 init_waitqueue_head(&event->waitq);
e360adbe 9983 init_irq_work(&event->pending, perf_pending_event);
0793a61d 9984
cdd6c482 9985 mutex_init(&event->mmap_mutex);
375637bc 9986 raw_spin_lock_init(&event->addr_filters.lock);
7b732a75 9987
a6fa941d 9988 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
9989 event->cpu = cpu;
9990 event->attr = *attr;
9991 event->group_leader = group_leader;
9992 event->pmu = NULL;
cdd6c482 9993 event->oncpu = -1;
a96bbc16 9994
cdd6c482 9995 event->parent = parent_event;
b84fbc9f 9996
17cf22c3 9997 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 9998 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 9999
cdd6c482 10000 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 10001
d580ff86
PZ
10002 if (task) {
10003 event->attach_state = PERF_ATTACH_TASK;
d580ff86 10004 /*
50f16a8b
PZ
10005 * XXX pmu::event_init needs to know what task to account to
10006 * and we cannot use the ctx information because we need the
10007 * pmu before we get a ctx.
d580ff86 10008 */
621b6d2e 10009 get_task_struct(task);
50f16a8b 10010 event->hw.target = task;
d580ff86
PZ
10011 }
10012
34f43927
PZ
10013 event->clock = &local_clock;
10014 if (parent_event)
10015 event->clock = parent_event->clock;
10016
4dc0da86 10017 if (!overflow_handler && parent_event) {
b326e956 10018 overflow_handler = parent_event->overflow_handler;
4dc0da86 10019 context = parent_event->overflow_handler_context;
f1e4ba5b 10020#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
aa6a5f3c
AS
10021 if (overflow_handler == bpf_overflow_handler) {
10022 struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
10023
10024 if (IS_ERR(prog)) {
10025 err = PTR_ERR(prog);
10026 goto err_ns;
10027 }
10028 event->prog = prog;
10029 event->orig_overflow_handler =
10030 parent_event->orig_overflow_handler;
10031 }
10032#endif
4dc0da86 10033 }
66832eb4 10034
1879445d
WN
10035 if (overflow_handler) {
10036 event->overflow_handler = overflow_handler;
10037 event->overflow_handler_context = context;
9ecda41a
WN
10038 } else if (is_write_backward(event)){
10039 event->overflow_handler = perf_event_output_backward;
10040 event->overflow_handler_context = NULL;
1879445d 10041 } else {
9ecda41a 10042 event->overflow_handler = perf_event_output_forward;
1879445d
WN
10043 event->overflow_handler_context = NULL;
10044 }
97eaf530 10045
0231bb53 10046 perf_event__state_init(event);
a86ed508 10047
4aeb0b42 10048 pmu = NULL;
b8e83514 10049
cdd6c482 10050 hwc = &event->hw;
bd2b5b12 10051 hwc->sample_period = attr->sample_period;
0d48696f 10052 if (attr->freq && attr->sample_freq)
bd2b5b12 10053 hwc->sample_period = 1;
eced1dfc 10054 hwc->last_period = hwc->sample_period;
bd2b5b12 10055
e7850595 10056 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 10057
2023b359 10058 /*
ba5213ae
PZ
10059 * We currently do not support PERF_SAMPLE_READ on inherited events.
10060 * See perf_output_read().
2023b359 10061 */
ba5213ae 10062 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
90983b16 10063 goto err_ns;
a46a2300
YZ
10064
10065 if (!has_branch_stack(event))
10066 event->attr.branch_sample_type = 0;
2023b359 10067
79dff51e
MF
10068 if (cgroup_fd != -1) {
10069 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
10070 if (err)
10071 goto err_ns;
10072 }
10073
b0a873eb 10074 pmu = perf_init_event(event);
85c617ab 10075 if (IS_ERR(pmu)) {
4aeb0b42 10076 err = PTR_ERR(pmu);
90983b16 10077 goto err_ns;
621a01ea 10078 }
d5d2bc0d 10079
bed5b25a
AS
10080 err = exclusive_event_init(event);
10081 if (err)
10082 goto err_pmu;
10083
375637bc
AS
10084 if (has_addr_filter(event)) {
10085 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
10086 sizeof(unsigned long),
10087 GFP_KERNEL);
36cc2b92
DC
10088 if (!event->addr_filters_offs) {
10089 err = -ENOMEM;
375637bc 10090 goto err_per_task;
36cc2b92 10091 }
375637bc
AS
10092
10093 /* force hw sync on the address filters */
10094 event->addr_filters_gen = 1;
10095 }
10096
cdd6c482 10097 if (!event->parent) {
927c7a9e 10098 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
97c79a38 10099 err = get_callchain_buffers(attr->sample_max_stack);
90983b16 10100 if (err)
375637bc 10101 goto err_addr_filters;
d010b332 10102 }
f344011c 10103 }
9ee318a7 10104
927a5570
AS
10105 /* symmetric to unaccount_event() in _free_event() */
10106 account_event(event);
10107
cdd6c482 10108 return event;
90983b16 10109
375637bc
AS
10110err_addr_filters:
10111 kfree(event->addr_filters_offs);
10112
bed5b25a
AS
10113err_per_task:
10114 exclusive_event_destroy(event);
10115
90983b16
FW
10116err_pmu:
10117 if (event->destroy)
10118 event->destroy(event);
c464c76e 10119 module_put(pmu->module);
90983b16 10120err_ns:
79dff51e
MF
10121 if (is_cgroup_event(event))
10122 perf_detach_cgroup(event);
90983b16
FW
10123 if (event->ns)
10124 put_pid_ns(event->ns);
621b6d2e
PB
10125 if (event->hw.target)
10126 put_task_struct(event->hw.target);
90983b16
FW
10127 kfree(event);
10128
10129 return ERR_PTR(err);
0793a61d
TG
10130}
10131
cdd6c482
IM
10132static int perf_copy_attr(struct perf_event_attr __user *uattr,
10133 struct perf_event_attr *attr)
974802ea 10134{
974802ea 10135 u32 size;
cdf8073d 10136 int ret;
974802ea
PZ
10137
10138 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
10139 return -EFAULT;
10140
10141 /*
10142 * zero the full structure, so that a short copy will be nice.
10143 */
10144 memset(attr, 0, sizeof(*attr));
10145
10146 ret = get_user(size, &uattr->size);
10147 if (ret)
10148 return ret;
10149
10150 if (size > PAGE_SIZE) /* silly large */
10151 goto err_size;
10152
10153 if (!size) /* abi compat */
10154 size = PERF_ATTR_SIZE_VER0;
10155
10156 if (size < PERF_ATTR_SIZE_VER0)
10157 goto err_size;
10158
10159 /*
10160 * If we're handed a bigger struct than we know of,
cdf8073d
IS
10161 * ensure all the unknown bits are 0 - i.e. new
10162 * user-space does not rely on any kernel feature
 10163 * extensions we don't know about yet.
974802ea
PZ
10164 */
10165 if (size > sizeof(*attr)) {
cdf8073d
IS
10166 unsigned char __user *addr;
10167 unsigned char __user *end;
10168 unsigned char val;
974802ea 10169
cdf8073d
IS
10170 addr = (void __user *)uattr + sizeof(*attr);
10171 end = (void __user *)uattr + size;
974802ea 10172
cdf8073d 10173 for (; addr < end; addr++) {
974802ea
PZ
10174 ret = get_user(val, addr);
10175 if (ret)
10176 return ret;
10177 if (val)
10178 goto err_size;
10179 }
b3e62e35 10180 size = sizeof(*attr);
974802ea
PZ
10181 }
10182
10183 ret = copy_from_user(attr, uattr, size);
10184 if (ret)
10185 return -EFAULT;
10186
f12f42ac
MX
10187 attr->size = size;
10188
cd757645 10189 if (attr->__reserved_1)
974802ea
PZ
10190 return -EINVAL;
10191
10192 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
10193 return -EINVAL;
10194
10195 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
10196 return -EINVAL;
10197
bce38cd5
SE
10198 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
10199 u64 mask = attr->branch_sample_type;
10200
10201 /* only using defined bits */
10202 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
10203 return -EINVAL;
10204
10205 /* at least one branch bit must be set */
10206 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
10207 return -EINVAL;
10208
bce38cd5
SE
10209 /* propagate priv level, when not set for branch */
10210 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
10211
10212 /* exclude_kernel checked on syscall entry */
10213 if (!attr->exclude_kernel)
10214 mask |= PERF_SAMPLE_BRANCH_KERNEL;
10215
10216 if (!attr->exclude_user)
10217 mask |= PERF_SAMPLE_BRANCH_USER;
10218
10219 if (!attr->exclude_hv)
10220 mask |= PERF_SAMPLE_BRANCH_HV;
10221 /*
10222 * adjust user setting (for HW filter setup)
10223 */
10224 attr->branch_sample_type = mask;
10225 }
e712209a
SE
10226 /* privileged levels capture (kernel, hv): check permissions */
10227 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
10228 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
10229 return -EACCES;
bce38cd5 10230 }
4018994f 10231
c5ebcedb 10232 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 10233 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
10234 if (ret)
10235 return ret;
10236 }
10237
10238 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
10239 if (!arch_perf_have_user_stack_dump())
10240 return -ENOSYS;
10241
10242 /*
10243 * We have __u32 type for the size, but so far
10244 * we can only use __u16 as maximum due to the
10245 * __u16 sample size limit.
10246 */
10247 if (attr->sample_stack_user >= USHRT_MAX)
78b562fb 10248 return -EINVAL;
c5ebcedb 10249 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
78b562fb 10250 return -EINVAL;
c5ebcedb 10251 }
4018994f 10252
5f970521
JO
10253 if (!attr->sample_max_stack)
10254 attr->sample_max_stack = sysctl_perf_event_max_stack;
10255
60e2364e
SE
10256 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
10257 ret = perf_reg_validate(attr->sample_regs_intr);
974802ea
PZ
10258out:
10259 return ret;
10260
10261err_size:
10262 put_user(sizeof(*attr), &uattr->size);
10263 ret = -E2BIG;
10264 goto out;
10265}
10266
ac9721f3
PZ
10267static int
10268perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 10269{
b69cf536 10270 struct ring_buffer *rb = NULL;
a4be7c27
PZ
10271 int ret = -EINVAL;
10272
ac9721f3 10273 if (!output_event)
a4be7c27
PZ
10274 goto set;
10275
ac9721f3
PZ
10276 /* don't allow circular references */
10277 if (event == output_event)
a4be7c27
PZ
10278 goto out;
10279
0f139300
PZ
10280 /*
10281 * Don't allow cross-cpu buffers
10282 */
10283 if (output_event->cpu != event->cpu)
10284 goto out;
10285
10286 /*
76369139 10287 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
10288 */
10289 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
10290 goto out;
10291
34f43927
PZ
10292 /*
10293 * Mixing clocks in the same buffer is trouble you don't need.
10294 */
10295 if (output_event->clock != event->clock)
10296 goto out;
10297
9ecda41a
WN
10298 /*
10299 * Either writing ring buffer from beginning or from end.
10300 * Mixing is not allowed.
10301 */
10302 if (is_write_backward(output_event) != is_write_backward(event))
10303 goto out;
10304
45bfb2e5
PZ
10305 /*
10306 * If both events generate aux data, they must be on the same PMU
10307 */
10308 if (has_aux(event) && has_aux(output_event) &&
10309 event->pmu != output_event->pmu)
10310 goto out;
10311
a4be7c27 10312set:
cdd6c482 10313 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
10314 /* Can't redirect output if we've got an active mmap() */
10315 if (atomic_read(&event->mmap_count))
10316 goto unlock;
a4be7c27 10317
ac9721f3 10318 if (output_event) {
76369139
FW
10319 /* get the rb we want to redirect to */
10320 rb = ring_buffer_get(output_event);
10321 if (!rb)
ac9721f3 10322 goto unlock;
a4be7c27
PZ
10323 }
10324
b69cf536 10325 ring_buffer_attach(event, rb);
9bb5d40c 10326
a4be7c27 10327 ret = 0;
ac9721f3
PZ
10328unlock:
10329 mutex_unlock(&event->mmap_mutex);
10330
a4be7c27 10331out:
a4be7c27
PZ
10332 return ret;
10333}
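
Userspace drives this either with PERF_FLAG_FD_OUTPUT at perf_event_open() time or, more commonly, with the PERF_EVENT_IOC_SET_OUTPUT ioctl afterwards. A hypothetical sketch, where fd1 already owns an mmap()ed ring buffer and fd2 is another event on the same CPU:

	if (ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1) < 0)
		perror("PERF_EVENT_IOC_SET_OUTPUT");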
10334
f63a8daa
PZ
10335static void mutex_lock_double(struct mutex *a, struct mutex *b)
10336{
10337 if (b < a)
10338 swap(a, b);
10339
10340 mutex_lock(a);
10341 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
10342}
10343
34f43927
PZ
10344static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
10345{
10346 bool nmi_safe = false;
10347
10348 switch (clk_id) {
10349 case CLOCK_MONOTONIC:
10350 event->clock = &ktime_get_mono_fast_ns;
10351 nmi_safe = true;
10352 break;
10353
10354 case CLOCK_MONOTONIC_RAW:
10355 event->clock = &ktime_get_raw_fast_ns;
10356 nmi_safe = true;
10357 break;
10358
10359 case CLOCK_REALTIME:
10360 event->clock = &ktime_get_real_ns;
10361 break;
10362
10363 case CLOCK_BOOTTIME:
10364 event->clock = &ktime_get_boot_ns;
10365 break;
10366
10367 case CLOCK_TAI:
10368 event->clock = &ktime_get_tai_ns;
10369 break;
10370
10371 default:
10372 return -EINVAL;
10373 }
10374
10375 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
10376 return -EINVAL;
10377
10378 return 0;
10379}
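
The userspace side of this is the use_clockid/clockid pair in perf_event_attr, consumed by sys_perf_event_open() further down; an illustrative fragment:

	attr.use_clockid = 1;
	attr.clockid	 = CLOCK_MONOTONIC_RAW;	/* NMI-safe per the table above */
	/*
	 * CLOCK_REALTIME, CLOCK_BOOTTIME and CLOCK_TAI are only accepted for
	 * PMUs advertising PERF_PMU_CAP_NO_NMI, as the !nmi_safe check shows.
	 */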
10380
321027c1
PZ
10381/*
10382 * Variation on perf_event_ctx_lock_nested(), except we take two context
10383 * mutexes.
10384 */
10385static struct perf_event_context *
10386__perf_event_ctx_lock_double(struct perf_event *group_leader,
10387 struct perf_event_context *ctx)
10388{
10389 struct perf_event_context *gctx;
10390
10391again:
10392 rcu_read_lock();
10393 gctx = READ_ONCE(group_leader->ctx);
10394 if (!atomic_inc_not_zero(&gctx->refcount)) {
10395 rcu_read_unlock();
10396 goto again;
10397 }
10398 rcu_read_unlock();
10399
10400 mutex_lock_double(&gctx->mutex, &ctx->mutex);
10401
10402 if (group_leader->ctx != gctx) {
10403 mutex_unlock(&ctx->mutex);
10404 mutex_unlock(&gctx->mutex);
10405 put_ctx(gctx);
10406 goto again;
10407 }
10408
10409 return gctx;
10410}
10411
0793a61d 10412/**
cdd6c482 10413 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 10414 *
cdd6c482 10415 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 10416 * @pid: target pid
9f66a381 10417 * @cpu: target cpu
cdd6c482 10418 * @group_fd: group leader event fd
0793a61d 10419 */
cdd6c482
IM
10420SYSCALL_DEFINE5(perf_event_open,
10421 struct perf_event_attr __user *, attr_uptr,
2743a5b0 10422 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 10423{
b04243ef
PZ
10424 struct perf_event *group_leader = NULL, *output_event = NULL;
10425 struct perf_event *event, *sibling;
cdd6c482 10426 struct perf_event_attr attr;
f63a8daa 10427 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 10428 struct file *event_file = NULL;
2903ff01 10429 struct fd group = {NULL, 0};
38a81da2 10430 struct task_struct *task = NULL;
89a1e187 10431 struct pmu *pmu;
ea635c64 10432 int event_fd;
b04243ef 10433 int move_group = 0;
dc86cabe 10434 int err;
a21b0b35 10435 int f_flags = O_RDWR;
79dff51e 10436 int cgroup_fd = -1;
0793a61d 10437
2743a5b0 10438 /* for future expandability... */
e5d1367f 10439 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
10440 return -EINVAL;
10441
dc86cabe
IM
10442 err = perf_copy_attr(attr_uptr, &attr);
10443 if (err)
10444 return err;
eab656ae 10445
0764771d
PZ
10446 if (!attr.exclude_kernel) {
10447 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
10448 return -EACCES;
10449 }
10450
e4222673
HB
10451 if (attr.namespaces) {
10452 if (!capable(CAP_SYS_ADMIN))
10453 return -EACCES;
10454 }
10455
df58ab24 10456 if (attr.freq) {
cdd6c482 10457 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 10458 return -EINVAL;
0819b2e3
PZ
10459 } else {
10460 if (attr.sample_period & (1ULL << 63))
10461 return -EINVAL;
df58ab24
PZ
10462 }
10463
fc7ce9c7
KL
10464 /* Only privileged users can get physical addresses */
10465 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR) &&
10466 perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
10467 return -EACCES;
10468
e5d1367f
SE
10469 /*
10470 * In cgroup mode, the pid argument is used to pass the fd
10471 * opened to the cgroup directory in cgroupfs. The cpu argument
10472 * designates the cpu on which to monitor threads from that
10473 * cgroup.
10474 */
10475 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
10476 return -EINVAL;
10477
a21b0b35
YD
10478 if (flags & PERF_FLAG_FD_CLOEXEC)
10479 f_flags |= O_CLOEXEC;
10480
10481 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
10482 if (event_fd < 0)
10483 return event_fd;
10484
ac9721f3 10485 if (group_fd != -1) {
2903ff01
AV
10486 err = perf_fget_light(group_fd, &group);
10487 if (err)
d14b12d7 10488 goto err_fd;
2903ff01 10489 group_leader = group.file->private_data;
ac9721f3
PZ
10490 if (flags & PERF_FLAG_FD_OUTPUT)
10491 output_event = group_leader;
10492 if (flags & PERF_FLAG_FD_NO_GROUP)
10493 group_leader = NULL;
10494 }
10495
e5d1367f 10496 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
10497 task = find_lively_task_by_vpid(pid);
10498 if (IS_ERR(task)) {
10499 err = PTR_ERR(task);
10500 goto err_group_fd;
10501 }
10502 }
10503
1f4ee503
PZ
10504 if (task && group_leader &&
10505 group_leader->attr.inherit != attr.inherit) {
10506 err = -EINVAL;
10507 goto err_task;
10508 }
10509
79c9ce57
PZ
10510 if (task) {
10511 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
10512 if (err)
e5aeee51 10513 goto err_task;
79c9ce57
PZ
10514
10515 /*
10516 * Reuse ptrace permission checks for now.
10517 *
10518 * We must hold cred_guard_mutex across this and any potential
10519 * perf_install_in_context() call for this new event to
10520 * serialize against exec() altering our credentials (and the
10521 * perf_event_exit_task() that could imply).
10522 */
10523 err = -EACCES;
10524 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
10525 goto err_cred;
10526 }
10527
79dff51e
MF
10528 if (flags & PERF_FLAG_PID_CGROUP)
10529 cgroup_fd = pid;
10530
4dc0da86 10531 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 10532 NULL, NULL, cgroup_fd);
d14b12d7
SE
10533 if (IS_ERR(event)) {
10534 err = PTR_ERR(event);
79c9ce57 10535 goto err_cred;
d14b12d7
SE
10536 }
10537
53b25335
VW
10538 if (is_sampling_event(event)) {
10539 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
a1396555 10540 err = -EOPNOTSUPP;
53b25335
VW
10541 goto err_alloc;
10542 }
10543 }
10544
89a1e187
PZ
10545 /*
10546 * Special case software events and allow them to be part of
10547 * any hardware group.
10548 */
10549 pmu = event->pmu;
b04243ef 10550
34f43927
PZ
10551 if (attr.use_clockid) {
10552 err = perf_event_set_clock(event, attr.clockid);
10553 if (err)
10554 goto err_alloc;
10555 }
10556
4ff6a8de
DCC
10557 if (pmu->task_ctx_nr == perf_sw_context)
10558 event->event_caps |= PERF_EV_CAP_SOFTWARE;
10559
a1150c20
SL
10560 if (group_leader) {
10561 if (is_software_event(event) &&
10562 !in_software_context(group_leader)) {
b04243ef 10563 /*
a1150c20
SL
10564 * If the event is a sw event, but the group_leader
10565 * is on hw context.
b04243ef 10566 *
a1150c20
SL
10567 * Allow the addition of software events to hw
10568 * groups, this is safe because software events
10569 * never fail to schedule.
b04243ef 10570 */
a1150c20
SL
10571 pmu = group_leader->ctx->pmu;
10572 } else if (!is_software_event(event) &&
10573 is_software_event(group_leader) &&
4ff6a8de 10574 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
b04243ef
PZ
10575 /*
10576 * In case the group is a pure software group, and we
10577 * try to add a hardware event, move the whole group to
10578 * the hardware context.
10579 */
10580 move_group = 1;
10581 }
10582 }
89a1e187
PZ
10583
10584 /*
10585 * Get the target context (task or percpu):
10586 */
4af57ef2 10587 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
10588 if (IS_ERR(ctx)) {
10589 err = PTR_ERR(ctx);
c6be5a5c 10590 goto err_alloc;
89a1e187
PZ
10591 }
10592
bed5b25a
AS
10593 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
10594 err = -EBUSY;
10595 goto err_context;
10596 }
10597
ccff286d 10598 /*
cdd6c482 10599 * Look up the group leader (we will attach this event to it):
04289bb9 10600 */
ac9721f3 10601 if (group_leader) {
dc86cabe 10602 err = -EINVAL;
04289bb9 10603
04289bb9 10604 /*
ccff286d
IM
10605 * Do not allow a recursive hierarchy (this new sibling
10606 * becoming part of another group-sibling):
10607 */
10608 if (group_leader->group_leader != group_leader)
c3f00c70 10609 goto err_context;
34f43927
PZ
10610
10611 /* All events in a group should have the same clock */
10612 if (group_leader->clock != event->clock)
10613 goto err_context;
10614
ccff286d 10615 /*
64aee2a9
MR
10616 * Make sure we're both events for the same CPU;
10617 * grouping events for different CPUs is broken; since
10618 * you can never concurrently schedule them anyhow.
04289bb9 10619 */
64aee2a9
MR
10620 if (group_leader->cpu != event->cpu)
10621 goto err_context;
c3c87e77 10622
64aee2a9
MR
10623 /*
10624 * Make sure we're both on the same task, or both
10625 * per-CPU events.
10626 */
10627 if (group_leader->ctx->task != ctx->task)
10628 goto err_context;
10629
10630 /*
10631 * Do not allow to attach to a group in a different task
10632 * or CPU context. If we're moving SW events, we'll fix
10633 * this up later, so allow that.
10634 */
10635 if (!move_group && group_leader->ctx != ctx)
10636 goto err_context;
b04243ef 10637
3b6f9e5c
PM
10638 /*
10639 * Only a group leader can be exclusive or pinned
10640 */
0d48696f 10641 if (attr.exclusive || attr.pinned)
c3f00c70 10642 goto err_context;
ac9721f3
PZ
10643 }
10644
10645 if (output_event) {
10646 err = perf_event_set_output(event, output_event);
10647 if (err)
c3f00c70 10648 goto err_context;
ac9721f3 10649 }
0793a61d 10650
a21b0b35
YD
10651 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
10652 f_flags);
ea635c64
AV
10653 if (IS_ERR(event_file)) {
10654 err = PTR_ERR(event_file);
201c2f85 10655 event_file = NULL;
c3f00c70 10656 goto err_context;
ea635c64 10657 }
9b51f66d 10658
b04243ef 10659 if (move_group) {
321027c1
PZ
10660 gctx = __perf_event_ctx_lock_double(group_leader, ctx);
10661
84c4e620
PZ
10662 if (gctx->task == TASK_TOMBSTONE) {
10663 err = -ESRCH;
10664 goto err_locked;
10665 }
321027c1
PZ
10666
10667 /*
10668 * Check if we raced against another sys_perf_event_open() call
10669 * moving the software group underneath us.
10670 */
10671 if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
10672 /*
10673 * If someone moved the group out from under us, check
10674 * if this new event wound up on the same ctx, if so
 10675 * it's the regular !move_group case, otherwise fail.
10676 */
10677 if (gctx != ctx) {
10678 err = -EINVAL;
10679 goto err_locked;
10680 } else {
10681 perf_event_ctx_unlock(group_leader, gctx);
10682 move_group = 0;
10683 }
10684 }
f55fc2a5
PZ
10685 } else {
10686 mutex_lock(&ctx->mutex);
10687 }
10688
84c4e620
PZ
10689 if (ctx->task == TASK_TOMBSTONE) {
10690 err = -ESRCH;
10691 goto err_locked;
10692 }
10693
a723968c
PZ
10694 if (!perf_event_validate_size(event)) {
10695 err = -E2BIG;
10696 goto err_locked;
10697 }
10698
a63fbed7
TG
10699 if (!task) {
10700 /*
10701 * Check if the @cpu we're creating an event for is online.
10702 *
10703 * We use the perf_cpu_context::ctx::mutex to serialize against
10704 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10705 */
10706 struct perf_cpu_context *cpuctx =
10707 container_of(ctx, struct perf_cpu_context, ctx);
10708
10709 if (!cpuctx->online) {
10710 err = -ENODEV;
10711 goto err_locked;
10712 }
10713 }
10714
10715
f55fc2a5
PZ
10716 /*
10717 * Must be under the same ctx::mutex as perf_install_in_context(),
10718 * because we need to serialize with concurrent event creation.
10719 */
10720 if (!exclusive_event_installable(event, ctx)) {
10721 /* exclusive and group stuff are assumed mutually exclusive */
10722 WARN_ON_ONCE(move_group);
f63a8daa 10723
f55fc2a5
PZ
10724 err = -EBUSY;
10725 goto err_locked;
10726 }
f63a8daa 10727
f55fc2a5
PZ
10728 WARN_ON_ONCE(ctx->parent_ctx);
10729
79c9ce57
PZ
10730 /*
 10731 * This is the point of no return; we cannot fail hereafter. This is
10732 * where we start modifying current state.
10733 */
10734
f55fc2a5 10735 if (move_group) {
f63a8daa
PZ
10736 /*
10737 * See perf_event_ctx_lock() for comments on the details
10738 * of swizzling perf_event::ctx.
10739 */
45a0e07a 10740 perf_remove_from_context(group_leader, 0);
279b5165 10741 put_ctx(gctx);
0231bb53 10742
edb39592 10743 for_each_sibling_event(sibling, group_leader) {
45a0e07a 10744 perf_remove_from_context(sibling, 0);
b04243ef
PZ
10745 put_ctx(gctx);
10746 }
b04243ef 10747
f63a8daa
PZ
10748 /*
10749 * Wait for everybody to stop referencing the events through
 10750 * the old lists, before installing them on the new lists.
10751 */
0cda4c02 10752 synchronize_rcu();
f63a8daa 10753
8f95b435
PZI
10754 /*
10755 * Install the group siblings before the group leader.
10756 *
 10757 * Because a group leader will try to install the entire group
 10758 * (through the sibling list, which is still intact), we can
10759 * end up with siblings installed in the wrong context.
10760 *
10761 * By installing siblings first we NO-OP because they're not
10762 * reachable through the group lists.
10763 */
edb39592 10764 for_each_sibling_event(sibling, group_leader) {
8f95b435 10765 perf_event__state_init(sibling);
9fc81d87 10766 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
10767 get_ctx(ctx);
10768 }
8f95b435
PZI
10769
10770 /*
 10771 * Removing from the context ends up with a disabled
 10772 * event. What we want here is an event in the initial
 10773 * startup state, ready to be added into the new context.
10774 */
10775 perf_event__state_init(group_leader);
10776 perf_install_in_context(ctx, group_leader, group_leader->cpu);
10777 get_ctx(ctx);
bed5b25a
AS
10778 }
10779
f73e22ab
PZ
10780 /*
10781 * Precalculate sample_data sizes; do while holding ctx::mutex such
10782 * that we're serialized against further additions and before
10783 * perf_install_in_context() which is the point the event is active and
10784 * can use these values.
10785 */
10786 perf_event__header_size(event);
10787 perf_event__id_header_size(event);
10788
78cd2c74
PZ
10789 event->owner = current;
10790
e2d37cd2 10791 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 10792 perf_unpin_context(ctx);
f63a8daa 10793
f55fc2a5 10794 if (move_group)
321027c1 10795 perf_event_ctx_unlock(group_leader, gctx);
d859e29f 10796 mutex_unlock(&ctx->mutex);
9b51f66d 10797
79c9ce57
PZ
10798 if (task) {
10799 mutex_unlock(&task->signal->cred_guard_mutex);
10800 put_task_struct(task);
10801 }
10802
cdd6c482
IM
10803 mutex_lock(&current->perf_event_mutex);
10804 list_add_tail(&event->owner_entry, &current->perf_event_list);
10805 mutex_unlock(&current->perf_event_mutex);
082ff5a2 10806
8a49542c
PZ
10807 /*
10808 * Drop the reference on the group_event after placing the
10809 * new event on the sibling_list. This ensures destruction
10810 * of the group leader will find the pointer to itself in
10811 * perf_group_detach().
10812 */
2903ff01 10813 fdput(group);
ea635c64
AV
10814 fd_install(event_fd, event_file);
10815 return event_fd;
0793a61d 10816
f55fc2a5
PZ
10817err_locked:
10818 if (move_group)
321027c1 10819 perf_event_ctx_unlock(group_leader, gctx);
f55fc2a5
PZ
10820 mutex_unlock(&ctx->mutex);
10821/* err_file: */
10822 fput(event_file);
c3f00c70 10823err_context:
fe4b04fa 10824 perf_unpin_context(ctx);
ea635c64 10825 put_ctx(ctx);
c6be5a5c 10826err_alloc:
13005627
PZ
10827 /*
10828 * If event_file is set, the fput() above will have called ->release()
10829 * and that will take care of freeing the event.
10830 */
10831 if (!event_file)
10832 free_event(event);
79c9ce57
PZ
10833err_cred:
10834 if (task)
10835 mutex_unlock(&task->signal->cred_guard_mutex);
1f4ee503 10836err_task:
e7d0bc04
PZ
10837 if (task)
10838 put_task_struct(task);
89a1e187 10839err_group_fd:
2903ff01 10840 fdput(group);
ea635c64
AV
10841err_fd:
10842 put_unused_fd(event_fd);
dc86cabe 10843 return err;
0793a61d
TG
10844}
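
A hypothetical userspace sketch of the syscall above: create a group leader, then a sibling attached to it through group_fd (both attr structures are assumed to be filled in elsewhere):

	int leader = syscall(__NR_perf_event_open, &leader_attr,
			     0 /* pid: self */, -1 /* any cpu */,
			     -1 /* no group */, PERF_FLAG_FD_CLOEXEC);
	int sibling = syscall(__NR_perf_event_open, &sibling_attr,
			      0, -1, leader /* group_fd */, PERF_FLAG_FD_CLOEXEC);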
10845
fb0459d7
AV
10846/**
10847 * perf_event_create_kernel_counter
10848 *
10849 * @attr: attributes of the counter to create
10850 * @cpu: cpu in which the counter is bound
38a81da2 10851 * @task: task to profile (NULL for percpu)
fb0459d7
AV
10852 */
10853struct perf_event *
10854perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 10855 struct task_struct *task,
4dc0da86
AK
10856 perf_overflow_handler_t overflow_handler,
10857 void *context)
fb0459d7 10858{
fb0459d7 10859 struct perf_event_context *ctx;
c3f00c70 10860 struct perf_event *event;
fb0459d7 10861 int err;
d859e29f 10862
fb0459d7
AV
10863 /*
10864 * Get the target context (task or percpu):
10865 */
d859e29f 10866
4dc0da86 10867 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 10868 overflow_handler, context, -1);
c3f00c70
PZ
10869 if (IS_ERR(event)) {
10870 err = PTR_ERR(event);
10871 goto err;
10872 }
d859e29f 10873
f8697762 10874 /* Mark owner so we could distinguish it from user events. */
63b6da39 10875 event->owner = TASK_TOMBSTONE;
f8697762 10876
4af57ef2 10877 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
10878 if (IS_ERR(ctx)) {
10879 err = PTR_ERR(ctx);
c3f00c70 10880 goto err_free;
d859e29f 10881 }
fb0459d7 10882
fb0459d7
AV
10883 WARN_ON_ONCE(ctx->parent_ctx);
10884 mutex_lock(&ctx->mutex);
84c4e620
PZ
10885 if (ctx->task == TASK_TOMBSTONE) {
10886 err = -ESRCH;
10887 goto err_unlock;
10888 }
10889
a63fbed7
TG
10890 if (!task) {
10891 /*
10892 * Check if the @cpu we're creating an event for is online.
10893 *
10894 * We use the perf_cpu_context::ctx::mutex to serialize against
10895 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10896 */
10897 struct perf_cpu_context *cpuctx =
10898 container_of(ctx, struct perf_cpu_context, ctx);
10899 if (!cpuctx->online) {
10900 err = -ENODEV;
10901 goto err_unlock;
10902 }
10903 }
10904
bed5b25a 10905 if (!exclusive_event_installable(event, ctx)) {
bed5b25a 10906 err = -EBUSY;
84c4e620 10907 goto err_unlock;
bed5b25a
AS
10908 }
10909
fb0459d7 10910 perf_install_in_context(ctx, event, cpu);
fe4b04fa 10911 perf_unpin_context(ctx);
fb0459d7
AV
10912 mutex_unlock(&ctx->mutex);
10913
fb0459d7
AV
10914 return event;
10915
84c4e620
PZ
10916err_unlock:
10917 mutex_unlock(&ctx->mutex);
10918 perf_unpin_context(ctx);
10919 put_ctx(ctx);
c3f00c70
PZ
10920err_free:
10921 free_event(event);
10922err:
c6567f64 10923 return ERR_PTR(err);
9b51f66d 10924}
fb0459d7 10925EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9b51f66d 10926
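/*
 * Example (illustrative sketch, not part of this file): roughly how an
 * in-kernel user such as the hardlockup watchdog drives this interface to run
 * a cpu-bound cycle counter with an overflow callback. The attr values and
 * the my_* names are assumptions made for the example.
 */
#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/err.h>

static struct perf_event *my_cycle_event;

static void my_overflow_handler(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	/* called from NMI/IRQ context when sample_period cycles have elapsed */
	pr_debug("cycle counter fired on CPU%d\n", smp_processor_id());
}

static int my_counter_start(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.pinned		= 1,
		.sample_period	= 1000000000ULL,
	};
	struct perf_event *event;

	/* task == NULL: a per-cpu counter bound to @cpu */
	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
						 my_overflow_handler, NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);

	my_cycle_event = event;
	return 0;
}

static void my_counter_stop(void)
{
	perf_event_release_kernel(my_cycle_event);
}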
0cda4c02
YZ
10927void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
10928{
10929 struct perf_event_context *src_ctx;
10930 struct perf_event_context *dst_ctx;
10931 struct perf_event *event, *tmp;
10932 LIST_HEAD(events);
10933
10934 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
10935 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
10936
f63a8daa
PZ
10937 /*
10938 * See perf_event_ctx_lock() for comments on the details
10939 * of swizzling perf_event::ctx.
10940 */
10941 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
10942 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
10943 event_entry) {
45a0e07a 10944 perf_remove_from_context(event, 0);
9a545de0 10945 unaccount_event_cpu(event, src_cpu);
0cda4c02 10946 put_ctx(src_ctx);
9886167d 10947 list_add(&event->migrate_entry, &events);
0cda4c02 10948 }
0cda4c02 10949
8f95b435
PZI
10950 /*
10951 * Wait for the events to quiesce before re-instating them.
10952 */
0cda4c02
YZ
10953 synchronize_rcu();
10954
8f95b435
PZI
10955 /*
10956 * Re-instate events in 2 passes.
10957 *
10958 * Skip over group leaders and only install siblings on this first
10959 * pass; siblings will not get enabled without a leader. A leader,
10960 * however, will enable its siblings, even if those are still on the
10961 * old context.
10962 */
10963 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10964 if (event->group_leader == event)
10965 continue;
10966
10967 list_del(&event->migrate_entry);
10968 if (event->state >= PERF_EVENT_STATE_OFF)
10969 event->state = PERF_EVENT_STATE_INACTIVE;
10970 account_event_cpu(event, dst_cpu);
10971 perf_install_in_context(dst_ctx, event, dst_cpu);
10972 get_ctx(dst_ctx);
10973 }
10974
10975 /*
10976 * Once all the siblings are setup properly, install the group leaders
10977 * to make it go.
10978 */
9886167d
PZ
10979 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10980 list_del(&event->migrate_entry);
0cda4c02
YZ
10981 if (event->state >= PERF_EVENT_STATE_OFF)
10982 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 10983 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
10984 perf_install_in_context(dst_ctx, event, dst_cpu);
10985 get_ctx(dst_ctx);
10986 }
10987 mutex_unlock(&dst_ctx->mutex);
f63a8daa 10988 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
10989}
10990EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
10991
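/*
 * Example (illustrative sketch, not part of this file): an uncore-style PMU
 * driver typically calls this from its CPU-offline hotplug callback to move
 * the per-package events from the dying CPU to a surviving one. my_pmu and
 * the target selection are assumptions made for the example.
 */
#include <linux/cpumask.h>
#include <linux/perf_event.h>

static struct pmu my_pmu;

static int my_pmu_offline_cpu(unsigned int cpu)
{
	unsigned int target;

	/* pick any other online CPU to take over the counters */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;	/* last CPU going away: nothing left to migrate to */

	perf_pmu_migrate_context(&my_pmu, cpu, target);
	return 0;
}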
cdd6c482 10992static void sync_child_event(struct perf_event *child_event,
38b200d6 10993 struct task_struct *child)
d859e29f 10994{
cdd6c482 10995 struct perf_event *parent_event = child_event->parent;
8bc20959 10996 u64 child_val;
d859e29f 10997
cdd6c482
IM
10998 if (child_event->attr.inherit_stat)
10999 perf_event_read_event(child_event, child);
38b200d6 11000
b5e58793 11001 child_val = perf_event_count(child_event);
d859e29f
PM
11002
11003 /*
11004 * Add back the child's count to the parent's count:
11005 */
a6e6dea6 11006 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
11007 atomic64_add(child_event->total_time_enabled,
11008 &parent_event->child_total_time_enabled);
11009 atomic64_add(child_event->total_time_running,
11010 &parent_event->child_total_time_running);
d859e29f
PM
11011}
11012
9b51f66d 11013static void
8ba289b8
PZ
11014perf_event_exit_event(struct perf_event *child_event,
11015 struct perf_event_context *child_ctx,
11016 struct task_struct *child)
9b51f66d 11017{
8ba289b8
PZ
11018 struct perf_event *parent_event = child_event->parent;
11019
1903d50c
PZ
11020 /*
11021 * Do not destroy the 'original' grouping; because of the context
11022 * switch optimization the original events could've ended up in a
11023 * random child task.
11024 *
11025 * If we were to destroy the original group, all group related
11026 * operations would cease to function properly after this random
11027 * child dies.
11028 *
11029 * Do destroy all inherited groups; we don't care about those,
11030 * and being thorough is better.
11031 */
32132a3d
PZ
11032 raw_spin_lock_irq(&child_ctx->lock);
11033 WARN_ON_ONCE(child_ctx->is_active);
11034
8ba289b8 11035 if (parent_event)
32132a3d
PZ
11036 perf_group_detach(child_event);
11037 list_del_event(child_event, child_ctx);
0d3d73aa 11038 perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
32132a3d 11039 raw_spin_unlock_irq(&child_ctx->lock);
0cc0c027 11040
9b51f66d 11041 /*
8ba289b8 11042 * Parent events are governed by their filedesc; retain them.
9b51f66d 11043 */
8ba289b8 11044 if (!parent_event) {
179033b3 11045 perf_event_wakeup(child_event);
8ba289b8 11046 return;
4bcf349a 11047 }
8ba289b8
PZ
11048 /*
11049 * Child events can be cleaned up.
11050 */
11051
11052 sync_child_event(child_event, child);
11053
11054 /*
11055 * Remove this event from the parent's list
11056 */
11057 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
11058 mutex_lock(&parent_event->child_mutex);
11059 list_del_init(&child_event->child_list);
11060 mutex_unlock(&parent_event->child_mutex);
11061
11062 /*
11063 * Kick perf_poll() for is_event_hup().
11064 */
11065 perf_event_wakeup(parent_event);
11066 free_event(child_event);
11067 put_event(parent_event);
9b51f66d
IM
11068}
11069
8dc85d54 11070static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 11071{
211de6eb 11072 struct perf_event_context *child_ctx, *clone_ctx = NULL;
63b6da39 11073 struct perf_event *child_event, *next;
63b6da39
PZ
11074
11075 WARN_ON_ONCE(child != current);
9b51f66d 11076
6a3351b6 11077 child_ctx = perf_pin_task_context(child, ctxn);
63b6da39 11078 if (!child_ctx)
9b51f66d
IM
11079 return;
11080
ad3a37de 11081 /*
6a3351b6
PZ
11082 * In order to reduce the amount of trickery in ctx tear-down, we hold
11083 * ctx::mutex over the entire thing. This serializes against almost
11084 * everything that wants to access the ctx.
11085 *
11086 * The exception is sys_perf_event_open() /
11087 * perf_event_create_kernel_counter() which does find_get_context()
11088 * without ctx::mutex (it cannot because of the move_group double mutex
11089 * lock thing). See the comments in perf_install_in_context().
ad3a37de 11090 */
6a3351b6 11091 mutex_lock(&child_ctx->mutex);
c93f7669
PM
11092
11093 /*
6a3351b6
PZ
11094 * In a single ctx::lock section, de-schedule the events and detach the
11095 * context from the task such that we cannot ever get it scheduled back
11096 * in.
c93f7669 11097 */
6a3351b6 11098 raw_spin_lock_irq(&child_ctx->lock);
487f05e1 11099 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
4a1c0f26 11100
71a851b4 11101 /*
63b6da39
PZ
11102 * Now that the context is inactive, destroy the task <-> ctx relation
11103 * and mark the context dead.
71a851b4 11104 */
63b6da39
PZ
11105 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
11106 put_ctx(child_ctx); /* cannot be last */
11107 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
11108 put_task_struct(current); /* cannot be last */
4a1c0f26 11109
211de6eb 11110 clone_ctx = unclone_ctx(child_ctx);
6a3351b6 11111 raw_spin_unlock_irq(&child_ctx->lock);
9f498cc5 11112
211de6eb
PZ
11113 if (clone_ctx)
11114 put_ctx(clone_ctx);
4a1c0f26 11115
9f498cc5 11116 /*
cdd6c482
IM
11117 * Report the task dead after unscheduling the events so that we
11118 * won't get any samples after PERF_RECORD_EXIT. We can however still
11119 * get a few PERF_RECORD_READ events.
9f498cc5 11120 */
cdd6c482 11121 perf_event_task(child, child_ctx, 0);
a63eaf34 11122
ebf905fc 11123 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8ba289b8 11124 perf_event_exit_event(child_event, child_ctx, child);
8bc20959 11125
a63eaf34
PM
11126 mutex_unlock(&child_ctx->mutex);
11127
11128 put_ctx(child_ctx);
9b51f66d
IM
11129}
11130
8dc85d54
PZ
11131/*
11132 * When a child task exits, feed back event values to parent events.
79c9ce57
PZ
11133 *
11134 * Can be called with cred_guard_mutex held when called from
11135 * install_exec_creds().
8dc85d54
PZ
11136 */
11137void perf_event_exit_task(struct task_struct *child)
11138{
8882135b 11139 struct perf_event *event, *tmp;
8dc85d54
PZ
11140 int ctxn;
11141
8882135b
PZ
11142 mutex_lock(&child->perf_event_mutex);
11143 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
11144 owner_entry) {
11145 list_del_init(&event->owner_entry);
11146
11147 /*
11148 * Ensure the list deletion is visible before we clear
11149 * the owner, closes a race against perf_release() where
11150 * we need to serialize on the owner->perf_event_mutex.
11151 */
f47c02c0 11152 smp_store_release(&event->owner, NULL);
8882135b
PZ
11153 }
11154 mutex_unlock(&child->perf_event_mutex);
11155
8dc85d54
PZ
11156 for_each_task_context_nr(ctxn)
11157 perf_event_exit_task_context(child, ctxn);
4e93ad60
JO
11158
11159 /*
11160 * The perf_event_exit_task_context calls perf_event_task
11161 * with child's task_ctx, which generates EXIT events for
11162 * child contexts and sets child->perf_event_ctxp[] to NULL.
11163 * At this point we need to send EXIT events to cpu contexts.
11164 */
11165 perf_event_task(child, NULL, 0);
8dc85d54
PZ
11166}
11167
889ff015
FW
11168static void perf_free_event(struct perf_event *event,
11169 struct perf_event_context *ctx)
11170{
11171 struct perf_event *parent = event->parent;
11172
11173 if (WARN_ON_ONCE(!parent))
11174 return;
11175
11176 mutex_lock(&parent->child_mutex);
11177 list_del_init(&event->child_list);
11178 mutex_unlock(&parent->child_mutex);
11179
a6fa941d 11180 put_event(parent);
889ff015 11181
652884fe 11182 raw_spin_lock_irq(&ctx->lock);
8a49542c 11183 perf_group_detach(event);
889ff015 11184 list_del_event(event, ctx);
652884fe 11185 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
11186 free_event(event);
11187}
11188
bbbee908 11189/*
652884fe 11190 * Free an unexposed, unused context as created by inheritance by
8dc85d54 11191 * perf_event_init_task below, used by fork() in case of fail.
652884fe
PZ
11192 *
11193 * Not all locks are strictly required, but take them anyway to be nice and
11194 * help out with the lockdep assertions.
bbbee908 11195 */
cdd6c482 11196void perf_event_free_task(struct task_struct *task)
bbbee908 11197{
8dc85d54 11198 struct perf_event_context *ctx;
cdd6c482 11199 struct perf_event *event, *tmp;
8dc85d54 11200 int ctxn;
bbbee908 11201
8dc85d54
PZ
11202 for_each_task_context_nr(ctxn) {
11203 ctx = task->perf_event_ctxp[ctxn];
11204 if (!ctx)
11205 continue;
bbbee908 11206
8dc85d54 11207 mutex_lock(&ctx->mutex);
e552a838
PZ
11208 raw_spin_lock_irq(&ctx->lock);
11209 /*
11210 * Destroy the task <-> ctx relation and mark the context dead.
11211 *
11212 * This is important because even though the task hasn't been
11213 * exposed yet the context has been (through child_list).
11214 */
11215 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
11216 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
11217 put_task_struct(task); /* cannot be last */
11218 raw_spin_unlock_irq(&ctx->lock);
bbbee908 11219
15121c78 11220 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
8dc85d54 11221 perf_free_event(event, ctx);
bbbee908 11222
8dc85d54 11223 mutex_unlock(&ctx->mutex);
8dc85d54
PZ
11224 put_ctx(ctx);
11225 }
889ff015
FW
11226}
11227
4e231c79
PZ
11228void perf_event_delayed_put(struct task_struct *task)
11229{
11230 int ctxn;
11231
11232 for_each_task_context_nr(ctxn)
11233 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
11234}
11235
e03e7ee3 11236struct file *perf_event_get(unsigned int fd)
ffe8690c 11237{
e03e7ee3 11238 struct file *file;
ffe8690c 11239
e03e7ee3
AS
11240 file = fget_raw(fd);
11241 if (!file)
11242 return ERR_PTR(-EBADF);
ffe8690c 11243
e03e7ee3
AS
11244 if (file->f_op != &perf_fops) {
11245 fput(file);
11246 return ERR_PTR(-EBADF);
11247 }
ffe8690c 11248
e03e7ee3 11249 return file;
ffe8690c
KX
11250}
11251
f8d959a5
YS
11252const struct perf_event *perf_get_event(struct file *file)
11253{
11254 if (file->f_op != &perf_fops)
11255 return ERR_PTR(-EINVAL);
11256
11257 return file->private_data;
11258}
11259
ffe8690c
KX
11260const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
11261{
11262 if (!event)
11263 return ERR_PTR(-EINVAL);
11264
11265 return &event->attr;
11266}
11267
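/*
 * Example (illustrative sketch, not part of this file): how a kernel consumer
 * (the BPF perf-event array does something similar) can turn a user-supplied
 * fd into a perf_event and inspect its attributes. my_check_perf_fd() and the
 * policy check are assumptions made for the example.
 */
#include <linux/file.h>
#include <linux/err.h>
#include <linux/perf_event.h>

static int my_check_perf_fd(int fd)
{
	const struct perf_event_attr *attr;
	struct perf_event *event;
	struct file *file;
	int ret = 0;

	file = perf_event_get(fd);	/* validates f_op and takes a file reference */
	if (IS_ERR(file))
		return PTR_ERR(file);

	event = file->private_data;	/* safe: perf_event_get() checked perf_fops */
	attr = perf_event_attrs(event);
	if (IS_ERR(attr))
		ret = PTR_ERR(attr);
	else if (attr->inherit)
		ret = -EINVAL;		/* example policy: reject inherited events */

	fput(file);			/* drop the reference from perf_event_get() */
	return ret;
}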
97dee4f3 11268/*
788faab7 11269 * Inherit an event from parent task to child task.
d8a8cfc7
PZ
11270 *
11271 * Returns:
11272 * - valid pointer on success
11273 * - NULL for orphaned events
11274 * - IS_ERR() on error
97dee4f3
PZ
11275 */
11276static struct perf_event *
11277inherit_event(struct perf_event *parent_event,
11278 struct task_struct *parent,
11279 struct perf_event_context *parent_ctx,
11280 struct task_struct *child,
11281 struct perf_event *group_leader,
11282 struct perf_event_context *child_ctx)
11283{
8ca2bd41 11284 enum perf_event_state parent_state = parent_event->state;
97dee4f3 11285 struct perf_event *child_event;
cee010ec 11286 unsigned long flags;
97dee4f3
PZ
11287
11288 /*
11289 * Instead of creating recursive hierarchies of events,
11290 * we link inherited events back to the original parent,
11291 * which has a filp for sure, which we use as the reference
11292 * count:
11293 */
11294 if (parent_event->parent)
11295 parent_event = parent_event->parent;
11296
11297 child_event = perf_event_alloc(&parent_event->attr,
11298 parent_event->cpu,
d580ff86 11299 child,
97dee4f3 11300 group_leader, parent_event,
79dff51e 11301 NULL, NULL, -1);
97dee4f3
PZ
11302 if (IS_ERR(child_event))
11303 return child_event;
a6fa941d 11304
313ccb96
JO
11305
11306 if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) &&
11307 !child_ctx->task_ctx_data) {
11308 struct pmu *pmu = child_event->pmu;
11309
11310 child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size,
11311 GFP_KERNEL);
11312 if (!child_ctx->task_ctx_data) {
11313 free_event(child_event);
11314 return NULL;
11315 }
11316 }
11317
c6e5b732
PZ
11318 /*
11319 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
11320 * must be under the same lock in order to serialize against
11321 * perf_event_release_kernel(), such that either we must observe
11322 * is_orphaned_event() or they will observe us on the child_list.
11323 */
11324 mutex_lock(&parent_event->child_mutex);
fadfe7be
JO
11325 if (is_orphaned_event(parent_event) ||
11326 !atomic_long_inc_not_zero(&parent_event->refcount)) {
c6e5b732 11327 mutex_unlock(&parent_event->child_mutex);
313ccb96 11328 /* task_ctx_data is freed with child_ctx */
a6fa941d
AV
11329 free_event(child_event);
11330 return NULL;
11331 }
11332
97dee4f3
PZ
11333 get_ctx(child_ctx);
11334
11335 /*
11336 * Make the child state follow the state of the parent event,
11337 * not its attr.disabled bit. We hold the parent's mutex,
11338 * so we won't race with perf_event_{en, dis}able_family.
11339 */
1929def9 11340 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
11341 child_event->state = PERF_EVENT_STATE_INACTIVE;
11342 else
11343 child_event->state = PERF_EVENT_STATE_OFF;
11344
11345 if (parent_event->attr.freq) {
11346 u64 sample_period = parent_event->hw.sample_period;
11347 struct hw_perf_event *hwc = &child_event->hw;
11348
11349 hwc->sample_period = sample_period;
11350 hwc->last_period = sample_period;
11351
11352 local64_set(&hwc->period_left, sample_period);
11353 }
11354
11355 child_event->ctx = child_ctx;
11356 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
11357 child_event->overflow_handler_context
11358 = parent_event->overflow_handler_context;
97dee4f3 11359
614b6780
TG
11360 /*
11361 * Precalculate sample_data sizes
11362 */
11363 perf_event__header_size(child_event);
6844c09d 11364 perf_event__id_header_size(child_event);
614b6780 11365
97dee4f3
PZ
11366 /*
11367 * Link it up in the child's context:
11368 */
cee010ec 11369 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 11370 add_event_to_ctx(child_event, child_ctx);
cee010ec 11371 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 11372
97dee4f3
PZ
11373 /*
11374 * Link this into the parent event's child list
11375 */
97dee4f3
PZ
11376 list_add_tail(&child_event->child_list, &parent_event->child_list);
11377 mutex_unlock(&parent_event->child_mutex);
11378
11379 return child_event;
11380}
11381
d8a8cfc7
PZ
11382/*
11383 * Inherits an event group.
11384 *
11385 * This will quietly suppress orphaned events; !inherit_event() is not an error.
11386 * This matches with perf_event_release_kernel() removing all child events.
11387 *
11388 * Returns:
11389 * - 0 on success
11390 * - <0 on error
11391 */
97dee4f3
PZ
11392static int inherit_group(struct perf_event *parent_event,
11393 struct task_struct *parent,
11394 struct perf_event_context *parent_ctx,
11395 struct task_struct *child,
11396 struct perf_event_context *child_ctx)
11397{
11398 struct perf_event *leader;
11399 struct perf_event *sub;
11400 struct perf_event *child_ctr;
11401
11402 leader = inherit_event(parent_event, parent, parent_ctx,
11403 child, NULL, child_ctx);
11404 if (IS_ERR(leader))
11405 return PTR_ERR(leader);
d8a8cfc7
PZ
11406 /*
11407 * @leader can be NULL here because of is_orphaned_event(). In this
11408 * case inherit_event() will create individual events, similar to what
11409 * perf_group_detach() would do anyway.
11410 */
edb39592 11411 for_each_sibling_event(sub, parent_event) {
97dee4f3
PZ
11412 child_ctr = inherit_event(sub, parent, parent_ctx,
11413 child, leader, child_ctx);
11414 if (IS_ERR(child_ctr))
11415 return PTR_ERR(child_ctr);
11416 }
11417 return 0;
889ff015
FW
11418}
11419
d8a8cfc7
PZ
11420/*
11421 * Creates the child task context and tries to inherit the event-group.
11422 *
11423 * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
11424 * inherited_all set when we 'fail' to inherit an orphaned event; this is
11425 * consistent with perf_event_release_kernel() removing all child events.
11426 *
11427 * Returns:
11428 * - 0 on success
11429 * - <0 on error
11430 */
889ff015
FW
11431static int
11432inherit_task_group(struct perf_event *event, struct task_struct *parent,
11433 struct perf_event_context *parent_ctx,
8dc85d54 11434 struct task_struct *child, int ctxn,
889ff015
FW
11435 int *inherited_all)
11436{
11437 int ret;
8dc85d54 11438 struct perf_event_context *child_ctx;
889ff015
FW
11439
11440 if (!event->attr.inherit) {
11441 *inherited_all = 0;
11442 return 0;
bbbee908
PZ
11443 }
11444
fe4b04fa 11445 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
11446 if (!child_ctx) {
11447 /*
11448 * This is executed from the parent task context, so
11449 * inherit events that have been marked for cloning.
11450 * First allocate and initialize a context for the
11451 * child.
11452 */
734df5ab 11453 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
11454 if (!child_ctx)
11455 return -ENOMEM;
bbbee908 11456
8dc85d54 11457 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
11458 }
11459
11460 ret = inherit_group(event, parent, parent_ctx,
11461 child, child_ctx);
11462
11463 if (ret)
11464 *inherited_all = 0;
11465
11466 return ret;
bbbee908
PZ
11467}
11468
9b51f66d 11469/*
cdd6c482 11470 * Initialize the perf_event context in task_struct
9b51f66d 11471 */
985c8dcb 11472static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 11473{
889ff015 11474 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
11475 struct perf_event_context *cloned_ctx;
11476 struct perf_event *event;
9b51f66d 11477 struct task_struct *parent = current;
564c2b21 11478 int inherited_all = 1;
dddd3379 11479 unsigned long flags;
6ab423e0 11480 int ret = 0;
9b51f66d 11481
8dc85d54 11482 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
11483 return 0;
11484
ad3a37de 11485 /*
25346b93
PM
11486 * If the parent's context is a clone, pin it so it won't get
11487 * swapped under us.
ad3a37de 11488 */
8dc85d54 11489 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
11490 if (!parent_ctx)
11491 return 0;
25346b93 11492
ad3a37de
PM
11493 /*
11494 * No need to check if parent_ctx != NULL here; since we saw
11495 * it non-NULL earlier, the only reason for it to become NULL
11496 * is if we exit, and since we're currently in the middle of
11497 * a fork we can't be exiting at the same time.
11498 */
ad3a37de 11499
9b51f66d
IM
11500 /*
11501 * Lock the parent list. No need to lock the child - not PID
11502 * hashed yet and not running, so nobody can access it.
11503 */
d859e29f 11504 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
11505
11506 /*
11507 * We don't have to disable NMIs - we are only looking at
11508 * the list, not manipulating it:
11509 */
6e6804d2 11510 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
8dc85d54
PZ
11511 ret = inherit_task_group(event, parent, parent_ctx,
11512 child, ctxn, &inherited_all);
889ff015 11513 if (ret)
e7cc4865 11514 goto out_unlock;
889ff015 11515 }
b93f7978 11516
dddd3379
TG
11517 /*
11518 * We can't hold ctx->lock when iterating the ->flexible_groups tree due
11519 * to allocations, but we need to prevent rotation because
11520 * rotate_ctx() will change the list from interrupt context.
11521 */
11522 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
11523 parent_ctx->rotate_disable = 1;
11524 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
11525
6e6804d2 11526 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
8dc85d54
PZ
11527 ret = inherit_task_group(event, parent, parent_ctx,
11528 child, ctxn, &inherited_all);
889ff015 11529 if (ret)
e7cc4865 11530 goto out_unlock;
564c2b21
PM
11531 }
11532
dddd3379
TG
11533 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
11534 parent_ctx->rotate_disable = 0;
dddd3379 11535
8dc85d54 11536 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 11537
05cbaa28 11538 if (child_ctx && inherited_all) {
564c2b21
PM
11539 /*
11540 * Mark the child context as a clone of the parent
11541 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
11542 *
11543 * Note that if the parent is a clone, the holding of
11544 * parent_ctx->lock avoids it from being uncloned.
564c2b21 11545 */
c5ed5145 11546 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
11547 if (cloned_ctx) {
11548 child_ctx->parent_ctx = cloned_ctx;
25346b93 11549 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
11550 } else {
11551 child_ctx->parent_ctx = parent_ctx;
11552 child_ctx->parent_gen = parent_ctx->generation;
11553 }
11554 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
11555 }
11556
c5ed5145 11557 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
e7cc4865 11558out_unlock:
d859e29f 11559 mutex_unlock(&parent_ctx->mutex);
6ab423e0 11560
25346b93 11561 perf_unpin_context(parent_ctx);
fe4b04fa 11562 put_ctx(parent_ctx);
ad3a37de 11563
6ab423e0 11564 return ret;
9b51f66d
IM
11565}
11566
8dc85d54
PZ
11567/*
11568 * Initialize the perf_event context in task_struct
11569 */
11570int perf_event_init_task(struct task_struct *child)
11571{
11572 int ctxn, ret;
11573
8550d7cb
ON
11574 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
11575 mutex_init(&child->perf_event_mutex);
11576 INIT_LIST_HEAD(&child->perf_event_list);
11577
8dc85d54
PZ
11578 for_each_task_context_nr(ctxn) {
11579 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
11580 if (ret) {
11581 perf_event_free_task(child);
8dc85d54 11582 return ret;
6c72e350 11583 }
8dc85d54
PZ
11584 }
11585
11586 return 0;
11587}
11588
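/*
 * Example (illustrative sketch, not part of this file): the inherit paths set
 * up above run when userspace opens a task-bound event with attr.inherit set;
 * counters are then cloned into every child created by fork()/clone(), and
 * their counts are folded back into the parent event when the children exit
 * (see sync_child_event() above). The attr values are assumptions made for
 * the example.
 */
#include <linux/perf_event.h>
#include <string.h>

static void init_inherited_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type     = PERF_TYPE_HARDWARE;
	attr->config   = PERF_COUNT_HW_INSTRUCTIONS;
	attr->size     = sizeof(*attr);
	attr->inherit  = 1;	/* follow children of the profiled task */
	attr->disabled = 1;	/* enable later via PERF_EVENT_IOC_ENABLE */
}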
220b140b
PM
11589static void __init perf_event_init_all_cpus(void)
11590{
b28ab83c 11591 struct swevent_htable *swhash;
220b140b 11592 int cpu;
220b140b 11593
a63fbed7
TG
11594 zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
11595
220b140b 11596 for_each_possible_cpu(cpu) {
b28ab83c
PZ
11597 swhash = &per_cpu(swevent_htable, cpu);
11598 mutex_init(&swhash->hlist_mutex);
2fde4f94 11599 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
f2fb6bef
KL
11600
11601 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
11602 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
e48c1788 11603
058fe1c0
DCC
11604#ifdef CONFIG_CGROUP_PERF
11605 INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
11606#endif
e48c1788 11607 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
220b140b
PM
11608 }
11609}
11610
a63fbed7 11611void perf_swevent_init_cpu(unsigned int cpu)
0793a61d 11612{
108b02cf 11613 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 11614
b28ab83c 11615 mutex_lock(&swhash->hlist_mutex);
059fcd8c 11616 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
76e1d904
FW
11617 struct swevent_hlist *hlist;
11618
b28ab83c
PZ
11619 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
11620 WARN_ON(!hlist);
11621 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 11622 }
b28ab83c 11623 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
11624}
11625
2965faa5 11626#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
108b02cf 11627static void __perf_event_exit_context(void *__info)
0793a61d 11628{
108b02cf 11629 struct perf_event_context *ctx = __info;
fae3fde6
PZ
11630 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
11631 struct perf_event *event;
0793a61d 11632
fae3fde6 11633 raw_spin_lock(&ctx->lock);
0ee098c9 11634 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
fae3fde6 11635 list_for_each_entry(event, &ctx->event_list, event_entry)
45a0e07a 11636 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
fae3fde6 11637 raw_spin_unlock(&ctx->lock);
0793a61d 11638}
108b02cf
PZ
11639
11640static void perf_event_exit_cpu_context(int cpu)
11641{
a63fbed7 11642 struct perf_cpu_context *cpuctx;
108b02cf
PZ
11643 struct perf_event_context *ctx;
11644 struct pmu *pmu;
108b02cf 11645
a63fbed7
TG
11646 mutex_lock(&pmus_lock);
11647 list_for_each_entry(pmu, &pmus, entry) {
11648 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
11649 ctx = &cpuctx->ctx;
108b02cf
PZ
11650
11651 mutex_lock(&ctx->mutex);
11652 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
a63fbed7 11653 cpuctx->online = 0;
108b02cf
PZ
11654 mutex_unlock(&ctx->mutex);
11655 }
a63fbed7
TG
11656 cpumask_clear_cpu(cpu, perf_online_mask);
11657 mutex_unlock(&pmus_lock);
108b02cf 11658}
00e16c3d
TG
11659#else
11660
11661static void perf_event_exit_cpu_context(int cpu) { }
11662
11663#endif
108b02cf 11664
a63fbed7
TG
11665int perf_event_init_cpu(unsigned int cpu)
11666{
11667 struct perf_cpu_context *cpuctx;
11668 struct perf_event_context *ctx;
11669 struct pmu *pmu;
11670
11671 perf_swevent_init_cpu(cpu);
11672
11673 mutex_lock(&pmus_lock);
11674 cpumask_set_cpu(cpu, perf_online_mask);
11675 list_for_each_entry(pmu, &pmus, entry) {
11676 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
11677 ctx = &cpuctx->ctx;
11678
11679 mutex_lock(&ctx->mutex);
11680 cpuctx->online = 1;
11681 mutex_unlock(&ctx->mutex);
11682 }
11683 mutex_unlock(&pmus_lock);
11684
11685 return 0;
11686}
11687
00e16c3d 11688int perf_event_exit_cpu(unsigned int cpu)
0793a61d 11689{
e3703f8c 11690 perf_event_exit_cpu_context(cpu);
00e16c3d 11691 return 0;
0793a61d 11692}
0793a61d 11693
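/*
 * Example (illustrative sketch, not part of this file): in the mainline tree
 * these two callbacks are hooked into the CPU hotplug state machine via
 * CPUHP_PERF_PREPARE in kernel/cpu.c; a dynamic registration of the same
 * shape would look roughly like this. The state and name used here are
 * assumptions made for the example.
 */
#include <linux/cpuhotplug.h>

static int __init my_perf_hotplug_setup(void)
{
	int ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "perf/core:example",
				    perf_event_init_cpu, perf_event_exit_cpu);

	return ret < 0 ? ret : 0;	/* dynamic states return the state number */
}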
c277443c
PZ
11694static int
11695perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
11696{
11697 int cpu;
11698
11699 for_each_online_cpu(cpu)
11700 perf_event_exit_cpu(cpu);
11701
11702 return NOTIFY_OK;
11703}
11704
11705/*
11706 * Run the perf reboot notifier at the very last possible moment so that
11707 * the generic watchdog code runs as long as possible.
11708 */
11709static struct notifier_block perf_reboot_notifier = {
11710 .notifier_call = perf_reboot,
11711 .priority = INT_MIN,
11712};
11713
cdd6c482 11714void __init perf_event_init(void)
0793a61d 11715{
3c502e7a
JW
11716 int ret;
11717
2e80a82a
PZ
11718 idr_init(&pmu_idr);
11719
220b140b 11720 perf_event_init_all_cpus();
b0a873eb 11721 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
11722 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
11723 perf_pmu_register(&perf_cpu_clock, NULL, -1);
11724 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb 11725 perf_tp_register();
00e16c3d 11726 perf_event_init_cpu(smp_processor_id());
c277443c 11727 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
11728
11729 ret = init_hw_breakpoint();
11730 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520 11731
b01c3a00
JO
11732 /*
11733 * Build time assertion that we keep the data_head at the intended
11734 * location. IOW, validation that we got the __reserved[] size right.
11735 */
11736 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
11737 != 1024);
0793a61d 11738}
abe43400 11739
fd979c01
CS
11740ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
11741 char *page)
11742{
11743 struct perf_pmu_events_attr *pmu_attr =
11744 container_of(attr, struct perf_pmu_events_attr, attr);
11745
11746 if (pmu_attr->event_str)
11747 return sprintf(page, "%s\n", pmu_attr->event_str);
11748
11749 return 0;
11750}
675965b0 11751EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
fd979c01 11752
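/*
 * Example (illustrative sketch, not part of this file): PMU drivers expose
 * their named events through this show routine, usually via the
 * PMU_EVENT_ATTR_STRING() helper. The event name and encoding below are
 * assumptions made for the example.
 */
#include <linux/perf_event.h>
#include <linux/sysfs.h>

PMU_EVENT_ATTR_STRING(cycles, my_pmu_attr_cycles, "event=0x3c");

static struct attribute *my_pmu_events_attrs[] = {
	&my_pmu_attr_cycles.attr.attr,
	NULL,
};

/* listed in the driver's pmu->attr_groups so it shows up under sysfs */
static const struct attribute_group my_pmu_events_group = {
	.name	= "events",
	.attrs	= my_pmu_events_attrs,
};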
abe43400
PZ
11753static int __init perf_event_sysfs_init(void)
11754{
11755 struct pmu *pmu;
11756 int ret;
11757
11758 mutex_lock(&pmus_lock);
11759
11760 ret = bus_register(&pmu_bus);
11761 if (ret)
11762 goto unlock;
11763
11764 list_for_each_entry(pmu, &pmus, entry) {
11765 if (!pmu->name || pmu->type < 0)
11766 continue;
11767
11768 ret = pmu_dev_alloc(pmu);
11769 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
11770 }
11771 pmu_bus_running = 1;
11772 ret = 0;
11773
11774unlock:
11775 mutex_unlock(&pmus_lock);
11776
11777 return ret;
11778}
11779device_initcall(perf_event_sysfs_init);
e5d1367f
SE
11780
11781#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
11782static struct cgroup_subsys_state *
11783perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
11784{
11785 struct perf_cgroup *jc;
e5d1367f 11786
1b15d055 11787 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
11788 if (!jc)
11789 return ERR_PTR(-ENOMEM);
11790
e5d1367f
SE
11791 jc->info = alloc_percpu(struct perf_cgroup_info);
11792 if (!jc->info) {
11793 kfree(jc);
11794 return ERR_PTR(-ENOMEM);
11795 }
11796
e5d1367f
SE
11797 return &jc->css;
11798}
11799
eb95419b 11800static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 11801{
eb95419b
TH
11802 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
11803
e5d1367f
SE
11804 free_percpu(jc->info);
11805 kfree(jc);
11806}
11807
11808static int __perf_cgroup_move(void *info)
11809{
11810 struct task_struct *task = info;
ddaaf4e2 11811 rcu_read_lock();
e5d1367f 11812 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
ddaaf4e2 11813 rcu_read_unlock();
e5d1367f
SE
11814 return 0;
11815}
11816
1f7dd3e5 11817static void perf_cgroup_attach(struct cgroup_taskset *tset)
e5d1367f 11818{
bb9d97b6 11819 struct task_struct *task;
1f7dd3e5 11820 struct cgroup_subsys_state *css;
bb9d97b6 11821
1f7dd3e5 11822 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 11823 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
11824}
11825
073219e9 11826struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
11827 .css_alloc = perf_cgroup_css_alloc,
11828 .css_free = perf_cgroup_css_free,
bb9d97b6 11829 .attach = perf_cgroup_attach,
968ebff1
TH
11830 /*
11831 * Implicitly enable on dfl hierarchy so that perf events can
11832 * always be filtered by cgroup2 path as long as perf_event
11833 * controller is not mounted on a legacy hierarchy.
11834 */
11835 .implicit_on_dfl = true,
8cfd8147 11836 .threaded = true,
e5d1367f
SE
11837};
11838#endif /* CONFIG_CGROUP_PERF */
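/*
 * Example (illustrative sketch, not part of this file): from userspace,
 * per-cgroup monitoring passes an open fd of the cgroup directory as the
 * "pid" argument together with PERF_FLAG_PID_CGROUP, so the event only counts
 * while tasks of that cgroup run on the monitored CPU. The cgroup path is
 * supplied by the caller; error handling is abbreviated.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <unistd.h>

static int open_cgroup_event(struct perf_event_attr *attr,
			     const char *cgrp_path, int cpu)
{
	int cgrp_fd, fd;

	cgrp_fd = open(cgrp_path, O_RDONLY);
	if (cgrp_fd < 0)
		return -1;

	/* cgroup monitoring runs in per-cpu (system-wide) mode, so pass a real cpu */
	fd = syscall(__NR_perf_event_open, attr, cgrp_fd, cpu, -1,
		     PERF_FLAG_PID_CGROUP);
	close(cgrp_fd);
	return fd;
}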