]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - arch/ia64/kernel/perfmon.c
treewide: Add SPDX license identifier for missed files
[thirdparty/kernel/stable.git] / arch / ia64 / kernel / perfmon.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * This file implements the perfmon-2 subsystem which is used
4 * to program the IA-64 Performance Monitoring Unit (PMU).
5 *
6 * The initial version of perfmon.c was written by
7 * Ganesh Venkitachalam, IBM Corp.
8 *
9 * Then it was modified for perfmon-1.x by Stephane Eranian and
10 * David Mosberger, Hewlett Packard Co.
11 *
12 * Version Perfmon-2.x is a rewrite of perfmon-1.x
13 * by Stephane Eranian, Hewlett Packard Co.
14 *
a1ecf7f6 15 * Copyright (C) 1999-2005 Hewlett Packard Co
1da177e4
LT
16 * Stephane Eranian <eranian@hpl.hp.com>
17 * David Mosberger-Tang <davidm@hpl.hp.com>
18 *
19 * More information about perfmon available at:
20 * http://www.hpl.hp.com/research/linux/perfmon
21 */
22
1da177e4
LT
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/sched.h>
29930025 26#include <linux/sched/task.h>
68db0cf1 27#include <linux/sched/task_stack.h>
1da177e4 28#include <linux/interrupt.h>
1da177e4
LT
29#include <linux/proc_fs.h>
30#include <linux/seq_file.h>
31#include <linux/init.h>
32#include <linux/vmalloc.h>
33#include <linux/mm.h>
34#include <linux/sysctl.h>
35#include <linux/list.h>
36#include <linux/file.h>
37#include <linux/poll.h>
38#include <linux/vfs.h>
a3bc0dbc 39#include <linux/smp.h>
1da177e4
LT
40#include <linux/pagemap.h>
41#include <linux/mount.h>
1da177e4 42#include <linux/bitops.h>
a9415644 43#include <linux/capability.h>
badf1662 44#include <linux/rcupdate.h>
60f1c444 45#include <linux/completion.h>
f14488cc 46#include <linux/tracehook.h>
5a0e3ad6 47#include <linux/slab.h>
91d591c3 48#include <linux/cpu.h>
1da177e4
LT
49
50#include <asm/errno.h>
51#include <asm/intrinsics.h>
52#include <asm/page.h>
53#include <asm/perfmon.h>
54#include <asm/processor.h>
55#include <asm/signal.h>
7c0f6ba6 56#include <linux/uaccess.h>
1da177e4
LT
57#include <asm/delay.h>
58
59#ifdef CONFIG_PERFMON
60/*
61 * perfmon context state
62 */
63#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
64#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
65#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
66#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
67
68#define PFM_INVALID_ACTIVATION (~0UL)
69
35589a8f
KA
70#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
71#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
72
1da177e4
LT
73/*
74 * depth of message queue
75 */
76#define PFM_MAX_MSGS 32
77#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
78
79/*
80 * type of a PMU register (bitmask).
81 * bitmask structure:
82 * bit0 : register implemented
83 * bit1 : end marker
84 * bit2-3 : reserved
85 * bit4 : pmc has pmc.pm
86 * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
87 * bit6-7 : register type
88 * bit8-31: reserved
89 */
90#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
91#define PFM_REG_IMPL 0x1 /* register implemented */
92#define PFM_REG_END 0x2 /* end marker */
93#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
94#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
95#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
96#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
97#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
98
99#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
100#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
101
102#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
103
104/* i assumed unsigned */
105#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
106#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
107
108/* XXX: these assume that register i is implemented */
109#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
110#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
111#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
112#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
113
114#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
115#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
116#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
117#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
118
119#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
120#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
121
122#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
123#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
124#define PFM_CTX_TASK(h) (h)->ctx_task
125
126#define PMU_PMC_OI 5 /* position of pmc.oi bit */
127
128/* XXX: does not support more than 64 PMDs */
129#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
130#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
131
132#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
133
134#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
135#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
136#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
137#define PFM_CODE_RR 0 /* requesting code range restriction */
138#define PFM_DATA_RR 1 /* requestion data range restriction */
139
140#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
141#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
142#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
143
144#define RDEP(x) (1UL<<(x))
145
146/*
147 * context protection macros
148 * in SMP:
149 * - we need to protect against CPU concurrency (spin_lock)
150 * - we need to protect against PMU overflow interrupts (local_irq_disable)
151 * in UP:
152 * - we need to protect against PMU overflow interrupts (local_irq_disable)
153 *
85d1fe09 154 * spin_lock_irqsave()/spin_unlock_irqrestore():
1da177e4
LT
155 * in SMP: local_irq_disable + spin_lock
156 * in UP : local_irq_disable
157 *
158 * spin_lock()/spin_lock():
159 * in UP : removed automatically
160 * in SMP: protect against context accesses from other CPU. interrupts
161 * are not masked. This is useful for the PMU interrupt handler
162 * because we know we will not get PMU concurrency in that code.
163 */
164#define PROTECT_CTX(c, f) \
165 do { \
19c5870c 166 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
1da177e4 167 spin_lock_irqsave(&(c)->ctx_lock, f); \
19c5870c 168 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
1da177e4
LT
169 } while(0)
170
171#define UNPROTECT_CTX(c, f) \
172 do { \
19c5870c 173 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
1da177e4
LT
174 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
175 } while(0)
176
177#define PROTECT_CTX_NOPRINT(c, f) \
178 do { \
179 spin_lock_irqsave(&(c)->ctx_lock, f); \
180 } while(0)
181
182
183#define UNPROTECT_CTX_NOPRINT(c, f) \
184 do { \
185 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
186 } while(0)
187
188
189#define PROTECT_CTX_NOIRQ(c) \
190 do { \
191 spin_lock(&(c)->ctx_lock); \
192 } while(0)
193
194#define UNPROTECT_CTX_NOIRQ(c) \
195 do { \
196 spin_unlock(&(c)->ctx_lock); \
197 } while(0)
198
199
200#ifdef CONFIG_SMP
201
202#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
203#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
204#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
205
206#else /* !CONFIG_SMP */
207#define SET_ACTIVATION(t) do {} while(0)
208#define GET_ACTIVATION(t) do {} while(0)
209#define INC_ACTIVATION(t) do {} while(0)
210#endif /* CONFIG_SMP */
211
212#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
213#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
214#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
215
216#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
217#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
218
219#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
220
221/*
222 * cmp0 must be the value of pmc0
223 */
224#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
225
226#define PFMFS_MAGIC 0xa0b4d889
227
228/*
229 * debugging
230 */
231#define PFM_DEBUGGING 1
232#ifdef PFM_DEBUGGING
233#define DPRINT(a) \
234 do { \
d4ed8084 235 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
1da177e4
LT
236 } while (0)
237
238#define DPRINT_ovfl(a) \
239 do { \
d4ed8084 240 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
1da177e4
LT
241 } while (0)
242#endif
243
244/*
245 * 64-bit software counter structure
246 *
247 * the next_reset_type is applied to the next call to pfm_reset_regs()
248 */
249typedef struct {
250 unsigned long val; /* virtual 64bit counter value */
251 unsigned long lval; /* last reset value */
252 unsigned long long_reset; /* reset value on sampling overflow */
253 unsigned long short_reset; /* reset value on overflow */
254 unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
255 unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
256 unsigned long seed; /* seed for random-number generator */
257 unsigned long mask; /* mask for random-number generator */
258 unsigned int flags; /* notify/do not notify */
259 unsigned long eventid; /* overflow event identifier */
260} pfm_counter_t;
261
262/*
263 * context flags
264 */
265typedef struct {
266 unsigned int block:1; /* when 1, task will blocked on user notifications */
267 unsigned int system:1; /* do system wide monitoring */
268 unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
269 unsigned int is_sampling:1; /* true if using a custom format */
270 unsigned int excl_idle:1; /* exclude idle task in system wide session */
271 unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
272 unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
273 unsigned int no_msg:1; /* no message sent on overflow */
274 unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
275 unsigned int reserved:22;
276} pfm_context_flags_t;
277
278#define PFM_TRAP_REASON_NONE 0x0 /* default value */
279#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
280#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
281
282
283/*
284 * perfmon context: encapsulates all the state of a monitoring session
285 */
286
287typedef struct pfm_context {
288 spinlock_t ctx_lock; /* context protection */
289
290 pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
291 unsigned int ctx_state; /* state: active/inactive (no bitfield) */
292
293 struct task_struct *ctx_task; /* task to which context is attached */
294
295 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
296
60f1c444 297 struct completion ctx_restart_done; /* use for blocking notification mode */
1da177e4
LT
298
299 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
300 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
301 unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
302
303 unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
304 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
305 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
306
35589a8f 307 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
1da177e4
LT
308
309 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
310 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
311 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
312 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
313
35589a8f
KA
314 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
315
316 unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
317 unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
1da177e4 318
e088a4ad 319 unsigned long ctx_saved_psr_up; /* only contains psr.up value */
1da177e4
LT
320
321 unsigned long ctx_last_activation; /* context last activation number for last_cpu */
322 unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
323 unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
324
325 int ctx_fd; /* file descriptor used my this context */
326 pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
327
328 pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
329 void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
330 unsigned long ctx_smpl_size; /* size of sampling buffer */
331 void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
332
333 wait_queue_head_t ctx_msgq_wait;
334 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
335 int ctx_msgq_head;
336 int ctx_msgq_tail;
337 struct fasync_struct *ctx_async_queue;
338
339 wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
340} pfm_context_t;
341
342/*
343 * magic number used to verify that structure is really
344 * a perfmon context
345 */
346#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
347
348#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
349
350#ifdef CONFIG_SMP
351#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
352#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
353#else
354#define SET_LAST_CPU(ctx, v) do {} while(0)
355#define GET_LAST_CPU(ctx) do {} while(0)
356#endif
357
358
359#define ctx_fl_block ctx_flags.block
360#define ctx_fl_system ctx_flags.system
361#define ctx_fl_using_dbreg ctx_flags.using_dbreg
362#define ctx_fl_is_sampling ctx_flags.is_sampling
363#define ctx_fl_excl_idle ctx_flags.excl_idle
364#define ctx_fl_going_zombie ctx_flags.going_zombie
365#define ctx_fl_trap_reason ctx_flags.trap_reason
366#define ctx_fl_no_msg ctx_flags.no_msg
367#define ctx_fl_can_restart ctx_flags.can_restart
368
369#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
370#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
371
372/*
373 * global information about all sessions
374 * mostly used to synchronize between system wide and per-process
375 */
376typedef struct {
377 spinlock_t pfs_lock; /* lock the structure */
378
379 unsigned int pfs_task_sessions; /* number of per task sessions */
380 unsigned int pfs_sys_sessions; /* number of per system wide sessions */
381 unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
382 unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
383 struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
384} pfm_session_t;
385
386/*
387 * information about a PMC or PMD.
388 * dep_pmd[]: a bitmask of dependent PMD registers
389 * dep_pmc[]: a bitmask of dependent PMC registers
390 */
391typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
392typedef struct {
393 unsigned int type;
394 int pm_pos;
395 unsigned long default_value; /* power-on default value */
396 unsigned long reserved_mask; /* bitmask of reserved bits */
397 pfm_reg_check_t read_check;
398 pfm_reg_check_t write_check;
399 unsigned long dep_pmd[4];
400 unsigned long dep_pmc[4];
401} pfm_reg_desc_t;
402
403/* assume cnum is a valid monitor */
404#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
405
406/*
407 * This structure is initialized at boot time and contains
408 * a description of the PMU main characteristics.
409 *
410 * If the probe function is defined, detection is based
411 * on its return value:
412 * - 0 means recognized PMU
413 * - anything else means not supported
414 * When the probe function is not defined, then the pmu_family field
415 * is used and it must match the host CPU family such that:
416 * - cpu->family & config->pmu_family != 0
417 */
418typedef struct {
419 unsigned long ovfl_val; /* overflow value for counters */
420
421 pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
422 pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
423
424 unsigned int num_pmcs; /* number of PMCS: computed at init time */
425 unsigned int num_pmds; /* number of PMDS: computed at init time */
426 unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
427 unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
428
429 char *pmu_name; /* PMU family name */
430 unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
431 unsigned int flags; /* pmu specific flags */
432 unsigned int num_ibrs; /* number of IBRS: computed at init time */
433 unsigned int num_dbrs; /* number of DBRS: computed at init time */
434 unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
435 int (*probe)(void); /* customized probe routine */
436 unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
437} pmu_config_t;
438/*
439 * PMU specific flags
440 */
441#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
442
443/*
444 * debug register related type definitions
445 */
446typedef struct {
447 unsigned long ibr_mask:56;
448 unsigned long ibr_plm:4;
449 unsigned long ibr_ig:3;
450 unsigned long ibr_x:1;
451} ibr_mask_reg_t;
452
453typedef struct {
454 unsigned long dbr_mask:56;
455 unsigned long dbr_plm:4;
456 unsigned long dbr_ig:2;
457 unsigned long dbr_w:1;
458 unsigned long dbr_r:1;
459} dbr_mask_reg_t;
460
461typedef union {
462 unsigned long val;
463 ibr_mask_reg_t ibr;
464 dbr_mask_reg_t dbr;
465} dbreg_t;
466
467
468/*
469 * perfmon command descriptions
470 */
471typedef struct {
472 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
473 char *cmd_name;
474 int cmd_flags;
475 unsigned int cmd_narg;
476 size_t cmd_argsize;
477 int (*cmd_getsize)(void *arg, size_t *sz);
478} pfm_cmd_desc_t;
479
480#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
481#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
482#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
483#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
484
485
486#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
487#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
488#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
489#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
490#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
491
492#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
493
1da177e4
LT
494typedef struct {
495 unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
496 unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
497 unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
498 unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
499 unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
500 unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
501 unsigned long pfm_smpl_handler_calls;
502 unsigned long pfm_smpl_handler_cycles;
503 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
504} pfm_stats_t;
505
506/*
507 * perfmon internal variables
508 */
509static pfm_stats_t pfm_stats[NR_CPUS];
510static pfm_session_t pfm_sessions; /* global sessions information */
511
a9f6a0dd 512static DEFINE_SPINLOCK(pfm_alt_install_check);
a1ecf7f6
TL
513static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
514
1da177e4
LT
515static struct proc_dir_entry *perfmon_dir;
516static pfm_uuid_t pfm_null_uuid = {0,};
517
518static spinlock_t pfm_buffer_fmt_lock;
519static LIST_HEAD(pfm_buffer_fmt_list);
520
521static pmu_config_t *pmu_conf;
522
523/* sysctl() controls */
4944930a
SE
524pfm_sysctl_t pfm_sysctl;
525EXPORT_SYMBOL(pfm_sysctl);
1da177e4 526
2841efa6 527static struct ctl_table pfm_ctl_table[] = {
4e009901 528 {
4e009901
EB
529 .procname = "debug",
530 .data = &pfm_sysctl.debug,
531 .maxlen = sizeof(int),
532 .mode = 0666,
6d456111 533 .proc_handler = proc_dointvec,
4e009901
EB
534 },
535 {
4e009901
EB
536 .procname = "debug_ovfl",
537 .data = &pfm_sysctl.debug_ovfl,
538 .maxlen = sizeof(int),
539 .mode = 0666,
6d456111 540 .proc_handler = proc_dointvec,
4e009901
EB
541 },
542 {
4e009901
EB
543 .procname = "fastctxsw",
544 .data = &pfm_sysctl.fastctxsw,
545 .maxlen = sizeof(int),
546 .mode = 0600,
6d456111 547 .proc_handler = proc_dointvec,
4e009901
EB
548 },
549 {
4e009901
EB
550 .procname = "expert_mode",
551 .data = &pfm_sysctl.expert_mode,
552 .maxlen = sizeof(int),
553 .mode = 0600,
6d456111 554 .proc_handler = proc_dointvec,
4e009901
EB
555 },
556 {}
1da177e4 557};
2841efa6 558static struct ctl_table pfm_sysctl_dir[] = {
4e009901 559 {
4e009901 560 .procname = "perfmon",
e3ad42be 561 .mode = 0555,
4e009901
EB
562 .child = pfm_ctl_table,
563 },
564 {}
1da177e4 565};
2841efa6 566static struct ctl_table pfm_sysctl_root[] = {
4e009901 567 {
4e009901 568 .procname = "kernel",
e3ad42be 569 .mode = 0555,
4e009901
EB
570 .child = pfm_sysctl_dir,
571 },
572 {}
1da177e4
LT
573};
574static struct ctl_table_header *pfm_sysctl_header;
575
576static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
1da177e4
LT
577
578#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
579#define pfm_get_cpu_data(a,b) per_cpu(a, b)
580
581static inline void
582pfm_put_task(struct task_struct *task)
583{
584 if (task != current) put_task_struct(task);
585}
586
1da177e4
LT
587static inline unsigned long
588pfm_protect_ctx_ctxsw(pfm_context_t *x)
589{
590 spin_lock(&(x)->ctx_lock);
591 return 0UL;
592}
593
24b8e0cc 594static inline void
1da177e4
LT
595pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
596{
597 spin_unlock(&(x)->ctx_lock);
598}
599
c74a1cbb 600/* forward declaration */
09579770 601static const struct dentry_operations pfmfs_dentry_operations;
1da177e4 602
51139ada
AV
603static struct dentry *
604pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
1da177e4 605{
c74a1cbb
AV
606 return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
607 PFMFS_MAGIC);
1da177e4
LT
608}
609
610static struct file_system_type pfm_fs_type = {
611 .name = "pfmfs",
51139ada 612 .mount = pfmfs_mount,
1da177e4
LT
613 .kill_sb = kill_anon_super,
614};
7f78e035 615MODULE_ALIAS_FS("pfmfs");
1da177e4
LT
616
617DEFINE_PER_CPU(unsigned long, pfm_syst_info);
618DEFINE_PER_CPU(struct task_struct *, pmu_owner);
619DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
620DEFINE_PER_CPU(unsigned long, pmu_activation_number);
fffcc150 621EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
1da177e4
LT
622
623
624/* forward declaration */
5dfe4c96 625static const struct file_operations pfm_file_ops;
1da177e4
LT
626
627/*
628 * forward declarations
629 */
630#ifndef CONFIG_SMP
631static void pfm_lazy_save_regs (struct task_struct *ta);
632#endif
633
634void dump_pmu_state(const char *);
635static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
636
637#include "perfmon_itanium.h"
638#include "perfmon_mckinley.h"
9179cb65 639#include "perfmon_montecito.h"
1da177e4
LT
640#include "perfmon_generic.h"
641
642static pmu_config_t *pmu_confs[]={
9179cb65 643 &pmu_conf_mont,
1da177e4
LT
644 &pmu_conf_mck,
645 &pmu_conf_ita,
646 &pmu_conf_gen, /* must be last */
647 NULL
648};
649
650
651static int pfm_end_notify_user(pfm_context_t *ctx);
652
653static inline void
654pfm_clear_psr_pp(void)
655{
656 ia64_rsm(IA64_PSR_PP);
657 ia64_srlz_i();
658}
659
660static inline void
661pfm_set_psr_pp(void)
662{
663 ia64_ssm(IA64_PSR_PP);
664 ia64_srlz_i();
665}
666
667static inline void
668pfm_clear_psr_up(void)
669{
670 ia64_rsm(IA64_PSR_UP);
671 ia64_srlz_i();
672}
673
674static inline void
675pfm_set_psr_up(void)
676{
677 ia64_ssm(IA64_PSR_UP);
678 ia64_srlz_i();
679}
680
681static inline unsigned long
682pfm_get_psr(void)
683{
684 unsigned long tmp;
685 tmp = ia64_getreg(_IA64_REG_PSR);
686 ia64_srlz_i();
687 return tmp;
688}
689
690static inline void
691pfm_set_psr_l(unsigned long val)
692{
693 ia64_setreg(_IA64_REG_PSR_L, val);
694 ia64_srlz_i();
695}
696
697static inline void
698pfm_freeze_pmu(void)
699{
700 ia64_set_pmc(0,1UL);
701 ia64_srlz_d();
702}
703
704static inline void
705pfm_unfreeze_pmu(void)
706{
707 ia64_set_pmc(0,0UL);
708 ia64_srlz_d();
709}
710
711static inline void
712pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
713{
714 int i;
715
716 for (i=0; i < nibrs; i++) {
717 ia64_set_ibr(i, ibrs[i]);
718 ia64_dv_serialize_instruction();
719 }
720 ia64_srlz_i();
721}
722
723static inline void
724pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
725{
726 int i;
727
728 for (i=0; i < ndbrs; i++) {
729 ia64_set_dbr(i, dbrs[i]);
730 ia64_dv_serialize_data();
731 }
732 ia64_srlz_d();
733}
734
735/*
736 * PMD[i] must be a counter. no check is made
737 */
738static inline unsigned long
739pfm_read_soft_counter(pfm_context_t *ctx, int i)
740{
741 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
742}
743
744/*
745 * PMD[i] must be a counter. no check is made
746 */
747static inline void
748pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
749{
750 unsigned long ovfl_val = pmu_conf->ovfl_val;
751
752 ctx->ctx_pmds[i].val = val & ~ovfl_val;
753 /*
754 * writing to unimplemented part is ignore, so we do not need to
755 * mask off top part
756 */
757 ia64_set_pmd(i, val & ovfl_val);
758}
759
760static pfm_msg_t *
761pfm_get_new_msg(pfm_context_t *ctx)
762{
763 int idx, next;
764
765 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
766
767 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
768 if (next == ctx->ctx_msgq_head) return NULL;
769
770 idx = ctx->ctx_msgq_tail;
771 ctx->ctx_msgq_tail = next;
772
773 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
774
775 return ctx->ctx_msgq+idx;
776}
777
778static pfm_msg_t *
779pfm_get_next_msg(pfm_context_t *ctx)
780{
781 pfm_msg_t *msg;
782
783 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
784
785 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
786
787 /*
788 * get oldest message
789 */
790 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
791
792 /*
793 * and move forward
794 */
795 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
796
797 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
798
799 return msg;
800}
801
802static void
803pfm_reset_msgq(pfm_context_t *ctx)
804{
805 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
806 DPRINT(("ctx=%p msgq reset\n", ctx));
807}
808
1da177e4 809static pfm_context_t *
f8e811b9 810pfm_context_alloc(int ctx_flags)
1da177e4
LT
811{
812 pfm_context_t *ctx;
813
814 /*
815 * allocate context descriptor
816 * must be able to free with interrupts disabled
817 */
52fd9108 818 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
1da177e4 819 if (ctx) {
1da177e4 820 DPRINT(("alloc ctx @%p\n", ctx));
f8e811b9
AV
821
822 /*
823 * init context protection lock
824 */
825 spin_lock_init(&ctx->ctx_lock);
826
827 /*
828 * context is unloaded
829 */
830 ctx->ctx_state = PFM_CTX_UNLOADED;
831
832 /*
833 * initialization of context's flags
834 */
835 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
836 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
837 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
838 /*
839 * will move to set properties
840 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
841 */
842
843 /*
844 * init restart semaphore to locked
845 */
846 init_completion(&ctx->ctx_restart_done);
847
848 /*
849 * activation is used in SMP only
850 */
851 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
852 SET_LAST_CPU(ctx, -1);
853
854 /*
855 * initialize notification message queue
856 */
857 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
858 init_waitqueue_head(&ctx->ctx_msgq_wait);
859 init_waitqueue_head(&ctx->ctx_zombieq);
860
1da177e4
LT
861 }
862 return ctx;
863}
864
865static void
866pfm_context_free(pfm_context_t *ctx)
867{
868 if (ctx) {
869 DPRINT(("free ctx @%p\n", ctx));
870 kfree(ctx);
871 }
872}
873
874static void
875pfm_mask_monitoring(struct task_struct *task)
876{
877 pfm_context_t *ctx = PFM_GET_CTX(task);
1da177e4
LT
878 unsigned long mask, val, ovfl_mask;
879 int i;
880
19c5870c 881 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
1da177e4
LT
882
883 ovfl_mask = pmu_conf->ovfl_val;
884 /*
885 * monitoring can only be masked as a result of a valid
886 * counter overflow. In UP, it means that the PMU still
887 * has an owner. Note that the owner can be different
888 * from the current task. However the PMU state belongs
889 * to the owner.
890 * In SMP, a valid overflow only happens when task is
891 * current. Therefore if we come here, we know that
892 * the PMU state belongs to the current task, therefore
893 * we can access the live registers.
894 *
895 * So in both cases, the live register contains the owner's
896 * state. We can ONLY touch the PMU registers and NOT the PSR.
897 *
35589a8f 898 * As a consequence to this call, the ctx->th_pmds[] array
1da177e4
LT
899 * contains stale information which must be ignored
900 * when context is reloaded AND monitoring is active (see
901 * pfm_restart).
902 */
903 mask = ctx->ctx_used_pmds[0];
904 for (i = 0; mask; i++, mask>>=1) {
905 /* skip non used pmds */
906 if ((mask & 0x1) == 0) continue;
907 val = ia64_get_pmd(i);
908
909 if (PMD_IS_COUNTING(i)) {
910 /*
911 * we rebuild the full 64 bit value of the counter
912 */
913 ctx->ctx_pmds[i].val += (val & ovfl_mask);
914 } else {
915 ctx->ctx_pmds[i].val = val;
916 }
917 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
918 i,
919 ctx->ctx_pmds[i].val,
920 val & ovfl_mask));
921 }
922 /*
923 * mask monitoring by setting the privilege level to 0
924 * we cannot use psr.pp/psr.up for this, it is controlled by
925 * the user
926 *
927 * if task is current, modify actual registers, otherwise modify
928 * thread save state, i.e., what will be restored in pfm_load_regs()
929 */
930 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
931 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
932 if ((mask & 0x1) == 0UL) continue;
35589a8f
KA
933 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
934 ctx->th_pmcs[i] &= ~0xfUL;
935 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1da177e4
LT
936 }
937 /*
938 * make all of this visible
939 */
940 ia64_srlz_d();
941}
942
943/*
944 * must always be done with task == current
945 *
946 * context must be in MASKED state when calling
947 */
948static void
949pfm_restore_monitoring(struct task_struct *task)
950{
951 pfm_context_t *ctx = PFM_GET_CTX(task);
1da177e4
LT
952 unsigned long mask, ovfl_mask;
953 unsigned long psr, val;
954 int i, is_system;
955
956 is_system = ctx->ctx_fl_system;
957 ovfl_mask = pmu_conf->ovfl_val;
958
959 if (task != current) {
19c5870c 960 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
1da177e4
LT
961 return;
962 }
963 if (ctx->ctx_state != PFM_CTX_MASKED) {
964 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
19c5870c 965 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1da177e4
LT
966 return;
967 }
968 psr = pfm_get_psr();
969 /*
970 * monitoring is masked via the PMC.
971 * As we restore their value, we do not want each counter to
972 * restart right away. We stop monitoring using the PSR,
973 * restore the PMC (and PMD) and then re-establish the psr
974 * as it was. Note that there can be no pending overflow at
975 * this point, because monitoring was MASKED.
976 *
977 * system-wide session are pinned and self-monitoring
978 */
979 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
980 /* disable dcr pp */
981 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
982 pfm_clear_psr_pp();
983 } else {
984 pfm_clear_psr_up();
985 }
986 /*
987 * first, we restore the PMD
988 */
989 mask = ctx->ctx_used_pmds[0];
990 for (i = 0; mask; i++, mask>>=1) {
991 /* skip non used pmds */
992 if ((mask & 0x1) == 0) continue;
993
994 if (PMD_IS_COUNTING(i)) {
995 /*
996 * we split the 64bit value according to
997 * counter width
998 */
999 val = ctx->ctx_pmds[i].val & ovfl_mask;
1000 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1001 } else {
1002 val = ctx->ctx_pmds[i].val;
1003 }
1004 ia64_set_pmd(i, val);
1005
1006 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1007 i,
1008 ctx->ctx_pmds[i].val,
1009 val));
1010 }
1011 /*
1012 * restore the PMCs
1013 */
1014 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1015 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1016 if ((mask & 0x1) == 0UL) continue;
35589a8f
KA
1017 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1018 ia64_set_pmc(i, ctx->th_pmcs[i]);
19c5870c
AD
1019 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1020 task_pid_nr(task), i, ctx->th_pmcs[i]));
1da177e4
LT
1021 }
1022 ia64_srlz_d();
1023
1024 /*
1025 * must restore DBR/IBR because could be modified while masked
1026 * XXX: need to optimize
1027 */
1028 if (ctx->ctx_fl_using_dbreg) {
1029 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1030 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1031 }
1032
1033 /*
1034 * now restore PSR
1035 */
1036 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1037 /* enable dcr pp */
1038 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1039 ia64_srlz_i();
1040 }
1041 pfm_set_psr_l(psr);
1042}
1043
1044static inline void
1045pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1046{
1047 int i;
1048
1049 ia64_srlz_d();
1050
1051 for (i=0; mask; i++, mask>>=1) {
1052 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1053 }
1054}
1055
1056/*
1057 * reload from thread state (used for ctxw only)
1058 */
1059static inline void
1060pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1061{
1062 int i;
1063 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1064
1065 for (i=0; mask; i++, mask>>=1) {
1066 if ((mask & 0x1) == 0) continue;
1067 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1068 ia64_set_pmd(i, val);
1069 }
1070 ia64_srlz_d();
1071}
1072
1073/*
1074 * propagate PMD from context to thread-state
1075 */
1076static inline void
1077pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1078{
1da177e4
LT
1079 unsigned long ovfl_val = pmu_conf->ovfl_val;
1080 unsigned long mask = ctx->ctx_all_pmds[0];
1081 unsigned long val;
1082 int i;
1083
1084 DPRINT(("mask=0x%lx\n", mask));
1085
1086 for (i=0; mask; i++, mask>>=1) {
1087
1088 val = ctx->ctx_pmds[i].val;
1089
1090 /*
1091 * We break up the 64 bit value into 2 pieces
1092 * the lower bits go to the machine state in the
1093 * thread (will be reloaded on ctxsw in).
1094 * The upper part stays in the soft-counter.
1095 */
1096 if (PMD_IS_COUNTING(i)) {
1097 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1098 val &= ovfl_val;
1099 }
35589a8f 1100 ctx->th_pmds[i] = val;
1da177e4
LT
1101
1102 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1103 i,
35589a8f 1104 ctx->th_pmds[i],
1da177e4
LT
1105 ctx->ctx_pmds[i].val));
1106 }
1107}
1108
1109/*
1110 * propagate PMC from context to thread-state
1111 */
1112static inline void
1113pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1114{
1da177e4
LT
1115 unsigned long mask = ctx->ctx_all_pmcs[0];
1116 int i;
1117
1118 DPRINT(("mask=0x%lx\n", mask));
1119
1120 for (i=0; mask; i++, mask>>=1) {
1121 /* masking 0 with ovfl_val yields 0 */
35589a8f
KA
1122 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1123 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1da177e4
LT
1124 }
1125}
1126
1127
1128
1129static inline void
1130pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1131{
1132 int i;
1133
1134 for (i=0; mask; i++, mask>>=1) {
1135 if ((mask & 0x1) == 0) continue;
1136 ia64_set_pmc(i, pmcs[i]);
1137 }
1138 ia64_srlz_d();
1139}
1140
1141static inline int
1142pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1143{
1144 return memcmp(a, b, sizeof(pfm_uuid_t));
1145}
1146
1147static inline int
1148pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1149{
1150 int ret = 0;
1151 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1152 return ret;
1153}
1154
1155static inline int
1156pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1157{
1158 int ret = 0;
1159 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1160 return ret;
1161}
1162
1163
1164static inline int
1165pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1166 int cpu, void *arg)
1167{
1168 int ret = 0;
1169 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1170 return ret;
1171}
1172
1173static inline int
1174pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1175 int cpu, void *arg)
1176{
1177 int ret = 0;
1178 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1179 return ret;
1180}
1181
1182static inline int
1183pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1184{
1185 int ret = 0;
1186 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1187 return ret;
1188}
1189
1190static inline int
1191pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1192{
1193 int ret = 0;
1194 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1195 return ret;
1196}
1197
1198static pfm_buffer_fmt_t *
1199__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1200{
1201 struct list_head * pos;
1202 pfm_buffer_fmt_t * entry;
1203
1204 list_for_each(pos, &pfm_buffer_fmt_list) {
1205 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1206 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1207 return entry;
1208 }
1209 return NULL;
1210}
1211
1212/*
1213 * find a buffer format based on its uuid
1214 */
1215static pfm_buffer_fmt_t *
1216pfm_find_buffer_fmt(pfm_uuid_t uuid)
1217{
1218 pfm_buffer_fmt_t * fmt;
1219 spin_lock(&pfm_buffer_fmt_lock);
1220 fmt = __pfm_find_buffer_fmt(uuid);
1221 spin_unlock(&pfm_buffer_fmt_lock);
1222 return fmt;
1223}
1224
1225int
1226pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1227{
1228 int ret = 0;
1229
1230 /* some sanity checks */
1231 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1232
1233 /* we need at least a handler */
1234 if (fmt->fmt_handler == NULL) return -EINVAL;
1235
1236 /*
1237 * XXX: need check validity of fmt_arg_size
1238 */
1239
1240 spin_lock(&pfm_buffer_fmt_lock);
1241
1242 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1243 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1244 ret = -EBUSY;
1245 goto out;
1246 }
1247 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1248 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1249
1250out:
1251 spin_unlock(&pfm_buffer_fmt_lock);
1252 return ret;
1253}
1254EXPORT_SYMBOL(pfm_register_buffer_fmt);
1255
1256int
1257pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1258{
1259 pfm_buffer_fmt_t *fmt;
1260 int ret = 0;
1261
1262 spin_lock(&pfm_buffer_fmt_lock);
1263
1264 fmt = __pfm_find_buffer_fmt(uuid);
1265 if (!fmt) {
1266 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1267 ret = -EINVAL;
1268 goto out;
1269 }
1270 list_del_init(&fmt->fmt_list);
1271 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1272
1273out:
1274 spin_unlock(&pfm_buffer_fmt_lock);
1275 return ret;
1276
1277}
1278EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1279
1280static int
1281pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1282{
1283 unsigned long flags;
1284 /*
72fdbdce 1285 * validity checks on cpu_mask have been done upstream
1da177e4
LT
1286 */
1287 LOCK_PFS(flags);
1288
1289 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1290 pfm_sessions.pfs_sys_sessions,
1291 pfm_sessions.pfs_task_sessions,
1292 pfm_sessions.pfs_sys_use_dbregs,
1293 is_syswide,
1294 cpu));
1295
1296 if (is_syswide) {
1297 /*
1298 * cannot mix system wide and per-task sessions
1299 */
1300 if (pfm_sessions.pfs_task_sessions > 0UL) {
1301 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1302 pfm_sessions.pfs_task_sessions));
1303 goto abort;
1304 }
1305
1306 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1307
1308 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1309
1310 pfm_sessions.pfs_sys_session[cpu] = task;
1311
1312 pfm_sessions.pfs_sys_sessions++ ;
1313
1314 } else {
1315 if (pfm_sessions.pfs_sys_sessions) goto abort;
1316 pfm_sessions.pfs_task_sessions++;
1317 }
1318
1319 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1320 pfm_sessions.pfs_sys_sessions,
1321 pfm_sessions.pfs_task_sessions,
1322 pfm_sessions.pfs_sys_use_dbregs,
1323 is_syswide,
1324 cpu));
1325
8df5a500 1326 /*
91d591c3 1327 * Force idle() into poll mode
8df5a500 1328 */
91d591c3 1329 cpu_idle_poll_ctrl(true);
8df5a500 1330
1da177e4
LT
1331 UNLOCK_PFS(flags);
1332
1333 return 0;
1334
1335error_conflict:
1336 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
19c5870c 1337 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
a1ecf7f6 1338 cpu));
1da177e4
LT
1339abort:
1340 UNLOCK_PFS(flags);
1341
1342 return -EBUSY;
1343
1344}
1345
1346static int
1347pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1348{
1349 unsigned long flags;
1350 /*
72fdbdce 1351 * validity checks on cpu_mask have been done upstream
1da177e4
LT
1352 */
1353 LOCK_PFS(flags);
1354
1355 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1356 pfm_sessions.pfs_sys_sessions,
1357 pfm_sessions.pfs_task_sessions,
1358 pfm_sessions.pfs_sys_use_dbregs,
1359 is_syswide,
1360 cpu));
1361
1362
1363 if (is_syswide) {
1364 pfm_sessions.pfs_sys_session[cpu] = NULL;
1365 /*
1366 * would not work with perfmon+more than one bit in cpu_mask
1367 */
1368 if (ctx && ctx->ctx_fl_using_dbreg) {
1369 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1370 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1371 } else {
1372 pfm_sessions.pfs_sys_use_dbregs--;
1373 }
1374 }
1375 pfm_sessions.pfs_sys_sessions--;
1376 } else {
1377 pfm_sessions.pfs_task_sessions--;
1378 }
1379 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1380 pfm_sessions.pfs_sys_sessions,
1381 pfm_sessions.pfs_task_sessions,
1382 pfm_sessions.pfs_sys_use_dbregs,
1383 is_syswide,
1384 cpu));
1385
91d591c3
TG
1386 /* Undo forced polling. Last session reenables pal_halt */
1387 cpu_idle_poll_ctrl(false);
8df5a500 1388
1da177e4
LT
1389 UNLOCK_PFS(flags);
1390
1391 return 0;
1392}
1393
1394/*
1395 * removes virtual mapping of the sampling buffer.
1396 * IMPORTANT: cannot be called with interrupts disable, e.g. inside
1397 * a PROTECT_CTX() section.
1398 */
1399static int
9f3a4afb 1400pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
1da177e4 1401{
9f3a4afb 1402 struct task_struct *task = current;
1da177e4
LT
1403 int r;
1404
1405 /* sanity checks */
1406 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
19c5870c 1407 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1da177e4
LT
1408 return -EINVAL;
1409 }
1410
1411 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1412
1413 /*
1414 * does the actual unmapping
1415 */
bfce281c 1416 r = vm_munmap((unsigned long)vaddr, size);
1da177e4 1417
1da177e4 1418 if (r !=0) {
19c5870c 1419 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1da177e4
LT
1420 }
1421
1422 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1423
1424 return 0;
1425}
1426
1427/*
1428 * free actual physical storage used by sampling buffer
1429 */
1430#if 0
1431static int
1432pfm_free_smpl_buffer(pfm_context_t *ctx)
1433{
1434 pfm_buffer_fmt_t *fmt;
1435
1436 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1437
1438 /*
1439 * we won't use the buffer format anymore
1440 */
1441 fmt = ctx->ctx_buf_fmt;
1442
1443 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1444 ctx->ctx_smpl_hdr,
1445 ctx->ctx_smpl_size,
1446 ctx->ctx_smpl_vaddr));
1447
1448 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1449
1450 /*
1451 * free the buffer
1452 */
731351d1 1453 vfree(ctx->ctx_smpl_hdr);
1da177e4
LT
1454
1455 ctx->ctx_smpl_hdr = NULL;
1456 ctx->ctx_smpl_size = 0UL;
1457
1458 return 0;
1459
1460invalid_free:
19c5870c 1461 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1da177e4
LT
1462 return -EINVAL;
1463}
1464#endif
1465
1466static inline void
1467pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1468{
1469 if (fmt == NULL) return;
1470
1471 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1472
1473}
1474
1475/*
1476 * pfmfs should _never_ be mounted by userland - too much of security hassle,
1477 * no real gain from having the whole whorehouse mounted. So we don't need
1478 * any operations on the root directory. However, we need a non-trivial
1479 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1480 */
b3e19d92 1481static struct vfsmount *pfmfs_mnt __read_mostly;
1da177e4
LT
1482
1483static int __init
1484init_pfm_fs(void)
1485{
1486 int err = register_filesystem(&pfm_fs_type);
1487 if (!err) {
1488 pfmfs_mnt = kern_mount(&pfm_fs_type);
1489 err = PTR_ERR(pfmfs_mnt);
1490 if (IS_ERR(pfmfs_mnt))
1491 unregister_filesystem(&pfm_fs_type);
1492 else
1493 err = 0;
1494 }
1495 return err;
1496}
1497
1da177e4
LT
1498static ssize_t
1499pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1500{
1501 pfm_context_t *ctx;
1502 pfm_msg_t *msg;
1503 ssize_t ret;
1504 unsigned long flags;
1505 DECLARE_WAITQUEUE(wait, current);
1506 if (PFM_IS_FILE(filp) == 0) {
19c5870c 1507 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1da177e4
LT
1508 return -EINVAL;
1509 }
1510
df0a59a1 1511 ctx = filp->private_data;
1da177e4 1512 if (ctx == NULL) {
19c5870c 1513 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1da177e4
LT
1514 return -EINVAL;
1515 }
1516
1517 /*
1518 * check even when there is no message
1519 */
1520 if (size < sizeof(pfm_msg_t)) {
1521 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1522 return -EINVAL;
1523 }
1524
1525 PROTECT_CTX(ctx, flags);
1526
1527 /*
1528 * put ourselves on the wait queue
1529 */
1530 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1531
1532
1533 for(;;) {
1534 /*
1535 * check wait queue
1536 */
1537
1538 set_current_state(TASK_INTERRUPTIBLE);
1539
1540 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1541
1542 ret = 0;
1543 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1544
1545 UNPROTECT_CTX(ctx, flags);
1546
1547 /*
1548 * check non-blocking read
1549 */
1550 ret = -EAGAIN;
1551 if(filp->f_flags & O_NONBLOCK) break;
1552
1553 /*
1554 * check pending signals
1555 */
1556 if(signal_pending(current)) {
1557 ret = -EINTR;
1558 break;
1559 }
1560 /*
1561 * no message, so wait
1562 */
1563 schedule();
1564
1565 PROTECT_CTX(ctx, flags);
1566 }
19c5870c 1567 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1da177e4
LT
1568 set_current_state(TASK_RUNNING);
1569 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1570
1571 if (ret < 0) goto abort;
1572
1573 ret = -EINVAL;
1574 msg = pfm_get_next_msg(ctx);
1575 if (msg == NULL) {
19c5870c 1576 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1da177e4
LT
1577 goto abort_locked;
1578 }
1579
4944930a 1580 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1da177e4
LT
1581
1582 ret = -EFAULT;
1583 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1584
1585abort_locked:
1586 UNPROTECT_CTX(ctx, flags);
1587abort:
1588 return ret;
1589}
1590
1591static ssize_t
1592pfm_write(struct file *file, const char __user *ubuf,
1593 size_t size, loff_t *ppos)
1594{
1595 DPRINT(("pfm_write called\n"));
1596 return -EINVAL;
1597}
1598
e720f32f 1599static __poll_t
1da177e4
LT
1600pfm_poll(struct file *filp, poll_table * wait)
1601{
1602 pfm_context_t *ctx;
1603 unsigned long flags;
e720f32f 1604 __poll_t mask = 0;
1da177e4
LT
1605
1606 if (PFM_IS_FILE(filp) == 0) {
19c5870c 1607 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1da177e4
LT
1608 return 0;
1609 }
1610
df0a59a1 1611 ctx = filp->private_data;
1da177e4 1612 if (ctx == NULL) {
19c5870c 1613 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1da177e4
LT
1614 return 0;
1615 }
1616
1617
1618 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1619
1620 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1621
1622 PROTECT_CTX(ctx, flags);
1623
1624 if (PFM_CTXQ_EMPTY(ctx) == 0)
a9a08845 1625 mask = EPOLLIN | EPOLLRDNORM;
1da177e4
LT
1626
1627 UNPROTECT_CTX(ctx, flags);
1628
1629 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1630
1631 return mask;
1632}
1633
ba58aebf
AB
1634static long
1635pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1da177e4
LT
1636{
1637 DPRINT(("pfm_ioctl called\n"));
1638 return -EINVAL;
1639}
1640
1641/*
1642 * interrupt cannot be masked when coming here
1643 */
1644static inline int
1645pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1646{
1647 int ret;
1648
1649 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1650
1651 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
19c5870c 1652 task_pid_nr(current),
1da177e4
LT
1653 fd,
1654 on,
1655 ctx->ctx_async_queue, ret));
1656
1657 return ret;
1658}
1659
1660static int
1661pfm_fasync(int fd, struct file *filp, int on)
1662{
1663 pfm_context_t *ctx;
1664 int ret;
1665
1666 if (PFM_IS_FILE(filp) == 0) {
19c5870c 1667 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1da177e4
LT
1668 return -EBADF;
1669 }
1670
df0a59a1 1671 ctx = filp->private_data;
1da177e4 1672 if (ctx == NULL) {
19c5870c 1673 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1da177e4
LT
1674 return -EBADF;
1675 }
1676 /*
1677 * we cannot mask interrupts during this call because this may
1678 * may go to sleep if memory is not readily avalaible.
1679 *
1680 * We are protected from the conetxt disappearing by the get_fd()/put_fd()
1681 * done in caller. Serialization of this function is ensured by caller.
1682 */
1683 ret = pfm_do_fasync(fd, filp, ctx, on);
1684
1685
1686 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1687 fd,
1688 on,
1689 ctx->ctx_async_queue, ret));
1690
1691 return ret;
1692}
1693
1694#ifdef CONFIG_SMP
1695/*
1696 * this function is exclusively called from pfm_close().
1697 * The context is not protected at that time, nor are interrupts
1698 * on the remote CPU. That's necessary to avoid deadlocks.
1699 */
1700static void
1701pfm_syswide_force_stop(void *info)
1702{
1703 pfm_context_t *ctx = (pfm_context_t *)info;
6450578f 1704 struct pt_regs *regs = task_pt_regs(current);
1da177e4
LT
1705 struct task_struct *owner;
1706 unsigned long flags;
1707 int ret;
1708
1709 if (ctx->ctx_cpu != smp_processor_id()) {
1710 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1711 ctx->ctx_cpu,
1712 smp_processor_id());
1713 return;
1714 }
1715 owner = GET_PMU_OWNER();
1716 if (owner != ctx->ctx_task) {
1717 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1718 smp_processor_id(),
19c5870c 1719 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1da177e4
LT
1720 return;
1721 }
1722 if (GET_PMU_CTX() != ctx) {
1723 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1724 smp_processor_id(),
1725 GET_PMU_CTX(), ctx);
1726 return;
1727 }
1728
19c5870c 1729 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1da177e4
LT
1730 /*
1731 * the context is already protected in pfm_close(), we simply
1732 * need to mask interrupts to avoid a PMU interrupt race on
1733 * this CPU
1734 */
1735 local_irq_save(flags);
1736
1737 ret = pfm_context_unload(ctx, NULL, 0, regs);
1738 if (ret) {
1739 DPRINT(("context_unload returned %d\n", ret));
1740 }
1741
1742 /*
1743 * unmask interrupts, PMU interrupts are now spurious here
1744 */
1745 local_irq_restore(flags);
1746}
1747
1748static void
1749pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1750{
1751 int ret;
1752
1753 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
8691e5a8 1754 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1da177e4
LT
1755 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1756}
1757#endif /* CONFIG_SMP */
1758
1759/*
1760 * called for each close(). Partially free resources.
1761 * When caller is self-monitoring, the context is unloaded.
1762 */
1763static int
75e1fcc0 1764pfm_flush(struct file *filp, fl_owner_t id)
1da177e4
LT
1765{
1766 pfm_context_t *ctx;
1767 struct task_struct *task;
1768 struct pt_regs *regs;
1769 unsigned long flags;
1770 unsigned long smpl_buf_size = 0UL;
1771 void *smpl_buf_vaddr = NULL;
1772 int state, is_system;
1773
1774 if (PFM_IS_FILE(filp) == 0) {
1775 DPRINT(("bad magic for\n"));
1776 return -EBADF;
1777 }
1778
df0a59a1 1779 ctx = filp->private_data;
1da177e4 1780 if (ctx == NULL) {
19c5870c 1781 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1da177e4
LT
1782 return -EBADF;
1783 }
1784
1785 /*
1786 * remove our file from the async queue, if we use this mode.
1787 * This can be done without the context being protected. We come
72fdbdce 1788 * here when the context has become unreachable by other tasks.
1da177e4
LT
1789 *
1790 * We may still have active monitoring at this point and we may
1791 * end up in pfm_overflow_handler(). However, fasync_helper()
1792 * operates with interrupts disabled and it cleans up the
1793 * queue. If the PMU handler is called prior to entering
1794 * fasync_helper() then it will send a signal. If it is
1795 * invoked after, it will find an empty queue and no
1796 * signal will be sent. In both case, we are safe
1797 */
1da177e4
LT
1798 PROTECT_CTX(ctx, flags);
1799
1800 state = ctx->ctx_state;
1801 is_system = ctx->ctx_fl_system;
1802
1803 task = PFM_CTX_TASK(ctx);
6450578f 1804 regs = task_pt_regs(task);
1da177e4
LT
1805
1806 DPRINT(("ctx_state=%d is_current=%d\n",
1807 state,
1808 task == current ? 1 : 0));
1809
1810 /*
1811 * if state == UNLOADED, then task is NULL
1812 */
1813
1814 /*
1815 * we must stop and unload because we are losing access to the context.
1816 */
1817 if (task == current) {
1818#ifdef CONFIG_SMP
1819 /*
1820 * the task IS the owner but it migrated to another CPU: that's bad
1821 * but we must handle this cleanly. Unfortunately, the kernel does
1822 * not provide a mechanism to block migration (while the context is loaded).
1823 *
1824 * We need to release the resource on the ORIGINAL cpu.
1825 */
1826 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1827
1828 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1829 /*
1830 * keep context protected but unmask interrupt for IPI
1831 */
1832 local_irq_restore(flags);
1833
1834 pfm_syswide_cleanup_other_cpu(ctx);
1835
1836 /*
1837 * restore interrupt masking
1838 */
1839 local_irq_save(flags);
1840
1841 /*
1842 * context is unloaded at this point
1843 */
1844 } else
1845#endif /* CONFIG_SMP */
1846 {
1847
1848 DPRINT(("forcing unload\n"));
1849 /*
1850 * stop and unload, returning with state UNLOADED
1851 * and session unreserved.
1852 */
1853 pfm_context_unload(ctx, NULL, 0, regs);
1854
1855 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1856 }
1857 }
1858
1859 /*
1860 * remove virtual mapping, if any, for the calling task.
1861 * cannot reset ctx field until last user is calling close().
1862 *
1863 * ctx_smpl_vaddr must never be cleared because it is needed
1864 * by every task with access to the context
1865 *
1866 * When called from do_exit(), the mm context is gone already, therefore
1867 * mm is NULL, i.e., the VMA is already gone and we do not have to
1868 * do anything here
1869 */
1870 if (ctx->ctx_smpl_vaddr && current->mm) {
1871 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1872 smpl_buf_size = ctx->ctx_smpl_size;
1873 }
1874
1875 UNPROTECT_CTX(ctx, flags);
1876
1877 /*
1878 * if there was a mapping, then we systematically remove it
1879 * at this point. Cannot be done inside critical section
1880 * because some VM function reenables interrupts.
1881 *
1882 */
9f3a4afb 1883 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
1da177e4
LT
1884
1885 return 0;
1886}
1887/*
1888 * called either on explicit close() or from exit_files().
1889 * Only the LAST user of the file gets to this point, i.e., it is
1890 * called only ONCE.
1891 *
1892 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
1893 * (fput()),i.e, last task to access the file. Nobody else can access the
1894 * file at this point.
1895 *
1896 * When called from exit_files(), the VMA has been freed because exit_mm()
1897 * is executed before exit_files().
1898 *
1899 * When called from exit_files(), the current task is not yet ZOMBIE but we
1900 * flush the PMU state to the context.
1901 */
1902static int
1903pfm_close(struct inode *inode, struct file *filp)
1904{
1905 pfm_context_t *ctx;
1906 struct task_struct *task;
1907 struct pt_regs *regs;
1908 DECLARE_WAITQUEUE(wait, current);
1909 unsigned long flags;
1910 unsigned long smpl_buf_size = 0UL;
1911 void *smpl_buf_addr = NULL;
1912 int free_possible = 1;
1913 int state, is_system;
1914
1915 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1916
1917 if (PFM_IS_FILE(filp) == 0) {
1918 DPRINT(("bad magic\n"));
1919 return -EBADF;
1920 }
1921
df0a59a1 1922 ctx = filp->private_data;
1da177e4 1923 if (ctx == NULL) {
19c5870c 1924 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1da177e4
LT
1925 return -EBADF;
1926 }
1927
1928 PROTECT_CTX(ctx, flags);
1929
1930 state = ctx->ctx_state;
1931 is_system = ctx->ctx_fl_system;
1932
1933 task = PFM_CTX_TASK(ctx);
6450578f 1934 regs = task_pt_regs(task);
1da177e4
LT
1935
1936 DPRINT(("ctx_state=%d is_current=%d\n",
1937 state,
1938 task == current ? 1 : 0));
1939
1940 /*
1941 * if task == current, then pfm_flush() unloaded the context
1942 */
1943 if (state == PFM_CTX_UNLOADED) goto doit;
1944
1945 /*
1946 * context is loaded/masked and task != current, we need to
1947 * either force an unload or go zombie
1948 */
1949
1950 /*
1951 * The task is currently blocked or will block after an overflow.
1952 * we must force it to wakeup to get out of the
1953 * MASKED state and transition to the unloaded state by itself.
1954 *
1955 * This situation is only possible for per-task mode
1956 */
1957 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
1958
1959 /*
1960 * set a "partial" zombie state to be checked
1961 * upon return from down() in pfm_handle_work().
1962 *
1963 * We cannot use the ZOMBIE state, because it is checked
1964 * by pfm_load_regs() which is called upon wakeup from down().
1965 * In such case, it would free the context and then we would
1966 * return to pfm_handle_work() which would access the
1967 * stale context. Instead, we set a flag invisible to pfm_load_regs()
1968 * but visible to pfm_handle_work().
1969 *
1970 * For some window of time, we have a zombie context with
1971 * ctx_state = MASKED and not ZOMBIE
1972 */
1973 ctx->ctx_fl_going_zombie = 1;
1974
1975 /*
1976 * force task to wake up from MASKED state
1977 */
60f1c444 1978 complete(&ctx->ctx_restart_done);
1da177e4
LT
1979
1980 DPRINT(("waking up ctx_state=%d\n", state));
1981
1982 /*
1983 * put ourself to sleep waiting for the other
1984 * task to report completion
1985 *
1986 * the context is protected by mutex, therefore there
1987 * is no risk of being notified of completion before
1988 * begin actually on the waitq.
1989 */
1990 set_current_state(TASK_INTERRUPTIBLE);
1991 add_wait_queue(&ctx->ctx_zombieq, &wait);
1992
1993 UNPROTECT_CTX(ctx, flags);
1994
1995 /*
1996 * XXX: check for signals :
1997 * - ok for explicit close
1998 * - not ok when coming from exit_files()
1999 */
2000 schedule();
2001
2002
2003 PROTECT_CTX(ctx, flags);
2004
2005
2006 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2007 set_current_state(TASK_RUNNING);
2008
2009 /*
2010 * context is unloaded at this point
2011 */
2012 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2013 }
2014 else if (task != current) {
2015#ifdef CONFIG_SMP
2016 /*
2017 * switch context to zombie state
2018 */
2019 ctx->ctx_state = PFM_CTX_ZOMBIE;
2020
19c5870c 2021 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
1da177e4
LT
2022 /*
2023 * cannot free the context on the spot. deferred until
2024 * the task notices the ZOMBIE state
2025 */
2026 free_possible = 0;
2027#else
2028 pfm_context_unload(ctx, NULL, 0, regs);
2029#endif
2030 }
2031
2032doit:
2033 /* reload state, may have changed during opening of critical section */
2034 state = ctx->ctx_state;
2035
2036 /*
2037 * the context is still attached to a task (possibly current)
2038 * we cannot destroy it right now
2039 */
2040
2041 /*
2042 * we must free the sampling buffer right here because
2043 * we cannot rely on it being cleaned up later by the
2044 * monitored task. It is not possible to free vmalloc'ed
2045 * memory in pfm_load_regs(). Instead, we remove the buffer
2046 * now. should there be subsequent PMU overflow originally
2047 * meant for sampling, the will be converted to spurious
2048 * and that's fine because the monitoring tools is gone anyway.
2049 */
2050 if (ctx->ctx_smpl_hdr) {
2051 smpl_buf_addr = ctx->ctx_smpl_hdr;
2052 smpl_buf_size = ctx->ctx_smpl_size;
2053 /* no more sampling */
2054 ctx->ctx_smpl_hdr = NULL;
2055 ctx->ctx_fl_is_sampling = 0;
2056 }
2057
2058 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2059 state,
2060 free_possible,
2061 smpl_buf_addr,
2062 smpl_buf_size));
2063
2064 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2065
2066 /*
2067 * UNLOADED that the session has already been unreserved.
2068 */
2069 if (state == PFM_CTX_ZOMBIE) {
2070 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2071 }
2072
2073 /*
2074 * disconnect file descriptor from context must be done
2075 * before we unlock.
2076 */
2077 filp->private_data = NULL;
2078
2079 /*
72fdbdce 2080 * if we free on the spot, the context is now completely unreachable
1da177e4
LT
2081 * from the callers side. The monitored task side is also cut, so we
2082 * can freely cut.
2083 *
2084 * If we have a deferred free, only the caller side is disconnected.
2085 */
2086 UNPROTECT_CTX(ctx, flags);
2087
2088 /*
2089 * All memory free operations (especially for vmalloc'ed memory)
2090 * MUST be done with interrupts ENABLED.
2091 */
731351d1 2092 vfree(smpl_buf_addr);
1da177e4
LT
2093
2094 /*
2095 * return the memory used by the context
2096 */
2097 if (free_possible) pfm_context_free(ctx);
2098
2099 return 0;
2100}
2101
5dfe4c96 2102static const struct file_operations pfm_file_ops = {
ba58aebf
AB
2103 .llseek = no_llseek,
2104 .read = pfm_read,
2105 .write = pfm_write,
2106 .poll = pfm_poll,
2107 .unlocked_ioctl = pfm_ioctl,
ba58aebf
AB
2108 .fasync = pfm_fasync,
2109 .release = pfm_close,
2110 .flush = pfm_flush
1da177e4
LT
2111};
2112
7ae6bdbd
MS
2113static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
2114{
2115 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
75c3cfa8 2116 d_inode(dentry)->i_ino);
7ae6bdbd
MS
2117}
2118
3ba13d17 2119static const struct dentry_operations pfmfs_dentry_operations = {
b26d4cd3 2120 .d_delete = always_delete_dentry,
7ae6bdbd 2121 .d_dname = pfmfs_dname,
1da177e4
LT
2122};
2123
2124
f8e811b9
AV
2125static struct file *
2126pfm_alloc_file(pfm_context_t *ctx)
1da177e4 2127{
f8e811b9
AV
2128 struct file *file;
2129 struct inode *inode;
2c48b9c4 2130 struct path path;
7ae6bdbd 2131 struct qstr this = { .name = "" };
1da177e4 2132
1da177e4
LT
2133 /*
2134 * allocate a new inode
2135 */
2136 inode = new_inode(pfmfs_mnt->mnt_sb);
f8e811b9
AV
2137 if (!inode)
2138 return ERR_PTR(-ENOMEM);
1da177e4
LT
2139
2140 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2141
2142 inode->i_mode = S_IFCHR|S_IRUGO;
ef81ee98
DH
2143 inode->i_uid = current_fsuid();
2144 inode->i_gid = current_fsgid();
1da177e4 2145
1da177e4
LT
2146 /*
2147 * allocate a new dcache entry
2148 */
4c1d5a64 2149 path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
2c48b9c4 2150 if (!path.dentry) {
f8e811b9
AV
2151 iput(inode);
2152 return ERR_PTR(-ENOMEM);
2153 }
2c48b9c4 2154 path.mnt = mntget(pfmfs_mnt);
1da177e4 2155
2c48b9c4 2156 d_add(path.dentry, inode);
1da177e4 2157
2c48b9c4 2158 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
39b65252 2159 if (IS_ERR(file)) {
2c48b9c4 2160 path_put(&path);
39b65252 2161 return file;
f8e811b9 2162 }
1da177e4 2163
1da177e4 2164 file->f_flags = O_RDONLY;
f8e811b9 2165 file->private_data = ctx;
1da177e4 2166
f8e811b9 2167 return file;
1da177e4
LT
2168}
2169
2170static int
2171pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2172{
2173 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2174
2175 while (size > 0) {
2176 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2177
2178
2179 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2180 return -ENOMEM;
2181
2182 addr += PAGE_SIZE;
2183 buf += PAGE_SIZE;
2184 size -= PAGE_SIZE;
2185 }
2186 return 0;
2187}
2188
2189/*
2190 * allocate a sampling buffer and remaps it into the user address space of the task
2191 */
2192static int
41d5e5d7 2193pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
1da177e4
LT
2194{
2195 struct mm_struct *mm = task->mm;
2196 struct vm_area_struct *vma = NULL;
2197 unsigned long size;
2198 void *smpl_buf;
2199
2200
2201 /*
2202 * the fixed header + requested size and align to page boundary
2203 */
2204 size = PAGE_ALIGN(rsize);
2205
2206 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2207
2208 /*
2209 * check requested size to avoid Denial-of-service attacks
2210 * XXX: may have to refine this test
2211 * Check against address space limit.
2212 *
2213 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2214 * return -ENOMEM;
2215 */
02b763b8 2216 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
1da177e4
LT
2217 return -ENOMEM;
2218
2219 /*
2220 * We do the easy to undo allocations first.
1da177e4 2221 */
731351d1 2222 smpl_buf = vzalloc(size);
1da177e4
LT
2223 if (smpl_buf == NULL) {
2224 DPRINT(("Can't allocate sampling buffer\n"));
2225 return -ENOMEM;
2226 }
2227
2228 DPRINT(("smpl_buf @%p\n", smpl_buf));
2229
2230 /* allocate vma */
490fc053 2231 vma = vm_area_alloc(mm);
1da177e4
LT
2232 if (!vma) {
2233 DPRINT(("Cannot allocate vma\n"));
2234 goto error_kmem;
2235 }
1da177e4
LT
2236
2237 /*
2238 * partially initialize the vma for the sampling buffer
2239 */
cb0942b8 2240 vma->vm_file = get_file(filp);
314e51b9 2241 vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
1da177e4
LT
2242 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
2243
2244 /*
2245 * Now we have everything we need and we can initialize
2246 * and connect all the data structures
2247 */
2248
2249 ctx->ctx_smpl_hdr = smpl_buf;
2250 ctx->ctx_smpl_size = size; /* aligned size */
2251
2252 /*
2253 * Let's do the difficult operations next.
2254 *
2255 * now we atomically find some area in the address space and
2256 * remap the buffer in it.
2257 */
2258 down_write(&task->mm->mmap_sem);
2259
2260 /* find some free area in address space, must have mmap sem held */
4ad310b8
AV
2261 vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
2262 if (IS_ERR_VALUE(vma->vm_start)) {
1da177e4
LT
2263 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2264 up_write(&task->mm->mmap_sem);
2265 goto error;
2266 }
2267 vma->vm_end = vma->vm_start + size;
2268 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2269
2270 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2271
2272 /* can only be applied to current task, need to have the mm semaphore held when called */
2273 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2274 DPRINT(("Can't remap buffer\n"));
2275 up_write(&task->mm->mmap_sem);
2276 goto error;
2277 }
2278
2279 /*
2280 * now insert the vma in the vm list for the process, must be
2281 * done with mmap lock held
2282 */
2283 insert_vm_struct(mm, vma);
2284
84638335 2285 vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
1da177e4
LT
2286 up_write(&task->mm->mmap_sem);
2287
2288 /*
2289 * keep track of user level virtual address
2290 */
2291 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2292 *(unsigned long *)user_vaddr = vma->vm_start;
2293
2294 return 0;
2295
2296error:
3928d4f5 2297 vm_area_free(vma);
1da177e4 2298error_kmem:
731351d1 2299 vfree(smpl_buf);
1da177e4
LT
2300
2301 return -ENOMEM;
2302}
2303
2304/*
2305 * XXX: do something better here
2306 */
2307static int
2308pfm_bad_permissions(struct task_struct *task)
2309{
c69e8d9c 2310 const struct cred *tcred;
6c1ee033
EB
2311 kuid_t uid = current_uid();
2312 kgid_t gid = current_gid();
c69e8d9c
DH
2313 int ret;
2314
2315 rcu_read_lock();
2316 tcred = __task_cred(task);
ef81ee98 2317
1da177e4
LT
2318 /* inspired by ptrace_attach() */
2319 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
6c1ee033
EB
2320 from_kuid(&init_user_ns, uid),
2321 from_kgid(&init_user_ns, gid),
2322 from_kuid(&init_user_ns, tcred->euid),
2323 from_kuid(&init_user_ns, tcred->suid),
2324 from_kuid(&init_user_ns, tcred->uid),
2325 from_kgid(&init_user_ns, tcred->egid),
2326 from_kgid(&init_user_ns, tcred->sgid)));
2327
2328 ret = ((!uid_eq(uid, tcred->euid))
2329 || (!uid_eq(uid, tcred->suid))
2330 || (!uid_eq(uid, tcred->uid))
2331 || (!gid_eq(gid, tcred->egid))
2332 || (!gid_eq(gid, tcred->sgid))
2333 || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
c69e8d9c
DH
2334
2335 rcu_read_unlock();
2336 return ret;
1da177e4
LT
2337}
2338
2339static int
2340pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2341{
2342 int ctx_flags;
2343
2344 /* valid signal */
2345
2346 ctx_flags = pfx->ctx_flags;
2347
2348 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2349
2350 /*
2351 * cannot block in this mode
2352 */
2353 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2354 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2355 return -EINVAL;
2356 }
2357 } else {
2358 }
2359 /* probably more to add here */
2360
2361 return 0;
2362}
2363
2364static int
41d5e5d7 2365pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
1da177e4
LT
2366 unsigned int cpu, pfarg_context_t *arg)
2367{
2368 pfm_buffer_fmt_t *fmt = NULL;
2369 unsigned long size = 0UL;
2370 void *uaddr = NULL;
2371 void *fmt_arg = NULL;
2372 int ret = 0;
2373#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2374
2375 /* invoke and lock buffer format, if found */
2376 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2377 if (fmt == NULL) {
19c5870c 2378 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
1da177e4
LT
2379 return -EINVAL;
2380 }
2381
2382 /*
2383 * buffer argument MUST be contiguous to pfarg_context_t
2384 */
2385 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2386
2387 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2388
19c5870c 2389 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
1da177e4
LT
2390
2391 if (ret) goto error;
2392
2393 /* link buffer format and context */
2394 ctx->ctx_buf_fmt = fmt;
f8e811b9 2395 ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
1da177e4
LT
2396
2397 /*
2398 * check if buffer format wants to use perfmon buffer allocation/mapping service
2399 */
2400 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2401 if (ret) goto error;
2402
2403 if (size) {
2404 /*
2405 * buffer is always remapped into the caller's address space
2406 */
41d5e5d7 2407 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
1da177e4
LT
2408 if (ret) goto error;
2409
2410 /* keep track of user address of buffer */
2411 arg->ctx_smpl_vaddr = uaddr;
2412 }
2413 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2414
2415error:
2416 return ret;
2417}
2418
2419static void
2420pfm_reset_pmu_state(pfm_context_t *ctx)
2421{
2422 int i;
2423
2424 /*
2425 * install reset values for PMC.
2426 */
2427 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2428 if (PMC_IS_IMPL(i) == 0) continue;
2429 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2430 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2431 }
2432 /*
2433 * PMD registers are set to 0UL when the context in memset()
2434 */
2435
2436 /*
2437 * On context switched restore, we must restore ALL pmc and ALL pmd even
2438 * when they are not actively used by the task. In UP, the incoming process
2439 * may otherwise pick up left over PMC, PMD state from the previous process.
2440 * As opposed to PMD, stale PMC can cause harm to the incoming
2441 * process because they may change what is being measured.
2442 * Therefore, we must systematically reinstall the entire
2443 * PMC state. In SMP, the same thing is possible on the
2444 * same CPU but also on between 2 CPUs.
2445 *
2446 * The problem with PMD is information leaking especially
2447 * to user level when psr.sp=0
2448 *
2449 * There is unfortunately no easy way to avoid this problem
2450 * on either UP or SMP. This definitively slows down the
2451 * pfm_load_regs() function.
2452 */
2453
2454 /*
2455 * bitmask of all PMCs accessible to this context
2456 *
2457 * PMC0 is treated differently.
2458 */
2459 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2460
2461 /*
72fdbdce 2462 * bitmask of all PMDs that are accessible to this context
1da177e4
LT
2463 */
2464 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2465
2466 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2467
2468 /*
2469 * useful in case of re-enable after disable
2470 */
2471 ctx->ctx_used_ibrs[0] = 0UL;
2472 ctx->ctx_used_dbrs[0] = 0UL;
2473}
2474
2475static int
2476pfm_ctx_getsize(void *arg, size_t *sz)
2477{
2478 pfarg_context_t *req = (pfarg_context_t *)arg;
2479 pfm_buffer_fmt_t *fmt;
2480
2481 *sz = 0;
2482
2483 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2484
2485 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2486 if (fmt == NULL) {
2487 DPRINT(("cannot find buffer format\n"));
2488 return -EINVAL;
2489 }
2490 /* get just enough to copy in user parameters */
2491 *sz = fmt->fmt_arg_size;
2492 DPRINT(("arg_size=%lu\n", *sz));
2493
2494 return 0;
2495}
2496
2497
2498
2499/*
2500 * cannot attach if :
2501 * - kernel task
2502 * - task not owned by caller
2503 * - task incompatible with context mode
2504 */
2505static int
2506pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2507{
2508 /*
2509 * no kernel task or task not owner by caller
2510 */
2511 if (task->mm == NULL) {
19c5870c 2512 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
1da177e4
LT
2513 return -EPERM;
2514 }
2515 if (pfm_bad_permissions(task)) {
19c5870c 2516 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
1da177e4
LT
2517 return -EPERM;
2518 }
2519 /*
2520 * cannot block in self-monitoring mode
2521 */
2522 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
19c5870c 2523 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
1da177e4
LT
2524 return -EINVAL;
2525 }
2526
2527 if (task->exit_state == EXIT_ZOMBIE) {
19c5870c 2528 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
1da177e4
LT
2529 return -EBUSY;
2530 }
2531
2532 /*
2533 * always ok for self
2534 */
2535 if (task == current) return 0;
2536
21498223 2537 if (!task_is_stopped_or_traced(task)) {
19c5870c 2538 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
1da177e4
LT
2539 return -EBUSY;
2540 }
2541 /*
2542 * make sure the task is off any CPU
2543 */
85ba2d86 2544 wait_task_inactive(task, 0);
1da177e4
LT
2545
2546 /* more to come... */
2547
2548 return 0;
2549}
2550
2551static int
2552pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2553{
2554 struct task_struct *p = current;
2555 int ret;
2556
2557 /* XXX: need to add more checks here */
2558 if (pid < 2) return -EPERM;
2559
e1b0d4ba 2560 if (pid != task_pid_vnr(current)) {
1da177e4 2561 /* make sure task cannot go away while we operate on it */
2ee08260
MR
2562 p = find_get_task_by_vpid(pid);
2563 if (!p)
2564 return -ESRCH;
1da177e4
LT
2565 }
2566
2567 ret = pfm_task_incompatible(ctx, p);
2568 if (ret == 0) {
2569 *task = p;
2570 } else if (p != current) {
2571 pfm_put_task(p);
2572 }
2573 return ret;
2574}
2575
2576
2577
2578static int
2579pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2580{
2581 pfarg_context_t *req = (pfarg_context_t *)arg;
2582 struct file *filp;
f8e811b9 2583 struct path path;
1da177e4 2584 int ctx_flags;
f8e811b9 2585 int fd;
1da177e4
LT
2586 int ret;
2587
2588 /* let's check the arguments first */
2589 ret = pfarg_is_sane(current, req);
f8e811b9
AV
2590 if (ret < 0)
2591 return ret;
1da177e4
LT
2592
2593 ctx_flags = req->ctx_flags;
2594
2595 ret = -ENOMEM;
2596
aeb682dd 2597 fd = get_unused_fd_flags(0);
f8e811b9
AV
2598 if (fd < 0)
2599 return fd;
1da177e4 2600
f8e811b9
AV
2601 ctx = pfm_context_alloc(ctx_flags);
2602 if (!ctx)
2603 goto error;
1da177e4 2604
f8e811b9
AV
2605 filp = pfm_alloc_file(ctx);
2606 if (IS_ERR(filp)) {
2607 ret = PTR_ERR(filp);
2608 goto error_file;
2609 }
1da177e4 2610
f8e811b9 2611 req->ctx_fd = ctx->ctx_fd = fd;
1da177e4
LT
2612
2613 /*
2614 * does the user want to sample?
2615 */
2616 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
41d5e5d7 2617 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
f8e811b9
AV
2618 if (ret)
2619 goto buffer_error;
1da177e4
LT
2620 }
2621
04157e4c 2622 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
1da177e4
LT
2623 ctx,
2624 ctx_flags,
2625 ctx->ctx_fl_system,
2626 ctx->ctx_fl_block,
2627 ctx->ctx_fl_excl_idle,
2628 ctx->ctx_fl_no_msg,
2629 ctx->ctx_fd));
2630
2631 /*
2632 * initialize soft PMU state
2633 */
2634 pfm_reset_pmu_state(ctx);
2635
f8e811b9
AV
2636 fd_install(fd, filp);
2637
1da177e4
LT
2638 return 0;
2639
2640buffer_error:
f8e811b9
AV
2641 path = filp->f_path;
2642 put_filp(filp);
2643 path_put(&path);
1da177e4
LT
2644
2645 if (ctx->ctx_buf_fmt) {
2646 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2647 }
2648error_file:
2649 pfm_context_free(ctx);
2650
2651error:
f8e811b9 2652 put_unused_fd(fd);
1da177e4
LT
2653 return ret;
2654}
2655
2656static inline unsigned long
2657pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2658{
2659 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2660 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2661 extern unsigned long carta_random32 (unsigned long seed);
2662
2663 if (reg->flags & PFM_REGFL_RANDOM) {
2664 new_seed = carta_random32(old_seed);
2665 val -= (old_seed & mask); /* counter values are negative numbers! */
2666 if ((mask >> 32) != 0)
2667 /* construct a full 64-bit random value: */
2668 new_seed |= carta_random32(old_seed >> 32) << 32;
2669 reg->seed = new_seed;
2670 }
2671 reg->lval = val;
2672 return val;
2673}
2674
2675static void
2676pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2677{
2678 unsigned long mask = ovfl_regs[0];
2679 unsigned long reset_others = 0UL;
2680 unsigned long val;
2681 int i;
2682
2683 /*
2684 * now restore reset value on sampling overflowed counters
2685 */
2686 mask >>= PMU_FIRST_COUNTER;
2687 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2688
2689 if ((mask & 0x1UL) == 0UL) continue;
2690
2691 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2692 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2693
2694 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2695 }
2696
2697 /*
2698 * Now take care of resetting the other registers
2699 */
2700 for(i = 0; reset_others; i++, reset_others >>= 1) {
2701
2702 if ((reset_others & 0x1) == 0) continue;
2703
2704 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2705
2706 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2707 is_long_reset ? "long" : "short", i, val));
2708 }
2709}
2710
2711static void
2712pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2713{
2714 unsigned long mask = ovfl_regs[0];
2715 unsigned long reset_others = 0UL;
2716 unsigned long val;
2717 int i;
2718
2719 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2720
2721 if (ctx->ctx_state == PFM_CTX_MASKED) {
2722 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2723 return;
2724 }
2725
2726 /*
2727 * now restore reset value on sampling overflowed counters
2728 */
2729 mask >>= PMU_FIRST_COUNTER;
2730 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2731
2732 if ((mask & 0x1UL) == 0UL) continue;
2733
2734 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2735 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2736
2737 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2738
2739 pfm_write_soft_counter(ctx, i, val);
2740 }
2741
2742 /*
2743 * Now take care of resetting the other registers
2744 */
2745 for(i = 0; reset_others; i++, reset_others >>= 1) {
2746
2747 if ((reset_others & 0x1) == 0) continue;
2748
2749 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2750
2751 if (PMD_IS_COUNTING(i)) {
2752 pfm_write_soft_counter(ctx, i, val);
2753 } else {
2754 ia64_set_pmd(i, val);
2755 }
2756 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2757 is_long_reset ? "long" : "short", i, val));
2758 }
2759 ia64_srlz_d();
2760}
2761
2762static int
2763pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2764{
1da177e4
LT
2765 struct task_struct *task;
2766 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2767 unsigned long value, pmc_pm;
2768 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2769 unsigned int cnum, reg_flags, flags, pmc_type;
2770 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2771 int is_monitor, is_counting, state;
2772 int ret = -EINVAL;
2773 pfm_reg_check_t wr_func;
2774#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2775
2776 state = ctx->ctx_state;
2777 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2778 is_system = ctx->ctx_fl_system;
2779 task = ctx->ctx_task;
2780 impl_pmds = pmu_conf->impl_pmds[0];
2781
2782 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2783
2784 if (is_loaded) {
1da177e4
LT
2785 /*
2786 * In system wide and when the context is loaded, access can only happen
2787 * when the caller is running on the CPU being monitored by the session.
2788 * It does not have to be the owner (ctx_task) of the context per se.
2789 */
2790 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2791 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2792 return -EBUSY;
2793 }
2794 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2795 }
2796 expert_mode = pfm_sysctl.expert_mode;
2797
2798 for (i = 0; i < count; i++, req++) {
2799
2800 cnum = req->reg_num;
2801 reg_flags = req->reg_flags;
2802 value = req->reg_value;
2803 smpl_pmds = req->reg_smpl_pmds[0];
2804 reset_pmds = req->reg_reset_pmds[0];
2805 flags = 0;
2806
2807
2808 if (cnum >= PMU_MAX_PMCS) {
2809 DPRINT(("pmc%u is invalid\n", cnum));
2810 goto error;
2811 }
2812
2813 pmc_type = pmu_conf->pmc_desc[cnum].type;
2814 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2815 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2816 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2817
2818 /*
2819 * we reject all non implemented PMC as well
2820 * as attempts to modify PMC[0-3] which are used
2821 * as status registers by the PMU
2822 */
2823 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2824 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2825 goto error;
2826 }
2827 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2828 /*
2829 * If the PMC is a monitor, then if the value is not the default:
2830 * - system-wide session: PMCx.pm=1 (privileged monitor)
2831 * - per-task : PMCx.pm=0 (user monitor)
2832 */
2833 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2834 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2835 cnum,
2836 pmc_pm,
2837 is_system));
2838 goto error;
2839 }
2840
2841 if (is_counting) {
2842 /*
2843 * enforce generation of overflow interrupt. Necessary on all
2844 * CPUs.
2845 */
2846 value |= 1 << PMU_PMC_OI;
2847
2848 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2849 flags |= PFM_REGFL_OVFL_NOTIFY;
2850 }
2851
2852 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2853
2854 /* verify validity of smpl_pmds */
2855 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2856 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2857 goto error;
2858 }
2859
2860 /* verify validity of reset_pmds */
2861 if ((reset_pmds & impl_pmds) != reset_pmds) {
2862 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2863 goto error;
2864 }
2865 } else {
2866 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2867 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2868 goto error;
2869 }
2870 /* eventid on non-counting monitors are ignored */
2871 }
2872
2873 /*
2874 * execute write checker, if any
2875 */
2876 if (likely(expert_mode == 0 && wr_func)) {
2877 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2878 if (ret) goto error;
2879 ret = -EINVAL;
2880 }
2881
2882 /*
2883 * no error on this register
2884 */
2885 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2886
2887 /*
2888 * Now we commit the changes to the software state
2889 */
2890
2891 /*
2892 * update overflow information
2893 */
2894 if (is_counting) {
2895 /*
2896 * full flag update each time a register is programmed
2897 */
2898 ctx->ctx_pmds[cnum].flags = flags;
2899
2900 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2901 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2902 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2903
2904 /*
2905 * Mark all PMDS to be accessed as used.
2906 *
2907 * We do not keep track of PMC because we have to
2908 * systematically restore ALL of them.
2909 *
2910 * We do not update the used_monitors mask, because
2911 * if we have not programmed them, then will be in
2912 * a quiescent state, therefore we will not need to
2913 * mask/restore then when context is MASKED.
2914 */
2915 CTX_USED_PMD(ctx, reset_pmds);
2916 CTX_USED_PMD(ctx, smpl_pmds);
2917 /*
2918 * make sure we do not try to reset on
2919 * restart because we have established new values
2920 */
2921 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
2922 }
2923 /*
2924 * Needed in case the user does not initialize the equivalent
2925 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
2926 * possible leak here.
2927 */
2928 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
2929
2930 /*
2931 * keep track of the monitor PMC that we are using.
2932 * we save the value of the pmc in ctx_pmcs[] and if
2933 * the monitoring is not stopped for the context we also
2934 * place it in the saved state area so that it will be
2935 * picked up later by the context switch code.
2936 *
2937 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
2938 *
35589a8f 2939 * The value in th_pmcs[] may be modified on overflow, i.e., when
1da177e4
LT
2940 * monitoring needs to be stopped.
2941 */
2942 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
2943
2944 /*
2945 * update context state
2946 */
2947 ctx->ctx_pmcs[cnum] = value;
2948
2949 if (is_loaded) {
2950 /*
2951 * write thread state
2952 */
35589a8f 2953 if (is_system == 0) ctx->th_pmcs[cnum] = value;
1da177e4
LT
2954
2955 /*
2956 * write hardware register if we can
2957 */
2958 if (can_access_pmu) {
2959 ia64_set_pmc(cnum, value);
2960 }
2961#ifdef CONFIG_SMP
2962 else {
2963 /*
2964 * per-task SMP only here
2965 *
2966 * we are guaranteed that the task is not running on the other CPU,
2967 * we indicate that this PMD will need to be reloaded if the task
2968 * is rescheduled on the CPU it ran last on.
2969 */
2970 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
2971 }
2972#endif
2973 }
2974
2975 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
2976 cnum,
2977 value,
2978 is_loaded,
2979 can_access_pmu,
2980 flags,
2981 ctx->ctx_all_pmcs[0],
2982 ctx->ctx_used_pmds[0],
2983 ctx->ctx_pmds[cnum].eventid,
2984 smpl_pmds,
2985 reset_pmds,
2986 ctx->ctx_reload_pmcs[0],
2987 ctx->ctx_used_monitors[0],
2988 ctx->ctx_ovfl_regs[0]));
2989 }
2990
2991 /*
2992 * make sure the changes are visible
2993 */
2994 if (can_access_pmu) ia64_srlz_d();
2995
2996 return 0;
2997error:
2998 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
2999 return ret;
3000}
3001
3002static int
3003pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3004{
1da177e4
LT
3005 struct task_struct *task;
3006 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3007 unsigned long value, hw_value, ovfl_mask;
3008 unsigned int cnum;
3009 int i, can_access_pmu = 0, state;
3010 int is_counting, is_loaded, is_system, expert_mode;
3011 int ret = -EINVAL;
3012 pfm_reg_check_t wr_func;
3013
3014
3015 state = ctx->ctx_state;
3016 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3017 is_system = ctx->ctx_fl_system;
3018 ovfl_mask = pmu_conf->ovfl_val;
3019 task = ctx->ctx_task;
3020
3021 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3022
3023 /*
3024 * on both UP and SMP, we can only write to the PMC when the task is
3025 * the owner of the local PMU.
3026 */
3027 if (likely(is_loaded)) {
1da177e4
LT
3028 /*
3029 * In system wide and when the context is loaded, access can only happen
3030 * when the caller is running on the CPU being monitored by the session.
3031 * It does not have to be the owner (ctx_task) of the context per se.
3032 */
3033 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3034 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3035 return -EBUSY;
3036 }
3037 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3038 }
3039 expert_mode = pfm_sysctl.expert_mode;
3040
3041 for (i = 0; i < count; i++, req++) {
3042
3043 cnum = req->reg_num;
3044 value = req->reg_value;
3045
3046 if (!PMD_IS_IMPL(cnum)) {
3047 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3048 goto abort_mission;
3049 }
3050 is_counting = PMD_IS_COUNTING(cnum);
3051 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3052
3053 /*
3054 * execute write checker, if any
3055 */
3056 if (unlikely(expert_mode == 0 && wr_func)) {
3057 unsigned long v = value;
3058
3059 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3060 if (ret) goto abort_mission;
3061
3062 value = v;
3063 ret = -EINVAL;
3064 }
3065
3066 /*
3067 * no error on this register
3068 */
3069 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3070
3071 /*
3072 * now commit changes to software state
3073 */
3074 hw_value = value;
3075
3076 /*
3077 * update virtualized (64bits) counter
3078 */
3079 if (is_counting) {
3080 /*
3081 * write context state
3082 */
3083 ctx->ctx_pmds[cnum].lval = value;
3084
3085 /*
3086 * when context is load we use the split value
3087 */
3088 if (is_loaded) {
3089 hw_value = value & ovfl_mask;
3090 value = value & ~ovfl_mask;
3091 }
3092 }
3093 /*
3094 * update reset values (not just for counters)
3095 */
3096 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3097 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3098
3099 /*
3100 * update randomization parameters (not just for counters)
3101 */
3102 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3103 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3104
3105 /*
3106 * update context value
3107 */
3108 ctx->ctx_pmds[cnum].val = value;
3109
3110 /*
3111 * Keep track of what we use
3112 *
3113 * We do not keep track of PMC because we have to
3114 * systematically restore ALL of them.
3115 */
3116 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3117
3118 /*
3119 * mark this PMD register used as well
3120 */
3121 CTX_USED_PMD(ctx, RDEP(cnum));
3122
3123 /*
3124 * make sure we do not try to reset on
3125 * restart because we have established new values
3126 */
3127 if (is_counting && state == PFM_CTX_MASKED) {
3128 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3129 }
3130
3131 if (is_loaded) {
3132 /*
3133 * write thread state
3134 */
35589a8f 3135 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
1da177e4
LT
3136
3137 /*
3138 * write hardware register if we can
3139 */
3140 if (can_access_pmu) {
3141 ia64_set_pmd(cnum, hw_value);
3142 } else {
3143#ifdef CONFIG_SMP
3144 /*
3145 * we are guaranteed that the task is not running on the other CPU,
3146 * we indicate that this PMD will need to be reloaded if the task
3147 * is rescheduled on the CPU it ran last on.
3148 */
3149 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3150#endif
3151 }
3152 }
3153
3154 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3155 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3156 cnum,
3157 value,
3158 is_loaded,
3159 can_access_pmu,
3160 hw_value,
3161 ctx->ctx_pmds[cnum].val,
3162 ctx->ctx_pmds[cnum].short_reset,
3163 ctx->ctx_pmds[cnum].long_reset,
3164 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3165 ctx->ctx_pmds[cnum].seed,
3166 ctx->ctx_pmds[cnum].mask,
3167 ctx->ctx_used_pmds[0],
3168 ctx->ctx_pmds[cnum].reset_pmds[0],
3169 ctx->ctx_reload_pmds[0],
3170 ctx->ctx_all_pmds[0],
3171 ctx->ctx_ovfl_regs[0]));
3172 }
3173
3174 /*
3175 * make changes visible
3176 */
3177 if (can_access_pmu) ia64_srlz_d();
3178
3179 return 0;
3180
3181abort_mission:
3182 /*
3183 * for now, we have only one possibility for error
3184 */
3185 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3186 return ret;
3187}
3188
3189/*
3190 * By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
3191 * Therefore we know, we do not have to worry about the PMU overflow interrupt. If an
3192 * interrupt is delivered during the call, it will be kept pending until we leave, making
3193 * it appears as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
3194 * guaranteed to return consistent data to the user, it may simply be old. It is not
3195 * trivial to treat the overflow while inside the call because you may end up in
3196 * some module sampling buffer code causing deadlocks.
3197 */
3198static int
3199pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3200{
1da177e4
LT
3201 struct task_struct *task;
3202 unsigned long val = 0UL, lval, ovfl_mask, sval;
3203 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3204 unsigned int cnum, reg_flags = 0;
3205 int i, can_access_pmu = 0, state;
3206 int is_loaded, is_system, is_counting, expert_mode;
3207 int ret = -EINVAL;
3208 pfm_reg_check_t rd_func;
3209
3210 /*
3211 * access is possible when loaded only for
3212 * self-monitoring tasks or in UP mode
3213 */
3214
3215 state = ctx->ctx_state;
3216 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3217 is_system = ctx->ctx_fl_system;
3218 ovfl_mask = pmu_conf->ovfl_val;
3219 task = ctx->ctx_task;
3220
3221 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3222
3223 if (likely(is_loaded)) {
1da177e4
LT
3224 /*
3225 * In system wide and when the context is loaded, access can only happen
3226 * when the caller is running on the CPU being monitored by the session.
3227 * It does not have to be the owner (ctx_task) of the context per se.
3228 */
3229 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3230 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3231 return -EBUSY;
3232 }
3233 /*
3234 * this can be true when not self-monitoring only in UP
3235 */
3236 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3237
3238 if (can_access_pmu) ia64_srlz_d();
3239 }
3240 expert_mode = pfm_sysctl.expert_mode;
3241
3242 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3243 is_loaded,
3244 can_access_pmu,
3245 state));
3246
3247 /*
3248 * on both UP and SMP, we can only read the PMD from the hardware register when
3249 * the task is the owner of the local PMU.
3250 */
3251
3252 for (i = 0; i < count; i++, req++) {
3253
3254 cnum = req->reg_num;
3255 reg_flags = req->reg_flags;
3256
3257 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3258 /*
3259 * we can only read the register that we use. That includes
72fdbdce 3260 * the one we explicitly initialize AND the one we want included
1da177e4
LT
3261 * in the sampling buffer (smpl_regs).
3262 *
3263 * Having this restriction allows optimization in the ctxsw routine
3264 * without compromising security (leaks)
3265 */
3266 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3267
3268 sval = ctx->ctx_pmds[cnum].val;
3269 lval = ctx->ctx_pmds[cnum].lval;
3270 is_counting = PMD_IS_COUNTING(cnum);
3271
3272 /*
3273 * If the task is not the current one, then we check if the
3274 * PMU state is still in the local live register due to lazy ctxsw.
3275 * If true, then we read directly from the registers.
3276 */
3277 if (can_access_pmu){
3278 val = ia64_get_pmd(cnum);
3279 } else {
3280 /*
3281 * context has been saved
3282 * if context is zombie, then task does not exist anymore.
3283 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3284 */
35589a8f 3285 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
1da177e4
LT
3286 }
3287 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3288
3289 if (is_counting) {
3290 /*
3291 * XXX: need to check for overflow when loaded
3292 */
3293 val &= ovfl_mask;
3294 val += sval;
3295 }
3296
3297 /*
3298 * execute read checker, if any
3299 */
3300 if (unlikely(expert_mode == 0 && rd_func)) {
3301 unsigned long v = val;
3302 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3303 if (ret) goto error;
3304 val = v;
3305 ret = -EINVAL;
3306 }
3307
3308 PFM_REG_RETFLAG_SET(reg_flags, 0);
3309
3310 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3311
3312 /*
3313 * update register return value, abort all if problem during copy.
3314 * we only modify the reg_flags field. no check mode is fine because
3315 * access has been verified upfront in sys_perfmonctl().
3316 */
3317 req->reg_value = val;
3318 req->reg_flags = reg_flags;
3319 req->reg_last_reset_val = lval;
3320 }
3321
3322 return 0;
3323
3324error:
3325 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3326 return ret;
3327}
3328
3329int
3330pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3331{
3332 pfm_context_t *ctx;
3333
3334 if (req == NULL) return -EINVAL;
3335
3336 ctx = GET_PMU_CTX();
3337
3338 if (ctx == NULL) return -EINVAL;
3339
3340 /*
3341 * for now limit to current task, which is enough when calling
3342 * from overflow handler
3343 */
3344 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3345
3346 return pfm_write_pmcs(ctx, req, nreq, regs);
3347}
3348EXPORT_SYMBOL(pfm_mod_write_pmcs);
3349
3350int
3351pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3352{
3353 pfm_context_t *ctx;
3354
3355 if (req == NULL) return -EINVAL;
3356
3357 ctx = GET_PMU_CTX();
3358
3359 if (ctx == NULL) return -EINVAL;
3360
3361 /*
3362 * for now limit to current task, which is enough when calling
3363 * from overflow handler
3364 */
3365 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3366
3367 return pfm_read_pmds(ctx, req, nreq, regs);
3368}
3369EXPORT_SYMBOL(pfm_mod_read_pmds);
3370
3371/*
3372 * Only call this function when a process it trying to
3373 * write the debug registers (reading is always allowed)
3374 */
3375int
3376pfm_use_debug_registers(struct task_struct *task)
3377{
3378 pfm_context_t *ctx = task->thread.pfm_context;
3379 unsigned long flags;
3380 int ret = 0;
3381
3382 if (pmu_conf->use_rr_dbregs == 0) return 0;
3383
19c5870c 3384 DPRINT(("called for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3385
3386 /*
3387 * do it only once
3388 */
3389 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3390
3391 /*
3392 * Even on SMP, we do not need to use an atomic here because
3393 * the only way in is via ptrace() and this is possible only when the
3394 * process is stopped. Even in the case where the ctxsw out is not totally
3395 * completed by the time we come here, there is no way the 'stopped' process
3396 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3397 * So this is always safe.
3398 */
3399 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3400
3401 LOCK_PFS(flags);
3402
3403 /*
3404 * We cannot allow setting breakpoints when system wide monitoring
3405 * sessions are using the debug registers.
3406 */
3407 if (pfm_sessions.pfs_sys_use_dbregs> 0)
3408 ret = -1;
3409 else
3410 pfm_sessions.pfs_ptrace_use_dbregs++;
3411
3412 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3413 pfm_sessions.pfs_ptrace_use_dbregs,
3414 pfm_sessions.pfs_sys_use_dbregs,
19c5870c 3415 task_pid_nr(task), ret));
1da177e4
LT
3416
3417 UNLOCK_PFS(flags);
3418
3419 return ret;
3420}
3421
3422/*
3423 * This function is called for every task that exits with the
3424 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3425 * able to use the debug registers for debugging purposes via
3426 * ptrace(). Therefore we know it was not using them for
af901ca1 3427 * performance monitoring, so we only decrement the number
1da177e4
LT
3428 * of "ptraced" debug register users to keep the count up to date
3429 */
3430int
3431pfm_release_debug_registers(struct task_struct *task)
3432{
3433 unsigned long flags;
3434 int ret;
3435
3436 if (pmu_conf->use_rr_dbregs == 0) return 0;
3437
3438 LOCK_PFS(flags);
3439 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
19c5870c 3440 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
1da177e4
LT
3441 ret = -1;
3442 } else {
3443 pfm_sessions.pfs_ptrace_use_dbregs--;
3444 ret = 0;
3445 }
3446 UNLOCK_PFS(flags);
3447
3448 return ret;
3449}
3450
3451static int
3452pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3453{
3454 struct task_struct *task;
3455 pfm_buffer_fmt_t *fmt;
3456 pfm_ovfl_ctrl_t rst_ctrl;
3457 int state, is_system;
3458 int ret = 0;
3459
3460 state = ctx->ctx_state;
3461 fmt = ctx->ctx_buf_fmt;
3462 is_system = ctx->ctx_fl_system;
3463 task = PFM_CTX_TASK(ctx);
3464
3465 switch(state) {
3466 case PFM_CTX_MASKED:
3467 break;
3468 case PFM_CTX_LOADED:
3469 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3470 /* fall through */
3471 case PFM_CTX_UNLOADED:
3472 case PFM_CTX_ZOMBIE:
3473 DPRINT(("invalid state=%d\n", state));
3474 return -EBUSY;
3475 default:
3476 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3477 return -EINVAL;
3478 }
3479
3480 /*
3481 * In system wide and when the context is loaded, access can only happen
3482 * when the caller is running on the CPU being monitored by the session.
3483 * It does not have to be the owner (ctx_task) of the context per se.
3484 */
3485 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3486 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3487 return -EBUSY;
3488 }
3489
3490 /* sanity check */
3491 if (unlikely(task == NULL)) {
19c5870c 3492 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
1da177e4
LT
3493 return -EINVAL;
3494 }
3495
3496 if (task == current || is_system) {
3497
3498 fmt = ctx->ctx_buf_fmt;
3499
3500 DPRINT(("restarting self %d ovfl=0x%lx\n",
19c5870c 3501 task_pid_nr(task),
1da177e4
LT
3502 ctx->ctx_ovfl_regs[0]));
3503
3504 if (CTX_HAS_SMPL(ctx)) {
3505
3506 prefetch(ctx->ctx_smpl_hdr);
3507
3508 rst_ctrl.bits.mask_monitoring = 0;
3509 rst_ctrl.bits.reset_ovfl_pmds = 0;
3510
3511 if (state == PFM_CTX_LOADED)
3512 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3513 else
3514 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3515 } else {
3516 rst_ctrl.bits.mask_monitoring = 0;
3517 rst_ctrl.bits.reset_ovfl_pmds = 1;
3518 }
3519
3520 if (ret == 0) {
3521 if (rst_ctrl.bits.reset_ovfl_pmds)
3522 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3523
3524 if (rst_ctrl.bits.mask_monitoring == 0) {
19c5870c 3525 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3526
3527 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3528 } else {
19c5870c 3529 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3530
3531 // cannot use pfm_stop_monitoring(task, regs);
3532 }
3533 }
3534 /*
3535 * clear overflowed PMD mask to remove any stale information
3536 */
3537 ctx->ctx_ovfl_regs[0] = 0UL;
3538
3539 /*
3540 * back to LOADED state
3541 */
3542 ctx->ctx_state = PFM_CTX_LOADED;
3543
3544 /*
3545 * XXX: not really useful for self monitoring
3546 */
3547 ctx->ctx_fl_can_restart = 0;
3548
3549 return 0;
3550 }
3551
3552 /*
3553 * restart another task
3554 */
3555
3556 /*
3557 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3558 * one is seen by the task.
3559 */
3560 if (state == PFM_CTX_MASKED) {
3561 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3562 /*
3563 * will prevent subsequent restart before this one is
3564 * seen by other task
3565 */
3566 ctx->ctx_fl_can_restart = 0;
3567 }
3568
3569 /*
3570 * if blocking, then post the semaphore is PFM_CTX_MASKED, i.e.
3571 * the task is blocked or on its way to block. That's the normal
3572 * restart path. If the monitoring is not masked, then the task
3573 * can be actively monitoring and we cannot directly intervene.
3574 * Therefore we use the trap mechanism to catch the task and
3575 * force it to reset the buffer/reset PMDs.
3576 *
3577 * if non-blocking, then we ensure that the task will go into
3578 * pfm_handle_work() before returning to user mode.
3579 *
72fdbdce 3580 * We cannot explicitly reset another task, it MUST always
1da177e4
LT
3581 * be done by the task itself. This works for system wide because
3582 * the tool that is controlling the session is logically doing
3583 * "self-monitoring".
3584 */
3585 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
04157e4c 3586 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
60f1c444 3587 complete(&ctx->ctx_restart_done);
1da177e4 3588 } else {
19c5870c 3589 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
1da177e4
LT
3590
3591 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3592
3593 PFM_SET_WORK_PENDING(task, 1);
3594
f14488cc 3595 set_notify_resume(task);
1da177e4
LT
3596
3597 /*
3598 * XXX: send reschedule if task runs on another CPU
3599 */
3600 }
3601 return 0;
3602}
3603
3604static int
3605pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3606{
3607 unsigned int m = *(unsigned int *)arg;
3608
3609 pfm_sysctl.debug = m == 0 ? 0 : 1;
3610
1da177e4
LT
3611 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3612
3613 if (m == 0) {
3614 memset(pfm_stats, 0, sizeof(pfm_stats));
3615 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3616 }
3617 return 0;
3618}
3619
3620/*
3621 * arg can be NULL and count can be zero for this function
3622 */
3623static int
3624pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3625{
3626 struct thread_struct *thread = NULL;
3627 struct task_struct *task;
3628 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3629 unsigned long flags;
3630 dbreg_t dbreg;
3631 unsigned int rnum;
3632 int first_time;
3633 int ret = 0, state;
3634 int i, can_access_pmu = 0;
3635 int is_system, is_loaded;
3636
3637 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3638
3639 state = ctx->ctx_state;
3640 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3641 is_system = ctx->ctx_fl_system;
3642 task = ctx->ctx_task;
3643
3644 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3645
3646 /*
3647 * on both UP and SMP, we can only write to the PMC when the task is
3648 * the owner of the local PMU.
3649 */
3650 if (is_loaded) {
3651 thread = &task->thread;
3652 /*
3653 * In system wide and when the context is loaded, access can only happen
3654 * when the caller is running on the CPU being monitored by the session.
3655 * It does not have to be the owner (ctx_task) of the context per se.
3656 */
3657 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3658 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3659 return -EBUSY;
3660 }
3661 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3662 }
3663
3664 /*
3665 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3666 * ensuring that no real breakpoint can be installed via this call.
3667 *
3668 * IMPORTANT: regs can be NULL in this function
3669 */
3670
3671 first_time = ctx->ctx_fl_using_dbreg == 0;
3672
3673 /*
3674 * don't bother if we are loaded and task is being debugged
3675 */
3676 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
19c5870c 3677 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3678 return -EBUSY;
3679 }
3680
3681 /*
3682 * check for debug registers in system wide mode
3683 *
3684 * If though a check is done in pfm_context_load(),
3685 * we must repeat it here, in case the registers are
3686 * written after the context is loaded
3687 */
3688 if (is_loaded) {
3689 LOCK_PFS(flags);
3690
3691 if (first_time && is_system) {
3692 if (pfm_sessions.pfs_ptrace_use_dbregs)
3693 ret = -EBUSY;
3694 else
3695 pfm_sessions.pfs_sys_use_dbregs++;
3696 }
3697 UNLOCK_PFS(flags);
3698 }
3699
3700 if (ret != 0) return ret;
3701
3702 /*
3703 * mark ourself as user of the debug registers for
3704 * perfmon purposes.
3705 */
3706 ctx->ctx_fl_using_dbreg = 1;
3707
3708 /*
3709 * clear hardware registers to make sure we don't
3710 * pick up stale state.
3711 *
3712 * for a system wide session, we do not use
3713 * thread.dbr, thread.ibr because this process
3714 * never leaves the current CPU and the state
3715 * is shared by all processes running on it
3716 */
3717 if (first_time && can_access_pmu) {
19c5870c 3718 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
1da177e4
LT
3719 for (i=0; i < pmu_conf->num_ibrs; i++) {
3720 ia64_set_ibr(i, 0UL);
3721 ia64_dv_serialize_instruction();
3722 }
3723 ia64_srlz_i();
3724 for (i=0; i < pmu_conf->num_dbrs; i++) {
3725 ia64_set_dbr(i, 0UL);
3726 ia64_dv_serialize_data();
3727 }
3728 ia64_srlz_d();
3729 }
3730
3731 /*
3732 * Now install the values into the registers
3733 */
3734 for (i = 0; i < count; i++, req++) {
3735
3736 rnum = req->dbreg_num;
3737 dbreg.val = req->dbreg_value;
3738
3739 ret = -EINVAL;
3740
3741 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3742 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3743 rnum, dbreg.val, mode, i, count));
3744
3745 goto abort_mission;
3746 }
3747
3748 /*
3749 * make sure we do not install enabled breakpoint
3750 */
3751 if (rnum & 0x1) {
3752 if (mode == PFM_CODE_RR)
3753 dbreg.ibr.ibr_x = 0;
3754 else
3755 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3756 }
3757
3758 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3759
3760 /*
3761 * Debug registers, just like PMC, can only be modified
3762 * by a kernel call. Moreover, perfmon() access to those
3763 * registers are centralized in this routine. The hardware
3764 * does not modify the value of these registers, therefore,
3765 * if we save them as they are written, we can avoid having
3766 * to save them on context switch out. This is made possible
3767 * by the fact that when perfmon uses debug registers, ptrace()
3768 * won't be able to modify them concurrently.
3769 */
3770 if (mode == PFM_CODE_RR) {
3771 CTX_USED_IBR(ctx, rnum);
3772
3773 if (can_access_pmu) {
3774 ia64_set_ibr(rnum, dbreg.val);
3775 ia64_dv_serialize_instruction();
3776 }
3777
3778 ctx->ctx_ibrs[rnum] = dbreg.val;
3779
3780 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3781 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3782 } else {
3783 CTX_USED_DBR(ctx, rnum);
3784
3785 if (can_access_pmu) {
3786 ia64_set_dbr(rnum, dbreg.val);
3787 ia64_dv_serialize_data();
3788 }
3789 ctx->ctx_dbrs[rnum] = dbreg.val;
3790
3791 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3792 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3793 }
3794 }
3795
3796 return 0;
3797
3798abort_mission:
3799 /*
3800 * in case it was our first attempt, we undo the global modifications
3801 */
3802 if (first_time) {
3803 LOCK_PFS(flags);
3804 if (ctx->ctx_fl_system) {
3805 pfm_sessions.pfs_sys_use_dbregs--;
3806 }
3807 UNLOCK_PFS(flags);
3808 ctx->ctx_fl_using_dbreg = 0;
3809 }
3810 /*
3811 * install error return flag
3812 */
3813 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3814
3815 return ret;
3816}
3817
3818static int
3819pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3820{
3821 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3822}
3823
3824static int
3825pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3826{
3827 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3828}
3829
3830int
3831pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3832{
3833 pfm_context_t *ctx;
3834
3835 if (req == NULL) return -EINVAL;
3836
3837 ctx = GET_PMU_CTX();
3838
3839 if (ctx == NULL) return -EINVAL;
3840
3841 /*
3842 * for now limit to current task, which is enough when calling
3843 * from overflow handler
3844 */
3845 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3846
3847 return pfm_write_ibrs(ctx, req, nreq, regs);
3848}
3849EXPORT_SYMBOL(pfm_mod_write_ibrs);
3850
3851int
3852pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3853{
3854 pfm_context_t *ctx;
3855
3856 if (req == NULL) return -EINVAL;
3857
3858 ctx = GET_PMU_CTX();
3859
3860 if (ctx == NULL) return -EINVAL;
3861
3862 /*
3863 * for now limit to current task, which is enough when calling
3864 * from overflow handler
3865 */
3866 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3867
3868 return pfm_write_dbrs(ctx, req, nreq, regs);
3869}
3870EXPORT_SYMBOL(pfm_mod_write_dbrs);
3871
3872
3873static int
3874pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3875{
3876 pfarg_features_t *req = (pfarg_features_t *)arg;
3877
3878 req->ft_version = PFM_VERSION;
3879 return 0;
3880}
3881
3882static int
3883pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3884{
3885 struct pt_regs *tregs;
3886 struct task_struct *task = PFM_CTX_TASK(ctx);
3887 int state, is_system;
3888
3889 state = ctx->ctx_state;
3890 is_system = ctx->ctx_fl_system;
3891
3892 /*
3893 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
3894 */
3895 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3896
3897 /*
3898 * In system wide and when the context is loaded, access can only happen
3899 * when the caller is running on the CPU being monitored by the session.
3900 * It does not have to be the owner (ctx_task) of the context per se.
3901 */
3902 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3903 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3904 return -EBUSY;
3905 }
3906 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
19c5870c 3907 task_pid_nr(PFM_CTX_TASK(ctx)),
1da177e4
LT
3908 state,
3909 is_system));
3910 /*
3911 * in system mode, we need to update the PMU directly
3912 * and the user level state of the caller, which may not
3913 * necessarily be the creator of the context.
3914 */
3915 if (is_system) {
3916 /*
3917 * Update local PMU first
3918 *
3919 * disable dcr pp
3920 */
3921 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
3922 ia64_srlz_i();
3923
3924 /*
3925 * update local cpuinfo
3926 */
3927 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
3928
3929 /*
3930 * stop monitoring, does srlz.i
3931 */
3932 pfm_clear_psr_pp();
3933
3934 /*
3935 * stop monitoring in the caller
3936 */
3937 ia64_psr(regs)->pp = 0;
3938
3939 return 0;
3940 }
3941 /*
3942 * per-task mode
3943 */
3944
3945 if (task == current) {
3946 /* stop monitoring at kernel level */
3947 pfm_clear_psr_up();
3948
3949 /*
3950 * stop monitoring at the user level
3951 */
3952 ia64_psr(regs)->up = 0;
3953 } else {
6450578f 3954 tregs = task_pt_regs(task);
1da177e4
LT
3955
3956 /*
3957 * stop monitoring at the user level
3958 */
3959 ia64_psr(tregs)->up = 0;
3960
3961 /*
3962 * monitoring disabled in kernel at next reschedule
3963 */
3964 ctx->ctx_saved_psr_up = 0;
19c5870c 3965 DPRINT(("task=[%d]\n", task_pid_nr(task)));
1da177e4
LT
3966 }
3967 return 0;
3968}
3969
3970
3971static int
3972pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3973{
3974 struct pt_regs *tregs;
3975 int state, is_system;
3976
3977 state = ctx->ctx_state;
3978 is_system = ctx->ctx_fl_system;
3979
3980 if (state != PFM_CTX_LOADED) return -EINVAL;
3981
3982 /*
3983 * In system wide and when the context is loaded, access can only happen
3984 * when the caller is running on the CPU being monitored by the session.
3985 * It does not have to be the owner (ctx_task) of the context per se.
3986 */
3987 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3988 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3989 return -EBUSY;
3990 }
3991
3992 /*
3993 * in system mode, we need to update the PMU directly
3994 * and the user level state of the caller, which may not
3995 * necessarily be the creator of the context.
3996 */
3997 if (is_system) {
3998
3999 /*
4000 * set user level psr.pp for the caller
4001 */
4002 ia64_psr(regs)->pp = 1;
4003
4004 /*
4005 * now update the local PMU and cpuinfo
4006 */
4007 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4008
4009 /*
4010 * start monitoring at kernel level
4011 */
4012 pfm_set_psr_pp();
4013
4014 /* enable dcr pp */
4015 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4016 ia64_srlz_i();
4017
4018 return 0;
4019 }
4020
4021 /*
4022 * per-process mode
4023 */
4024
4025 if (ctx->ctx_task == current) {
4026
4027 /* start monitoring at kernel level */
4028 pfm_set_psr_up();
4029
4030 /*
4031 * activate monitoring at user level
4032 */
4033 ia64_psr(regs)->up = 1;
4034
4035 } else {
6450578f 4036 tregs = task_pt_regs(ctx->ctx_task);
1da177e4
LT
4037
4038 /*
4039 * start monitoring at the kernel level the next
4040 * time the task is scheduled
4041 */
4042 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4043
4044 /*
4045 * activate monitoring at user level
4046 */
4047 ia64_psr(tregs)->up = 1;
4048 }
4049 return 0;
4050}
4051
4052static int
4053pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4054{
4055 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4056 unsigned int cnum;
4057 int i;
4058 int ret = -EINVAL;
4059
4060 for (i = 0; i < count; i++, req++) {
4061
4062 cnum = req->reg_num;
4063
4064 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4065
4066 req->reg_value = PMC_DFL_VAL(cnum);
4067
4068 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4069
4070 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4071 }
4072 return 0;
4073
4074abort_mission:
4075 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4076 return ret;
4077}
4078
4079static int
4080pfm_check_task_exist(pfm_context_t *ctx)
4081{
4082 struct task_struct *g, *t;
4083 int ret = -ESRCH;
4084
4085 read_lock(&tasklist_lock);
4086
4087 do_each_thread (g, t) {
4088 if (t->thread.pfm_context == ctx) {
4089 ret = 0;
6794c752 4090 goto out;
1da177e4
LT
4091 }
4092 } while_each_thread (g, t);
6794c752 4093out:
1da177e4
LT
4094 read_unlock(&tasklist_lock);
4095
4096 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4097
4098 return ret;
4099}
4100
4101static int
4102pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4103{
4104 struct task_struct *task;
4105 struct thread_struct *thread;
4106 struct pfm_context_t *old;
4107 unsigned long flags;
4108#ifndef CONFIG_SMP
4109 struct task_struct *owner_task = NULL;
4110#endif
4111 pfarg_load_t *req = (pfarg_load_t *)arg;
4112 unsigned long *pmcs_source, *pmds_source;
4113 int the_cpu;
4114 int ret = 0;
4115 int state, is_system, set_dbregs = 0;
4116
4117 state = ctx->ctx_state;
4118 is_system = ctx->ctx_fl_system;
4119 /*
4120 * can only load from unloaded or terminated state
4121 */
4122 if (state != PFM_CTX_UNLOADED) {
4123 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4124 req->load_pid,
4125 ctx->ctx_state));
a5a70b75 4126 return -EBUSY;
1da177e4
LT
4127 }
4128
4129 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4130
4131 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4132 DPRINT(("cannot use blocking mode on self\n"));
4133 return -EINVAL;
4134 }
4135
4136 ret = pfm_get_task(ctx, req->load_pid, &task);
4137 if (ret) {
4138 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4139 return ret;
4140 }
4141
4142 ret = -EINVAL;
4143
4144 /*
4145 * system wide is self monitoring only
4146 */
4147 if (is_system && task != current) {
4148 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4149 req->load_pid));
4150 goto error;
4151 }
4152
4153 thread = &task->thread;
4154
4155 ret = 0;
4156 /*
4157 * cannot load a context which is using range restrictions,
4158 * into a task that is being debugged.
4159 */
4160 if (ctx->ctx_fl_using_dbreg) {
4161 if (thread->flags & IA64_THREAD_DBG_VALID) {
4162 ret = -EBUSY;
4163 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4164 goto error;
4165 }
4166 LOCK_PFS(flags);
4167
4168 if (is_system) {
4169 if (pfm_sessions.pfs_ptrace_use_dbregs) {
19c5870c
AD
4170 DPRINT(("cannot load [%d] dbregs in use\n",
4171 task_pid_nr(task)));
1da177e4
LT
4172 ret = -EBUSY;
4173 } else {
4174 pfm_sessions.pfs_sys_use_dbregs++;
19c5870c 4175 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
1da177e4
LT
4176 set_dbregs = 1;
4177 }
4178 }
4179
4180 UNLOCK_PFS(flags);
4181
4182 if (ret) goto error;
4183 }
4184
4185 /*
4186 * SMP system-wide monitoring implies self-monitoring.
4187 *
4188 * The programming model expects the task to
4189 * be pinned on a CPU throughout the session.
4190 * Here we take note of the current CPU at the
4191 * time the context is loaded. No call from
4192 * another CPU will be allowed.
4193 *
4194 * The pinning via shed_setaffinity()
4195 * must be done by the calling task prior
4196 * to this call.
4197 *
4198 * systemwide: keep track of CPU this session is supposed to run on
4199 */
4200 the_cpu = ctx->ctx_cpu = smp_processor_id();
4201
4202 ret = -EBUSY;
4203 /*
4204 * now reserve the session
4205 */
4206 ret = pfm_reserve_session(current, is_system, the_cpu);
4207 if (ret) goto error;
4208
4209 /*
4210 * task is necessarily stopped at this point.
4211 *
4212 * If the previous context was zombie, then it got removed in
4213 * pfm_save_regs(). Therefore we should not see it here.
4214 * If we see a context, then this is an active context
4215 *
4216 * XXX: needs to be atomic
4217 */
4218 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4219 thread->pfm_context, ctx));
4220
6bf11e8c 4221 ret = -EBUSY;
1da177e4
LT
4222 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4223 if (old != NULL) {
4224 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4225 goto error_unres;
4226 }
4227
4228 pfm_reset_msgq(ctx);
4229
4230 ctx->ctx_state = PFM_CTX_LOADED;
4231
4232 /*
4233 * link context to task
4234 */
4235 ctx->ctx_task = task;
4236
4237 if (is_system) {
4238 /*
4239 * we load as stopped
4240 */
4241 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4242 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4243
4244 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4245 } else {
4246 thread->flags |= IA64_THREAD_PM_VALID;
4247 }
4248
4249 /*
4250 * propagate into thread-state
4251 */
4252 pfm_copy_pmds(task, ctx);
4253 pfm_copy_pmcs(task, ctx);
4254
35589a8f
KA
4255 pmcs_source = ctx->th_pmcs;
4256 pmds_source = ctx->th_pmds;
1da177e4
LT
4257
4258 /*
4259 * always the case for system-wide
4260 */
4261 if (task == current) {
4262
4263 if (is_system == 0) {
4264
4265 /* allow user level control */
4266 ia64_psr(regs)->sp = 0;
19c5870c 4267 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
1da177e4
LT
4268
4269 SET_LAST_CPU(ctx, smp_processor_id());
4270 INC_ACTIVATION();
4271 SET_ACTIVATION(ctx);
4272#ifndef CONFIG_SMP
4273 /*
4274 * push the other task out, if any
4275 */
4276 owner_task = GET_PMU_OWNER();
4277 if (owner_task) pfm_lazy_save_regs(owner_task);
4278#endif
4279 }
4280 /*
4281 * load all PMD from ctx to PMU (as opposed to thread state)
4282 * restore all PMC from ctx to PMU
4283 */
4284 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4285 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4286
4287 ctx->ctx_reload_pmcs[0] = 0UL;
4288 ctx->ctx_reload_pmds[0] = 0UL;
4289
4290 /*
4291 * guaranteed safe by earlier check against DBG_VALID
4292 */
4293 if (ctx->ctx_fl_using_dbreg) {
4294 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4295 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4296 }
4297 /*
4298 * set new ownership
4299 */
4300 SET_PMU_OWNER(task, ctx);
4301
19c5870c 4302 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
1da177e4
LT
4303 } else {
4304 /*
4305 * when not current, task MUST be stopped, so this is safe
4306 */
6450578f 4307 regs = task_pt_regs(task);
1da177e4
LT
4308
4309 /* force a full reload */
4310 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4311 SET_LAST_CPU(ctx, -1);
4312
4313 /* initial saved psr (stopped) */
4314 ctx->ctx_saved_psr_up = 0UL;
4315 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4316 }
4317
4318 ret = 0;
4319
4320error_unres:
4321 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4322error:
4323 /*
4324 * we must undo the dbregs setting (for system-wide)
4325 */
4326 if (ret && set_dbregs) {
4327 LOCK_PFS(flags);
4328 pfm_sessions.pfs_sys_use_dbregs--;
4329 UNLOCK_PFS(flags);
4330 }
4331 /*
4332 * release task, there is now a link with the context
4333 */
4334 if (is_system == 0 && task != current) {
4335 pfm_put_task(task);
4336
4337 if (ret == 0) {
4338 ret = pfm_check_task_exist(ctx);
4339 if (ret) {
4340 ctx->ctx_state = PFM_CTX_UNLOADED;
4341 ctx->ctx_task = NULL;
4342 }
4343 }
4344 }
4345 return ret;
4346}
4347
4348/*
4349 * in this function, we do not need to increase the use count
4350 * for the task via get_task_struct(), because we hold the
4351 * context lock. If the task were to disappear while having
4352 * a context attached, it would go through pfm_exit_thread()
4353 * which also grabs the context lock and would therefore be blocked
4354 * until we are here.
4355 */
4356static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4357
4358static int
4359pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4360{
4361 struct task_struct *task = PFM_CTX_TASK(ctx);
4362 struct pt_regs *tregs;
4363 int prev_state, is_system;
4364 int ret;
4365
19c5870c 4366 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
1da177e4
LT
4367
4368 prev_state = ctx->ctx_state;
4369 is_system = ctx->ctx_fl_system;
4370
4371 /*
4372 * unload only when necessary
4373 */
4374 if (prev_state == PFM_CTX_UNLOADED) {
4375 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4376 return 0;
4377 }
4378
4379 /*
4380 * clear psr and dcr bits
4381 */
4382 ret = pfm_stop(ctx, NULL, 0, regs);
4383 if (ret) return ret;
4384
4385 ctx->ctx_state = PFM_CTX_UNLOADED;
4386
4387 /*
4388 * in system mode, we need to update the PMU directly
4389 * and the user level state of the caller, which may not
4390 * necessarily be the creator of the context.
4391 */
4392 if (is_system) {
4393
4394 /*
4395 * Update cpuinfo
4396 *
4397 * local PMU is taken care of in pfm_stop()
4398 */
4399 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4400 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4401
4402 /*
4403 * save PMDs in context
4404 * release ownership
4405 */
4406 pfm_flush_pmds(current, ctx);
4407
4408 /*
4409 * at this point we are done with the PMU
4410 * so we can unreserve the resource.
4411 */
4412 if (prev_state != PFM_CTX_ZOMBIE)
4413 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4414
4415 /*
4416 * disconnect context from task
4417 */
4418 task->thread.pfm_context = NULL;
4419 /*
4420 * disconnect task from context
4421 */
4422 ctx->ctx_task = NULL;
4423
4424 /*
4425 * There is nothing more to cleanup here.
4426 */
4427 return 0;
4428 }
4429
4430 /*
4431 * per-task mode
4432 */
6450578f 4433 tregs = task == current ? regs : task_pt_regs(task);
1da177e4
LT
4434
4435 if (task == current) {
4436 /*
4437 * cancel user level control
4438 */
4439 ia64_psr(regs)->sp = 1;
4440
19c5870c 4441 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
1da177e4
LT
4442 }
4443 /*
4444 * save PMDs to context
4445 * release ownership
4446 */
4447 pfm_flush_pmds(task, ctx);
4448
4449 /*
4450 * at this point we are done with the PMU
4451 * so we can unreserve the resource.
4452 *
4453 * when state was ZOMBIE, we have already unreserved.
4454 */
4455 if (prev_state != PFM_CTX_ZOMBIE)
4456 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4457
4458 /*
4459 * reset activation counter and psr
4460 */
4461 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4462 SET_LAST_CPU(ctx, -1);
4463
4464 /*
4465 * PMU state will not be restored
4466 */
4467 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4468
4469 /*
4470 * break links between context and task
4471 */
4472 task->thread.pfm_context = NULL;
4473 ctx->ctx_task = NULL;
4474
4475 PFM_SET_WORK_PENDING(task, 0);
4476
4477 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4478 ctx->ctx_fl_can_restart = 0;
4479 ctx->ctx_fl_going_zombie = 0;
4480
19c5870c 4481 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
1da177e4
LT
4482
4483 return 0;
4484}
4485
4486
4487/*
e6464694
JS
4488 * called only from exit_thread()
4489 * we come here only if the task has a context attached (loaded or masked)
1da177e4
LT
4490 */
4491void
4492pfm_exit_thread(struct task_struct *task)
4493{
4494 pfm_context_t *ctx;
4495 unsigned long flags;
6450578f 4496 struct pt_regs *regs = task_pt_regs(task);
1da177e4
LT
4497 int ret, state;
4498 int free_ok = 0;
4499
4500 ctx = PFM_GET_CTX(task);
4501
4502 PROTECT_CTX(ctx, flags);
4503
19c5870c 4504 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
1da177e4
LT
4505
4506 state = ctx->ctx_state;
4507 switch(state) {
4508 case PFM_CTX_UNLOADED:
4509 /*
72fdbdce 4510 * only comes to this function if pfm_context is not NULL, i.e., cannot
1da177e4
LT
4511 * be in unloaded state
4512 */
19c5870c 4513 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
1da177e4
LT
4514 break;
4515 case PFM_CTX_LOADED:
4516 case PFM_CTX_MASKED:
4517 ret = pfm_context_unload(ctx, NULL, 0, regs);
4518 if (ret) {
19c5870c 4519 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
1da177e4
LT
4520 }
4521 DPRINT(("ctx unloaded for current state was %d\n", state));
4522
4523 pfm_end_notify_user(ctx);
4524 break;
4525 case PFM_CTX_ZOMBIE:
4526 ret = pfm_context_unload(ctx, NULL, 0, regs);
4527 if (ret) {
19c5870c 4528 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
1da177e4
LT
4529 }
4530 free_ok = 1;
4531 break;
4532 default:
19c5870c 4533 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
1da177e4
LT
4534 break;
4535 }
4536 UNPROTECT_CTX(ctx, flags);
4537
4538 { u64 psr = pfm_get_psr();
4539 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4540 BUG_ON(GET_PMU_OWNER());
4541 BUG_ON(ia64_psr(regs)->up);
4542 BUG_ON(ia64_psr(regs)->pp);
4543 }
4544
4545 /*
4546 * All memory free operations (especially for vmalloc'ed memory)
4547 * MUST be done with interrupts ENABLED.
4548 */
4549 if (free_ok) pfm_context_free(ctx);
4550}
4551
4552/*
4553 * functions MUST be listed in the increasing order of their index (see permfon.h)
4554 */
4555#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4556#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4557#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4558#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4559#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4560
4561static pfm_cmd_desc_t pfm_cmd_tab[]={
4562/* 0 */PFM_CMD_NONE,
4563/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4564/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4565/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4566/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4567/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4568/* 6 */PFM_CMD_NONE,
4569/* 7 */PFM_CMD_NONE,
4570/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4571/* 9 */PFM_CMD_NONE,
4572/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4573/* 11 */PFM_CMD_NONE,
4574/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4575/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4576/* 14 */PFM_CMD_NONE,
4577/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4578/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4579/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4580/* 18 */PFM_CMD_NONE,
4581/* 19 */PFM_CMD_NONE,
4582/* 20 */PFM_CMD_NONE,
4583/* 21 */PFM_CMD_NONE,
4584/* 22 */PFM_CMD_NONE,
4585/* 23 */PFM_CMD_NONE,
4586/* 24 */PFM_CMD_NONE,
4587/* 25 */PFM_CMD_NONE,
4588/* 26 */PFM_CMD_NONE,
4589/* 27 */PFM_CMD_NONE,
4590/* 28 */PFM_CMD_NONE,
4591/* 29 */PFM_CMD_NONE,
4592/* 30 */PFM_CMD_NONE,
4593/* 31 */PFM_CMD_NONE,
4594/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4595/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4596};
4597#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4598
4599static int
4600pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4601{
4602 struct task_struct *task;
4603 int state, old_state;
4604
4605recheck:
4606 state = ctx->ctx_state;
4607 task = ctx->ctx_task;
4608
4609 if (task == NULL) {
4610 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4611 return 0;
4612 }
4613
4614 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4615 ctx->ctx_fd,
4616 state,
19c5870c 4617 task_pid_nr(task),
1da177e4
LT
4618 task->state, PFM_CMD_STOPPED(cmd)));
4619
4620 /*
4621 * self-monitoring always ok.
4622 *
4623 * for system-wide the caller can either be the creator of the
4624 * context (to one to which the context is attached to) OR
4625 * a task running on the same CPU as the session.
4626 */
4627 if (task == current || ctx->ctx_fl_system) return 0;
4628
4629 /*
a5a70b75 4630 * we are monitoring another thread
1da177e4 4631 */
a5a70b75 4632 switch(state) {
4633 case PFM_CTX_UNLOADED:
4634 /*
4635 * if context is UNLOADED we are safe to go
4636 */
4637 return 0;
4638 case PFM_CTX_ZOMBIE:
4639 /*
4640 * no command can operate on a zombie context
4641 */
4642 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4643 return -EINVAL;
4644 case PFM_CTX_MASKED:
4645 /*
4646 * PMU state has been saved to software even though
4647 * the thread may still be running.
4648 */
4649 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
1da177e4
LT
4650 }
4651
4652 /*
4653 * context is LOADED or MASKED. Some commands may need to have
4654 * the task stopped.
4655 *
4656 * We could lift this restriction for UP but it would mean that
4657 * the user has no guarantee the task would not run between
4658 * two successive calls to perfmonctl(). That's probably OK.
4659 * If this user wants to ensure the task does not run, then
4660 * the task must be stopped.
4661 */
4662 if (PFM_CMD_STOPPED(cmd)) {
21498223 4663 if (!task_is_stopped_or_traced(task)) {
19c5870c 4664 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
1da177e4
LT
4665 return -EBUSY;
4666 }
4667 /*
4668 * task is now stopped, wait for ctxsw out
4669 *
4670 * This is an interesting point in the code.
4671 * We need to unprotect the context because
4672 * the pfm_save_regs() routines needs to grab
4673 * the same lock. There are danger in doing
4674 * this because it leaves a window open for
4675 * another task to get access to the context
4676 * and possibly change its state. The one thing
4677 * that is not possible is for the context to disappear
4678 * because we are protected by the VFS layer, i.e.,
4679 * get_fd()/put_fd().
4680 */
4681 old_state = state;
4682
4683 UNPROTECT_CTX(ctx, flags);
4684
85ba2d86 4685 wait_task_inactive(task, 0);
1da177e4
LT
4686
4687 PROTECT_CTX(ctx, flags);
4688
4689 /*
4690 * we must recheck to verify if state has changed
4691 */
4692 if (ctx->ctx_state != old_state) {
4693 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4694 goto recheck;
4695 }
4696 }
4697 return 0;
4698}
4699
4700/*
4701 * system-call entry point (must return long)
4702 */
4703asmlinkage long
4704sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4705{
2903ff01 4706 struct fd f = {NULL, 0};
1da177e4
LT
4707 pfm_context_t *ctx = NULL;
4708 unsigned long flags = 0UL;
4709 void *args_k = NULL;
4710 long ret; /* will expand int return types */
4711 size_t base_sz, sz, xtra_sz = 0;
4712 int narg, completed_args = 0, call_made = 0, cmd_flags;
4713 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4714 int (*getsize)(void *arg, size_t *sz);
4715#define PFM_MAX_ARGSIZE 4096
4716
4717 /*
4718 * reject any call if perfmon was disabled at initialization
4719 */
4720 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4721
4722 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4723 DPRINT(("invalid cmd=%d\n", cmd));
4724 return -EINVAL;
4725 }
4726
4727 func = pfm_cmd_tab[cmd].cmd_func;
4728 narg = pfm_cmd_tab[cmd].cmd_narg;
4729 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4730 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4731 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4732
4733 if (unlikely(func == NULL)) {
4734 DPRINT(("invalid cmd=%d\n", cmd));
4735 return -EINVAL;
4736 }
4737
4738 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4739 PFM_CMD_NAME(cmd),
4740 cmd,
4741 narg,
4742 base_sz,
4743 count));
4744
4745 /*
4746 * check if number of arguments matches what the command expects
4747 */
4748 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4749 return -EINVAL;
4750
4751restart_args:
4752 sz = xtra_sz + base_sz*count;
4753 /*
4754 * limit abuse to min page size
4755 */
4756 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
19c5870c 4757 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
1da177e4
LT
4758 return -E2BIG;
4759 }
4760
4761 /*
4762 * allocate default-sized argument buffer
4763 */
4764 if (likely(count && args_k == NULL)) {
4765 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4766 if (args_k == NULL) return -ENOMEM;
4767 }
4768
4769 ret = -EFAULT;
4770
4771 /*
4772 * copy arguments
4773 *
4774 * assume sz = 0 for command without parameters
4775 */
4776 if (sz && copy_from_user(args_k, arg, sz)) {
4777 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4778 goto error_args;
4779 }
4780
4781 /*
4782 * check if command supports extra parameters
4783 */
4784 if (completed_args == 0 && getsize) {
4785 /*
4786 * get extra parameters size (based on main argument)
4787 */
4788 ret = (*getsize)(args_k, &xtra_sz);
4789 if (ret) goto error_args;
4790
4791 completed_args = 1;
4792
4793 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4794
4795 /* retry if necessary */
4796 if (likely(xtra_sz)) goto restart_args;
4797 }
4798
4799 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4800
4801 ret = -EBADF;
4802
2903ff01
AV
4803 f = fdget(fd);
4804 if (unlikely(f.file == NULL)) {
1da177e4
LT
4805 DPRINT(("invalid fd %d\n", fd));
4806 goto error_args;
4807 }
2903ff01 4808 if (unlikely(PFM_IS_FILE(f.file) == 0)) {
1da177e4
LT
4809 DPRINT(("fd %d not related to perfmon\n", fd));
4810 goto error_args;
4811 }
4812
2903ff01 4813 ctx = f.file->private_data;
1da177e4
LT
4814 if (unlikely(ctx == NULL)) {
4815 DPRINT(("no context for fd %d\n", fd));
4816 goto error_args;
4817 }
4818 prefetch(&ctx->ctx_state);
4819
4820 PROTECT_CTX(ctx, flags);
4821
4822 /*
4823 * check task is stopped
4824 */
4825 ret = pfm_check_task_state(ctx, cmd, flags);
4826 if (unlikely(ret)) goto abort_locked;
4827
4828skip_fd:
6450578f 4829 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
1da177e4
LT
4830
4831 call_made = 1;
4832
4833abort_locked:
4834 if (likely(ctx)) {
4835 DPRINT(("context unlocked\n"));
4836 UNPROTECT_CTX(ctx, flags);
1da177e4
LT
4837 }
4838
4839 /* copy argument back to user, if needed */
4840 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4841
4842error_args:
2903ff01
AV
4843 if (f.file)
4844 fdput(f);
b8444d00 4845
b2325fe1 4846 kfree(args_k);
1da177e4
LT
4847
4848 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4849
4850 return ret;
4851}
4852
4853static void
4854pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4855{
4856 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4857 pfm_ovfl_ctrl_t rst_ctrl;
4858 int state;
4859 int ret = 0;
4860
4861 state = ctx->ctx_state;
4862 /*
4863 * Unlock sampling buffer and reset index atomically
4864 * XXX: not really needed when blocking
4865 */
4866 if (CTX_HAS_SMPL(ctx)) {
4867
4868 rst_ctrl.bits.mask_monitoring = 0;
4869 rst_ctrl.bits.reset_ovfl_pmds = 0;
4870
4871 if (state == PFM_CTX_LOADED)
4872 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4873 else
4874 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4875 } else {
4876 rst_ctrl.bits.mask_monitoring = 0;
4877 rst_ctrl.bits.reset_ovfl_pmds = 1;
4878 }
4879
4880 if (ret == 0) {
4881 if (rst_ctrl.bits.reset_ovfl_pmds) {
4882 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4883 }
4884 if (rst_ctrl.bits.mask_monitoring == 0) {
4885 DPRINT(("resuming monitoring\n"));
4886 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4887 } else {
4888 DPRINT(("stopping monitoring\n"));
4889 //pfm_stop_monitoring(current, regs);
4890 }
4891 ctx->ctx_state = PFM_CTX_LOADED;
4892 }
4893}
4894
4895/*
4896 * context MUST BE LOCKED when calling
4897 * can only be called for current
4898 */
4899static void
4900pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4901{
4902 int ret;
4903
19c5870c 4904 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
1da177e4
LT
4905
4906 ret = pfm_context_unload(ctx, NULL, 0, regs);
4907 if (ret) {
19c5870c 4908 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
1da177e4
LT
4909 }
4910
4911 /*
4912 * and wakeup controlling task, indicating we are now disconnected
4913 */
4914 wake_up_interruptible(&ctx->ctx_zombieq);
4915
4916 /*
4917 * given that context is still locked, the controlling
4918 * task will only get access when we return from
4919 * pfm_handle_work().
4920 */
4921}
4922
4923static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
0fb232fd 4924
4944930a
SE
4925 /*
4926 * pfm_handle_work() can be called with interrupts enabled
4927 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
4928 * call may sleep, therefore we must re-enable interrupts
4929 * to avoid deadlocks. It is safe to do so because this function
0fb232fd 4930 * is called ONLY when returning to user level (pUStk=1), in which case
4944930a
SE
4931 * there is no risk of kernel stack overflow due to deep
4932 * interrupt nesting.
4933 */
1da177e4
LT
4934void
4935pfm_handle_work(void)
4936{
4937 pfm_context_t *ctx;
4938 struct pt_regs *regs;
4944930a 4939 unsigned long flags, dummy_flags;
1da177e4
LT
4940 unsigned long ovfl_regs;
4941 unsigned int reason;
4942 int ret;
4943
4944 ctx = PFM_GET_CTX(current);
4945 if (ctx == NULL) {
0fb232fd
HS
4946 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
4947 task_pid_nr(current));
1da177e4
LT
4948 return;
4949 }
4950
4951 PROTECT_CTX(ctx, flags);
4952
4953 PFM_SET_WORK_PENDING(current, 0);
4954
6450578f 4955 regs = task_pt_regs(current);
1da177e4
LT
4956
4957 /*
4958 * extract reason for being here and clear
4959 */
4960 reason = ctx->ctx_fl_trap_reason;
4961 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4962 ovfl_regs = ctx->ctx_ovfl_regs[0];
4963
4964 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
4965
4966 /*
4967 * must be done before we check for simple-reset mode
4968 */
0fb232fd
HS
4969 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
4970 goto do_zombie;
1da177e4
LT
4971
4972 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
0fb232fd
HS
4973 if (reason == PFM_TRAP_REASON_RESET)
4974 goto skip_blocking;
1da177e4 4975
4944930a
SE
4976 /*
4977 * restore interrupt mask to what it was on entry.
4978 * Could be enabled/diasbled.
4979 */
1da177e4
LT
4980 UNPROTECT_CTX(ctx, flags);
4981
4944930a
SE
4982 /*
4983 * force interrupt enable because of down_interruptible()
4984 */
1da177e4
LT
4985 local_irq_enable();
4986
4987 DPRINT(("before block sleeping\n"));
4988
4989 /*
4990 * may go through without blocking on SMP systems
4991 * if restart has been received already by the time we call down()
4992 */
60f1c444 4993 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
1da177e4
LT
4994
4995 DPRINT(("after block sleeping ret=%d\n", ret));
4996
4997 /*
4944930a
SE
4998 * lock context and mask interrupts again
4999 * We save flags into a dummy because we may have
5000 * altered interrupts mask compared to entry in this
5001 * function.
1da177e4 5002 */
4944930a 5003 PROTECT_CTX(ctx, dummy_flags);
1da177e4
LT
5004
5005 /*
5006 * we need to read the ovfl_regs only after wake-up
5007 * because we may have had pfm_write_pmds() in between
5008 * and that can changed PMD values and therefore
5009 * ovfl_regs is reset for these new PMD values.
5010 */
5011 ovfl_regs = ctx->ctx_ovfl_regs[0];
5012
5013 if (ctx->ctx_fl_going_zombie) {
5014do_zombie:
5015 DPRINT(("context is zombie, bailing out\n"));
5016 pfm_context_force_terminate(ctx, regs);
5017 goto nothing_to_do;
5018 }
5019 /*
5020 * in case of interruption of down() we don't restart anything
5021 */
0fb232fd
HS
5022 if (ret < 0)
5023 goto nothing_to_do;
1da177e4
LT
5024
5025skip_blocking:
5026 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5027 ctx->ctx_ovfl_regs[0] = 0UL;
5028
5029nothing_to_do:
4944930a
SE
5030 /*
5031 * restore flags as they were upon entry
5032 */
1da177e4
LT
5033 UNPROTECT_CTX(ctx, flags);
5034}
5035
5036static int
5037pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5038{
5039 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5040 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5041 return 0;
5042 }
5043
5044 DPRINT(("waking up somebody\n"));
5045
5046 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5047
5048 /*
5049 * safe, we are not in intr handler, nor in ctxsw when
5050 * we come here
5051 */
5052 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5053
5054 return 0;
5055}
5056
5057static int
5058pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5059{
5060 pfm_msg_t *msg = NULL;
5061
5062 if (ctx->ctx_fl_no_msg == 0) {
5063 msg = pfm_get_new_msg(ctx);
5064 if (msg == NULL) {
5065 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5066 return -1;
5067 }
5068
5069 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5070 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5071 msg->pfm_ovfl_msg.msg_active_set = 0;
5072 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5073 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5074 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5075 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5076 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5077 }
5078
5079 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5080 msg,
5081 ctx->ctx_fl_no_msg,
5082 ctx->ctx_fd,
5083 ovfl_pmds));
5084
5085 return pfm_notify_user(ctx, msg);
5086}
5087
5088static int
5089pfm_end_notify_user(pfm_context_t *ctx)
5090{
5091 pfm_msg_t *msg;
5092
5093 msg = pfm_get_new_msg(ctx);
5094 if (msg == NULL) {
5095 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5096 return -1;
5097 }
5098 /* no leak */
5099 memset(msg, 0, sizeof(*msg));
5100
5101 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5102 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5103 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5104
5105 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5106 msg,
5107 ctx->ctx_fl_no_msg,
5108 ctx->ctx_fd));
5109
5110 return pfm_notify_user(ctx, msg);
5111}
5112
5113/*
5114 * main overflow processing routine.
72fdbdce 5115 * it can be called from the interrupt path or explicitly during the context switch code
1da177e4 5116 */
e088a4ad
MW
5117static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5118 unsigned long pmc0, struct pt_regs *regs)
1da177e4
LT
5119{
5120 pfm_ovfl_arg_t *ovfl_arg;
5121 unsigned long mask;
5122 unsigned long old_val, ovfl_val, new_val;
5123 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5124 unsigned long tstamp;
5125 pfm_ovfl_ctrl_t ovfl_ctrl;
5126 unsigned int i, has_smpl;
5127 int must_notify = 0;
5128
5129 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5130
5131 /*
5132 * sanity test. Should never happen
5133 */
5134 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5135
5136 tstamp = ia64_get_itc();
5137 mask = pmc0 >> PMU_FIRST_COUNTER;
5138 ovfl_val = pmu_conf->ovfl_val;
5139 has_smpl = CTX_HAS_SMPL(ctx);
5140
5141 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5142 "used_pmds=0x%lx\n",
5143 pmc0,
19c5870c 5144 task ? task_pid_nr(task): -1,
1da177e4
LT
5145 (regs ? regs->cr_iip : 0),
5146 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5147 ctx->ctx_used_pmds[0]));
5148
5149
5150 /*
5151 * first we update the virtual counters
5152 * assume there was a prior ia64_srlz_d() issued
5153 */
5154 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5155
5156 /* skip pmd which did not overflow */
5157 if ((mask & 0x1) == 0) continue;
5158
5159 /*
5160 * Note that the pmd is not necessarily 0 at this point as qualified events
5161 * may have happened before the PMU was frozen. The residual count is not
5162 * taken into consideration here but will be with any read of the pmd via
5163 * pfm_read_pmds().
5164 */
5165 old_val = new_val = ctx->ctx_pmds[i].val;
5166 new_val += 1 + ovfl_val;
5167 ctx->ctx_pmds[i].val = new_val;
5168
5169 /*
5170 * check for overflow condition
5171 */
5172 if (likely(old_val > new_val)) {
5173 ovfl_pmds |= 1UL << i;
5174 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5175 }
5176
5177 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5178 i,
5179 new_val,
5180 old_val,
5181 ia64_get_pmd(i) & ovfl_val,
5182 ovfl_pmds,
5183 ovfl_notify));
5184 }
5185
5186 /*
5187 * there was no 64-bit overflow, nothing else to do
5188 */
5189 if (ovfl_pmds == 0UL) return;
5190
5191 /*
5192 * reset all control bits
5193 */
5194 ovfl_ctrl.val = 0;
5195 reset_pmds = 0UL;
5196
5197 /*
5198 * if a sampling format module exists, then we "cache" the overflow by
5199 * calling the module's handler() routine.
5200 */
5201 if (has_smpl) {
5202 unsigned long start_cycles, end_cycles;
5203 unsigned long pmd_mask;
5204 int j, k, ret = 0;
5205 int this_cpu = smp_processor_id();
5206
5207 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5208 ovfl_arg = &ctx->ctx_ovfl_arg;
5209
5210 prefetch(ctx->ctx_smpl_hdr);
5211
5212 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5213
5214 mask = 1UL << i;
5215
5216 if ((pmd_mask & 0x1) == 0) continue;
5217
5218 ovfl_arg->ovfl_pmd = (unsigned char )i;
5219 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5220 ovfl_arg->active_set = 0;
5221 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5222 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5223
5224 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5225 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5226 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5227
5228 /*
5229 * copy values of pmds of interest. Sampling format may copy them
5230 * into sampling buffer.
5231 */
5232 if (smpl_pmds) {
5233 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5234 if ((smpl_pmds & 0x1) == 0) continue;
5235 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5236 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5237 }
5238 }
5239
5240 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5241
5242 start_cycles = ia64_get_itc();
5243
5244 /*
5245 * call custom buffer format record (handler) routine
5246 */
5247 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5248
5249 end_cycles = ia64_get_itc();
5250
5251 /*
5252 * For those controls, we take the union because they have
5253 * an all or nothing behavior.
5254 */
5255 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5256 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5257 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5258 /*
5259 * build the bitmask of pmds to reset now
5260 */
5261 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5262
5263 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5264 }
5265 /*
5266 * when the module cannot handle the rest of the overflows, we abort right here
5267 */
5268 if (ret && pmd_mask) {
5269 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5270 pmd_mask<<PMU_FIRST_COUNTER));
5271 }
5272 /*
5273 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5274 */
5275 ovfl_pmds &= ~reset_pmds;
5276 } else {
5277 /*
5278 * when no sampling module is used, then the default
5279 * is to notify on overflow if requested by user
5280 */
5281 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5282 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5283 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5284 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5285 /*
5286 * if needed, we reset all overflowed pmds
5287 */
5288 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5289 }
5290
5291 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5292
5293 /*
5294 * reset the requested PMD registers using the short reset values
5295 */
5296 if (reset_pmds) {
5297 unsigned long bm = reset_pmds;
5298 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5299 }
5300
5301 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5302 /*
5303 * keep track of what to reset when unblocking
5304 */
5305 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5306
5307 /*
5308 * check for blocking context
5309 */
5310 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5311
5312 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5313
5314 /*
5315 * set the perfmon specific checking pending work for the task
5316 */
5317 PFM_SET_WORK_PENDING(task, 1);
5318
5319 /*
5320 * when coming from ctxsw, current still points to the
5321 * previous task, therefore we must work with task and not current.
5322 */
f14488cc 5323 set_notify_resume(task);
1da177e4
LT
5324 }
5325 /*
5326 * defer until state is changed (shorten spin window). the context is locked
5327 * anyway, so the signal receiver would come spin for nothing.
5328 */
5329 must_notify = 1;
5330 }
5331
5332 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
19c5870c 5333 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
1da177e4
LT
5334 PFM_GET_WORK_PENDING(task),
5335 ctx->ctx_fl_trap_reason,
5336 ovfl_pmds,
5337 ovfl_notify,
5338 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5339 /*
5340 * in case monitoring must be stopped, we toggle the psr bits
5341 */
5342 if (ovfl_ctrl.bits.mask_monitoring) {
5343 pfm_mask_monitoring(task);
5344 ctx->ctx_state = PFM_CTX_MASKED;
5345 ctx->ctx_fl_can_restart = 1;
5346 }
5347
5348 /*
5349 * send notification now
5350 */
5351 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5352
5353 return;
5354
5355sanity_check:
5356 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5357 smp_processor_id(),
19c5870c 5358 task ? task_pid_nr(task) : -1,
1da177e4
LT
5359 pmc0);
5360 return;
5361
5362stop_monitoring:
5363 /*
5364 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5365 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5366 * come here as zombie only if the task is the current task. In which case, we
5367 * can access the PMU hardware directly.
5368 *
5369 * Note that zombies do have PM_VALID set. So here we do the minimal.
5370 *
5371 * In case the context was zombified it could not be reclaimed at the time
5372 * the monitoring program exited. At this point, the PMU reservation has been
5373 * returned, the sampiing buffer has been freed. We must convert this call
5374 * into a spurious interrupt. However, we must also avoid infinite overflows
5375 * by stopping monitoring for this task. We can only come here for a per-task
5376 * context. All we need to do is to stop monitoring using the psr bits which
5377 * are always task private. By re-enabling secure montioring, we ensure that
5378 * the monitored task will not be able to re-activate monitoring.
5379 * The task will eventually be context switched out, at which point the context
5380 * will be reclaimed (that includes releasing ownership of the PMU).
5381 *
5382 * So there might be a window of time where the number of per-task session is zero
5383 * yet one PMU might have a owner and get at most one overflow interrupt for a zombie
5384 * context. This is safe because if a per-task session comes in, it will push this one
5385 * out and by the virtue on pfm_save_regs(), this one will disappear. If a system wide
5386 * session is force on that CPU, given that we use task pinning, pfm_save_regs() will
5387 * also push our zombie context out.
5388 *
5389 * Overall pretty hairy stuff....
5390 */
19c5870c 5391 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
1da177e4
LT
5392 pfm_clear_psr_up();
5393 ia64_psr(regs)->up = 0;
5394 ia64_psr(regs)->sp = 1;
5395 return;
5396}
5397
5398static int
9010eff0 5399pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
1da177e4
LT
5400{
5401 struct task_struct *task;
5402 pfm_context_t *ctx;
5403 unsigned long flags;
5404 u64 pmc0;
5405 int this_cpu = smp_processor_id();
5406 int retval = 0;
5407
5408 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5409
5410 /*
5411 * srlz.d done before arriving here
5412 */
5413 pmc0 = ia64_get_pmc(0);
5414
5415 task = GET_PMU_OWNER();
5416 ctx = GET_PMU_CTX();
5417
5418 /*
5419 * if we have some pending bits set
5420 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5421 */
5422 if (PMC0_HAS_OVFL(pmc0) && task) {
5423 /*
5424 * we assume that pmc0.fr is always set here
5425 */
5426
5427 /* sanity check */
5428 if (!ctx) goto report_spurious1;
5429
5430 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5431 goto report_spurious2;
5432
5433 PROTECT_CTX_NOPRINT(ctx, flags);
5434
5435 pfm_overflow_handler(task, ctx, pmc0, regs);
5436
5437 UNPROTECT_CTX_NOPRINT(ctx, flags);
5438
5439 } else {
5440 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5441 retval = -1;
5442 }
5443 /*
5444 * keep it unfrozen at all times
5445 */
5446 pfm_unfreeze_pmu();
5447
5448 return retval;
5449
5450report_spurious1:
5451 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
19c5870c 5452 this_cpu, task_pid_nr(task));
1da177e4
LT
5453 pfm_unfreeze_pmu();
5454 return -1;
5455report_spurious2:
5456 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5457 this_cpu,
19c5870c 5458 task_pid_nr(task));
1da177e4
LT
5459 pfm_unfreeze_pmu();
5460 return -1;
5461}
5462
5463static irqreturn_t
3bbe486b 5464pfm_interrupt_handler(int irq, void *arg)
1da177e4
LT
5465{
5466 unsigned long start_cycles, total_cycles;
5467 unsigned long min, max;
5468 int this_cpu;
5469 int ret;
3bbe486b 5470 struct pt_regs *regs = get_irq_regs();
1da177e4
LT
5471
5472 this_cpu = get_cpu();
a1ecf7f6
TL
5473 if (likely(!pfm_alt_intr_handler)) {
5474 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5475 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
1da177e4 5476
a1ecf7f6 5477 start_cycles = ia64_get_itc();
1da177e4 5478
9010eff0 5479 ret = pfm_do_interrupt_handler(arg, regs);
1da177e4 5480
a1ecf7f6 5481 total_cycles = ia64_get_itc();
1da177e4 5482
a1ecf7f6
TL
5483 /*
5484 * don't measure spurious interrupts
5485 */
5486 if (likely(ret == 0)) {
5487 total_cycles -= start_cycles;
1da177e4 5488
a1ecf7f6
TL
5489 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5490 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
1da177e4 5491
a1ecf7f6
TL
5492 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5493 }
5494 }
5495 else {
5496 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
1da177e4 5497 }
a1ecf7f6 5498
8b0b1db0 5499 put_cpu();
1da177e4
LT
5500 return IRQ_HANDLED;
5501}
5502
5503/*
5504 * /proc/perfmon interface, for debug only
5505 */
5506
fa276f36 5507#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
1da177e4
LT
5508
5509static void *
5510pfm_proc_start(struct seq_file *m, loff_t *pos)
5511{
5512 if (*pos == 0) {
5513 return PFM_PROC_SHOW_HEADER;
5514 }
5515
5dd3c994 5516 while (*pos <= nr_cpu_ids) {
1da177e4
LT
5517 if (cpu_online(*pos - 1)) {
5518 return (void *)*pos;
5519 }
5520 ++*pos;
5521 }
5522 return NULL;
5523}
5524
5525static void *
5526pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5527{
5528 ++*pos;
5529 return pfm_proc_start(m, pos);
5530}
5531
5532static void
5533pfm_proc_stop(struct seq_file *m, void *v)
5534{
5535}
5536
5537static void
5538pfm_proc_show_header(struct seq_file *m)
5539{
5540 struct list_head * pos;
5541 pfm_buffer_fmt_t * entry;
5542 unsigned long flags;
5543
5544 seq_printf(m,
5545 "perfmon version : %u.%u\n"
5546 "model : %s\n"
5547 "fastctxsw : %s\n"
5548 "expert mode : %s\n"
5549 "ovfl_mask : 0x%lx\n"
5550 "PMU flags : 0x%x\n",
5551 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5552 pmu_conf->pmu_name,
5553 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5554 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5555 pmu_conf->ovfl_val,
5556 pmu_conf->flags);
5557
5558 LOCK_PFS(flags);
5559
5560 seq_printf(m,
5561 "proc_sessions : %u\n"
5562 "sys_sessions : %u\n"
5563 "sys_use_dbregs : %u\n"
5564 "ptrace_use_dbregs : %u\n",
5565 pfm_sessions.pfs_task_sessions,
5566 pfm_sessions.pfs_sys_sessions,
5567 pfm_sessions.pfs_sys_use_dbregs,
5568 pfm_sessions.pfs_ptrace_use_dbregs);
5569
5570 UNLOCK_PFS(flags);
5571
5572 spin_lock(&pfm_buffer_fmt_lock);
5573
5574 list_for_each(pos, &pfm_buffer_fmt_list) {
5575 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
7451adc5
AS
5576 seq_printf(m, "format : %16phD %s\n",
5577 entry->fmt_uuid, entry->fmt_name);
1da177e4
LT
5578 }
5579 spin_unlock(&pfm_buffer_fmt_lock);
5580
5581}
5582
5583static int
5584pfm_proc_show(struct seq_file *m, void *v)
5585{
5586 unsigned long psr;
5587 unsigned int i;
5588 int cpu;
5589
5590 if (v == PFM_PROC_SHOW_HEADER) {
5591 pfm_proc_show_header(m);
5592 return 0;
5593 }
5594
5595 /* show info for CPU (v - 1) */
5596
5597 cpu = (long)v - 1;
5598 seq_printf(m,
5599 "CPU%-2d overflow intrs : %lu\n"
5600 "CPU%-2d overflow cycles : %lu\n"
5601 "CPU%-2d overflow min : %lu\n"
5602 "CPU%-2d overflow max : %lu\n"
5603 "CPU%-2d smpl handler calls : %lu\n"
5604 "CPU%-2d smpl handler cycles : %lu\n"
5605 "CPU%-2d spurious intrs : %lu\n"
5606 "CPU%-2d replay intrs : %lu\n"
5607 "CPU%-2d syst_wide : %d\n"
5608 "CPU%-2d dcr_pp : %d\n"
5609 "CPU%-2d exclude idle : %d\n"
5610 "CPU%-2d owner : %d\n"
5611 "CPU%-2d context : %p\n"
5612 "CPU%-2d activations : %lu\n",
5613 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5614 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5615 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5616 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5617 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5618 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5619 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5620 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5621 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5622 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5623 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5624 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5625 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5626 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5627
5628 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5629
5630 psr = pfm_get_psr();
5631
5632 ia64_srlz_d();
5633
5634 seq_printf(m,
5635 "CPU%-2d psr : 0x%lx\n"
5636 "CPU%-2d pmc0 : 0x%lx\n",
5637 cpu, psr,
5638 cpu, ia64_get_pmc(0));
5639
5640 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5641 if (PMC_IS_COUNTING(i) == 0) continue;
5642 seq_printf(m,
5643 "CPU%-2d pmc%u : 0x%lx\n"
5644 "CPU%-2d pmd%u : 0x%lx\n",
5645 cpu, i, ia64_get_pmc(i),
5646 cpu, i, ia64_get_pmd(i));
5647 }
5648 }
5649 return 0;
5650}
5651
a23fe55e 5652const struct seq_operations pfm_seq_ops = {
1da177e4
LT
5653 .start = pfm_proc_start,
5654 .next = pfm_proc_next,
5655 .stop = pfm_proc_stop,
5656 .show = pfm_proc_show
5657};
5658
1da177e4
LT
5659/*
5660 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
5661 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
5662 * is active or inactive based on mode. We must rely on the value in
5663 * local_cpu_data->pfm_syst_info
5664 */
5665void
5666pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5667{
5668 struct pt_regs *regs;
5669 unsigned long dcr;
5670 unsigned long dcr_pp;
5671
5672 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5673
5674 /*
5675 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5676 * on every CPU, so we can rely on the pid to identify the idle task.
5677 */
5678 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
6450578f 5679 regs = task_pt_regs(task);
1da177e4
LT
5680 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5681 return;
5682 }
5683 /*
5684 * if monitoring has started
5685 */
5686 if (dcr_pp) {
5687 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5688 /*
5689 * context switching in?
5690 */
5691 if (is_ctxswin) {
5692 /* mask monitoring for the idle task */
5693 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5694 pfm_clear_psr_pp();
5695 ia64_srlz_i();
5696 return;
5697 }
5698 /*
5699 * context switching out
5700 * restore monitoring for next task
5701 *
5702 * Due to inlining this odd if-then-else construction generates
5703 * better code.
5704 */
5705 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5706 pfm_set_psr_pp();
5707 ia64_srlz_i();
5708 }
5709}
5710
5711#ifdef CONFIG_SMP
5712
5713static void
5714pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5715{
5716 struct task_struct *task = ctx->ctx_task;
5717
5718 ia64_psr(regs)->up = 0;
5719 ia64_psr(regs)->sp = 1;
5720
5721 if (GET_PMU_OWNER() == task) {
19c5870c
AD
5722 DPRINT(("cleared ownership for [%d]\n",
5723 task_pid_nr(ctx->ctx_task)));
1da177e4
LT
5724 SET_PMU_OWNER(NULL, NULL);
5725 }
5726
5727 /*
5728 * disconnect the task from the context and vice-versa
5729 */
5730 PFM_SET_WORK_PENDING(task, 0);
5731
5732 task->thread.pfm_context = NULL;
5733 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5734
19c5870c 5735 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
1da177e4
LT
5736}
5737
5738
5739/*
5740 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5741 */
5742void
5743pfm_save_regs(struct task_struct *task)
5744{
5745 pfm_context_t *ctx;
1da177e4
LT
5746 unsigned long flags;
5747 u64 psr;
5748
5749
5750 ctx = PFM_GET_CTX(task);
5751 if (ctx == NULL) return;
1da177e4
LT
5752
5753 /*
5754 * we always come here with interrupts ALREADY disabled by
5755 * the scheduler. So we simply need to protect against concurrent
5756 * access, not CPU concurrency.
5757 */
5758 flags = pfm_protect_ctx_ctxsw(ctx);
5759
5760 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
6450578f 5761 struct pt_regs *regs = task_pt_regs(task);
1da177e4
LT
5762
5763 pfm_clear_psr_up();
5764
5765 pfm_force_cleanup(ctx, regs);
5766
5767 BUG_ON(ctx->ctx_smpl_hdr);
5768
5769 pfm_unprotect_ctx_ctxsw(ctx, flags);
5770
5771 pfm_context_free(ctx);
5772 return;
5773 }
5774
5775 /*
5776 * save current PSR: needed because we modify it
5777 */
5778 ia64_srlz_d();
5779 psr = pfm_get_psr();
5780
5781 BUG_ON(psr & (IA64_PSR_I));
5782
5783 /*
5784 * stop monitoring:
5785 * This is the last instruction which may generate an overflow
5786 *
5787 * We do not need to set psr.sp because, it is irrelevant in kernel.
5788 * It will be restored from ipsr when going back to user level
5789 */
5790 pfm_clear_psr_up();
5791
5792 /*
5793 * keep a copy of psr.up (for reload)
5794 */
5795 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5796
5797 /*
5798 * release ownership of this PMU.
5799 * PM interrupts are masked, so nothing
5800 * can happen.
5801 */
5802 SET_PMU_OWNER(NULL, NULL);
5803
5804 /*
5805 * we systematically save the PMD as we have no
5806 * guarantee we will be schedule at that same
5807 * CPU again.
5808 */
35589a8f 5809 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
1da177e4
LT
5810
5811 /*
5812 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5813 * we will need it on the restore path to check
5814 * for pending overflow.
5815 */
35589a8f 5816 ctx->th_pmcs[0] = ia64_get_pmc(0);
1da177e4
LT
5817
5818 /*
5819 * unfreeze PMU if had pending overflows
5820 */
35589a8f 5821 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
1da177e4
LT
5822
5823 /*
5824 * finally, allow context access.
5825 * interrupts will still be masked after this call.
5826 */
5827 pfm_unprotect_ctx_ctxsw(ctx, flags);
5828}
5829
5830#else /* !CONFIG_SMP */
5831void
5832pfm_save_regs(struct task_struct *task)
5833{
5834 pfm_context_t *ctx;
5835 u64 psr;
5836
5837 ctx = PFM_GET_CTX(task);
5838 if (ctx == NULL) return;
5839
5840 /*
5841 * save current PSR: needed because we modify it
5842 */
5843 psr = pfm_get_psr();
5844
5845 BUG_ON(psr & (IA64_PSR_I));
5846
5847 /*
5848 * stop monitoring:
5849 * This is the last instruction which may generate an overflow
5850 *
5851 * We do not need to set psr.sp because, it is irrelevant in kernel.
5852 * It will be restored from ipsr when going back to user level
5853 */
5854 pfm_clear_psr_up();
5855
5856 /*
5857 * keep a copy of psr.up (for reload)
5858 */
5859 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5860}
5861
5862static void
5863pfm_lazy_save_regs (struct task_struct *task)
5864{
5865 pfm_context_t *ctx;
1da177e4
LT
5866 unsigned long flags;
5867
5868 { u64 psr = pfm_get_psr();
5869 BUG_ON(psr & IA64_PSR_UP);
5870 }
5871
5872 ctx = PFM_GET_CTX(task);
1da177e4
LT
5873
5874 /*
5875 * we need to mask PMU overflow here to
5876 * make sure that we maintain pmc0 until
5877 * we save it. overflow interrupts are
5878 * treated as spurious if there is no
5879 * owner.
5880 *
5881 * XXX: I don't think this is necessary
5882 */
5883 PROTECT_CTX(ctx,flags);
5884
5885 /*
5886 * release ownership of this PMU.
5887 * must be done before we save the registers.
5888 *
5889 * after this call any PMU interrupt is treated
5890 * as spurious.
5891 */
5892 SET_PMU_OWNER(NULL, NULL);
5893
5894 /*
5895 * save all the pmds we use
5896 */
35589a8f 5897 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
1da177e4
LT
5898
5899 /*
5900 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5901 * it is needed to check for pended overflow
5902 * on the restore path
5903 */
35589a8f 5904 ctx->th_pmcs[0] = ia64_get_pmc(0);
1da177e4
LT
5905
5906 /*
5907 * unfreeze PMU if had pending overflows
5908 */
35589a8f 5909 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
1da177e4
LT
5910
5911 /*
5912 * now get can unmask PMU interrupts, they will
5913 * be treated as purely spurious and we will not
5914 * lose any information
5915 */
5916 UNPROTECT_CTX(ctx,flags);
5917}
5918#endif /* CONFIG_SMP */
5919
5920#ifdef CONFIG_SMP
5921/*
5922 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5923 */
5924void
5925pfm_load_regs (struct task_struct *task)
5926{
5927 pfm_context_t *ctx;
1da177e4
LT
5928 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
5929 unsigned long flags;
5930 u64 psr, psr_up;
5931 int need_irq_resend;
5932
5933 ctx = PFM_GET_CTX(task);
5934 if (unlikely(ctx == NULL)) return;
5935
5936 BUG_ON(GET_PMU_OWNER());
5937
1da177e4
LT
5938 /*
5939 * possible on unload
5940 */
35589a8f 5941 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
1da177e4
LT
5942
5943 /*
5944 * we always come here with interrupts ALREADY disabled by
5945 * the scheduler. So we simply need to protect against concurrent
5946 * access, not CPU concurrency.
5947 */
5948 flags = pfm_protect_ctx_ctxsw(ctx);
5949 psr = pfm_get_psr();
5950
5951 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
5952
5953 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
5954 BUG_ON(psr & IA64_PSR_I);
5955
5956 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6450578f 5957 struct pt_regs *regs = task_pt_regs(task);
1da177e4
LT
5958
5959 BUG_ON(ctx->ctx_smpl_hdr);
5960
5961 pfm_force_cleanup(ctx, regs);
5962
5963 pfm_unprotect_ctx_ctxsw(ctx, flags);
5964
5965 /*
5966 * this one (kmalloc'ed) is fine with interrupts disabled
5967 */
5968 pfm_context_free(ctx);
5969
5970 return;
5971 }
5972
5973 /*
5974 * we restore ALL the debug registers to avoid picking up
5975 * stale state.
5976 */
5977 if (ctx->ctx_fl_using_dbreg) {
5978 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
5979 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
5980 }
5981 /*
5982 * retrieve saved psr.up
5983 */
5984 psr_up = ctx->ctx_saved_psr_up;
5985
5986 /*
5987 * if we were the last user of the PMU on that CPU,
5988 * then nothing to do except restore psr
5989 */
5990 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
5991
5992 /*
5993 * retrieve partial reload masks (due to user modifications)
5994 */
5995 pmc_mask = ctx->ctx_reload_pmcs[0];
5996 pmd_mask = ctx->ctx_reload_pmds[0];
5997
5998 } else {
5999 /*
6000 * To avoid leaking information to the user level when psr.sp=0,
6001 * we must reload ALL implemented pmds (even the ones we don't use).
6002 * In the kernel we only allow PFM_READ_PMDS on registers which
6003 * we initialized or requested (sampling) so there is no risk there.
6004 */
6005 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6006
6007 /*
6008 * ALL accessible PMCs are systematically reloaded, unused registers
6009 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6010 * up stale configuration.
6011 *
6012 * PMC0 is never in the mask. It is always restored separately.
6013 */
6014 pmc_mask = ctx->ctx_all_pmcs[0];
6015 }
6016 /*
6017 * when context is MASKED, we will restore PMC with plm=0
6018 * and PMD with stale information, but that's ok, nothing
6019 * will be captured.
6020 *
6021 * XXX: optimize here
6022 */
35589a8f
KA
6023 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6024 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
1da177e4
LT
6025
6026 /*
6027 * check for pending overflow at the time the state
6028 * was saved.
6029 */
35589a8f 6030 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
1da177e4
LT
6031 /*
6032 * reload pmc0 with the overflow information
6033 * On McKinley PMU, this will trigger a PMU interrupt
6034 */
35589a8f 6035 ia64_set_pmc(0, ctx->th_pmcs[0]);
1da177e4 6036 ia64_srlz_d();
35589a8f 6037 ctx->th_pmcs[0] = 0UL;
1da177e4
LT
6038
6039 /*
6040 * will replay the PMU interrupt
6041 */
c0ad90a3 6042 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
1da177e4
LT
6043
6044 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6045 }
6046
6047 /*
6048 * we just did a reload, so we reset the partial reload fields
6049 */
6050 ctx->ctx_reload_pmcs[0] = 0UL;
6051 ctx->ctx_reload_pmds[0] = 0UL;
6052
6053 SET_LAST_CPU(ctx, smp_processor_id());
6054
6055 /*
6056 * dump activation value for this PMU
6057 */
6058 INC_ACTIVATION();
6059 /*
6060 * record current activation for this context
6061 */
6062 SET_ACTIVATION(ctx);
6063
6064 /*
6065 * establish new ownership.
6066 */
6067 SET_PMU_OWNER(task, ctx);
6068
6069 /*
6070 * restore the psr.up bit. measurement
6071 * is active again.
6072 * no PMU interrupt can happen at this point
6073 * because we still have interrupts disabled.
6074 */
6075 if (likely(psr_up)) pfm_set_psr_up();
6076
6077 /*
6078 * allow concurrent access to context
6079 */
6080 pfm_unprotect_ctx_ctxsw(ctx, flags);
6081}
6082#else /* !CONFIG_SMP */
6083/*
6084 * reload PMU state for UP kernels
6085 * in 2.5 we come here with interrupts disabled
6086 */
6087void
6088pfm_load_regs (struct task_struct *task)
6089{
1da177e4
LT
6090 pfm_context_t *ctx;
6091 struct task_struct *owner;
6092 unsigned long pmd_mask, pmc_mask;
6093 u64 psr, psr_up;
6094 int need_irq_resend;
6095
6096 owner = GET_PMU_OWNER();
6097 ctx = PFM_GET_CTX(task);
1da177e4
LT
6098 psr = pfm_get_psr();
6099
6100 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6101 BUG_ON(psr & IA64_PSR_I);
6102
6103 /*
6104 * we restore ALL the debug registers to avoid picking up
6105 * stale state.
6106 *
6107 * This must be done even when the task is still the owner
6108 * as the registers may have been modified via ptrace()
6109 * (not perfmon) by the previous task.
6110 */
6111 if (ctx->ctx_fl_using_dbreg) {
6112 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6113 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6114 }
6115
6116 /*
6117 * retrieved saved psr.up
6118 */
6119 psr_up = ctx->ctx_saved_psr_up;
6120 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6121
6122 /*
6123 * short path, our state is still there, just
6124 * need to restore psr and we go
6125 *
6126 * we do not touch either PMC nor PMD. the psr is not touched
6127 * by the overflow_handler. So we are safe w.r.t. to interrupt
6128 * concurrency even without interrupt masking.
6129 */
6130 if (likely(owner == task)) {
6131 if (likely(psr_up)) pfm_set_psr_up();
6132 return;
6133 }
6134
6135 /*
6136 * someone else is still using the PMU, first push it out and
6137 * then we'll be able to install our stuff !
6138 *
6139 * Upon return, there will be no owner for the current PMU
6140 */
6141 if (owner) pfm_lazy_save_regs(owner);
6142
6143 /*
6144 * To avoid leaking information to the user level when psr.sp=0,
6145 * we must reload ALL implemented pmds (even the ones we don't use).
6146 * In the kernel we only allow PFM_READ_PMDS on registers which
6147 * we initialized or requested (sampling) so there is no risk there.
6148 */
6149 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6150
6151 /*
6152 * ALL accessible PMCs are systematically reloaded, unused registers
6153 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6154 * up stale configuration.
6155 *
6156 * PMC0 is never in the mask. It is always restored separately
6157 */
6158 pmc_mask = ctx->ctx_all_pmcs[0];
6159
35589a8f
KA
6160 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6161 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
1da177e4
LT
6162
6163 /*
6164 * check for pending overflow at the time the state
6165 * was saved.
6166 */
35589a8f 6167 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
1da177e4
LT
6168 /*
6169 * reload pmc0 with the overflow information
6170 * On McKinley PMU, this will trigger a PMU interrupt
6171 */
35589a8f 6172 ia64_set_pmc(0, ctx->th_pmcs[0]);
1da177e4
LT
6173 ia64_srlz_d();
6174
35589a8f 6175 ctx->th_pmcs[0] = 0UL;
1da177e4
LT
6176
6177 /*
6178 * will replay the PMU interrupt
6179 */
c0ad90a3 6180 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
1da177e4
LT
6181
6182 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6183 }
6184
6185 /*
6186 * establish new ownership.
6187 */
6188 SET_PMU_OWNER(task, ctx);
6189
6190 /*
6191 * restore the psr.up bit. measurement
6192 * is active again.
6193 * no PMU interrupt can happen at this point
6194 * because we still have interrupts disabled.
6195 */
6196 if (likely(psr_up)) pfm_set_psr_up();
6197}
6198#endif /* CONFIG_SMP */
6199
6200/*
6201 * this function assumes monitoring is stopped
6202 */
6203static void
6204pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6205{
6206 u64 pmc0;
6207 unsigned long mask2, val, pmd_val, ovfl_val;
6208 int i, can_access_pmu = 0;
6209 int is_self;
6210
6211 /*
6212 * is the caller the task being monitored (or which initiated the
6213 * session for system wide measurements)
6214 */
6215 is_self = ctx->ctx_task == task ? 1 : 0;
6216
6217 /*
6218 * can access PMU is task is the owner of the PMU state on the current CPU
6219 * or if we are running on the CPU bound to the context in system-wide mode
6220 * (that is not necessarily the task the context is attached to in this mode).
6221 * In system-wide we always have can_access_pmu true because a task running on an
6222 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6223 */
6224 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6225 if (can_access_pmu) {
6226 /*
6227 * Mark the PMU as not owned
6228 * This will cause the interrupt handler to do nothing in case an overflow
6229 * interrupt was in-flight
6230 * This also guarantees that pmc0 will contain the final state
6231 * It virtually gives us full control on overflow processing from that point
6232 * on.
6233 */
6234 SET_PMU_OWNER(NULL, NULL);
6235 DPRINT(("releasing ownership\n"));
6236
6237 /*
6238 * read current overflow status:
6239 *
6240 * we are guaranteed to read the final stable state
6241 */
6242 ia64_srlz_d();
6243 pmc0 = ia64_get_pmc(0); /* slow */
6244
6245 /*
6246 * reset freeze bit, overflow status information destroyed
6247 */
6248 pfm_unfreeze_pmu();
6249 } else {
35589a8f 6250 pmc0 = ctx->th_pmcs[0];
1da177e4
LT
6251 /*
6252 * clear whatever overflow status bits there were
6253 */
35589a8f 6254 ctx->th_pmcs[0] = 0;
1da177e4
LT
6255 }
6256 ovfl_val = pmu_conf->ovfl_val;
6257 /*
6258 * we save all the used pmds
6259 * we take care of overflows for counting PMDs
6260 *
6261 * XXX: sampling situation is not taken into account here
6262 */
6263 mask2 = ctx->ctx_used_pmds[0];
6264
6265 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6266
6267 for (i = 0; mask2; i++, mask2>>=1) {
6268
6269 /* skip non used pmds */
6270 if ((mask2 & 0x1) == 0) continue;
6271
6272 /*
6273 * can access PMU always true in system wide mode
6274 */
35589a8f 6275 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
1da177e4
LT
6276
6277 if (PMD_IS_COUNTING(i)) {
6278 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
19c5870c 6279 task_pid_nr(task),
1da177e4
LT
6280 i,
6281 ctx->ctx_pmds[i].val,
6282 val & ovfl_val));
6283
6284 /*
6285 * we rebuild the full 64 bit value of the counter
6286 */
6287 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6288
6289 /*
6290 * now everything is in ctx_pmds[] and we need
6291 * to clear the saved context from save_regs() such that
6292 * pfm_read_pmds() gets the correct value
6293 */
6294 pmd_val = 0UL;
6295
6296 /*
6297 * take care of overflow inline
6298 */
6299 if (pmc0 & (1UL << i)) {
6300 val += 1 + ovfl_val;
19c5870c 6301 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
1da177e4
LT
6302 }
6303 }
6304
19c5870c 6305 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
1da177e4 6306
35589a8f 6307 if (is_self) ctx->th_pmds[i] = pmd_val;
1da177e4
LT
6308
6309 ctx->ctx_pmds[i].val = val;
6310 }
6311}
6312
6313static struct irqaction perfmon_irqaction = {
6314 .handler = pfm_interrupt_handler,
1da177e4
LT
6315 .name = "perfmon"
6316};
6317
a1ecf7f6
TL
6318static void
6319pfm_alt_save_pmu_state(void *data)
6320{
6321 struct pt_regs *regs;
6322
6450578f 6323 regs = task_pt_regs(current);
a1ecf7f6
TL
6324
6325 DPRINT(("called\n"));
6326
6327 /*
6328 * should not be necessary but
6329 * let's take not risk
6330 */
6331 pfm_clear_psr_up();
6332 pfm_clear_psr_pp();
6333 ia64_psr(regs)->pp = 0;
6334
6335 /*
6336 * This call is required
6337 * May cause a spurious interrupt on some processors
6338 */
6339 pfm_freeze_pmu();
6340
6341 ia64_srlz_d();
6342}
6343
6344void
6345pfm_alt_restore_pmu_state(void *data)
6346{
6347 struct pt_regs *regs;
6348
6450578f 6349 regs = task_pt_regs(current);
a1ecf7f6
TL
6350
6351 DPRINT(("called\n"));
6352
6353 /*
6354 * put PMU back in state expected
6355 * by perfmon
6356 */
6357 pfm_clear_psr_up();
6358 pfm_clear_psr_pp();
6359 ia64_psr(regs)->pp = 0;
6360
6361 /*
6362 * perfmon runs with PMU unfrozen at all times
6363 */
6364 pfm_unfreeze_pmu();
6365
6366 ia64_srlz_d();
6367}
6368
6369int
6370pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6371{
6372 int ret, i;
6373 int reserve_cpu;
6374
6375 /* some sanity checks */
6376 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6377
6378 /* do the easy test first */
6379 if (pfm_alt_intr_handler) return -EBUSY;
6380
6381 /* one at a time in the install or remove, just fail the others */
6382 if (!spin_trylock(&pfm_alt_install_check)) {
6383 return -EBUSY;
6384 }
6385
6386 /* reserve our session */
6387 for_each_online_cpu(reserve_cpu) {
6388 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6389 if (ret) goto cleanup_reserve;
6390 }
6391
6392 /* save the current system wide pmu states */
15c8b6c1 6393 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
a1ecf7f6
TL
6394 if (ret) {
6395 DPRINT(("on_each_cpu() failed: %d\n", ret));
6396 goto cleanup_reserve;
6397 }
6398
6399 /* officially change to the alternate interrupt handler */
6400 pfm_alt_intr_handler = hdl;
6401
6402 spin_unlock(&pfm_alt_install_check);
6403
6404 return 0;
6405
6406cleanup_reserve:
6407 for_each_online_cpu(i) {
6408 /* don't unreserve more than we reserved */
6409 if (i >= reserve_cpu) break;
6410
6411 pfm_unreserve_session(NULL, 1, i);
6412 }
6413
6414 spin_unlock(&pfm_alt_install_check);
6415
6416 return ret;
6417}
6418EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6419
6420int
6421pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6422{
6423 int i;
6424 int ret;
6425
6426 if (hdl == NULL) return -EINVAL;
6427
6428 /* cannot remove someone else's handler! */
6429 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6430
6431 /* one at a time in the install or remove, just fail the others */
6432 if (!spin_trylock(&pfm_alt_install_check)) {
6433 return -EBUSY;
6434 }
6435
6436 pfm_alt_intr_handler = NULL;
6437
15c8b6c1 6438 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
a1ecf7f6
TL
6439 if (ret) {
6440 DPRINT(("on_each_cpu() failed: %d\n", ret));
6441 }
6442
6443 for_each_online_cpu(i) {
6444 pfm_unreserve_session(NULL, 1, i);
6445 }
6446
6447 spin_unlock(&pfm_alt_install_check);
6448
6449 return 0;
6450}
6451EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6452
1da177e4
LT
6453/*
6454 * perfmon initialization routine, called from the initcall() table
6455 */
6456static int init_pfm_fs(void);
6457
6458static int __init
6459pfm_probe_pmu(void)
6460{
6461 pmu_config_t **p;
6462 int family;
6463
6464 family = local_cpu_data->family;
6465 p = pmu_confs;
6466
6467 while(*p) {
6468 if ((*p)->probe) {
6469 if ((*p)->probe() == 0) goto found;
6470 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6471 goto found;
6472 }
6473 p++;
6474 }
6475 return -1;
6476found:
6477 pmu_conf = *p;
6478 return 0;
6479}
6480
1da177e4
LT
6481int __init
6482pfm_init(void)
6483{
6484 unsigned int n, n_counters, i;
6485
6486 printk("perfmon: version %u.%u IRQ %u\n",
6487 PFM_VERSION_MAJ,
6488 PFM_VERSION_MIN,
6489 IA64_PERFMON_VECTOR);
6490
6491 if (pfm_probe_pmu()) {
6492 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6493 local_cpu_data->family);
6494 return -ENODEV;
6495 }
6496
6497 /*
6498 * compute the number of implemented PMD/PMC from the
6499 * description tables
6500 */
6501 n = 0;
6502 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6503 if (PMC_IS_IMPL(i) == 0) continue;
6504 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6505 n++;
6506 }
6507 pmu_conf->num_pmcs = n;
6508
6509 n = 0; n_counters = 0;
6510 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6511 if (PMD_IS_IMPL(i) == 0) continue;
6512 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6513 n++;
6514 if (PMD_IS_COUNTING(i)) n_counters++;
6515 }
6516 pmu_conf->num_pmds = n;
6517 pmu_conf->num_counters = n_counters;
6518
6519 /*
6520 * sanity checks on the number of debug registers
6521 */
6522 if (pmu_conf->use_rr_dbregs) {
6523 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6524 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6525 pmu_conf = NULL;
6526 return -1;
6527 }
6528 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6529 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
6530 pmu_conf = NULL;
6531 return -1;
6532 }
6533 }
6534
6535 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6536 pmu_conf->pmu_name,
6537 pmu_conf->num_pmcs,
6538 pmu_conf->num_pmds,
6539 pmu_conf->num_counters,
6540 ffz(pmu_conf->ovfl_val));
6541
6542 /* sanity check */
35589a8f 6543 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
1da177e4
LT
6544 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6545 pmu_conf = NULL;
6546 return -1;
6547 }
6548
6549 /*
6550 * create /proc/perfmon (mostly for debugging purposes)
6551 */
fddda2b7 6552 perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops);
1da177e4
LT
6553 if (perfmon_dir == NULL) {
6554 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6555 pmu_conf = NULL;
6556 return -1;
6557 }
1da177e4
LT
6558
6559 /*
6560 * create /proc/sys/kernel/perfmon (for debugging purposes)
6561 */
0b4d4147 6562 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
1da177e4
LT
6563
6564 /*
6565 * initialize all our spinlocks
6566 */
6567 spin_lock_init(&pfm_sessions.pfs_lock);
6568 spin_lock_init(&pfm_buffer_fmt_lock);
6569
6570 init_pfm_fs();
6571
6572 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6573
6574 return 0;
6575}
6576
6577__initcall(pfm_init);
6578
6579/*
6580 * this function is called before pfm_init()
6581 */
6582void
6583pfm_init_percpu (void)
6584{
ff741906 6585 static int first_time=1;
1da177e4
LT
6586 /*
6587 * make sure no measurement is active
6588 * (may inherit programmed PMCs from EFI).
6589 */
6590 pfm_clear_psr_pp();
6591 pfm_clear_psr_up();
6592
6593 /*
6594 * we run with the PMU not frozen at all times
6595 */
6596 pfm_unfreeze_pmu();
6597
ff741906 6598 if (first_time) {
1da177e4 6599 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ff741906
AR
6600 first_time=0;
6601 }
1da177e4
LT
6602
6603 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6604 ia64_srlz_d();
6605}
6606
6607/*
6608 * used for debug purposes only
6609 */
6610void
6611dump_pmu_state(const char *from)
6612{
6613 struct task_struct *task;
1da177e4
LT
6614 struct pt_regs *regs;
6615 pfm_context_t *ctx;
6616 unsigned long psr, dcr, info, flags;
6617 int i, this_cpu;
6618
6619 local_irq_save(flags);
6620
6621 this_cpu = smp_processor_id();
6450578f 6622 regs = task_pt_regs(current);
1da177e4
LT
6623 info = PFM_CPUINFO_GET();
6624 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6625
6626 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6627 local_irq_restore(flags);
6628 return;
6629 }
6630
6631 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6632 this_cpu,
6633 from,
19c5870c 6634 task_pid_nr(current),
1da177e4
LT
6635 regs->cr_iip,
6636 current->comm);
6637
6638 task = GET_PMU_OWNER();
6639 ctx = GET_PMU_CTX();
6640
19c5870c 6641 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
1da177e4
LT
6642
6643 psr = pfm_get_psr();
6644
6645 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6646 this_cpu,
6647 ia64_get_pmc(0),
6648 psr & IA64_PSR_PP ? 1 : 0,
6649 psr & IA64_PSR_UP ? 1 : 0,
6650 dcr & IA64_DCR_PP ? 1 : 0,
6651 info,
6652 ia64_psr(regs)->up,
6653 ia64_psr(regs)->pp);
6654
6655 ia64_psr(regs)->up = 0;
6656 ia64_psr(regs)->pp = 0;
6657
1da177e4
LT
6658 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6659 if (PMC_IS_IMPL(i) == 0) continue;
35589a8f 6660 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
1da177e4
LT
6661 }
6662
6663 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6664 if (PMD_IS_IMPL(i) == 0) continue;
35589a8f 6665 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
1da177e4
LT
6666 }
6667
6668 if (ctx) {
6669 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6670 this_cpu,
6671 ctx->ctx_state,
6672 ctx->ctx_smpl_vaddr,
6673 ctx->ctx_smpl_hdr,
6674 ctx->ctx_msgq_head,
6675 ctx->ctx_msgq_tail,
6676 ctx->ctx_saved_psr_up);
6677 }
6678 local_irq_restore(flags);
6679}
6680
6681/*
6682 * called from process.c:copy_thread(). task is new child.
6683 */
6684void
6685pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6686{
6687 struct thread_struct *thread;
6688
19c5870c 6689 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
1da177e4
LT
6690
6691 thread = &task->thread;
6692
6693 /*
6694 * cut links inherited from parent (current)
6695 */
6696 thread->pfm_context = NULL;
6697
6698 PFM_SET_WORK_PENDING(task, 0);
6699
6700 /*
6701 * the psr bits are already set properly in copy_threads()
6702 */
6703}
6704#else /* !CONFIG_PERFMON */
6705asmlinkage long
6706sys_perfmonctl (int fd, int cmd, void *arg, int count)
6707{
6708 return -ENOSYS;
6709}
6710#endif /* CONFIG_PERFMON */