// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
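
/*
 * Snapshot of the host PMU's capabilities (counter counts, counter widths,
 * PEBS support, ...) gathered at setup time; what KVM advertises to the
 * guest vPMU is capped by what is recorded here.
 */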
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);

/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	/* Instruction-Accurate PDIR (PDIR++) */
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively.  Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn and, for families 15H and later,
 *      MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *      aliased to MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance AMD RDPMC uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3).  Intel has a similar mechanism, except
 *      that it also supports fixed counters.  idx can be used as an index
 *      into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code.  Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed).  The mapping between pmc
 *      and perf counters is as follows:
 *      * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *               and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */
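
/*
 * For example, on Intel a guest gp counter 1 has kvm_pmc.idx == 1, while
 * fixed counter 0 has kvm_pmc.idx == INTEL_PMC_IDX_FIXED (32).
 */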

static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,		     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}
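
/*
 * irq_work callback used to deliver a pending PMI to the guest; an overflow
 * taken in NMI context can't safely wake the vCPU directly, so
 * __kvm_perf_overflow() defers the delivery here.
 */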

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}
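
/*
 * Record a counter overflow in the guest's view of GLOBAL_STATUS (or the
 * PEBS buffer-overflow bit for PEBS events) and raise a PMI, deferring the
 * wakeup to irq_work when running in host NMI context.
 */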

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	bool skip_pmi = false;

	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
		if (!in_pmi) {
			/*
			 * TODO: KVM is currently _choosing_ to not generate records
			 * for emulated instructions, avoiding BUFFER_OVF PMI when
			 * there are no records.  Strictly speaking, it should be done
			 * as well in the right context to improve sampling accuracy.
			 */
			skip_pmi = true;
		} else {
			/* Indicate PEBS overflow PMI to guest. */
			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
						      (unsigned long *)&pmu->global_status);
		}
	} else {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	}

	if (!pmc->intr || skip_pmi)
		return;

	/*
	 * Inject the PMI.  If the vCPU was in guest mode when the NMI
	 * arrived, the PMI can be injected on the next guest-mode entry.
	 * Otherwise we can't be sure the vCPU wasn't executing a hlt
	 * instruction at the time of the vmexit, and thus won't re-enter
	 * guest mode until it is woken up.  Waking the vCPU is impossible
	 * from NMI context, so do it from irq_work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
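
/* Host perf callback, invoked when the perf_event backing a vPMC overflows. */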

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	/*
	 * Ignore overflow events for counters that are scheduled to be
	 * reprogrammed, e.g. if a PMI for the previous event races with KVM's
	 * handling of a related guest WRMSR.
	 */
	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
		return;

	__kvm_perf_overflow(pmc, true);

	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{
	/*
	 * For some model-specific PEBS counters with special capabilities
	 * (PDIR, PDIR++, PDist), KVM needs to raise the event precise
	 * level to the maximum value (currently 3, backwards compatible)
	 * so that the perf subsystem assigns a hardware counter with that
	 * capability to the vPMC.
	 */
	if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
	    (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
		return 3;

	/*
	 * A non-zero precision level turns the ordinary guest event into a
	 * guest PEBS event and triggers the host PEBS PMI handler to
	 * determine whether the PEBS overflow PMI comes from the host
	 * counters or the guest.
	 */
	return 1;
}
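
/*
 * Create the host perf_event that backs the vPMC with the requested
 * configuration and wire its overflow handler up to kvm_perf_overflow().
 */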

static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
				 bool exclude_user, bool exclude_kernel,
				 bool intr)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu)) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period.  Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
	}
	if (pebs) {
		/*
		 * For most PEBS hardware events, the difference in the software
		 * precision levels of guest and host PEBS events will not affect
		 * the accuracy of the PEBS profiling result, because the "event IP"
		 * in the PEBS record is calibrated on the guest side.
		 */
		attr.precise_ip = pmc_get_pebs_precise_level(pmc);
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return PTR_ERR(event);
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	pmc->is_paused = false;
	pmc->intr = intr || pebs;
	return 0;
}
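
/*
 * Disable the backing perf_event and fold its accumulated count into
 * pmc->counter so that emulated reads observe an up-to-date value.
 */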

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}
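
/*
 * Try to reuse the existing perf_event: re-arm the sample period and
 * re-enable the event, provided its PEBS configuration still matches the
 * guest's PEBS_ENABLE.  Returns false if a full reprogram is needed.
 */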

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (is_sampling_event(pmc->perf_event) &&
	    perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
	    (!!pmc->perf_event->attr.precise_ip))
		return false;

	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	return true;
}

static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
	u64 a = *(u64 *)pa & mask;
	u64 b = *(u64 *)pb & mask;

	return (a > b) - (a < b);
}

static int filter_sort_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
				   KVM_PMU_MASKED_ENTRY_EXCLUDE));
}

/*
 * For the event filter, searching is done on the 'includes' list and
 * 'excludes' list separately rather than on the 'events' list (which
 * has both).  As a result the exclude bit can be ignored.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
}

static int find_filter_index(u64 *events, u64 nevents, u64 key)
{
	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
			  filter_event_cmp);

	if (!fe)
		return -1;

	return fe - events;
}
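
/*
 * Check whether the guest's unit mask satisfies a masked filter entry: the
 * entry's umask_mask selects which unit-mask bits must equal umask_match.
 * For example, umask_mask == 0xF0 with umask_match == 0x20 accepts any guest
 * unit mask in the range 0x20 - 0x2F.
 */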

static bool is_filter_entry_match(u64 filter_event, u64 umask)
{
	u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
	u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;

	BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
		     (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
		     ARCH_PERFMON_EVENTSEL_UMASK);

	return (umask & mask) == match;
}

static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{
	u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
	u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
	int i, index;

	index = find_filter_index(events, nevents, event_select);
	if (index < 0)
		return false;

	/*
	 * Entries are sorted by the event select.  Walk the list in both
	 * directions to process all entries with the targeted event select.
	 */
	for (i = index; i < nevents; i++) {
		if (filter_event_cmp(&events[i], &event_select))
			break;

		if (is_filter_entry_match(events[i], umask))
			return true;
	}

	for (i = index - 1; i >= 0; i--) {
		if (filter_event_cmp(&events[i], &event_select))
			break;

		if (is_filter_entry_match(events[i], umask))
			return true;
	}

	return false;
}

static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
				u64 eventsel)
{
	if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
	    !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
		return f->action == KVM_PMU_EVENT_ALLOW;

	return f->action == KVM_PMU_EVENT_DENY;
}

static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
				   int idx)
{
	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;

	if (filter->action == KVM_PMU_EVENT_DENY &&
	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;
	if (filter->action == KVM_PMU_EVENT_ALLOW &&
	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;

	return true;
}

static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (!filter)
		return true;

	if (pmc_is_gp(pmc))
		return is_gp_event_allowed(filter, pmc->eventsel);

	return is_fixed_event_allowed(filter, pmc->idx);
}
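
/*
 * A vPMC may count only if it is enabled in the guest's global control, is
 * locally enabled by the guest, the event is available on the hardware, and
 * the userspace PMU event filter (if any) permits it.
 */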

static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
	       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
	       check_pmu_event_filter(pmc);
}
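
/*
 * (Re)configure the backing perf_event after the guest changed a counter's
 * configuration: pause the current event, re-validate that the event may
 * count, and either resume the existing perf_event or release it and create
 * a new one.
 */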

static void reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 eventsel = pmc->eventsel;
	u64 new_config = eventsel;
	u8 fixed_ctr_ctrl;

	pmc_pause_counter(pmc);

	if (!pmc_event_is_allowed(pmc))
		goto reprogram_complete;

	if (pmc->counter < pmc->prev_counter)
		__kvm_perf_overflow(pmc, false);

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	if (pmc_is_fixed(pmc)) {
		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
						  pmc->idx - INTEL_PMC_IDX_FIXED);
		if (fixed_ctr_ctrl & 0x1)
			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
		if (fixed_ctr_ctrl & 0x2)
			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
		if (fixed_ctr_ctrl & 0x8)
			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
		new_config = (u64)fixed_ctr_ctrl;
	}

	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
		goto reprogram_complete;

	pmc_release_perf_event(pmc);

	pmc->current_config = new_config;

	/*
	 * If reprogramming fails, e.g. due to contention, leave the counter's
	 * reprogram bit set, i.e. opportunistically try again on the next PMU
	 * refresh.  Don't make a new request as doing so can stall the guest
	 * if reprogramming repeatedly fails.
	 */
	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
				  (eventsel & pmu->raw_event_mask),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
		return;

reprogram_complete:
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->prev_counter = 0;
}
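
/*
 * Handle a pending KVM_REQ_PMU: reprogram every counter whose bit is set in
 * reprogram_pmi, and run the deferred cleanup pass if one was requested.
 */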

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

		if (unlikely(!pmc)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmc);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice.  kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}
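
/*
 * VMware backdoor "PMCs" read as time sources rather than real counters:
 * the host TSC, nanoseconds since host boot, or the guest's apparent time
 * (host boot time adjusted by the kvmclock offset).
 */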

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
	default:
		break;
	}
	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		break;
	default:
		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
	}

	return 0;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 diff;

	/*
	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
	 */
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (!msr_info->host_initiated)
			return 1; /* RO MSR */
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		/* Per PPR, Read-only MSR. Writes are ignored. */
		if (!msr_info->host_initiated)
			break;

		if (data & pmu->global_status_mask)
			return 1;

		pmu->global_status = data;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
		data &= ~pmu->global_ctrl_mask;
		fallthrough;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (!kvm_valid_perf_global_ctrl(pmu, data))
			return 1;

		if (pmu->global_ctrl != data) {
			diff = pmu->global_ctrl ^ data;
			pmu->global_ctrl = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		/*
		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
		 */
		if (data & pmu->global_status_mask)
			return 1;
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		if (!msr_info->host_initiated)
			pmu->global_status &= ~data;
		break;
	default:
		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
	}

	return 0;
}

/*
 * Refresh the PMU settings.  This function is generally called when the
 * underlying settings change (such as a change to the guest's PMU CPUID),
 * which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
		return;

	bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
	static_call(kvm_x86_pmu_refresh)(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	static_call(kvm_x86_pmu_reset)(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	static_call(kvm_x86_pmu_init)(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	pmc->prev_counter = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
	kvm_pmu_request_counter_reprogram(pmc);
}
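
/*
 * Compare the guest's event select + unit mask against the architectural
 * encoding of the given generic perf event; edge-detect, invert and cmask
 * bits are deliberately left out of the comparison.
 */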

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
		AMD64_RAW_EVENT_MASK_NB);
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config;

	if (pmc_is_gp(pmc)) {
		config = pmc->eventsel;
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
					  pmc->idx - INTEL_PMC_IDX_FIXED);
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (!pmc || !pmc_event_is_allowed(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
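
/*
 * A masked filter entry may only use the event-select, umask_mask,
 * umask_match and EXCLUDE fields; reject filters that set any other bits.
 */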

static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
	u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
		   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
		   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
		   KVM_PMU_MASKED_ENTRY_EXCLUDE;
	int i;

	for (i = 0; i < filter->nevents; i++) {
		if (filter->events[i] & ~mask)
			return false;
	}

	return true;
}
*filter
)
798 for (i
= 0, j
= 0; i
< filter
->nevents
; i
++) {
800 * Skip events that are impossible to match against a guest
801 * event. When filtering, only the event select + unit mask
802 * of the guest event is used. To maintain backwards
803 * compatibility, impossible filters can't be rejected :-(
805 if (filter
->events
[i
] & ~(kvm_pmu_ops
.EVENTSEL_EVENT
|
806 ARCH_PERFMON_EVENTSEL_UMASK
))
809 * Convert userspace events to a common in-kernel event so
810 * only one code path is needed to support both events. For
811 * the in-kernel events use masked events because they are
812 * flexible enough to handle both cases. To convert to masked
813 * events all that's needed is to add an "all ones" umask_mask,
814 * (unmasked filter events don't support EXCLUDE).
816 filter
->events
[j
++] = filter
->events
[i
] |
817 (0xFFULL
<< KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT
);

static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{
	int i;

	if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
		convert_to_masked_filter(filter);
	else if (!is_masked_filter_valid(filter))
		return -EINVAL;

	/*
	 * Sort entries by event select and includes vs. excludes so that all
	 * entries for a given event select can be processed efficiently during
	 * filtering.  The EXCLUDE flag uses a more significant bit than the
	 * event select, and so the sorted list is also effectively split into
	 * includes and excludes sub-lists.
	 */
	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
	     filter_sort_cmp, NULL);

	i = filter->nevents;
	/* Find the first EXCLUDE event (only supported for masked events). */
	if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
		for (i = 0; i < filter->nevents; i++) {
			if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
				break;
		}
	}

	filter->nr_includes = i;
	filter->nr_excludes = filter->nevents - filter->nr_includes;
	filter->includes = filter->events;
	filter->excludes = filter->events + filter->nr_includes;

	return 0;
}
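
/*
 * Handler for the KVM_SET_PMU_EVENT_FILTER ioctl: copy and validate the
 * filter from userspace, convert and sort it for efficient lookups, publish
 * it via RCU/SRCU, and force every vCPU to reprogram its counters against
 * the new filter.
 */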

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter __user *user_filter = argp;
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm_pmu_event_filter tmp;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	size_t size;
	int r;

	if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	filter->action = tmp.action;
	filter->nevents = tmp.nevents;
	filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
	filter->flags = tmp.flags;

	r = -EFAULT;
	if (copy_from_user(filter->events, user_filter->events,
			   sizeof(filter->events[0]) * filter->nevents))
		goto cleanup;

	r = prepare_filter_lists(filter);
	if (r)
		goto cleanup;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);
	synchronize_srcu_expedited(&kvm->srcu);

	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

	kvm_for_each_vcpu(i, vcpu, kvm)
		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

	r = 0;
cleanup:
	kfree(filter);
	return r;
}