git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: PPC: Book3S HV nestedv2: Add support for reading VPA counters for pseries guests
author: Gautam Menghani <gautam@linux.ibm.com>
Mon, 20 May 2024 17:57:40 +0000 (23:27 +0530)
committer: Michael Ellerman <mpe@ellerman.id.au>
Mon, 3 Jun 2024 12:06:28 +0000 (22:06 +1000)
PAPR hypervisor has introduced three new counters in the VPA area of
LPAR CPUs for KVM L2 guest (see [1] for terminology) observability - two
for context switches from host to guest and vice versa, and one counter
for getting the total time spent inside the KVM guest. Add a tracepoint
that enables reading the counters for use by ftrace/perf. Note that this
tracepoint is only available for the nestedv2 API (i.e., KVM on PowerVM).

[1] Terminology:
a. L1 refers to the VM (LPAR) booted on top of PAPR hypervisor
b. L2 refers to the KVM guest booted on top of L1.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Signed-off-by: Gautam Menghani <gautam@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240520175742.196329-1-gautam@linux.ibm.com
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/lppaca.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/trace_hv.h

index d8729ec81ca083cdc4ac219f954b323f59afe279..2ef9a5f4e5d14c7d0f0f9e25458af027925a1a72 100644 (file)
@@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
 int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);
 
+int kmvhv_counters_tracepoint_regfunc(void);
+void kmvhv_counters_tracepoint_unregfunc(void);
+int kvmhv_get_l2_counters_status(void);
+void kvmhv_set_l2_counters_status(int cpu, bool status);
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
index 61ec2447dabf59f3e2891a70b43c48435bfee8fc..f40a646bee3cb47bea6be9429f9c1424d92334c6 100644 (file)
@@ -62,7 +62,8 @@ struct lppaca {
        u8      donate_dedicated_cpu;   /* Donate dedicated CPU cycles */
        u8      fpregs_in_use;
        u8      pmcregs_in_use;
-       u8      reserved8[28];
+       u8      l2_counters_enable;  /* Enable usage of counters for KVM guest */
+       u8      reserved8[27];
        __be64  wait_state_cycles;      /* Wait cycles for this proc */
        u8      reserved9[28];
        __be16  slb_count;              /* # of SLBs to maintain */
@@ -92,9 +93,13 @@ struct lppaca {
        /* cacheline 4-5 */
 
        __be32  page_ins;               /* CMO Hint - # page ins by OS */
-       u8      reserved12[148];
+       u8      reserved12[28];
+       volatile __be64 l1_to_l2_cs_tb;
+       volatile __be64 l2_to_l1_cs_tb;
+       volatile __be64 l2_runtime_tb;
+       u8 reserved13[96];
        volatile __be64 dtl_idx;        /* Dispatch Trace Log head index */
-       u8      reserved13[96];
+       u8      reserved14[96];
 } ____cacheline_aligned;
 
 #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
index daaf7faf21a5e347c687828b365d24594154ef63..83bd331a4ef4574dd27cb508778deabd64456a75 100644 (file)
@@ -4108,6 +4108,77 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
        }
 }
 
+/* Helper functions for reading L2's stats from L1's VPA */
+#ifdef CONFIG_PPC_PSERIES
+static DEFINE_PER_CPU(u64, l1_to_l2_cs);
+static DEFINE_PER_CPU(u64, l2_to_l1_cs);
+static DEFINE_PER_CPU(u64, l2_runtime_agg);
+
+int kvmhv_get_l2_counters_status(void)
+{
+       return firmware_has_feature(FW_FEATURE_LPAR) &&
+               get_lppaca()->l2_counters_enable;
+}
+
+void kvmhv_set_l2_counters_status(int cpu, bool status)
+{
+       if (!firmware_has_feature(FW_FEATURE_LPAR))
+               return;
+       if (status)
+               lppaca_of(cpu).l2_counters_enable = 1;
+       else
+               lppaca_of(cpu).l2_counters_enable = 0;
+}
+
+int kmvhv_counters_tracepoint_regfunc(void)
+{
+       int cpu;
+
+       for_each_present_cpu(cpu) {
+               kvmhv_set_l2_counters_status(cpu, true);
+       }
+       return 0;
+}
+
+void kmvhv_counters_tracepoint_unregfunc(void)
+{
+       int cpu;
+
+       for_each_present_cpu(cpu) {
+               kvmhv_set_l2_counters_status(cpu, false);
+       }
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+       struct lppaca *lp = get_lppaca();
+       u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns;
+       u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs);
+       u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs);
+       u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg);
+
+       l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb));
+       l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb));
+       l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb));
+       trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr,
+                                       l2_to_l1_ns - *l2_to_l1_cs_ptr,
+                                       l2_runtime_ns - *l2_runtime_agg_ptr);
+       *l1_to_l2_cs_ptr = l1_to_l2_ns;
+       *l2_to_l1_cs_ptr = l2_to_l1_ns;
+       *l2_runtime_agg_ptr = l2_runtime_ns;
+}
+
+#else
+int kvmhv_get_l2_counters_status(void)
+{
+       return 0;
+}
+
+static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
 static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
                                     unsigned long lpcr, u64 *tb)
 {
@@ -4156,6 +4227,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
 
        timer_rearm_host_dec(*tb);
 
+       /* Record context switch and guest_run_time data */
+       if (kvmhv_get_l2_counters_status())
+               do_trace_nested_cs_time(vcpu);
+
        return trap;
 }
 
index 8d57c84285319fb0c9217b415e45f75938307833..77ebc724e6cdf483f9007ea0b8895b42f6fef0b3 100644 (file)
@@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
                        __entry->vcpu_id, __entry->exit, __entry->ret)
 );
 
+#ifdef CONFIG_PPC_PSERIES
+
+TRACE_EVENT_FN_COND(kvmppc_vcpu_stats,
+       TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs, u64 l2_runtime),
+
+       TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime),
+
+       TP_CONDITION(l1_to_l2_cs || l2_to_l1_cs || l2_runtime),
+
+       TP_STRUCT__entry(
+               __field(int,            vcpu_id)
+               __field(u64,            l1_to_l2_cs)
+               __field(u64,            l2_to_l1_cs)
+               __field(u64,            l2_runtime)
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_id  = vcpu->vcpu_id;
+               __entry->l1_to_l2_cs = l1_to_l2_cs;
+               __entry->l2_to_l1_cs = l2_to_l1_cs;
+               __entry->l2_runtime = l2_runtime;
+       ),
+
+       TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns",
+               __entry->vcpu_id,  __entry->l1_to_l2_cs,
+               __entry->l2_to_l1_cs, __entry->l2_runtime),
+       kmvhv_counters_tracepoint_regfunc, kmvhv_counters_tracepoint_unregfunc
+);
+#endif
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */