`perf kvm stat` supports record and report options.
Because the code lives in the arch directory, a report for a different
machine type cannot be supported.
Move the kvm-stat code out of the arch directory and into
util/kvm-stat-arch following the pattern of perf-regs and dwarf-regs.
Avoid duplicate symbols by renaming functions to have the architecture
name within them.
For global variables, wrap them in an architecture specific function.
The architecture to use with `perf kvm stat` is selected by EM_HOST,
ie no different than before the change.
Later the ELF machine can be determined from the session or a header
feature (ie EM_HOST at the time of the record).
The build logic and the #define HAVE_KVM_STAT_SUPPORT are now
redundant, so remove them across the Makefiles and in the build.
Opportunistically constify architectural structs and arrays.
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Aditya Bodkhe <aditya.b1@linux.ibm.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: Anubhav Shelat <ashelat@redhat.com>
Cc: Anup Patel <anup@brainfault.org>
Cc: Athira Rajeev <atrajeev@linux.ibm.com>
Cc: Blake Jones <blakejones@google.com>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <pjw@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quan Zhou <zhouquan@iscas.ac.cn>
Cc: Shimin Guo <shimin.guo@skydio.com>
Cc: Swapnil Sapkal <swapnil.sapkal@amd.com>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yunseong Kim <ysk@kzalloc.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
endif
endif
-ifdef HAVE_KVM_STAT_SUPPORT
- CFLAGS += -DHAVE_KVM_STAT_SUPPORT
-endif
-
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
-HAVE_KVM_STAT_SUPPORT := 1
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-y += ../../arm/util/auxtrace.o
perf-util-y += ../../arm/util/cs-etm.o
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
-HAVE_KVM_STAT_SUPPORT := 1
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
# SPDX-License-Identifier: GPL-2.0
-HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
perf-util-y += header.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-y += perf_regs.o
perf-util-y += mem-events.o
perf-util-y += pmu.o
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
-HAVE_KVM_STAT_SUPPORT := 1
perf-util-y += perf_regs.o
perf-util-y += header.o
-
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
# SPDX-License-Identifier: GPL-2.0-only
-HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
perf-util-y += header.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-y += perf_regs.o
perf-util-y += machine.o
# SPDX-License-Identifier: GPL-2.0
-HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
perf-util-y += header.o
perf-util-y += tsc.o
perf-util-y += pmu.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-y += perf_regs.o
perf-util-y += topdown.o
perf-util-y += machine.o
#include <math.h>
#include <perf/mmap.h>
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
#define GET_EVENT_KEY(func, field) \
static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
{ \
#endif /* HAVE_SLANG_SUPPORT */
-#endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#endif // defined(HAVE_LIBTRACEEVENT)
static const char *get_filename_for_perf_kvm(void)
{
return filename;
}
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
- struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
+ const struct kvm_reg_events_ops *events_ops;
- for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
+ for (events_ops = kvm_reg_events_ops(); events_ops->name; events_ops++) {
if (!strcmp(events_ops->name, kvm->report_event)) {
kvm->events_ops = events_ops->ops;
return true;
struct perf_sample *sample,
struct event_key *key)
{
- struct child_event_ops *child_ops;
+ const struct child_event_ops *child_ops;
child_ops = kvm->events_ops->child_ops;
{
const char * const *skip_events;
- for (skip_events = kvm_skip_events; *skip_events; skip_events++)
+ for (skip_events = kvm_skip_events(); *skip_events; skip_events++)
if (!strcmp(event, *skip_events))
return true;
return NULL;
}
- vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
+ vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str());
thread__set_priv(thread, vcpu_record);
}
return ret;
}
-int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
-{
- return 0;
-}
-
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
return ret;
}
- for (events_tp = kvm_events_tp; *events_tp; events_tp++)
+ for (events_tp = kvm_events_tp(); *events_tp; events_tp++)
events_tp_size++;
rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
for (j = 0; j < events_tp_size; j++) {
rec_argv[i++] = STRDUP_FAIL_EXIT("-e");
- rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
+ rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp()[j]);
}
rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
if (evlist == NULL)
return NULL;
- for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
+ for (events_tp = kvm_events_tp(); *events_tp; events_tp++) {
tp = strdup(*events_tp);
if (tp == NULL)
perf_stat:
return cmd_stat(argc, argv);
}
-#endif /* HAVE_KVM_STAT_SUPPORT */
-
-int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
- const char **argv __maybe_unused)
-{
- return 0;
-}
+#endif /* HAVE_LIBTRACEEVENT */
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
return __cmd_top(argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
return __cmd_buildid_list(file_name, argc, argv);
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
return kvm_cmd_stat(file_name, argc, argv);
#endif
perf-util-y += topdown.o
perf-util-y += iostat.o
perf-util-y += stream.o
-perf-util-y += kvm-stat.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-y += kvm-stat-arch/
perf-util-y += lock-contention.o
perf-util-y += auxtrace.o
perf-util-y += intel-pt-decoder/
--- /dev/null
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-arm64.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-loongarch.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-powerpc.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-riscv.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-s390.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-x86.o
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <memory.h>
-#include "../../../util/evsel.h"
-#include "../../../util/kvm-stat.h"
+#include "../debug.h"
+#include "../evsel.h"
+#include "../kvm-stat.h"
#include "arm64_exception_types.h"
-#include "debug.h"
define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
-const char *kvm_trap_exit_reason = "esr_ec";
-const char *vcpu_id_str = "id";
-const char *kvm_exit_reason = "ret";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
+static const char *kvm_trap_exit_reason = "esr_ec";
-const char *kvm_events_tp[] = {
+static const char * const __kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
NULL,
struct event_key *key)
{
key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason());
key->exit_reasons = arm64_exit_reasons;
/*
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
- return evsel__name_is(evsel, kvm_entry_trace);
+ return evsel__name_is(evsel, kvm_entry_trace());
}
static bool event_end(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- if (evsel__name_is(evsel, kvm_exit_trace)) {
+ if (evsel__name_is(evsel, kvm_exit_trace())) {
event_get_key(evsel, sample, key);
return true;
}
return false;
}
-static struct kvm_events_ops exit_events = {
+static const struct kvm_events_ops exit_events = {
.is_begin_event = event_begin,
.is_end_event = event_end,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{
.name = "vmexit",
.ops = &exit_events,
{ NULL, NULL },
};
-const char * const kvm_skip_events[] = {
+static const char * const __kvm_skip_events[] = {
NULL,
};
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+int __cpu_isa_init_arm64(struct perf_kvm_stat *kvm)
{
kvm->exit_reasons_isa = "arm64";
return 0;
}
+
+const char * const *__kvm_events_tp_arm64(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_arm64(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_arm64(void)
+{
+ return __kvm_skip_events;
+}
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <memory.h>
-#include "util/kvm-stat.h"
-#include "util/parse-events.h"
-#include "util/debug.h"
-#include "util/evsel.h"
-#include "util/evlist.h"
-#include "util/pmus.h"
+#include "../kvm-stat.h"
+#include "../parse-events.h"
+#include "../debug.h"
+#include "../evsel.h"
+#include "../evlist.h"
+#include "../pmus.h"
#define LOONGARCH_EXCEPTION_INT 0
#define LOONGARCH_EXCEPTION_PIL 1
define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type);
-const char *vcpu_id_str = "vcpu_id";
-const char *kvm_exit_reason = "reason";
-const char *kvm_entry_trace = "kvm:kvm_enter";
-const char *kvm_reenter_trace = "kvm:kvm_reenter";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-const char *kvm_events_tp[] = {
+static const char *kvm_reenter_trace = "kvm:kvm_reenter";
+static const char * const __kvm_events_tp[] = {
"kvm:kvm_enter",
"kvm:kvm_reenter",
"kvm:kvm_exit",
* kvm:kvm_enter means returning to vmm and then to guest
* kvm:kvm_reenter means returning to guest immediately
*/
- return evsel__name_is(evsel, kvm_entry_trace) || evsel__name_is(evsel, kvm_reenter_trace);
+ return evsel__name_is(evsel, kvm_entry_trace()) ||
+ evsel__name_is(evsel, kvm_reenter_trace);
}
static void event_gspr_get_key(struct evsel *evsel,
}
}
-static struct child_event_ops child_events[] = {
+static const struct child_event_ops child_events[] = {
{ .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key },
{ NULL, NULL },
};
-static struct kvm_events_ops exit_events = {
+static const struct kvm_events_ops exit_events = {
.is_begin_event = event_begin,
.is_end_event = event_end,
.child_ops = child_events,
.name = "VM-EXIT"
};
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events, },
{ NULL, NULL },
};
-const char * const kvm_skip_events[] = {
+static const char * const __kvm_skip_events[] = {
NULL,
};
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+int __cpu_isa_init_loongarch(struct perf_kvm_stat *kvm)
{
kvm->exit_reasons_isa = "loongarch64";
kvm->exit_reasons = loongarch_exit_reasons;
return 0;
}
+
+const char * const *__kvm_events_tp_loongarch(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_loongarch(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_loongarch(void)
+{
+ return __kvm_skip_events;
+}
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
-#include "util/kvm-stat.h"
-#include "util/parse-events.h"
-#include "util/debug.h"
-#include "util/evsel.h"
-#include "util/evlist.h"
-#include "util/pmus.h"
+#include "../kvm-stat.h"
+#include "../parse-events.h"
+#include "../debug.h"
+#include "../evsel.h"
+#include "../evlist.h"
+#include "../pmus.h"
#include "book3s_hv_exits.h"
#include "book3s_hcalls.h"
#define NR_TPS 4
-const char *vcpu_id_str = "vcpu_id";
-const char *kvm_entry_trace = "kvm_hv:kvm_guest_enter";
-const char *kvm_exit_trace = "kvm_hv:kvm_guest_exit";
-
define_exit_reasons_table(hv_exit_reasons, kvm_trace_symbol_exit);
define_exit_reasons_table(hcall_reasons, kvm_trace_symbol_hcall);
/* Tracepoints specific to ppc_book3s_hv */
-const char *ppc_book3s_hv_kvm_tp[] = {
+static const char * const ppc_book3s_hv_kvm_tp[] = {
"kvm_hv:kvm_guest_enter",
"kvm_hv:kvm_guest_exit",
"kvm_hv:kvm_hcall_enter",
};
/* 1 extra placeholder for NULL */
-const char *kvm_events_tp[NR_TPS + 1];
-const char *kvm_exit_reason;
+static const char *__kvm_events_tp[NR_TPS + 1];
static void hcall_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
- return (evsel__name_is(evsel, kvm_events_tp[3]));
+ return evsel__name_is(evsel, __kvm_events_tp[3]);
}
static bool hcall_event_begin(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
- if (evsel__name_is(evsel, kvm_events_tp[2])) {
+ if (evsel__name_is(evsel, __kvm_events_tp[2])) {
hcall_event_get_key(evsel, sample, key);
return true;
}
scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", hcall_reason);
}
-static struct kvm_events_ops hcall_events = {
+static const struct kvm_events_ops hcall_events = {
.is_begin_event = hcall_event_begin,
.is_end_event = hcall_event_end,
.decode_key = hcall_event_decode_key,
.name = "HCALL-EVENT",
};
-static struct kvm_events_ops exit_events = {
+static const struct kvm_events_ops exit_events = {
.is_begin_event = exit_event_begin,
.is_end_event = exit_event_end,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events },
{ .name = "hcall", .ops = &hcall_events },
{ NULL, NULL },
};
-const char * const kvm_skip_events[] = {
+static const char * const __kvm_skip_events[] = {
NULL,
};
static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
struct evlist *evlist)
{
- const char **events_ptr;
+ const char * const *events_ptr;
int i, nr_tp = 0, err = -1;
/* Check for book3s_hv tracepoints */
}
for (i = 0; i < nr_tp; i++)
- kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i];
+ __kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i];
- kvm_events_tp[i] = NULL;
- kvm_exit_reason = "trap";
+ __kvm_events_tp[i] = NULL;
kvm->exit_reasons = hv_exit_reasons;
kvm->exit_reasons_isa = "HV";
return ppc__setup_book3s_hv(kvm, evlist);
}
-int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
+int __setup_kvm_events_tp_powerpc(struct perf_kvm_stat *kvm)
{
return ppc__setup_kvm_tp(kvm);
}
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+int __cpu_isa_init_powerpc(struct perf_kvm_stat *kvm)
{
int ret;
*
* Function to parse the arguments and return appropriate values.
*/
-int kvm_add_default_arch_event(int *argc, const char **argv)
+int __kvm_add_default_arch_event_powerpc(int *argc, const char **argv)
{
const char **tmp;
bool event = false;
free(tmp);
return 0;
}
+
+const char * const *__kvm_events_tp_powerpc(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_powerpc(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_powerpc(void)
+{
+ return __kvm_skip_events;
+}
*/
#include <errno.h>
#include <memory.h>
-#include "../../../util/evsel.h"
-#include "../../../util/kvm-stat.h"
+#include "../evsel.h"
+#include "../kvm-stat.h"
#include "riscv_trap_types.h"
#include "debug.h"
define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_trap_class);
-const char *vcpu_id_str = "id";
-const char *kvm_exit_reason = "scause";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-
-const char *kvm_events_tp[] = {
+static const char * const __kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
NULL,
struct perf_sample *sample,
struct event_key *key)
{
+ int xlen = 64; // TODO: 32-bit support.
+
key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason) & ~CAUSE_IRQ_FLAG;
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason()) & ~CAUSE_IRQ_FLAG(xlen);
key->exit_reasons = riscv_exit_reasons;
}
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
- return evsel__name_is(evsel, kvm_entry_trace);
+ return evsel__name_is(evsel, kvm_entry_trace());
}
static bool event_end(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- if (evsel__name_is(evsel, kvm_exit_trace)) {
+ if (evsel__name_is(evsel, kvm_exit_trace())) {
event_get_key(evsel, sample, key);
return true;
}
return false;
}
-static struct kvm_events_ops exit_events = {
+static const struct kvm_events_ops exit_events = {
.is_begin_event = event_begin,
.is_end_event = event_end,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{
.name = "vmexit",
.ops = &exit_events,
{ NULL, NULL },
};
-const char * const kvm_skip_events[] = {
+static const char * const __kvm_skip_events[] = {
NULL,
};
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+int __cpu_isa_init_riscv(struct perf_kvm_stat *kvm)
{
kvm->exit_reasons_isa = "riscv64";
return 0;
}
+
+const char * const *__kvm_events_tp_riscv(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_riscv(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_riscv(void)
+{
+ return __kvm_skip_events;
+}
#include <errno.h>
#include <string.h>
-#include "../../util/kvm-stat.h"
-#include "../../util/evsel.h"
-#include <asm/sie.h>
+#include "../kvm-stat.h"
+#include "../evsel.h"
+#include "../../../arch/s390/include/uapi/asm/sie.h"
define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
-const char *vcpu_id_str = "id";
-const char *kvm_exit_reason = "icptcode";
-const char *kvm_entry_trace = "kvm:kvm_s390_sie_enter";
-const char *kvm_exit_trace = "kvm:kvm_s390_sie_exit";
-
static void event_icpt_insn_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- unsigned long insn;
+ u64 insn;
insn = evsel__intval(evsel, sample, "instruction");
key->key = icpt_insn_decoder(insn);
key->exit_reasons = sie_icpt_prog_codes;
}
-static struct child_event_ops child_events[] = {
+static const struct child_event_ops child_events[] = {
{ .name = "kvm:kvm_s390_intercept_instruction",
.get_key = event_icpt_insn_get_key },
{ .name = "kvm:kvm_s390_handle_sigp",
{ NULL, NULL },
};
-static struct kvm_events_ops exit_events = {
+static const struct kvm_events_ops exit_events = {
.is_begin_event = exit_event_begin,
.is_end_event = exit_event_end,
.child_ops = child_events,
.name = "VM-EXIT"
};
-const char *kvm_events_tp[] = {
+static const char * const __kvm_events_tp[] = {
"kvm:kvm_s390_sie_enter",
"kvm:kvm_s390_sie_exit",
"kvm:kvm_s390_intercept_instruction",
NULL,
};
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events },
{ NULL, NULL },
};
-const char * const kvm_skip_events[] = {
+static const char * const __kvm_skip_events[] = {
"Wait state",
NULL,
};
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+int __cpu_isa_init_s390(struct perf_kvm_stat *kvm, const char *cpuid)
{
if (strstr(cpuid, "IBM")) {
kvm->exit_reasons = sie_exit_reasons;
return 0;
}
+
+const char * const *__kvm_events_tp_s390(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_s390(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_s390(void)
+{
+ return __kvm_skip_events;
+}
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <string.h>
-#include "../../../util/kvm-stat.h"
-#include "../../../util/evsel.h"
-#include "../../../util/env.h"
+#include "../kvm-stat.h"
+#include "../evsel.h"
+#include "../env.h"
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
-static struct kvm_events_ops exit_events = {
+static const struct kvm_events_ops exit_events = {
.is_begin_event = exit_event_begin,
.is_end_event = exit_event_end,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
-const char *vcpu_id_str = "vcpu_id";
-const char *kvm_exit_reason = "exit_reason";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-
/*
* For the mmio events, we treat:
* the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
}
-static struct kvm_events_ops mmio_events = {
+static const struct kvm_events_ops mmio_events = {
.is_begin_event = mmio_event_begin,
.is_end_event = mmio_event_end,
.decode_key = mmio_event_decode_key,
key->info ? "POUT" : "PIN");
}
-static struct kvm_events_ops ioport_events = {
+static const struct kvm_events_ops ioport_events = {
.is_begin_event = ioport_event_begin,
.is_end_event = ioport_event_end,
.decode_key = ioport_event_decode_key,
key->info ? "W" : "R");
}
-static struct kvm_events_ops msr_events = {
+static const struct kvm_events_ops msr_events = {
.is_begin_event = msr_event_begin,
.is_end_event = msr_event_end,
.decode_key = msr_event_decode_key,
.name = "MSR Access"
};
-const char *kvm_events_tp[] = {
+static const char * const __kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
"kvm:kvm_mmio",
NULL,
};
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events },
{ .name = "mmio", .ops = &mmio_events },
{ .name = "ioport", .ops = &ioport_events },
{ NULL, NULL },
};
-const char * const kvm_skip_events[] = {
+static const char * const __kvm_skip_events[] = {
"HLT",
NULL,
};
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+int __cpu_isa_init_x86(struct perf_kvm_stat *kvm, const char *cpuid)
{
if (strstr(cpuid, "Intel")) {
kvm->exit_reasons = vmx_exit_reasons;
* So, to avoid this issue explicitly use "cycles" instead of "cycles:P" event
* by default to sample guest on Intel platforms.
*/
-int kvm_add_default_arch_event(int *argc, const char **argv)
+int __kvm_add_default_arch_event_x86(int *argc, const char **argv)
{
const char **tmp;
bool event = false;
free(tmp);
return ret;
}
+
+const char * const *__kvm_events_tp_x86(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_x86(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_x86(void)
+{
+ return __kvm_skip_events;
+}
#define ARCH_PERF_RISCV_TRAP_TYPES_H
/* Exception cause high bit - is an interrupt if set */
-#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+#define CAUSE_IRQ_FLAG(xlen) (_AC(1, UL) << (xlen - 1))
/* Interrupt causes (minus the high bit) */
#define IRQ_S_SOFT 1
#include "debug.h"
#include "evsel.h"
#include "kvm-stat.h"
-
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#include <dwarf-regs.h>
bool kvm_exit_event(struct evsel *evsel)
{
- return evsel__name_is(evsel, kvm_exit_trace);
+ return evsel__name_is(evsel, kvm_exit_trace());
}
void exit_event_get_key(struct evsel *evsel,
struct event_key *key)
{
key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason());
}
bool kvm_entry_event(struct evsel *evsel)
{
- return evsel__name_is(evsel, kvm_entry_trace);
+ return evsel__name_is(evsel, kvm_entry_trace());
}
bool exit_event_end(struct evsel *evsel,
scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
}
-#endif
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
+{
+ switch (EM_HOST) {
+ case EM_PPC:
+ case EM_PPC64:
+ return __setup_kvm_events_tp_powerpc(kvm);
+ default:
+ return 0;
+ }
+}
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __cpu_isa_init_arm64(kvm);
+ case EM_LOONGARCH:
+ return __cpu_isa_init_loongarch(kvm);
+ case EM_PPC:
+ case EM_PPC64:
+ return __cpu_isa_init_powerpc(kvm);
+ case EM_RISCV:
+ return __cpu_isa_init_riscv(kvm);
+ case EM_S390:
+ return __cpu_isa_init_s390(kvm, cpuid);
+ case EM_X86_64:
+ case EM_386:
+ return __cpu_isa_init_x86(kvm, cpuid);
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return -1;
+ }
+}
+
+const char *vcpu_id_str(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ case EM_RISCV:
+ case EM_S390:
+ return "id";
+ case EM_LOONGARCH:
+ case EM_PPC:
+ case EM_PPC64:
+ case EM_X86_64:
+ case EM_386:
+ return "vcpu_id";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+const char *kvm_exit_reason(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return "ret";
+ case EM_LOONGARCH:
+ return "reason";
+ case EM_PPC:
+ case EM_PPC64:
+ return "trap";
+ case EM_RISCV:
+ return "scause";
+ case EM_S390:
+ return "icptcode";
+ case EM_X86_64:
+ case EM_386:
+ return "exit_reason";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+const char *kvm_entry_trace(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ case EM_RISCV:
+ case EM_X86_64:
+ case EM_386:
+ return "kvm:kvm_entry";
+ case EM_LOONGARCH:
+ return "kvm:kvm_enter";
+ case EM_PPC:
+ case EM_PPC64:
+ return "kvm_hv:kvm_guest_enter";
+ case EM_S390:
+ return "kvm:kvm_s390_sie_enter";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+const char *kvm_exit_trace(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ case EM_LOONGARCH:
+ case EM_RISCV:
+ case EM_X86_64:
+ case EM_386:
+ return "kvm:kvm_exit";
+ case EM_PPC:
+ case EM_PPC64:
+ return "kvm_hv:kvm_guest_exit";
+ case EM_S390:
+ return "kvm:kvm_s390_sie_exit";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+const char * const *kvm_events_tp(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __kvm_events_tp_arm64();
+ case EM_LOONGARCH:
+ return __kvm_events_tp_loongarch();
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_events_tp_powerpc();
+ case EM_RISCV:
+ return __kvm_events_tp_riscv();
+ case EM_S390:
+ return __kvm_events_tp_s390();
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_events_tp_x86();
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+const struct kvm_reg_events_ops *kvm_reg_events_ops(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __kvm_reg_events_ops_arm64();
+ case EM_LOONGARCH:
+ return __kvm_reg_events_ops_loongarch();
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_reg_events_ops_powerpc();
+ case EM_RISCV:
+ return __kvm_reg_events_ops_riscv();
+ case EM_S390:
+ return __kvm_reg_events_ops_s390();
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_reg_events_ops_x86();
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+const char * const *kvm_skip_events(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __kvm_skip_events_arm64();
+ case EM_LOONGARCH:
+ return __kvm_skip_events_loongarch();
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_skip_events_powerpc();
+ case EM_RISCV:
+ return __kvm_skip_events_riscv();
+ case EM_S390:
+ return __kvm_skip_events_s390();
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_skip_events_x86();
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+int kvm_add_default_arch_event(int *argc, const char **argv)
+{
+ switch (EM_HOST) {
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_add_default_arch_event_powerpc(argc, argv);
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_add_default_arch_event_x86(argc, argv);
+ default:
+ return 0;
+ }
+}
#ifndef __PERF_KVM_STAT_H
#define __PERF_KVM_STAT_H
-#ifdef HAVE_KVM_STAT_SUPPORT
-
#include "tool.h"
#include "sort.h"
#include "stat.h"
struct event_key *key);
bool (*is_end_event)(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key);
- struct child_event_ops *child_ops;
+ const struct child_event_ops *child_ops;
void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
char *decode);
const char *name;
struct exit_reasons_table *exit_reasons;
const char *exit_reasons_isa;
- struct kvm_events_ops *events_ops;
+ const struct kvm_events_ops *events_ops;
u64 total_time;
u64 total_count;
struct kvm_reg_events_ops {
const char *name;
- struct kvm_events_ops *ops;
+ const struct kvm_events_ops *ops;
};
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#ifdef HAVE_LIBTRACEEVENT
void exit_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
void exit_event_decode_key(struct perf_kvm_stat *kvm,
struct event_key *key,
char *decode);
-#endif
bool kvm_exit_event(struct evsel *evsel);
bool kvm_entry_event(struct evsel *evsel);
-int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
#define define_exit_reasons_table(name, symbols) \
static struct exit_reasons_table name[] = { \
/*
* arch specific callbacks and data structures
*/
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
+int __setup_kvm_events_tp_powerpc(struct perf_kvm_stat *kvm);
+
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
+int __cpu_isa_init_arm64(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_loongarch(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_powerpc(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_riscv(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_s390(struct perf_kvm_stat *kvm, const char *cpuid);
+int __cpu_isa_init_x86(struct perf_kvm_stat *kvm, const char *cpuid);
+
+const char *vcpu_id_str(void);
+const char *kvm_exit_reason(void);
+const char *kvm_entry_trace(void);
+const char *kvm_exit_trace(void);
+
+const char * const *kvm_events_tp(void);
+const char * const *__kvm_events_tp_arm64(void);
+const char * const *__kvm_events_tp_loongarch(void);
+const char * const *__kvm_events_tp_powerpc(void);
+const char * const *__kvm_events_tp_riscv(void);
+const char * const *__kvm_events_tp_s390(void);
+const char * const *__kvm_events_tp_x86(void);
+
+const struct kvm_reg_events_ops *kvm_reg_events_ops(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_arm64(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_loongarch(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_powerpc(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_riscv(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_s390(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_x86(void);
+
+const char * const *kvm_skip_events(void);
+const char * const *__kvm_skip_events_arm64(void);
+const char * const *__kvm_skip_events_loongarch(void);
+const char * const *__kvm_skip_events_powerpc(void);
+const char * const *__kvm_skip_events_riscv(void);
+const char * const *__kvm_skip_events_s390(void);
+const char * const *__kvm_skip_events_x86(void);
+
+int kvm_add_default_arch_event(int *argc, const char **argv);
+int __kvm_add_default_arch_event_powerpc(int *argc, const char **argv);
+int __kvm_add_default_arch_event_x86(int *argc, const char **argv);
+
+#else /* !HAVE_LIBTRACEEVENT */
+
+static inline int kvm_add_default_arch_event(int *argc __maybe_unused,
+ const char **argv __maybe_unused)
+{
+ return 0;
+}
-extern const char *kvm_events_tp[];
-extern struct kvm_reg_events_ops kvm_reg_events_ops[];
-extern const char * const kvm_skip_events[];
-extern const char *vcpu_id_str;
-extern const char *kvm_exit_reason;
-extern const char *kvm_entry_trace;
-extern const char *kvm_exit_trace;
+#endif /* HAVE_LIBTRACEEVENT */
static inline struct kvm_info *kvm_info__get(struct kvm_info *ki)
{
return ki;
}
-#else /* HAVE_KVM_STAT_SUPPORT */
-// We use this unconditionally in hists__findnew_entry() and hist_entry__delete()
-#define kvm_info__zput(ki) do { } while (0)
-#endif /* HAVE_KVM_STAT_SUPPORT */
-
#define STRDUP_FAIL_EXIT(s) \
({ char *_p; \
_p = strdup(s); \
_p; \
})
-extern int kvm_add_default_arch_event(int *argc, const char **argv);
#endif /* __PERF_KVM_STAT_H */