__KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
__KVM_HOST_SMCCC_FUNC___tracing_update_clock,
__KVM_HOST_SMCCC_FUNC___tracing_reset,
+ __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define REMOTE_EVENT_INCLUDE_FILE arch/arm64/include/asm/kvm_hypevents.h
+
+#define REMOTE_EVENT_SECTION "_hyp_events"
+
+#define HE_STRUCT(__args) __args
+#define HE_PRINTK(__args...) __args
+#define he_field re_field
+
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ REMOTE_EVENT(__name, 0, RE_STRUCT(__struct), RE_PRINTK(__printk))
+
+#define HYP_EVENT_MULTI_READ
+#include <trace/define_remote_events.h>
+#undef HYP_EVENT_MULTI_READ
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(__ARM64_KVM_HYPEVENTS_H_) || defined(HYP_EVENT_MULTI_READ)
+#define __ARM64_KVM_HYPEVENTS_H_
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <nvhe/trace.h>
+#endif
+
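+/*
+ * No events are defined here yet. A hypothetical definition, for illustration
+ * only (hyp_example and its val field are made-up names), could look like:
+ *
+ * HYP_EVENT(hyp_example,
+ *	HE_PROTO(u64 val),
+ *	HE_STRUCT(
+ *		he_field(u64, val)
+ *	),
+ *	HE_ASSIGN(
+ *		__entry->val = val;
+ *	),
+ *	HE_PRINTK("val=%llu", __entry->val)
+ * );
+ */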
+#endif
struct trace_buffer_desc trace_buffer_desc;
};
+
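+/*
+ * Hypervisor-side view of an event: the id written in each trace entry header
+ * and the enable flag checked by the trace_<event>() helpers.
+ */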
+struct hyp_event_id {
+ unsigned short id;
+ atomic_t enabled;
+};
+
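+/* Kernel-side remote_event table, registered with trace_remote */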
+extern struct remote_event __hyp_events_start[];
+extern struct remote_event __hyp_events_end[];
+
+/* hyp_event_id section, used by the hypervisor */
+extern struct hyp_event_id __hyp_event_ids_start[];
+extern struct hyp_event_id __hyp_event_ids_end[];
+
#endif
KVM_NVHE_ALIAS(__hyp_data_end);
KVM_NVHE_ALIAS(__hyp_rodata_start);
KVM_NVHE_ALIAS(__hyp_rodata_end);
+#ifdef CONFIG_NVHE_EL2_TRACING
+KVM_NVHE_ALIAS(__hyp_event_ids_start);
+KVM_NVHE_ALIAS(__hyp_event_ids_end);
+#endif
/* pKVM static key */
KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
*(__kvm_ex_table) \
__stop___kvm_ex_table = .;
+#ifdef CONFIG_NVHE_EL2_TRACING
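+/* Page-aligned hyp_event_id table, emitted into the hypervisor rodata */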
+#define HYPERVISOR_EVENT_IDS \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_event_ids_start = .; \
+ *(HYP_SECTION_NAME(.event_ids)) \
+ __hyp_event_ids_end = .;
+#else
+#define HYPERVISOR_EVENT_IDS
+#endif
+
#define HYPERVISOR_RODATA_SECTIONS \
HYP_SECTION_NAME(.rodata) : { \
. = ALIGN(PAGE_SIZE); \
__hyp_rodata_start = .; \
*(HYP_SECTION_NAME(.data..ro_after_init)) \
*(HYP_SECTION_NAME(.rodata)) \
+ HYPERVISOR_EVENT_IDS \
. = ALIGN(PAGE_SIZE); \
__hyp_rodata_end = .; \
}
HYPERVISOR_DATA_SECTION
+#ifdef CONFIG_NVHE_EL2_TRACING
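+ /* Kernel-side remote_event entries describing the hypervisor events */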
+ .data.hyp_events : {
+ __hyp_events_start = .;
+ *(SORT(_hyp_events.*))
+ __hyp_events_end = .;
+ }
+#endif
/*
* Data written with the MMU off but read with the MMU on requires
* cache lines to be invalidated, discarding up to a Cache Writeback
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef HYP_EVENT
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ struct hyp_event_id hyp_event_id_##__name \
+ __section(".hyp.event_ids."#__name) = { \
+ .enabled = ATOMIC_INIT(0), \
+ }
+
+#define HYP_EVENT_MULTI_READ
+#include <asm/kvm_hypevents.h>
+#undef HYP_EVENT_MULTI_READ
+
+#undef HYP_EVENT
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ARM64_KVM_HYP_NVHE_TRACE_H
#define __ARM64_KVM_HYP_NVHE_TRACE_H
+
+#include <linux/trace_remote_event.h>
+
#include <asm/kvm_hyptrace.h>
+#define HE_PROTO(__args...) __args
+#define HE_ASSIGN(__args...) __args
+#define HE_STRUCT RE_STRUCT
+#define he_field re_field
+
#ifdef CONFIG_NVHE_EL2_TRACING
+
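+/*
+ * Expand each event into its remote_event format declaration and a
+ * trace_<event>() helper that reserves, fills and commits a ring-buffer entry
+ * whenever the host has enabled the event.
+ */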
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ REMOTE_EVENT_FORMAT(__name, __struct); \
+ extern struct hyp_event_id hyp_event_id_##__name; \
+ static __always_inline void trace_##__name(__proto) \
+ { \
+ struct remote_event_format_##__name *__entry; \
+ size_t length = sizeof(*__entry); \
+ \
+ if (!atomic_read(&hyp_event_id_##__name.enabled)) \
+ return; \
+ __entry = tracing_reserve_entry(length); \
+ if (!__entry) \
+ return; \
+ __entry->hdr.id = hyp_event_id_##__name.id; \
+ __assign \
+ tracing_commit_entry(); \
+ }
+
void *tracing_reserve_entry(unsigned long length);
void tracing_commit_entry(void);
int __tracing_swap_reader(unsigned int cpu);
void __tracing_update_clock(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
int __tracing_reset(unsigned int cpu);
+int __tracing_enable_event(unsigned short id, bool enable);
#else
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
static inline void tracing_commit_entry(void) { }
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ static inline void trace_##__name(__proto) {}
static inline int __tracing_load(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
static inline void __tracing_unload(void) { }
static inline int __tracing_swap_reader(unsigned int cpu) { return -ENODEV; }
static inline void __tracing_update_clock(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
static inline int __tracing_reset(unsigned int cpu) { return -ENODEV; }
+static inline int __tracing_enable_event(unsigned short id, bool enable) { return -ENODEV; }
#endif
#endif
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-y += ../../../kernel/smccc-call.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
-hyp-obj-$(CONFIG_NVHE_EL2_TRACING) += clock.o trace.o
+hyp-obj-$(CONFIG_NVHE_EL2_TRACING) += clock.o trace.o events.o
hyp-obj-y += $(lib-objs)
# Path to simple_ring_buffer.c
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <nvhe/mm.h>
+#include <nvhe/trace.h>
+
+#include <nvhe/define_events.h>
+
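+/*
+ * The hyp_event_id table lives in the hypervisor rodata, so the enable flag
+ * is written through a temporary writable fixmap mapping.
+ */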
+int __tracing_enable_event(unsigned short id, bool enable)
+{
+ struct hyp_event_id *event_id = &__hyp_event_ids_start[id];
+ atomic_t *enabled;
+
+ if (event_id >= __hyp_event_ids_end)
+ return -EINVAL;
+
+ enabled = hyp_fixmap_map(__hyp_pa(&event_id->enabled));
+ atomic_set(enabled, enable);
+ hyp_fixmap_unmap();
+
+ return 0;
+}
cpu_reg(host_ctxt, 1) = __tracing_reset(cpu);
}
+static void handle___tracing_enable_event(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned short, id, host_ctxt, 1);
+ DECLARE_REG(bool, enable, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = __tracing_enable_event(id, enable);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
HANDLE_FUNC(__tracing_swap_reader),
HANDLE_FUNC(__tracing_update_clock),
HANDLE_FUNC(__tracing_reset),
+ HANDLE_FUNC(__tracing_enable_event),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
HYP_SECTION(.text)
HYP_SECTION(.data..ro_after_init)
HYP_SECTION(.rodata)
+#ifdef CONFIG_NVHE_EL2_TRACING
+ . = ALIGN(PAGE_SIZE);
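+ /* Sorted by name to match the order of the kernel-side _hyp_events table */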
+ BEGIN_HYP_SECTION(.event_ids)
+ *(SORT(.hyp.event_ids.*))
+ END_HYP_SECTION
+#endif
/*
* .hyp..data..percpu needs to be page aligned to maintain the same
static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
{
+ struct hyp_event_id *event_id = lm_alias(&__hyp_event_ids_start[id]);
+ struct page *page;
+ atomic_t *enabled;
+ void *map;
+
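+ /* In protected mode, only the hypervisor can write to its own sections */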
+ if (is_protected_kvm_enabled())
+ return kvm_call_hyp_nvhe(__tracing_enable_event, id, enable);
+
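+ /* The mapping may be read-only: write the flag through a writable alias */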
+ enabled = &event_id->enabled;
+ page = virt_to_page(enabled);
+ map = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ enabled = map + offset_in_page(enabled);
+ atomic_set(enabled, enable);
+
+ vunmap(map);
+
return 0;
}
.enable_event = hyp_trace_enable_event,
};
+#include <asm/kvm_define_hypevents.h>
+
+static void __init hyp_trace_init_events(void)
+{
+ struct hyp_event_id *hyp_event_id = __hyp_event_ids_start;
+ struct remote_event *event = __hyp_events_start;
+ int id = 0;
+
+ /* Both tables are sorted the same way, so entries pair up and share ids */
+ for (; event < __hyp_events_end; event++, hyp_event_id++, id++)
+ event->id = hyp_event_id->id = id;
+}
+
int __init kvm_hyp_trace_init(void)
{
int cpu;
}
#endif
- return trace_remote_register("hypervisor", &trace_remote_callbacks, &trace_buffer, NULL, 0);
+ hyp_trace_init_events();
+
+ return trace_remote_register("hypervisor", &trace_remote_callbacks, &trace_buffer,
+ __hyp_events_start, __hyp_events_end - __hyp_events_start);
}