}
#ifdef CONFIG_KVM_AMD_SEV
-static __always_inline bool sev_guest(struct kvm *kvm)
+static __always_inline bool ____sev_guest(struct kvm *kvm)
{
return to_kvm_sev_info(kvm)->active;
}
-static __always_inline bool sev_es_guest(struct kvm *kvm)
+static __always_inline bool ____sev_es_guest(struct kvm *kvm)
{
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
return sev->es_active && !WARN_ON_ONCE(!sev->active);
}
-static __always_inline bool sev_snp_guest(struct kvm *kvm)
+static __always_inline bool ____sev_snp_guest(struct kvm *kvm)
{
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
- !WARN_ON_ONCE(!sev_es_guest(kvm));
+ !WARN_ON_ONCE(!____sev_es_guest(kvm));
+}
+
+static __always_inline bool sev_guest(struct kvm *kvm)
+{
+ return ____sev_guest(kvm);
+}
+
+static __always_inline bool sev_es_guest(struct kvm *kvm)
+{
+ return ____sev_es_guest(kvm);
+}
+
+static __always_inline bool sev_snp_guest(struct kvm *kvm)
+{
+ return ____sev_snp_guest(kvm);
}
static __always_inline bool is_sev_guest(struct kvm_vcpu *vcpu)
{
- return sev_guest(vcpu->kvm);
+ return ____sev_guest(vcpu->kvm);
}
static __always_inline bool is_sev_es_guest(struct kvm_vcpu *vcpu)
{
- return sev_es_guest(vcpu->kvm);
+ return ____sev_es_guest(vcpu->kvm);
}
static __always_inline bool is_sev_snp_guest(struct kvm_vcpu *vcpu)
{
- return sev_snp_guest(vcpu->kvm);
+ return ____sev_snp_guest(vcpu->kvm);
}
#else
#define sev_guest(kvm) false