KVM: x86: Rename get_msr_feature() APIs to get_feature_msr()
author     Sean Christopherson <seanjc@google.com>
           Fri, 2 Aug 2024 18:19:30 +0000 (11:19 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 22 Aug 2024 19:06:56 +0000 (12:06 -0700)
Rename all APIs related to feature MSRs from get_msr_feature() to
get_feature_msr().  The APIs get "feature MSRs", not "MSR features".
And unlike kvm_{g,s}et_msr_common(), the "feature" adjective doesn't
describe the helper itself.

No functional change intended.

Link: https://lore.kernel.org/r/20240802181935.292540-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
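
For illustration only (not part of this commit): a minimal sketch of the shape a vendor implementation behind the renamed .get_feature_msr hook takes.  The function name is made up and the MSR handling is loosely modeled on the SVM handler touched below; treat the details as assumptions rather than verbatim kernel code.

static int example_get_feature_msr(u32 msr, u64 *data)
{
	/* Unconditionally clear the output for simplicity. */
	*data = 0;

	switch (msr) {
	case MSR_AMD64_DE_CFG:
		/* Advertise only the DE_CFG bits the host supports. */
		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
			*data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
		break;
	default:
		/* Unknown feature MSRs are reported as unsupported. */
		return KVM_MSR_RET_UNSUPPORTED;
	}

	return 0;
}

The hook is then wired up as .get_feature_msr = example_get_feature_msr in the vendor's struct kvm_x86_ops, mirroring the svm_x86_ops and vt_x86_ops updates in the diff.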
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 68ad4f923664e2de83d0ec9def9cfdeec6fb4fd0..9afbf8bcb5210f597f66381a7bcb7440b3510c6b 100644
@@ -125,7 +125,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
 KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
 KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
 KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
-KVM_X86_OP(get_msr_feature)
+KVM_X86_OP(get_feature_msr)
 KVM_X86_OP(check_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fe61cff1f49d86420e71045312df31b72f7baca5..95396e4cb3dad52c72d54715bb857e784be7a0f9 100644
@@ -1806,7 +1806,7 @@ struct kvm_x86_ops {
        int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
        void (*guest_memory_reclaimed)(struct kvm *kvm);
 
-       int (*get_msr_feature)(u32 msr, u64 *data);
+       int (*get_feature_msr)(u32 msr, u64 *data);
 
        int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
                                         void *insn, int insn_len);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 314dd4aacfe97131a2180329086f61e34996ca0c..d8cfe8f2332766f43833955ecba4c501bc2732ed 100644
@@ -2825,7 +2825,7 @@ static int efer_trap(struct kvm_vcpu *vcpu)
        return kvm_complete_insn_gp(vcpu, ret);
 }
 
-static int svm_get_msr_feature(u32 msr, u64 *data)
+static int svm_get_feature_msr(u32 msr, u64 *data)
 {
        *data = 0;
 
@@ -3181,7 +3181,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_AMD64_DE_CFG: {
                u64 supported_de_cfg;
 
-               if (svm_get_msr_feature(ecx, &supported_de_cfg))
+               if (svm_get_feature_msr(ecx, &supported_de_cfg))
                        return 1;
 
                if (data & ~supported_de_cfg)
@@ -5002,7 +5002,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .vcpu_unblocking = avic_vcpu_unblocking,
 
        .update_exception_bitmap = svm_update_exception_bitmap,
-       .get_msr_feature = svm_get_msr_feature,
+       .get_feature_msr = svm_get_feature_msr,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 0bf35ebe8a1bdbb3889537ad657c57ac3e173a76..4f6023a0deb34ef8c7458155d00ab3ff202a41f4 100644
@@ -41,7 +41,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .vcpu_put = vmx_vcpu_put,
 
        .update_exception_bitmap = vmx_update_exception_bitmap,
-       .get_msr_feature = vmx_get_msr_feature,
+       .get_feature_msr = vmx_get_feature_msr,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3d24eb4aeca266703ca07bdb0ce7c0edc33b6a7c..cf85f8d50ccb3aaf70e022ee34fb6f19478d3692 100644
@@ -1998,7 +1998,7 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
        return !(msr->data & ~valid_bits);
 }
 
-int vmx_get_msr_feature(u32 msr, u64 *data)
+int vmx_get_feature_msr(u32 msr, u64 *data)
 {
        switch (msr) {
        case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 9a0304eb847b27cb07405ab5ca2ff96c48819c35..eeafd121fb0891f38db3b8bab129cd11c474eaa3 100644
@@ -56,7 +56,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
 void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
-int vmx_get_msr_feature(u32 msr, u64 *data);
+int vmx_get_feature_msr(u32 msr, u64 *data);
 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 29d1205f62d3909d0a89c4ffdcdf82c584e8eb4e..3efe086bea4cc5be3f11a7b0c1cc543cd22d2b24 100644
@@ -1659,7 +1659,7 @@ static u64 kvm_get_arch_capabilities(void)
        return data;
 }
 
-static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+static int kvm_get_feature_msr(struct kvm_msr_entry *msr)
 {
        switch (msr->index) {
        case MSR_IA32_ARCH_CAPABILITIES:
@@ -1672,12 +1672,12 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
                rdmsrl_safe(msr->index, &msr->data);
                break;
        default:
-               return kvm_x86_call(get_msr_feature)(msr->index, &msr->data);
+               return kvm_x86_call(get_feature_msr)(msr->index, &msr->data);
        }
        return 0;
 }
 
-static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
        struct kvm_msr_entry msr;
        int r;
@@ -1685,7 +1685,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        /* Unconditionally clear the output for simplicity */
        msr.data = 0;
        msr.index = index;
-       r = kvm_get_msr_feature(&msr);
+       r = kvm_get_feature_msr(&msr);
 
        if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false))
                r = 0;
@@ -4943,7 +4943,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
                break;
        }
        case KVM_GET_MSRS:
-               r = msr_io(NULL, argp, do_get_msr_feature, 1);
+               r = msr_io(NULL, argp, do_get_feature_msr, 1);
                break;
 #ifdef CONFIG_KVM_HYPERV
        case KVM_GET_SUPPORTED_HV_CPUID:
@@ -7382,7 +7382,7 @@ static void kvm_probe_feature_msr(u32 msr_index)
                .index = msr_index,
        };
 
-       if (kvm_get_msr_feature(&msr))
+       if (kvm_get_feature_msr(&msr))
                return;
 
        msr_based_features[num_msr_based_features++] = msr_index;
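
As context for the KVM_GET_MSRS hunk above: feature MSRs are read from userspace via the system-scoped KVM_GET_MSRS ioctl on /dev/kvm, which is what ends up in do_get_feature_msr().  A rough, self-contained userspace sketch; the choice of MSR_IA32_ARCH_CAPABILITIES (0x10a) and the minimal error handling are illustrative assumptions, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_msrs *msrs;
	int kvm_fd, nread;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Room for the header plus a single MSR entry. */
	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
	if (!msrs)
		return 1;

	msrs->nmsrs = 1;
	msrs->entries[0].index = 0x10a;	/* MSR_IA32_ARCH_CAPABILITIES */

	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
	nread = ioctl(kvm_fd, KVM_GET_MSRS, msrs);
	if (nread == 1)
		printf("ARCH_CAPABILITIES = 0x%llx\n",
		       (unsigned long long)msrs->entries[0].data);

	free(msrs);
	return 0;
}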