--- /dev/null
+From foo@baz Mon Jan 24 03:31:36 PM CET 2022
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 24 Jan 2022 14:05:33 +0100
+Subject: KVM: selftests: Rename 'get_cpuid_test' to 'cpuid_test'
+To: stable@vger.kernel.org
+Cc: Paolo Bonzini <pbonzini@redhat.com>, Igor Mammedov <imammedo@redhat.com>, gregkh@linuxfoundation.org
+Message-ID: <20220124130534.2645955-4-vkuznets@redhat.com>
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit 9e6d484f9991176269607bb3c54a494e32eab27a upstream.
+
+In preparation for reusing the existing 'get_cpuid_test' for testing
+"KVM_SET_CPUID{,2} after KVM_RUN", rename it to 'cpuid_test' to avoid
+confusion.
+
+No functional change intended.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20220117150542.2176196-4-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/kvm/.gitignore              |    2
+ tools/testing/selftests/kvm/Makefile                |    4
+ tools/testing/selftests/kvm/x86_64/cpuid_test.c     |  179 ++++++++++++++++++++
+ tools/testing/selftests/kvm/x86_64/get_cpuid_test.c |  179 --------------------
+ 4 files changed, 182 insertions(+), 182 deletions(-)
+
+--- a/tools/testing/selftests/kvm/.gitignore
++++ b/tools/testing/selftests/kvm/.gitignore
+@@ -7,11 +7,11 @@
+ /s390x/memop
+ /s390x/resets
+ /s390x/sync_regs_test
++/x86_64/cpuid_test
+ /x86_64/cr4_cpuid_sync_test
+ /x86_64/debug_regs
+ /x86_64/evmcs_test
+ /x86_64/emulator_error_test
+-/x86_64/get_cpuid_test
+ /x86_64/get_msr_index_features
+ /x86_64/kvm_clock_test
+ /x86_64/kvm_pv_test
+--- a/tools/testing/selftests/kvm/Makefile
++++ b/tools/testing/selftests/kvm/Makefile
+@@ -38,11 +38,11 @@ LIBKVM_x86_64 = lib/x86_64/apic.c lib/x8
+ LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c
+ LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
+
+-TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
++TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
++TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+ TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
+ TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
+ TEST_GEN_PROGS_x86_64 += x86_64/emulator_error_test
+-TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test
+ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
+ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
+ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
+--- /dev/null
++++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+@@ -0,0 +1,179 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2021, Red Hat Inc.
++ *
++ * Generic tests for KVM CPUID set/get ioctls
++ */
++#include <asm/kvm_para.h>
++#include <linux/kvm_para.h>
++#include <stdint.h>
++
++#include "test_util.h"
++#include "kvm_util.h"
++#include "processor.h"
++
++#define VCPU_ID 0
++
++/* CPUIDs known to differ */
++struct {
++ u32 function;
++ u32 index;
++} mangled_cpuids[] = {
++ /*
++ * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
++ * which are not controlled for by this test.
++ */
++ {.function = 0xd, .index = 0},
++ {.function = 0xd, .index = 1},
++};
++
++static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
++{
++ int i;
++ u32 eax, ebx, ecx, edx;
++
++ for (i = 0; i < guest_cpuid->nent; i++) {
++ eax = guest_cpuid->entries[i].function;
++ ecx = guest_cpuid->entries[i].index;
++
++ cpuid(&eax, &ebx, &ecx, &edx);
++
++ GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
++ ebx == guest_cpuid->entries[i].ebx &&
++ ecx == guest_cpuid->entries[i].ecx &&
++ edx == guest_cpuid->entries[i].edx);
++ }
++
++}
++
++static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid)
++{
++ u32 eax = 0x40000000, ebx, ecx = 0, edx;
++
++ cpuid(&eax, &ebx, &ecx, &edx);
++
++ GUEST_ASSERT(eax == 0x40000001);
++}
++
++static void guest_main(struct kvm_cpuid2 *guest_cpuid)
++{
++ GUEST_SYNC(1);
++
++ test_guest_cpuids(guest_cpuid);
++
++ GUEST_SYNC(2);
++
++ test_cpuid_40000000(guest_cpuid);
++
++ GUEST_DONE();
++}
++
++static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie)
++{
++ int i;
++
++ for (i = 0; i < sizeof(mangled_cpuids); i++) {
++ if (mangled_cpuids[i].function == entrie->function &&
++ mangled_cpuids[i].index == entrie->index)
++ return true;
++ }
++
++ return false;
++}
++
++static void check_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *entrie)
++{
++ int i;
++
++ for (i = 0; i < cpuid->nent; i++) {
++ if (cpuid->entries[i].function == entrie->function &&
++ cpuid->entries[i].index == entrie->index) {
++ if (is_cpuid_mangled(entrie))
++ return;
++
++ TEST_ASSERT(cpuid->entries[i].eax == entrie->eax &&
++ cpuid->entries[i].ebx == entrie->ebx &&
++ cpuid->entries[i].ecx == entrie->ecx &&
++ cpuid->entries[i].edx == entrie->edx,
++ "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
++ entrie->function, entrie->index,
++ cpuid->entries[i].eax, cpuid->entries[i].ebx,
++ cpuid->entries[i].ecx, cpuid->entries[i].edx,
++ entrie->eax, entrie->ebx, entrie->ecx, entrie->edx);
++ return;
++ }
++ }
++
++ TEST_ASSERT(false, "CPUID 0x%x.%x not found", entrie->function, entrie->index);
++}
++
++static void compare_cpuids(struct kvm_cpuid2 *cpuid1, struct kvm_cpuid2 *cpuid2)
++{
++ int i;
++
++ for (i = 0; i < cpuid1->nent; i++)
++ check_cpuid(cpuid2, &cpuid1->entries[i]);
++
++ for (i = 0; i < cpuid2->nent; i++)
++ check_cpuid(cpuid1, &cpuid2->entries[i]);
++}
++
++static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
++{
++ struct ucall uc;
++
++ _vcpu_run(vm, vcpuid);
++
++ switch (get_ucall(vm, vcpuid, &uc)) {
++ case UCALL_SYNC:
++ TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
++ uc.args[1] == stage + 1,
++ "Stage %d: Unexpected register values vmexit, got %lx",
++ stage + 1, (ulong)uc.args[1]);
++ return;
++ case UCALL_DONE:
++ return;
++ case UCALL_ABORT:
++ TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", (const char *)uc.args[0],
++ __FILE__, uc.args[1], uc.args[2], uc.args[3]);
++ default:
++ TEST_ASSERT(false, "Unexpected exit: %s",
++ exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
++ }
++}
++
++struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
++{
++ int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
++ vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
++ struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
++
++ memcpy(guest_cpuids, cpuid, size);
++
++ *p_gva = gva;
++ return guest_cpuids;
++}
++
++int main(void)
++{
++ struct kvm_cpuid2 *supp_cpuid, *cpuid2;
++ vm_vaddr_t cpuid_gva;
++ struct kvm_vm *vm;
++ int stage;
++
++ vm = vm_create_default(VCPU_ID, 0, guest_main);
++
++ supp_cpuid = kvm_get_supported_cpuid();
++ cpuid2 = vcpu_get_cpuid(vm, VCPU_ID);
++
++ compare_cpuids(supp_cpuid, cpuid2);
++
++ vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
++
++ vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva);
++
++ for (stage = 0; stage < 3; stage++)
++ run_vcpu(vm, VCPU_ID, stage);
++
++ kvm_vm_free(vm);
++}
+--- a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
++++ /dev/null
+@@ -1,179 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (C) 2021, Red Hat Inc.
+- *
+- * Generic tests for KVM CPUID set/get ioctls
+- */
+-#include <asm/kvm_para.h>
+-#include <linux/kvm_para.h>
+-#include <stdint.h>
+-
+-#include "test_util.h"
+-#include "kvm_util.h"
+-#include "processor.h"
+-
+-#define VCPU_ID 0
+-
+-/* CPUIDs known to differ */
+-struct {
+- u32 function;
+- u32 index;
+-} mangled_cpuids[] = {
+- /*
+- * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
+- * which are not controlled for by this test.
+- */
+- {.function = 0xd, .index = 0},
+- {.function = 0xd, .index = 1},
+-};
+-
+-static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
+-{
+- int i;
+- u32 eax, ebx, ecx, edx;
+-
+- for (i = 0; i < guest_cpuid->nent; i++) {
+- eax = guest_cpuid->entries[i].function;
+- ecx = guest_cpuid->entries[i].index;
+-
+- cpuid(&eax, &ebx, &ecx, &edx);
+-
+- GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
+- ebx == guest_cpuid->entries[i].ebx &&
+- ecx == guest_cpuid->entries[i].ecx &&
+- edx == guest_cpuid->entries[i].edx);
+- }
+-
+-}
+-
+-static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid)
+-{
+- u32 eax = 0x40000000, ebx, ecx = 0, edx;
+-
+- cpuid(&eax, &ebx, &ecx, &edx);
+-
+- GUEST_ASSERT(eax == 0x40000001);
+-}
+-
+-static void guest_main(struct kvm_cpuid2 *guest_cpuid)
+-{
+- GUEST_SYNC(1);
+-
+- test_guest_cpuids(guest_cpuid);
+-
+- GUEST_SYNC(2);
+-
+- test_cpuid_40000000(guest_cpuid);
+-
+- GUEST_DONE();
+-}
+-
+-static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie)
+-{
+- int i;
+-
+- for (i = 0; i < sizeof(mangled_cpuids); i++) {
+- if (mangled_cpuids[i].function == entrie->function &&
+- mangled_cpuids[i].index == entrie->index)
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static void check_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *entrie)
+-{
+- int i;
+-
+- for (i = 0; i < cpuid->nent; i++) {
+- if (cpuid->entries[i].function == entrie->function &&
+- cpuid->entries[i].index == entrie->index) {
+- if (is_cpuid_mangled(entrie))
+- return;
+-
+- TEST_ASSERT(cpuid->entries[i].eax == entrie->eax &&
+- cpuid->entries[i].ebx == entrie->ebx &&
+- cpuid->entries[i].ecx == entrie->ecx &&
+- cpuid->entries[i].edx == entrie->edx,
+- "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
+- entrie->function, entrie->index,
+- cpuid->entries[i].eax, cpuid->entries[i].ebx,
+- cpuid->entries[i].ecx, cpuid->entries[i].edx,
+- entrie->eax, entrie->ebx, entrie->ecx, entrie->edx);
+- return;
+- }
+- }
+-
+- TEST_ASSERT(false, "CPUID 0x%x.%x not found", entrie->function, entrie->index);
+-}
+-
+-static void compare_cpuids(struct kvm_cpuid2 *cpuid1, struct kvm_cpuid2 *cpuid2)
+-{
+- int i;
+-
+- for (i = 0; i < cpuid1->nent; i++)
+- check_cpuid(cpuid2, &cpuid1->entries[i]);
+-
+- for (i = 0; i < cpuid2->nent; i++)
+- check_cpuid(cpuid1, &cpuid2->entries[i]);
+-}
+-
+-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
+-{
+- struct ucall uc;
+-
+- _vcpu_run(vm, vcpuid);
+-
+- switch (get_ucall(vm, vcpuid, &uc)) {
+- case UCALL_SYNC:
+- TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+- uc.args[1] == stage + 1,
+- "Stage %d: Unexpected register values vmexit, got %lx",
+- stage + 1, (ulong)uc.args[1]);
+- return;
+- case UCALL_DONE:
+- return;
+- case UCALL_ABORT:
+- TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", (const char *)uc.args[0],
+- __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+- default:
+- TEST_ASSERT(false, "Unexpected exit: %s",
+- exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+- }
+-}
+-
+-struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
+-{
+- int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
+- vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
+- struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
+-
+- memcpy(guest_cpuids, cpuid, size);
+-
+- *p_gva = gva;
+- return guest_cpuids;
+-}
+-
+-int main(void)
+-{
+- struct kvm_cpuid2 *supp_cpuid, *cpuid2;
+- vm_vaddr_t cpuid_gva;
+- struct kvm_vm *vm;
+- int stage;
+-
+- vm = vm_create_default(VCPU_ID, 0, guest_main);
+-
+- supp_cpuid = kvm_get_supported_cpuid();
+- cpuid2 = vcpu_get_cpuid(vm, VCPU_ID);
+-
+- compare_cpuids(supp_cpuid, cpuid2);
+-
+- vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
+-
+- vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva);
+-
+- for (stage = 0; stage < 3; stage++)
+- run_vcpu(vm, VCPU_ID, stage);
+-
+- kvm_vm_free(vm);
+-}
--- /dev/null
+From foo@baz Mon Jan 24 03:31:36 PM CET 2022
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 24 Jan 2022 14:05:34 +0100
+Subject: KVM: selftests: Test KVM_SET_CPUID2 after KVM_RUN
+To: stable@vger.kernel.org
+Cc: Paolo Bonzini <pbonzini@redhat.com>, Igor Mammedov <imammedo@redhat.com>, gregkh@linuxfoundation.org
+Message-ID: <20220124130534.2645955-5-vkuznets@redhat.com>
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit ecebb966acaab2466d9857d1cc435ee1fc9eee50 upstream.
+
+KVM forbids KVM_SET_CPUID2 after KVM_RUN was performed on a vCPU unless
+the supplied CPUID data is equal to what was previously set. Test this.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20220117150542.2176196-5-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/kvm/include/x86_64/processor.h | 7 +++
+ tools/testing/selftests/kvm/lib/x86_64/processor.c | 33 ++++++++++++++---
+ tools/testing/selftests/kvm/x86_64/cpuid_test.c | 30 +++++++++++++++
+ 3 files changed, 66 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
++++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
+@@ -358,6 +358,8 @@ uint64_t kvm_get_feature_msr(uint64_t ms
+ struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
+
+ struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
++int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
++ struct kvm_cpuid2 *cpuid);
+ void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_cpuid2 *cpuid);
+
+@@ -402,6 +404,11 @@ void vm_set_page_table_entry(struct kvm_
+ uint64_t pte);
+
+ /*
++ * get_cpuid() - find matching CPUID entry and return pointer to it.
++ */
++struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
++ uint32_t index);
++/*
+ * set_cpuid() - overwrites a matching cpuid entry with the provided value.
+ * matches based on ent->function && ent->index. returns true
+ * if a match was found and successfully overwritten.
+--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+@@ -847,6 +847,17 @@ kvm_get_supported_cpuid_index(uint32_t f
+ return entry;
+ }
+
++
++int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
++ struct kvm_cpuid2 *cpuid)
++{
++ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
++
++ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
++
++ return ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
++}
++
+ /*
+ * VM VCPU CPUID Set
+ *
+@@ -864,12 +875,9 @@ kvm_get_supported_cpuid_index(uint32_t f
+ void vcpu_set_cpuid(struct kvm_vm *vm,
+ uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
+ {
+- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int rc;
+
+- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+-
+- rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
++ rc = __vcpu_set_cpuid(vm, vcpuid, cpuid);
+ TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
+ rc, errno);
+
+@@ -1337,6 +1345,23 @@ void assert_on_unhandled_exception(struc
+ }
+ }
+
++struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
++ uint32_t index)
++{
++ int i;
++
++ for (i = 0; i < cpuid->nent; i++) {
++ struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];
++
++ if (cur->function == function && cur->index == index)
++ return cur;
++ }
++
++ TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index);
++
++ return NULL;
++}
++
+ bool set_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 *ent)
+ {
+--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
++++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+@@ -154,6 +154,34 @@ struct kvm_cpuid2 *vcpu_alloc_cpuid(stru
+ return guest_cpuids;
+ }
+
++static void set_cpuid_after_run(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid)
++{
++ struct kvm_cpuid_entry2 *ent;
++ int rc;
++ u32 eax, ebx, x;
++
++ /* Setting unmodified CPUID is allowed */
++ rc = __vcpu_set_cpuid(vm, VCPU_ID, cpuid);
++ TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);
++
++ /* Changing CPU features is forbidden */
++ ent = get_cpuid(cpuid, 0x7, 0);
++ ebx = ent->ebx;
++ ent->ebx--;
++ rc = __vcpu_set_cpuid(vm, VCPU_ID, cpuid);
++ TEST_ASSERT(rc, "Changing CPU features should fail");
++ ent->ebx = ebx;
++
++ /* Changing MAXPHYADDR is forbidden */
++ ent = get_cpuid(cpuid, 0x80000008, 0);
++ eax = ent->eax;
++ x = eax & 0xff;
++ ent->eax = (eax & ~0xffu) | (x - 1);
++ rc = __vcpu_set_cpuid(vm, VCPU_ID, cpuid);
++ TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
++ ent->eax = eax;
++}
++
+ int main(void)
+ {
+ struct kvm_cpuid2 *supp_cpuid, *cpuid2;
+@@ -175,5 +203,7 @@ int main(void)
+ for (stage = 0; stage < 3; stage++)
+ run_vcpu(vm, VCPU_ID, stage);
+
++ set_cpuid_after_run(vm, cpuid2);
++
+ kvm_vm_free(vm);
+ }
--- /dev/null
+From foo@baz Mon Jan 24 03:31:36 PM CET 2022
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 24 Jan 2022 14:05:31 +0100
+Subject: KVM: x86: Do runtime CPUID update before updating vcpu->arch.cpuid_entries
+To: stable@vger.kernel.org
+Cc: Paolo Bonzini <pbonzini@redhat.com>, Igor Mammedov <imammedo@redhat.com>, gregkh@linuxfoundation.org
+Message-ID: <20220124130534.2645955-2-vkuznets@redhat.com>
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit ee3a5f9e3d9bf94159f3cc80da542fbe83502dd8 upstream.
+
+kvm_update_cpuid_runtime() mangles CPUID data coming from the userspace
+VMM after updating 'vcpu->arch.cpuid_entries'; this makes it impossible
+to compare an update with what was previously supplied. Introduce a
+__kvm_update_cpuid_runtime() variant which can be used to tweak the
+input before it goes to 'vcpu->arch.cpuid_entries' so the upcoming
+update check can compare tweaked data.
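+To see why the ordering matters, here is a hedged, stand-alone illustration
+(hypothetical C, not code from this patch; 'struct ent' and runtime_tweak()
+are invented stand-ins for kvm_cpuid_entry2 and __kvm_update_cpuid_runtime()):
+the runtime update flips bits such as CPUID.1:ECX[27] (OSXSAVE) based on vCPU
+state, so a raw resend of the original userspace data would never compare
+equal to the stored, already-mangled copy unless the incoming array is run
+through the same tweaks first.
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+struct ent { uint32_t function, index, eax, ebx, ecx, edx; };
+
+/* Stand-in for __kvm_update_cpuid_runtime(): OSXSAVE mirrors guest CR4. */
+static void runtime_tweak(struct ent *e, bool guest_osxsave)
+{
+	if (e->function == 0x1 && e->index == 0 && guest_osxsave)
+		e->ecx |= 1u << 27;
+}
+
+int main(void)
+{
+	struct ent sent = { .function = 0x1 };	/* what the VMM supplies, twice */
+	struct ent stored = sent;
+
+	runtime_tweak(&stored, true);		/* KVM's in-kernel, mangled copy */
+
+	/* Naive check: raw resend vs. mangled copy -> spurious mismatch. */
+	printf("raw resend matches:     %d\n", sent.ecx == stored.ecx);
+
+	/* New ordering: the incoming data is tweaked first, then compared
+	 * (the comparison itself is added by the follow-up patch). */
+	runtime_tweak(&sent, true);
+	printf("tweaked resend matches: %d\n", sent.ecx == stored.ecx);
+	return 0;
+}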
+
+No functional change intended.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20220117150542.2176196-2-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 54 ++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 34 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -125,14 +125,21 @@ static void kvm_update_kvm_cpuid_base(st
+ }
+ }
+
+-static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
++static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
++ struct kvm_cpuid_entry2 *entries, int nent)
+ {
+ u32 base = vcpu->arch.kvm_cpuid_base;
+
+ if (!base)
+ return NULL;
+
+- return kvm_find_cpuid_entry(vcpu, base | KVM_CPUID_FEATURES, 0);
++ return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES, 0);
++}
++
++static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
++{
++ return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
++ vcpu->arch.cpuid_nent);
+ }
+
+ void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+@@ -147,11 +154,12 @@ void kvm_update_pv_runtime(struct kvm_vc
+ vcpu->arch.pv_cpuid.features = best->eax;
+ }
+
+-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
++static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
++ int nent)
+ {
+ struct kvm_cpuid_entry2 *best;
+
+- best = kvm_find_cpuid_entry(vcpu, 1, 0);
++ best = cpuid_entry2_find(entries, nent, 1, 0);
+ if (best) {
+ /* Update OSXSAVE bit */
+ if (boot_cpu_has(X86_FEATURE_XSAVE))
+@@ -162,33 +170,38 @@ void kvm_update_cpuid_runtime(struct kvm
+ vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
+ }
+
+- best = kvm_find_cpuid_entry(vcpu, 7, 0);
++ best = cpuid_entry2_find(entries, nent, 7, 0);
+ if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
+ cpuid_entry_change(best, X86_FEATURE_OSPKE,
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
+
+- best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
++ best = cpuid_entry2_find(entries, nent, 0xD, 0);
+ if (best)
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
+
+- best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
++ best = cpuid_entry2_find(entries, nent, 0xD, 1);
+ if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
+ cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+
+- best = kvm_find_kvm_cpuid_features(vcpu);
++ best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
+ if (kvm_hlt_in_guest(vcpu->kvm) && best &&
+ (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
+ best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+
+ if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
+- best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
++ best = cpuid_entry2_find(entries, nent, 0x1, 0);
+ if (best)
+ cpuid_entry_change(best, X86_FEATURE_MWAIT,
+ vcpu->arch.ia32_misc_enable_msr &
+ MSR_IA32_MISC_ENABLE_MWAIT);
+ }
+ }
++
++void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
++{
++ __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
++}
+ EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
+
+ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+@@ -276,21 +289,22 @@ u64 kvm_vcpu_reserved_gpa_bits_raw(struc
+ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
+ int nent)
+ {
+- int r;
++ int r;
++
++ __kvm_update_cpuid_runtime(vcpu, e2, nent);
+
+- r = kvm_check_cpuid(e2, nent);
+- if (r)
+- return r;
++ r = kvm_check_cpuid(e2, nent);
++ if (r)
++ return r;
+
+- kvfree(vcpu->arch.cpuid_entries);
+- vcpu->arch.cpuid_entries = e2;
+- vcpu->arch.cpuid_nent = nent;
++ kvfree(vcpu->arch.cpuid_entries);
++ vcpu->arch.cpuid_entries = e2;
++ vcpu->arch.cpuid_nent = nent;
+
+- kvm_update_kvm_cpuid_base(vcpu);
+- kvm_update_cpuid_runtime(vcpu);
+- kvm_vcpu_after_set_cpuid(vcpu);
++ kvm_update_kvm_cpuid_base(vcpu);
++ kvm_vcpu_after_set_cpuid(vcpu);
+
+- return 0;
++ return 0;
+ }
+
+ /* when an old userspace process fills a new kernel module */
--- /dev/null
+From foo@baz Mon Jan 24 03:31:36 PM CET 2022
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Mon, 24 Jan 2022 14:05:32 +0100
+Subject: KVM: x86: Partially allow KVM_SET_CPUID{,2} after KVM_RUN
+To: stable@vger.kernel.org
+Cc: Paolo Bonzini <pbonzini@redhat.com>, Igor Mammedov <imammedo@redhat.com>, gregkh@linuxfoundation.org
+Message-ID: <20220124130534.2645955-3-vkuznets@redhat.com>
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit c6617c61e8fe44b9e9fdfede921f61cac6b5149d upstream.
+
+Commit feb627e8d6f6 ("KVM: x86: Forbid KVM_SET_CPUID{,2} after KVM_RUN")
+forbade changing CPUID altogether, but unfortunately this is not fully
+compatible with existing VMMs. In particular, QEMU reuses vCPU fds for
+CPU hotplug after unplug and calls KVM_SET_CPUID2 again. Instead of a
+full ban, check whether the supplied CPUID data is equal to what was
+previously set.
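+For reference, the resulting userspace-visible contract looks roughly like
+the sketch below (an illustrative stand-alone program, not part of this
+patch; error handling and KVM_EXIT_* checks are omitted, and the selftest
+added elsewhere in this series is the authoritative test):
+
+/* Illustrative only: after KVM_RUN, KVM_SET_CPUID2 succeeds for identical
+ * data and fails with EINVAL once anything differs. */
+#include <fcntl.h>
+#include <linux/kvm.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+int main(void)
+{
+	int kvm = open("/dev/kvm", O_RDWR);
+	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
+
+	/* One page of guest memory at GPA 0 holding a single "hlt". */
+	uint8_t *mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
+			    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	mem[0] = 0xf4;
+	struct kvm_userspace_memory_region region = {
+		.guest_phys_addr = 0, .memory_size = 0x1000,
+		.userspace_addr = (uintptr_t)mem,
+	};
+	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);
+
+	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
+
+	struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
+					  256 * sizeof(struct kvm_cpuid_entry2));
+	cpuid->nent = 256;
+	ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid);
+	ioctl(vcpu, KVM_SET_CPUID2, cpuid);		/* before KVM_RUN: fine */
+
+	/* Run the vCPU once (real mode, CS:IP = 0:0 -> hlt). */
+	struct kvm_sregs sregs;
+	ioctl(vcpu, KVM_GET_SREGS, &sregs);
+	sregs.cs.base = 0;
+	sregs.cs.selector = 0;
+	ioctl(vcpu, KVM_SET_SREGS, &sregs);
+	struct kvm_regs regs = { .rip = 0, .rflags = 0x2 };
+	ioctl(vcpu, KVM_SET_REGS, &regs);
+	ioctl(vcpu, KVM_RUN, 0);
+
+	/* QEMU-style resend of the very same data: accepted. */
+	int rc = ioctl(vcpu, KVM_SET_CPUID2, cpuid);
+	printf("unchanged resend: rc=%d\n", rc);	/* expect 0 */
+
+	/* Any modification after KVM_RUN: rejected. */
+	cpuid->entries[0].eax ^= 1;
+	rc = ioctl(vcpu, KVM_SET_CPUID2, cpuid);
+	printf("modified resend:  rc=%d\n", rc);	/* expect -1, errno EINVAL */
+	return 0;
+}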
+
+Reported-by: Igor Mammedov <imammedo@redhat.com>
+Fixes: feb627e8d6f6 ("KVM: x86: Forbid KVM_SET_CPUID{,2} after KVM_RUN")
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Message-Id: <20220117150542.2176196-3-vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+[Do not call kvm_find_cpuid_entry repeatedly. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 36 ++++++++++++++++++++++++++++++++++++
+ arch/x86/kvm/x86.c | 19 -------------------
+ 2 files changed, 36 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -99,6 +99,28 @@ static int kvm_check_cpuid(struct kvm_cp
+ return 0;
+ }
+
++/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
++static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
++ int nent)
++{
++ struct kvm_cpuid_entry2 *orig;
++ int i;
++
++ if (nent != vcpu->arch.cpuid_nent)
++ return -EINVAL;
++
++ for (i = 0; i < nent; i++) {
++ orig = &vcpu->arch.cpuid_entries[i];
++ if (e2[i].function != orig->function ||
++ e2[i].index != orig->index ||
++ e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
++ e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
+ {
+ u32 function;
+@@ -293,6 +315,20 @@ static int kvm_set_cpuid(struct kvm_vcpu
+
+ __kvm_update_cpuid_runtime(vcpu, e2, nent);
+
++ /*
++ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
++ * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
++ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
++ * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
++ * the core vCPU model on the fly. It would've been better to forbid any
++ * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
++ * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
++ * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
++ * whether the supplied CPUID data is equal to what's already set.
++ */
++ if (vcpu->arch.last_vmentry_cpu != -1)
++ return kvm_cpuid_check_equal(vcpu, e2, nent);
++
+ r = kvm_check_cpuid(e2, nent);
+ if (r)
+ return r;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5149,17 +5149,6 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ struct kvm_cpuid __user *cpuid_arg = argp;
+ struct kvm_cpuid cpuid;
+
+- /*
+- * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+- * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
+- * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+- * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
+- * the core vCPU model on the fly, so fail.
+- */
+- r = -EINVAL;
+- if (vcpu->arch.last_vmentry_cpu != -1)
+- goto out;
+-
+ r = -EFAULT;
+ if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
+ goto out;
+@@ -5170,14 +5159,6 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ struct kvm_cpuid2 __user *cpuid_arg = argp;
+ struct kvm_cpuid2 cpuid;
+
+- /*
+- * KVM_SET_CPUID{,2} after KVM_RUN is forbidded, see the comment in
+- * KVM_SET_CPUID case above.
+- */
+- r = -EINVAL;
+- if (vcpu->arch.last_vmentry_cpu != -1)
+- goto out;
+-
+ r = -EFAULT;
+ if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
+ goto out;
mm-hmm.c-allow-vm_mixedmap-to-work-with-hmm_range_fault.patch
bonding-fix-extraction-of-ports-from-the-packet-headers.patch
lib-test_meminit-destroy-cache-in-kmem_cache_alloc_bulk-test.patch
+kvm-x86-do-runtime-cpuid-update-before-updating-vcpu-arch.cpuid_entries.patch
+kvm-x86-partially-allow-kvm_set_cpuid-2-after-kvm_run.patch
+kvm-selftests-rename-get_cpuid_test-to-cpuid_test.patch
+kvm-selftests-test-kvm_set_cpuid2-after-kvm_run.patch