From: Greg Kroah-Hartman
Date: Fri, 17 Dec 2021 12:27:17 +0000 (+0100)
Subject: 5.10-stable patches
X-Git-Tag: v4.4.296~72
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7506c36a06ccc91f62c25e5a31709d7f495e447c;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
	kvm-downgrade-two-bug_ons-to-warn_on_once.patch
	kvm-selftests-make-sure-kvm_create_max_vcpus-test-wo.patch
---

diff --git a/queue-5.10/kvm-downgrade-two-bug_ons-to-warn_on_once.patch b/queue-5.10/kvm-downgrade-two-bug_ons-to-warn_on_once.patch
new file mode 100644
index 00000000000..6a09f3f3b90
--- /dev/null
+++ b/queue-5.10/kvm-downgrade-two-bug_ons-to-warn_on_once.patch
@@ -0,0 +1,46 @@
+From 0522e7d18bdab4b74b9223a951836a3b210e69f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 22 Nov 2021 18:24:01 -0500
+Subject: KVM: downgrade two BUG_ONs to WARN_ON_ONCE
+
+From: Paolo Bonzini
+
+[ Upstream commit 5f25e71e311478f9bb0a8ef49e7d8b95316491d7 ]
+
+This is not an unrecoverable situation. Users of kvm_read_guest_offset_cached
+and kvm_write_guest_offset_cached must expect the read/write to fail, and
+therefore it is possible to just return early with an error value.
+
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Sasha Levin
+---
+ virt/kvm/kvm_main.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 97ac3c6fd4441..4a7d377b3a500 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2590,7 +2590,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ 	int r;
+ 	gpa_t gpa = ghc->gpa + offset;
+ 
+-	BUG_ON(len + offset > ghc->len);
++	if (WARN_ON_ONCE(len + offset > ghc->len))
++		return -EINVAL;
+ 
+ 	if (slots->generation != ghc->generation) {
+ 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
+@@ -2627,7 +2628,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ 	int r;
+ 	gpa_t gpa = ghc->gpa + offset;
+ 
+-	BUG_ON(len + offset > ghc->len);
++	if (WARN_ON_ONCE(len + offset > ghc->len))
++		return -EINVAL;
+ 
+ 	if (slots->generation != ghc->generation) {
+ 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
+-- 
+2.33.0
+
diff --git a/queue-5.10/kvm-selftests-make-sure-kvm_create_max_vcpus-test-wo.patch b/queue-5.10/kvm-selftests-make-sure-kvm_create_max_vcpus-test-wo.patch
new file mode 100644
index 00000000000..92564d9acbc
--- /dev/null
+++ b/queue-5.10/kvm-selftests-make-sure-kvm_create_max_vcpus-test-wo.patch
@@ -0,0 +1,90 @@
+From 0961c952dd3bf0193cffcadfc1dd56670a98309a Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 23 Nov 2021 14:59:53 +0100
+Subject: KVM: selftests: Make sure kvm_create_max_vcpus test won't hit
+ RLIMIT_NOFILE
+
+From: Vitaly Kuznetsov
+
+[ Upstream commit 908fa88e420f30dde6d80f092795a18ec72ca6d3 ]
+
+With the elevated 'KVM_CAP_MAX_VCPUS' value kvm_create_max_vcpus test
+may hit RLIMIT_NOFILE limits:
+
+ # ./kvm_create_max_vcpus
+ KVM_CAP_MAX_VCPU_ID: 4096
+ KVM_CAP_MAX_VCPUS: 1024
+ Testing creating 1024 vCPUs, with IDs 0...1023.
+ /dev/kvm not available (errno: 24), skipping test
+
+Adjust RLIMIT_NOFILE limits to make sure KVM_CAP_MAX_VCPUS fds can be
+opened. Note, raising hard limit ('rlim_max') requires CAP_SYS_RESOURCE
+capability which is generally not needed to run kvm selftests (but without
+raising the limit the test is doomed to fail anyway).
+
+Signed-off-by: Vitaly Kuznetsov
+Message-Id: <20211123135953.667434-1-vkuznets@redhat.com>
+[Skip the test if the hard limit can be raised. - Paolo]
+Reviewed-by: Sean Christopherson
+Tested-by: Sean Christopherson
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Sasha Levin
+---
+ .../selftests/kvm/kvm_create_max_vcpus.c | 30 +++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+index 0299cd81b8ba2..aa3795cd7bd3d 100644
+--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
++++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+@@ -12,6 +12,7 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
++#include <sys/resource.h>
+ 
+ #include "test_util.h"
+ 
+@@ -40,10 +41,39 @@ int main(int argc, char *argv[])
+ {
+ 	int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
+ 	int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
++	/*
++	 * Number of file descriptors reqired, KVM_CAP_MAX_VCPUS for vCPU fds +
++	 * an arbitrary number for everything else.
++	 */
++	int nr_fds_wanted = kvm_max_vcpus + 100;
++	struct rlimit rl;
+ 
+ 	pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
+ 	pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
+ 
++	/*
++	 * Check that we're allowed to open nr_fds_wanted file descriptors and
++	 * try raising the limits if needed.
++	 */
++	TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
++
++	if (rl.rlim_cur < nr_fds_wanted) {
++		rl.rlim_cur = nr_fds_wanted;
++		if (rl.rlim_max < nr_fds_wanted) {
++			int old_rlim_max = rl.rlim_max;
++			rl.rlim_max = nr_fds_wanted;
++
++			int r = setrlimit(RLIMIT_NOFILE, &rl);
++			if (r < 0) {
++				printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
++				       old_rlim_max, nr_fds_wanted);
++				exit(KSFT_SKIP);
++			}
++		} else {
++			TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
++		}
++	}
++
+ 	/*
+ 	 * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
+ 	 * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
+-- 
+2.33.0
+
diff --git a/queue-5.10/series b/queue-5.10/series
new file mode 100644
index 00000000000..46e01197d65
--- /dev/null
+++ b/queue-5.10/series
@@ -0,0 +1,2 @@
+kvm-selftests-make-sure-kvm_create_max_vcpus-test-wo.patch
+kvm-downgrade-two-bug_ons-to-warn_on_once.patch