git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Mar 2020 13:06:05 +0000 (14:06 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Mar 2020 13:06:05 +0000 (14:06 +0100)
added patches:
kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch
namei-only-return-echild-from-follow_dotdot_rcu.patch

queue-4.9/kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch [new file with mode: 0644]
queue-4.9/namei-only-return-echild-from-follow_dotdot_rcu.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch b/queue-4.9/kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch
new file mode 100644 (file)
index 0000000..a7238e6
--- /dev/null
@@ -0,0 +1,77 @@
+From fcfbc617547fc6d9552cb6c1c563b6a90ee98085 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Thu, 9 Jan 2020 15:56:18 -0800
+Subject: KVM: Check for a bad hva before dropping into the ghc slow path
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit fcfbc617547fc6d9552cb6c1c563b6a90ee98085 upstream.
+
+When reading/writing using the guest/host cache, check for a bad hva
+before checking for a NULL memslot, which triggers the slow path for
+handling cross-page accesses.  Because the memslot is nullified on error
+by __kvm_gfn_to_hva_cache_init(), if the bad hva is encountered after
+crossing into a new page, then the kvm_{read,write}_guest() slow path
+could potentially write/access the first chunk prior to detecting the
+bad hva.
+
+Arguably, performing a partial access is semantically correct from an
+architectural perspective, but that behavior is certainly not intended.
+In the original implementation, memslot was not explicitly nullified
+and therefore the partial access behavior varied based on whether the
+memslot itself was null, or if the hva was simply bad.  The current
+behavior was introduced as a seemingly unintentional side effect in
+commit f1b9dd5eb86c ("kvm: Disallow wraparound in
+kvm_gfn_to_hva_cache_init"), which justified the change with "since some
+callers don't check the return code from this function, it seems
+prudent to clear ghc->memslot in the event of an error".
+
+Regardless of intent, the partial access is dependent on _not_ checking
+the result of the cache initialization, which is arguably a bug in its
+own right, at best simply weird.
+
+Fixes: 8f964525a121 ("KVM: Allow cross page reads and writes from cached translations.")
+Cc: Jim Mattson <jmattson@google.com>
+Cc: Andrew Honig <ahonig@google.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2045,12 +2045,12 @@ int kvm_write_guest_cached(struct kvm *k
+       if (slots->generation != ghc->generation)
+               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+-      if (unlikely(!ghc->memslot))
+-              return kvm_write_guest(kvm, ghc->gpa, data, len);
+-
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
++      if (unlikely(!ghc->memslot))
++              return kvm_write_guest(kvm, ghc->gpa, data, len);
++
+       r = __copy_to_user((void __user *)ghc->hva, data, len);
+       if (r)
+               return -EFAULT;
+@@ -2071,12 +2071,12 @@ int kvm_read_guest_cached(struct kvm *kv
+       if (slots->generation != ghc->generation)
+               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+-      if (unlikely(!ghc->memslot))
+-              return kvm_read_guest(kvm, ghc->gpa, data, len);
+-
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
++      if (unlikely(!ghc->memslot))
++              return kvm_read_guest(kvm, ghc->gpa, data, len);
++
+       r = __copy_from_user(data, (void __user *)ghc->hva, len);
+       if (r)
+               return -EFAULT;
diff --git a/queue-4.9/namei-only-return-echild-from-follow_dotdot_rcu.patch b/queue-4.9/namei-only-return-echild-from-follow_dotdot_rcu.patch
new file mode 100644 (file)
index 0000000..d86af49
--- /dev/null
@@ -0,0 +1,41 @@
+From 2b98149c2377bff12be5dd3ce02ae0506e2dd613 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai <cyphar@cyphar.com>
+Date: Sat, 7 Dec 2019 01:13:26 +1100
+Subject: namei: only return -ECHILD from follow_dotdot_rcu()
+
+From: Aleksa Sarai <cyphar@cyphar.com>
+
+commit 2b98149c2377bff12be5dd3ce02ae0506e2dd613 upstream.
+
+It's over-zealous to return hard errors under RCU-walk here, given that
+a REF-walk will be triggered for all other cases handling ".." under
+RCU.
+
+The original purpose of this check was to ensure that if a rename occurs
+such that a directory is moved outside of the bind-mount which the
+resolution started in, it would be detected and blocked to avoid being
+able to mess with paths outside of the bind-mount. However, triggering a
+new REF-walk is just as effective a solution.
+
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Fixes: 397d425dc26d ("vfs: Test for and handle paths that are unreachable from their mnt_root")
+Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namei.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1370,7 +1370,7 @@ static int follow_dotdot_rcu(struct name
+                       nd->path.dentry = parent;
+                       nd->seq = seq;
+                       if (unlikely(!path_connected(&nd->path)))
+-                              return -ENOENT;
++                              return -ECHILD;
+                       break;
+               } else {
+                       struct mount *mnt = real_mount(nd->path.mnt);
index c1e52f183a82d5cf67c16de2e28f58e227d78e59..f4e7cc939433e9409f091e7991a7ae36b711153e 100644 (file)
@@ -40,3 +40,5 @@ serial-8250-check-upf_irq_shared-in-advance.patch
 include-linux-bitops.h-introduce-bits_per_type.patch
 net-netlink-cap-max-groups-which-will-be-considered-in-netlink_bind.patch
 net-ena-make-ena-rxfh-support-eth_rss_hash_no_change.patch
+namei-only-return-echild-from-follow_dotdot_rcu.patch
+kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch