4.14-stable patches
Author:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
AuthorDate: Wed, 7 Mar 2018 15:03:31 +0000 (07:03 -0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 7 Mar 2018 15:03:31 +0000 (07:03 -0800)
added patches:
kvm-x86-extend-usage-of-ret_mmio_pf_-constants.patch
kvm-x86-fix-vcpu-initialization-with-userspace-lapic.patch

queue-4.14/kvm-x86-extend-usage-of-ret_mmio_pf_-constants.patch [new file with mode: 0644]
queue-4.14/kvm-x86-fix-smram-accessing-even-if-vm-is-shutdown.patch
queue-4.14/kvm-x86-fix-vcpu-initialization-with-userspace-lapic.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/kvm-x86-extend-usage-of-ret_mmio_pf_-constants.patch b/queue-4.14/kvm-x86-extend-usage-of-ret_mmio_pf_-constants.patch
new file mode 100644
index 0000000..aa3a87b
--- /dev/null
@@ -0,0 +1,332 @@
+From 9b8ebbdb74b5ad76b9dfd8b101af17839174b126 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 17 Aug 2017 15:03:32 +0200
+Subject: KVM: x86: extend usage of RET_MMIO_PF_* constants
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9b8ebbdb74b5ad76b9dfd8b101af17839174b126 upstream.
+
+The x86 MMU is full of code that returns 0 and 1 for retry/emulate.  Use
+the existing RET_MMIO_PF_RETRY/RET_MMIO_PF_EMULATE enum, renaming it to
+drop the MMIO part.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Thomas Backlund <tmb@mageia.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c         |   95 +++++++++++++++++++++------------------------
+ arch/x86/kvm/paging_tmpl.h |   18 ++++----
+ 2 files changed, 55 insertions(+), 58 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -150,6 +150,20 @@ module_param(dbg, bool, 0644);
+ /* make pte_list_desc fit well in cache line */
+ #define PTE_LIST_EXT 3
++/*
++ * Return values of handle_mmio_page_fault and mmu.page_fault:
++ * RET_PF_RETRY: let CPU fault again on the address.
++ * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
++ *
++ * For handle_mmio_page_fault only:
++ * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
++ */
++enum {
++      RET_PF_RETRY = 0,
++      RET_PF_EMULATE = 1,
++      RET_PF_INVALID = 2,
++};
++
+ struct pte_list_desc {
+       u64 *sptes[PTE_LIST_EXT];
+       struct pte_list_desc *more;
+@@ -2794,13 +2808,13 @@ done:
+       return ret;
+ }
+-static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
+-                       int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
+-                       bool speculative, bool host_writable)
++static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
++                      int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
++                      bool speculative, bool host_writable)
+ {
+       int was_rmapped = 0;
+       int rmap_count;
+-      bool emulate = false;
++      int ret = RET_PF_RETRY;
+       pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
+                *sptep, write_fault, gfn);
+@@ -2830,12 +2844,12 @@ static bool mmu_set_spte(struct kvm_vcpu
+       if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
+             true, host_writable)) {
+               if (write_fault)
+-                      emulate = true;
++                      ret = RET_PF_EMULATE;
+               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+       }
+       if (unlikely(is_mmio_spte(*sptep)))
+-              emulate = true;
++              ret = RET_PF_EMULATE;
+       pgprintk("%s: setting spte %llx\n", __func__, *sptep);
+       pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
+@@ -2855,7 +2869,7 @@ static bool mmu_set_spte(struct kvm_vcpu
+       kvm_release_pfn_clean(pfn);
+-      return emulate;
++      return ret;
+ }
+ static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+@@ -2994,14 +3008,13 @@ static int kvm_handle_bad_page(struct kv
+        * Do not cache the mmio info caused by writing the readonly gfn
+        * into the spte otherwise read access on readonly gfn also can
+        * caused mmio page fault and treat it as mmio access.
+-       * Return 1 to tell kvm to emulate it.
+        */
+       if (pfn == KVM_PFN_ERR_RO_FAULT)
+-              return 1;
++              return RET_PF_EMULATE;
+       if (pfn == KVM_PFN_ERR_HWPOISON) {
+               kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
+-              return 0;
++              return RET_PF_RETRY;
+       }
+       return -EFAULT;
+@@ -3286,13 +3299,13 @@ static int nonpaging_map(struct kvm_vcpu
+       }
+       if (fast_page_fault(vcpu, v, level, error_code))
+-              return 0;
++              return RET_PF_RETRY;
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       smp_rmb();
+       if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+-              return 0;
++              return RET_PF_RETRY;
+       if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+               return r;
+@@ -3312,7 +3325,7 @@ static int nonpaging_map(struct kvm_vcpu
+ out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+-      return 0;
++      return RET_PF_RETRY;
+ }
+@@ -3659,54 +3672,38 @@ exit:
+       return reserved;
+ }
+-/*
+- * Return values of handle_mmio_page_fault:
+- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
+- *                    directly.
+- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
+- *                    fault path update the mmio spte.
+- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
+- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
+- */
+-enum {
+-      RET_MMIO_PF_EMULATE = 1,
+-      RET_MMIO_PF_INVALID = 2,
+-      RET_MMIO_PF_RETRY = 0,
+-      RET_MMIO_PF_BUG = -1
+-};
+-
+ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+ {
+       u64 spte;
+       bool reserved;
+       if (mmio_info_in_cache(vcpu, addr, direct))
+-              return RET_MMIO_PF_EMULATE;
++              return RET_PF_EMULATE;
+       reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
+       if (WARN_ON(reserved))
+-              return RET_MMIO_PF_BUG;
++              return -EINVAL;
+       if (is_mmio_spte(spte)) {
+               gfn_t gfn = get_mmio_spte_gfn(spte);
+               unsigned access = get_mmio_spte_access(spte);
+               if (!check_mmio_spte(vcpu, spte))
+-                      return RET_MMIO_PF_INVALID;
++                      return RET_PF_INVALID;
+               if (direct)
+                       addr = 0;
+               trace_handle_mmio_page_fault(addr, gfn, access);
+               vcpu_cache_mmio_info(vcpu, addr, gfn, access);
+-              return RET_MMIO_PF_EMULATE;
++              return RET_PF_EMULATE;
+       }
+       /*
+        * If the page table is zapped by other cpus, let CPU fault again on
+        * the address.
+        */
+-      return RET_MMIO_PF_RETRY;
++      return RET_PF_RETRY;
+ }
+ EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
+@@ -3756,7 +3753,7 @@ static int nonpaging_page_fault(struct k
+       pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
+       if (page_fault_handle_page_track(vcpu, error_code, gfn))
+-              return 1;
++              return RET_PF_EMULATE;
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+@@ -3877,7 +3874,7 @@ static int tdp_page_fault(struct kvm_vcp
+       MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+       if (page_fault_handle_page_track(vcpu, error_code, gfn))
+-              return 1;
++              return RET_PF_EMULATE;
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+@@ -3894,13 +3891,13 @@ static int tdp_page_fault(struct kvm_vcp
+       }
+       if (fast_page_fault(vcpu, gpa, level, error_code))
+-              return 0;
++              return RET_PF_RETRY;
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       smp_rmb();
+       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+-              return 0;
++              return RET_PF_RETRY;
+       if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+               return r;
+@@ -3920,7 +3917,7 @@ static int tdp_page_fault(struct kvm_vcp
+ out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+-      return 0;
++      return RET_PF_RETRY;
+ }
+ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
+@@ -4919,25 +4916,25 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
+               vcpu->arch.gpa_val = cr2;
+       }
++      r = RET_PF_INVALID;
+       if (unlikely(error_code & PFERR_RSVD_MASK)) {
+               r = handle_mmio_page_fault(vcpu, cr2, direct);
+-              if (r == RET_MMIO_PF_EMULATE) {
++              if (r == RET_PF_EMULATE) {
+                       emulation_type = 0;
+                       goto emulate;
+               }
+-              if (r == RET_MMIO_PF_RETRY)
+-                      return 1;
+-              if (r < 0)
+-                      return r;
+-              /* Must be RET_MMIO_PF_INVALID.  */
+       }
+-      r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+-                                    false);
++      if (r == RET_PF_INVALID) {
++              r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
++                                            false);
++              WARN_ON(r == RET_PF_INVALID);
++      }
++
++      if (r == RET_PF_RETRY)
++              return 1;
+       if (r < 0)
+               return r;
+-      if (!r)
+-              return 1;
+       /*
+        * Before emulating the instruction, check if the error code
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -593,7 +593,7 @@ static int FNAME(fetch)(struct kvm_vcpu
+       struct kvm_mmu_page *sp = NULL;
+       struct kvm_shadow_walk_iterator it;
+       unsigned direct_access, access = gw->pt_access;
+-      int top_level, emulate;
++      int top_level, ret;
+       direct_access = gw->pte_access;
+@@ -659,15 +659,15 @@ static int FNAME(fetch)(struct kvm_vcpu
+       }
+       clear_sp_write_flooding_count(it.sptep);
+-      emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
+-                             it.level, gw->gfn, pfn, prefault, map_writable);
++      ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
++                         it.level, gw->gfn, pfn, prefault, map_writable);
+       FNAME(pte_prefetch)(vcpu, gw, it.sptep);
+-      return emulate;
++      return ret;
+ out_gpte_changed:
+       kvm_release_pfn_clean(pfn);
+-      return 0;
++      return RET_PF_RETRY;
+ }
+  /*
+@@ -762,12 +762,12 @@ static int FNAME(page_fault)(struct kvm_
+               if (!prefault)
+                       inject_page_fault(vcpu, &walker.fault);
+-              return 0;
++              return RET_PF_RETRY;
+       }
+       if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
+               shadow_page_table_clear_flood(vcpu, addr);
+-              return 1;
++              return RET_PF_EMULATE;
+       }
+       vcpu->arch.write_fault_to_shadow_pgtable = false;
+@@ -789,7 +789,7 @@ static int FNAME(page_fault)(struct kvm_
+       if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
+                        &map_writable))
+-              return 0;
++              return RET_PF_RETRY;
+       if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
+               return r;
+@@ -834,7 +834,7 @@ static int FNAME(page_fault)(struct kvm_
+ out_unlock:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_release_pfn_clean(pfn);
+-      return 0;
++      return RET_PF_RETRY;
+ }
+ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
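The net effect of the patch above is easiest to see in the page-fault dispatch: instead of overloading 0 and 1, every MMU fault handler now returns a named RET_PF_* value, and kvm_mmu_page_fault() alone translates that into the 1/negative convention its own callers still expect. The standalone C sketch below models that translation; the enum values mirror the patch, but the handler stub and the main() scaffolding are invented purely for illustration and are not kernel code.

#include <stdio.h>

/* Mirrors the enum the patch introduces in arch/x86/kvm/mmu.c. */
enum {
	RET_PF_RETRY   = 0,	/* let the CPU fault again on the address */
	RET_PF_EMULATE = 1,	/* MMIO page fault, emulate the instruction */
	RET_PF_INVALID = 2,	/* stale spte, take the real fault path */
};

/* Hypothetical stand-in for handle_mmio_page_fault(). */
static int mmio_fault_stub(int spte_still_valid)
{
	return spte_still_valid ? RET_PF_EMULATE : RET_PF_INVALID;
}

/*
 * Models the dispatch kvm_mmu_page_fault() performs after the patch:
 * RET_PF_INVALID falls through to the full fault handler, RET_PF_RETRY
 * re-enters the guest, negative values propagate as errors, and
 * RET_PF_EMULATE hands the instruction to the emulator.
 */
static const char *dispatch(int reserved_bits_set, int spte_still_valid)
{
	int r = RET_PF_INVALID;

	if (reserved_bits_set)
		r = mmio_fault_stub(spte_still_valid);
	if (r == RET_PF_INVALID)
		r = RET_PF_EMULATE;	/* stand-in for mmu.page_fault() */
	if (r == RET_PF_RETRY)
		return "re-enter guest";
	if (r < 0)
		return "propagate error";
	return "emulate instruction";
}

int main(void)
{
	printf("cached mmio spte -> %s\n", dispatch(1, 1));
	printf("stale mmio spte  -> %s\n", dispatch(1, 0));
	return 0;
}

Compiled with any C99 compiler, the sketch prints which action each fault class triggers, making explicit the three-way split that the old 0/1 return values obscured.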
diff --git a/queue-4.14/kvm-x86-fix-smram-accessing-even-if-vm-is-shutdown.patch b/queue-4.14/kvm-x86-fix-smram-accessing-even-if-vm-is-shutdown.patch
index cf19fbc92347121190625ded3ddfbbbb766c489e..a75d7532675db7bd2a6dbfbfb1cc9b93e04275bc 100644
@@ -46,8 +46,8 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/arch/x86/kvm/mmu.c
 +++ b/arch/x86/kvm/mmu.c
-@@ -3004,7 +3004,7 @@ static int kvm_handle_bad_page(struct kv
-               return 0;
+@@ -3017,7 +3017,7 @@ static int kvm_handle_bad_page(struct kv
+               return RET_PF_RETRY;
        }
  
 -      return -EFAULT;
diff --git a/queue-4.14/kvm-x86-fix-vcpu-initialization-with-userspace-lapic.patch b/queue-4.14/kvm-x86-fix-vcpu-initialization-with-userspace-lapic.patch
new file mode 100644
index 0000000..9501191
--- /dev/null
@@ -0,0 +1,75 @@
+From b7e31be385584afe7f073130e8e570d53c95f7fe Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>
+Date: Thu, 1 Mar 2018 15:24:25 +0100
+Subject: KVM: x86: fix vcpu initialization with userspace lapic
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Radim Krčmář <rkrcmar@redhat.com>
+
+commit b7e31be385584afe7f073130e8e570d53c95f7fe upstream.
+
+Moving the code around broke this rare configuration.
+Use this opportunity to finally call lapic reset from vcpu reset.
+
+Reported-by: syzbot+fb7a33a4b6c35007a72b@syzkaller.appspotmail.com
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Fixes: 0b2e9904c159 ("KVM: x86: move LAPIC initialization after VMCS creation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.c |   10 ++++------
+ arch/x86/kvm/x86.c   |    3 ++-
+ 2 files changed, 6 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1944,14 +1944,13 @@ void kvm_lapic_set_base(struct kvm_vcpu
+ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ {
+-      struct kvm_lapic *apic;
++      struct kvm_lapic *apic = vcpu->arch.apic;
+       int i;
+-      apic_debug("%s\n", __func__);
++      if (!apic)
++              return;
+-      ASSERT(vcpu);
+-      apic = vcpu->arch.apic;
+-      ASSERT(apic != NULL);
++      apic_debug("%s\n", __func__);
+       /* Stop the timer in case it's a reset to an active apic */
+       hrtimer_cancel(&apic->lapic_timer.timer);
+@@ -2510,7 +2509,6 @@ void kvm_apic_accept_events(struct kvm_v
+       pe = xchg(&apic->pending_events, 0);
+       if (test_bit(KVM_APIC_INIT, &pe)) {
+-              kvm_lapic_reset(vcpu, true);
+               kvm_vcpu_reset(vcpu, true);
+               if (kvm_vcpu_is_bsp(apic->vcpu))
+                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7779,7 +7779,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu
+       if (r)
+               return r;
+       kvm_vcpu_reset(vcpu, false);
+-      kvm_lapic_reset(vcpu, false);
+       kvm_mmu_setup(vcpu);
+       vcpu_put(vcpu);
+       return r;
+@@ -7822,6 +7821,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vc
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ {
++      kvm_lapic_reset(vcpu, init_event);
++
+       vcpu->arch.hflags = 0;
+       vcpu->arch.smi_pending = 0;
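The fix above is about ordering plus a missing NULL check: a vcpu whose LAPIC is emulated in userspace has no in-kernel apic, so vcpu->arch.apic is NULL, and the unconditional kvm_lapic_reset() calls crashed that rare configuration. The patch adds an early return for a NULL apic and moves the reset into kvm_vcpu_reset(), so every reset path covers the LAPIC. A toy userspace sketch of the fixed control flow, with all structures and fields invented for illustration:

#include <stdio.h>

/* Toy models of the two objects involved; the fields are invented. */
struct lapic { int timer_armed; };
struct vcpu  { struct lapic *apic; int hflags; };

/*
 * Mirrors the patched kvm_lapic_reset(): a vcpu whose LAPIC lives in
 * userspace has no in-kernel apic, so return early instead of
 * dereferencing a NULL pointer.
 */
static void lapic_reset(struct vcpu *vcpu)
{
	struct lapic *apic = vcpu->apic;

	if (!apic)
		return;
	apic->timer_armed = 0;	/* stop the timer, as the real reset does */
}

/*
 * Mirrors the patched kvm_vcpu_reset(): the LAPIC reset now happens
 * here, so every path that resets the vcpu also resets the apic, and
 * the separate calls in kvm_arch_vcpu_setup() and
 * kvm_apic_accept_events() can be dropped.
 */
static void vcpu_reset(struct vcpu *vcpu)
{
	lapic_reset(vcpu);
	vcpu->hflags = 0;
}

int main(void)
{
	struct lapic apic = { .timer_armed = 1 };
	struct vcpu kernel_lapic = { .apic = &apic };
	struct vcpu user_lapic   = { .apic = NULL };

	vcpu_reset(&kernel_lapic);
	vcpu_reset(&user_lapic);	/* no longer crashes */
	puts("both vcpus reset cleanly");
	return 0;
}
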
diff --git a/queue-4.14/series b/queue-4.14/series
index 692553c7581b2eeed94edf2814bd8cf34a561f5c..850b3305f15122a3a9b1e2b0b7c49eaa3e0a3422 100644
@@ -93,8 +93,10 @@ arm-orion-fix-orion_ge00_switch_board_info-initialization.patch
 arm-dts-rockchip-remove-1.8-ghz-operation-point-from-phycore-som.patch
 arm-mvebu-fix-broken-pl310_errata_753970-selects.patch
 arm-kvm-fix-building-with-gcc-8.patch
+kvm-x86-extend-usage-of-ret_mmio_pf_-constants.patch
 kvm-x86-fix-smram-accessing-even-if-vm-is-shutdown.patch
 kvm-mmu-fix-overlap-between-public-and-private-memslots.patch
 kvm-x86-remove-indirect-msr-op-calls-from-spec_ctrl.patch
 kvm-x86-move-lapic-initialization-after-vmcs-creation.patch
 kvm-vmx-optimize-vmx_vcpu_run-and-svm_vcpu_run-by-marking-the-rdmsr-path-as-unlikely.patch
+kvm-x86-fix-vcpu-initialization-with-userspace-lapic.patch