git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
Merge branch 'kvm-ppc-fixes' into kvm-ppc-next
author Paul Mackerras <paulus@ozlabs.org>
Thu, 9 Nov 2017 03:30:24 +0000 (14:30 +1100)
committer Paul Mackerras <paulus@ozlabs.org>
Thu, 9 Nov 2017 03:30:24 +0000 (14:30 +1100)
This merges in a couple of fixes from the kvm-ppc-fixes branch that
modify the same areas of code as some commits from the kvm-ppc-next
branch, in order to resolve the conflicts.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
1  2 
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/powerpc.c

index 6aec8a22aeff3b1d70b71cfb544be9285966a14d,59247af5fd45076f063ff3567f143cfe2328bd92..235319c2574e07f03c3473d66e160e6e900204e2
@@@ -651,6 -646,16 +651,16 @@@ int kvmppc_book3s_hv_page_fault(struct 
                hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
                hnow_r = hpte_new_to_old_r(hnow_r);
        }
 -       * The synchronization for hpte_setup_done test vs. set is provided
+       /*
+        * If the HPT is being resized, don't update the HPTE,
+        * instead let the guest retry after the resize operation is complete.
 -      if (!kvm->arch.hpte_setup_done)
++       * The synchronization for mmu_ready test vs. set is provided
+        * by the HPTE lock.
+        */
++      if (!kvm->arch.mmu_ready)
+               goto out_unlock;
        if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
            rev->guest_rpte != hpte[2])
                /* HPTE has been changed under us; let the guest retry */
index fff62fdf1464f72ac443d128ac51f0f7d76693de,8d43cf205d348779e1316c81fc95ab6133eb0784..ca0d4d938d6a373037608dba80e472e022f5c44d
@@@ -2717,11 -2705,14 +2717,13 @@@ static noinline void kvmppc_run_core(st
         * Hard-disable interrupts, and check resched flag and signals.
         * If we need to reschedule or deliver a signal, clean up
         * and return without going into the guest(s).
 -       * If the hpte_setup_done flag has been cleared, don't go into the
++       * If the mmu_ready flag has been cleared, don't go into the
+        * guest because that means a HPT resize operation is in progress.
         */
        local_irq_disable();
        hard_irq_disable();
        if (lazy_irq_pending() || need_resched() ||
-           recheck_signals(&core_info)) {
 -          recheck_signals(&core_info) ||
 -          (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
++          recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
                local_irq_enable();
                vc->vcore_state = VCORE_INACTIVE;
                /* Unlock all except the primary vcore */
@@@ -3174,6 -3135,20 +3176,30 @@@ static int kvmppc_run_vcpu(struct kvm_r
  
        while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
               !signal_pending(current)) {
 -              /* See if the HPT and VRMA are ready to go */
 -              if (!kvm_is_radix(vcpu->kvm) &&
 -                  !vcpu->kvm->arch.hpte_setup_done) {
++              /* See if the MMU is ready to go */
++              if (!vcpu->kvm->arch.mmu_ready) {
+                       spin_unlock(&vc->lock);
 -                      r = kvmppc_hv_setup_htab_rma(vcpu);
++                      mutex_lock(&vcpu->kvm->lock);
++                      r = 0;
++                      if (!vcpu->kvm->arch.mmu_ready) {
++                              if (!kvm_is_radix(vcpu->kvm))
++                                      r = kvmppc_hv_setup_htab_rma(vcpu);
++                              if (!r) {
++                                      if (cpu_has_feature(CPU_FTR_ARCH_300))
++                                              kvmppc_setup_partition_table(vcpu->kvm);
++                                      vcpu->kvm->arch.mmu_ready = 1;
++                              }
++                      }
++                      mutex_unlock(&vcpu->kvm->lock);
+                       spin_lock(&vc->lock);
+                       if (r) {
+                               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                               kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+                               vcpu->arch.ret = r;
+                               break;
+                       }
+               }
                if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
                        kvmppc_vcore_end_preempt(vc);
  
@@@ -3288,29 -3262,10 +3314,11 @@@ static int kvmppc_vcpu_run_hv(struct kv
                return -EINTR;
        }
  
 -      atomic_inc(&vcpu->kvm->arch.vcpus_running);
 -      /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
 +      kvm = vcpu->kvm;
 +      atomic_inc(&kvm->arch.vcpus_running);
 +      /* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
        smp_mb();
  
-       /* On the first time here, set up MMU if necessary */
-       if (!vcpu->kvm->arch.mmu_ready) {
-               mutex_lock(&kvm->lock);
-               r = 0;
-               if (!kvm->arch.mmu_ready) {
-                       if (!kvm_is_radix(vcpu->kvm))
-                               r = kvmppc_hv_setup_htab_rma(vcpu);
-                       if (!r) {
-                               if (cpu_has_feature(CPU_FTR_ARCH_300))
-                                       kvmppc_setup_partition_table(kvm);
-                               kvm->arch.mmu_ready = 1;
-                       }
-               }
-               mutex_unlock(&kvm->lock);
-               if (r)
-                       goto out;
-       }
        flush_all_to_thread(current);
  
        /* Save userspace EBB and other register values */
index a0b7f094de78a3dd3366ddc2372f2f0938fb9434,ee279c7f48021e0b43c658d7529b9160061b5415..6b6c53c42ac9455f2a8c4f157f402da772163336
@@@ -643,8 -644,7 +643,8 @@@ int kvm_vm_ioctl_check_extension(struc
                break;
  #endif
        case KVM_CAP_PPC_HTM:
-               r = is_kvmppc_hv_enabled(kvm) &&
 -              r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
++              r = hv_enabled &&
 +                  (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
                break;
        default:
                r = 0;