Fixes for 6.6
author Sasha Levin <sashal@kernel.org>
Mon, 3 Mar 2025 07:38:29 +0000 (02:38 -0500)
committer Sasha Levin <sashal@kernel.org>
Mon, 3 Mar 2025 07:38:29 +0000 (02:38 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.6/phy-rockchip-naneng-combphy-compatible-reset-with-ol.patch [new file with mode: 0644]
queue-6.6/riscv-kvm-fix-hart-suspend-status-check.patch [new file with mode: 0644]
queue-6.6/riscv-kvm-fix-sbi-ipi-error-generation.patch [new file with mode: 0644]
queue-6.6/riscv-kvm-fix-sbi-time-error-generation.patch [new file with mode: 0644]
queue-6.6/riscv-kvm-introduce-mp_state_lock-to-avoid-lock-inve.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/phy-rockchip-naneng-combphy-compatible-reset-with-ol.patch b/queue-6.6/phy-rockchip-naneng-combphy-compatible-reset-with-ol.patch
new file mode 100644 (file)
index 0000000..7c21516
--- /dev/null
@@ -0,0 +1,42 @@
+From 7e3c8d6aee5010586ef70e05defef684ea6a95b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Jan 2025 18:00:01 +0800
+Subject: phy: rockchip: naneng-combphy: compatible reset with old DT
+
+From: Chukun Pan <amadeus@jmu.edu.cn>
+
+[ Upstream commit 3126ea9be66b53e607f87f067641ba724be24181 ]
+
+The device tree of RK3568 did not specify reset-names before.
+So add a fallback to the old behaviour to stay compatible with old DTs.
+
+Fixes: fbcbffbac994 ("phy: rockchip: naneng-combphy: fix phy reset")
+Cc: Jianfeng Liu <liujianfeng1994@gmail.com>
+Signed-off-by: Chukun Pan <amadeus@jmu.edu.cn>
+Reviewed-by: Jonas Karlman <jonas@kwiboo.se>
+Link: https://lore.kernel.org/r/20250106100001.1344418-2-amadeus@jmu.edu.cn
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+index 9c231094ba359..2354ce8b21594 100644
+--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
++++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+@@ -309,7 +309,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
+       priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
+-      priv->phy_rst = devm_reset_control_get(dev, "phy");
++      priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
++      /* fallback to old behaviour */
++      if (PTR_ERR(priv->phy_rst) == -ENOENT)
++              priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
+       if (IS_ERR(priv->phy_rst))
+               return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
+-- 
+2.39.5
+
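The fix above hinges on falling back only when the named lookup fails with
-ENOENT (reset-names absent, as on old DTs), while any other error still
propagates. Below is a standalone model of that error steering, with minimal
stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers; the getter
functions are hypothetical, not the driver's API.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-4095;
    }

    /* Hypothetical getters: the named lookup fails with -ENOENT when the
     * DT has no reset-names; the array getter models the old behaviour. */
    static void *get_named_reset(int dt_has_reset_names)
    {
            static int named;
            return dt_has_reset_names ? (void *)&named : ERR_PTR(-ENOENT);
    }

    static void *get_reset_array(void)
    {
            static int array;
            return (void *)&array;
    }

    int main(void)
    {
            void *rst = get_named_reset(0);   /* old DT: no reset-names */

            if (PTR_ERR(rst) == -ENOENT)      /* fall back, as in the patch */
                    rst = get_reset_array();
            if (IS_ERR(rst))                  /* any other error still fails */
                    return (int)-PTR_ERR(rst);

            printf("got a reset control\n");
            return 0;
    }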
diff --git a/queue-6.6/riscv-kvm-fix-hart-suspend-status-check.patch b/queue-6.6/riscv-kvm-fix-hart-suspend-status-check.patch
new file mode 100644 (file)
index 0000000..85dff73
--- /dev/null
@@ -0,0 +1,48 @@
+From 5f179eb8e6c1250b5ed3ec7d3cadca9fc354145a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2025 09:45:08 +0100
+Subject: riscv: KVM: Fix hart suspend status check
+
+From: Andrew Jones <ajones@ventanamicro.com>
+
+[ Upstream commit c7db342e3b4744688be1e27e31254c1d31a35274 ]
+
+"Not stopped" means started or suspended so we need to check for
+a single state in order to have a chance to check for each state.
+Also, we need to use target_vcpu when checking for the suspend
+state.
+
+Fixes: 763c8bed8c05 ("RISC-V: KVM: Implement SBI HSM suspend call")
+Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/r/20250217084506.18763-8-ajones@ventanamicro.com
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kvm/vcpu_sbi_hsm.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
+index 827d946ab8714..7e349b4ee926c 100644
+--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
++++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
+@@ -76,12 +76,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+       target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+       if (!target_vcpu)
+               return SBI_ERR_INVALID_PARAM;
+-      if (!kvm_riscv_vcpu_stopped(target_vcpu))
+-              return SBI_HSM_STATE_STARTED;
+-      else if (vcpu->stat.generic.blocking)
++      if (kvm_riscv_vcpu_stopped(target_vcpu))
++              return SBI_HSM_STATE_STOPPED;
++      else if (target_vcpu->stat.generic.blocking)
+               return SBI_HSM_STATE_SUSPENDED;
+       else
+-              return SBI_HSM_STATE_STOPPED;
++              return SBI_HSM_STATE_STARTED;
+ }
+ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+-- 
+2.39.5
+
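The ordering above matters: the old code tested "not stopped" first, which
swallowed the suspended case, and it read the blocking flag from the
requesting vcpu rather than the target. A small runnable model of the
corrected decision order follows (simplified struct and illustrative state
values, not the real SBI_HSM_STATE_* constants).

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative state values, not the real SBI constants. */
    enum hart_state { HART_STARTED, HART_STOPPED, HART_SUSPENDED };

    struct vcpu { bool stopped; bool blocking; };

    /* Mirrors the fixed ordering: test the one definite state (stopped)
     * first, then suspended (blocking), and only then report started. */
    static enum hart_state hart_status(const struct vcpu *target)
    {
            if (target->stopped)
                    return HART_STOPPED;
            else if (target->blocking)
                    return HART_SUSPENDED;
            return HART_STARTED;
    }

    int main(void)
    {
            struct vcpu suspended = { .stopped = false, .blocking = true };

            /* The old "not stopped => started" test reported STARTED here. */
            printf("%d\n", hart_status(&suspended)); /* prints 2 (suspended) */
            return 0;
    }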
diff --git a/queue-6.6/riscv-kvm-fix-sbi-ipi-error-generation.patch b/queue-6.6/riscv-kvm-fix-sbi-ipi-error-generation.patch
new file mode 100644 (file)
index 0000000..245cbb3
--- /dev/null
@@ -0,0 +1,72 @@
+From 19a233925a062d99e872ebedf79e39b6eee29788 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2025 09:45:10 +0100
+Subject: riscv: KVM: Fix SBI IPI error generation
+
+From: Andrew Jones <ajones@ventanamicro.com>
+
+[ Upstream commit 0611f78f83c93c000029ab01daa28166d03590ed ]
+
+When an invalid function ID of an SBI extension is used, we should
+return not-supported, not invalid-param. Also, when at least one
+hartid constructed from the base and mask parameters is invalid, we
+should return invalid-param. Finally, rather than relying on
+overflowing a left shift to result in zero and then using that zero
+in a condition which [correctly] skips sending an IPI (but loops
+unnecessarily), explicitly check for overflow and exit the loop
+immediately.
+
+Fixes: 5f862df5585c ("RISC-V: KVM: Add v0.1 replacement SBI extensions defined in v0.2")
+Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/r/20250217084506.18763-10-ajones@ventanamicro.com
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kvm/vcpu_sbi_replace.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
+index 7c4d5d38a3390..26e2619ab887b 100644
+--- a/arch/riscv/kvm/vcpu_sbi_replace.c
++++ b/arch/riscv/kvm/vcpu_sbi_replace.c
+@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       unsigned long hmask = cp->a0;
+       unsigned long hbase = cp->a1;
++      unsigned long hart_bit = 0, sentmask = 0;
+       if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
+-              retdata->err_val = SBI_ERR_INVALID_PARAM;
++              retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+               return 0;
+       }
+@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+               if (hbase != -1UL) {
+                       if (tmp->vcpu_id < hbase)
+                               continue;
+-                      if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
++                      hart_bit = tmp->vcpu_id - hbase;
++                      if (hart_bit >= __riscv_xlen)
++                              goto done;
++                      if (!(hmask & (1UL << hart_bit)))
+                               continue;
+               }
+               ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
+               if (ret < 0)
+                       break;
++              sentmask |= 1UL << hart_bit;
+               kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
+       }
++done:
++      if (hbase != -1UL && (hmask ^ sentmask))
++              retdata->err_val = SBI_ERR_INVALID_PARAM;
++
+       return ret;
+ }
+-- 
+2.39.5
+
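To see why comparing hmask against the accumulated sentmask detects invalid
hartids, it helps to model the selection loop in isolation. In the sketch
below the values are hypothetical and XLEN is fixed at 64; any hmask bit that
matched no vcpu survives the xor and triggers invalid-param, and a hart
offset at or beyond XLEN stops the loop before the shift could overflow.

    #include <stdio.h>

    /* Standalone model of the hart-selection loop: hbase is the first
     * target hartid and hmask is a bitmap relative to hbase. */
    int main(void)
    {
            const unsigned long xlen = 64;
            unsigned long hbase = 4, hmask = 0x5; /* requests harts 4 and 6 */
            unsigned long nr_vcpus = 6;           /* ids 0..5: hart 6 absent */
            unsigned long sentmask = 0;

            for (unsigned long id = 0; id < nr_vcpus; id++) {
                    if (id < hbase)
                            continue;
                    unsigned long hart_bit = id - hbase;
                    if (hart_bit >= xlen)   /* 1UL << hart_bit would overflow */
                            break;
                    if (!(hmask & (1UL << hart_bit)))
                            continue;
                    sentmask |= 1UL << hart_bit;  /* IPI sent to this hart */
            }

            /* Any requested bit not echoed in sentmask matched no vcpu. */
            if (hmask ^ sentmask)
                    printf("invalid-param: unmatched bits %#lx\n",
                           hmask ^ sentmask);
            return 0;
    }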
diff --git a/queue-6.6/riscv-kvm-fix-sbi-time-error-generation.patch b/queue-6.6/riscv-kvm-fix-sbi-time-error-generation.patch
new file mode 100644 (file)
index 0000000..d5bb9c8
--- /dev/null
@@ -0,0 +1,38 @@
+From d7643d866d5b8bba463e96b46d0a6a98e7d94969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2025 09:45:11 +0100
+Subject: riscv: KVM: Fix SBI TIME error generation
+
+From: Andrew Jones <ajones@ventanamicro.com>
+
+[ Upstream commit b901484852992cf3d162a5eab72251cc813ca624 ]
+
+When an invalid function ID of an SBI extension is used, we should
+return not-supported, not invalid-param.
+
+Fixes: 5f862df5585c ("RISC-V: KVM: Add v0.1 replacement SBI extensions defined in v0.2")
+Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/r/20250217084506.18763-11-ajones@ventanamicro.com
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kvm/vcpu_sbi_replace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
+index 26e2619ab887b..87ec68ed52d76 100644
+--- a/arch/riscv/kvm/vcpu_sbi_replace.c
++++ b/arch/riscv/kvm/vcpu_sbi_replace.c
+@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+       u64 next_cycle;
+       if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
+-              retdata->err_val = SBI_ERR_INVALID_PARAM;
++              retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+               return 0;
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.6/riscv-kvm-introduce-mp_state_lock-to-avoid-lock-inve.patch b/queue-6.6/riscv-kvm-introduce-mp_state_lock-to-avoid-lock-inve.patch
new file mode 100644 (file)
index 0000000..02d28c8
--- /dev/null
@@ -0,0 +1,283 @@
+From a8d51273849897476d9668318218f780fad21aa9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Apr 2024 15:45:25 +0800
+Subject: RISCV: KVM: Introduce mp_state_lock to avoid lock inversion
+
+From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+
+[ Upstream commit 2121cadec45aaf61fa45b3aa3d99723ed4e6683a ]
+
+Documentation/virt/kvm/locking.rst advises that kvm->lock should be
+acquired outside vcpu->mutex and kvm->srcu. However, when KVM/RISC-V
+handles SBI_EXT_HSM_HART_START, the lock ordering is vcpu->mutex,
+kvm->srcu, then kvm->lock.
+
+Although lockdep no longer complains about this after commit
+f0f44752f5f6 ("rcu: Annotate SRCU's update-side lockdep dependencies"),
+it's necessary to replace kvm->lock with a new dedicated lock to ensure
+that only one hart at a time can execute the SBI_EXT_HSM_HART_START
+call for a given target hart.
+
+Additionally, this patch renames "power_off" to "mp_state", which has
+two possible values. The new vcpu->mp_state_lock also protects access
+to vcpu->mp_state.
+
+Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/r/20240417074528.16506-2-yongxuan.wang@sifive.com
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Stable-dep-of: c7db342e3b47 ("riscv: KVM: Fix hart suspend status check")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/kvm_host.h |  8 ++++--
+ arch/riscv/kvm/vcpu.c             | 48 ++++++++++++++++++++++---------
+ arch/riscv/kvm/vcpu_sbi.c         |  7 +++--
+ arch/riscv/kvm/vcpu_sbi_hsm.c     | 39 +++++++++++++++++--------
+ 4 files changed, 73 insertions(+), 29 deletions(-)
+
+diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
+index 1ebf20dfbaa69..459e61ad7d2b6 100644
+--- a/arch/riscv/include/asm/kvm_host.h
++++ b/arch/riscv/include/asm/kvm_host.h
+@@ -236,8 +236,9 @@ struct kvm_vcpu_arch {
+       /* Cache pages needed to program page tables with spinlock held */
+       struct kvm_mmu_memory_cache mmu_page_cache;
+-      /* VCPU power-off state */
+-      bool power_off;
++      /* VCPU power state */
++      struct kvm_mp_state mp_state;
++      spinlock_t mp_state_lock;
+       /* Don't run the VCPU (blocked) */
+       bool pause;
+@@ -351,7 +352,10 @@ int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+ void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
+ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
+ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
++void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+ void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
++void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+ void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
++bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
+ #endif /* __RISCV_KVM_HOST_H__ */
+diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
+index 82229db1ce73f..9584d62c96ee7 100644
+--- a/arch/riscv/kvm/vcpu.c
++++ b/arch/riscv/kvm/vcpu.c
+@@ -100,6 +100,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+       struct kvm_cpu_context *cntx;
+       struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
++      spin_lock_init(&vcpu->arch.mp_state_lock);
++
+       /* Mark this VCPU never ran */
+       vcpu->arch.ran_atleast_once = false;
+       vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+@@ -193,7 +195,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ {
+       return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
+-              !vcpu->arch.power_off && !vcpu->arch.pause);
++              !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
+ }
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+@@ -421,26 +423,42 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
+       return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
+ }
+-void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
++void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+ {
+-      vcpu->arch.power_off = true;
++      WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+       kvm_make_request(KVM_REQ_SLEEP, vcpu);
+       kvm_vcpu_kick(vcpu);
+ }
+-void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
++void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+ {
+-      vcpu->arch.power_off = false;
++      spin_lock(&vcpu->arch.mp_state_lock);
++      __kvm_riscv_vcpu_power_off(vcpu);
++      spin_unlock(&vcpu->arch.mp_state_lock);
++}
++
++void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
++{
++      WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+       kvm_vcpu_wake_up(vcpu);
+ }
++void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
++{
++      spin_lock(&vcpu->arch.mp_state_lock);
++      __kvm_riscv_vcpu_power_on(vcpu);
++      spin_unlock(&vcpu->arch.mp_state_lock);
++}
++
++bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
++{
++      return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
++}
++
+ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+ {
+-      if (vcpu->arch.power_off)
+-              mp_state->mp_state = KVM_MP_STATE_STOPPED;
+-      else
+-              mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
++      *mp_state = READ_ONCE(vcpu->arch.mp_state);
+       return 0;
+ }
+@@ -450,17 +468,21 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ {
+       int ret = 0;
++      spin_lock(&vcpu->arch.mp_state_lock);
++
+       switch (mp_state->mp_state) {
+       case KVM_MP_STATE_RUNNABLE:
+-              vcpu->arch.power_off = false;
++              WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
+               break;
+       case KVM_MP_STATE_STOPPED:
+-              kvm_riscv_vcpu_power_off(vcpu);
++              __kvm_riscv_vcpu_power_off(vcpu);
+               break;
+       default:
+               ret = -EINVAL;
+       }
++      spin_unlock(&vcpu->arch.mp_state_lock);
++
+       return ret;
+ }
+@@ -561,11 +583,11 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
+               if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
+                       kvm_vcpu_srcu_read_unlock(vcpu);
+                       rcuwait_wait_event(wait,
+-                              (!vcpu->arch.power_off) && (!vcpu->arch.pause),
++                              (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
+                               TASK_INTERRUPTIBLE);
+                       kvm_vcpu_srcu_read_lock(vcpu);
+-                      if (vcpu->arch.power_off || vcpu->arch.pause) {
++                      if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
+                               /*
+                                * Awaken to handle a signal, request to
+                                * sleep again later.
+diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
+index 7a7fe40d0930b..be43278109f4e 100644
+--- a/arch/riscv/kvm/vcpu_sbi.c
++++ b/arch/riscv/kvm/vcpu_sbi.c
+@@ -102,8 +102,11 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+       unsigned long i;
+       struct kvm_vcpu *tmp;
+-      kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+-              tmp->arch.power_off = true;
++      kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
++              spin_lock(&vcpu->arch.mp_state_lock);
++              WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
++              spin_unlock(&vcpu->arch.mp_state_lock);
++      }
+       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+       memset(&run->system_event, 0, sizeof(run->system_event));
+diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
+index 7dca0e9381d9a..827d946ab8714 100644
+--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
++++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
+@@ -18,12 +18,18 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       struct kvm_vcpu *target_vcpu;
+       unsigned long target_vcpuid = cp->a0;
++      int ret = 0;
+       target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+       if (!target_vcpu)
+               return SBI_ERR_INVALID_PARAM;
+-      if (!target_vcpu->arch.power_off)
+-              return SBI_ERR_ALREADY_AVAILABLE;
++
++      spin_lock(&target_vcpu->arch.mp_state_lock);
++
++      if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
++              ret = SBI_ERR_ALREADY_AVAILABLE;
++              goto out;
++      }
+       reset_cntx = &target_vcpu->arch.guest_reset_context;
+       /* start address */
+@@ -34,19 +40,31 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
+       reset_cntx->a1 = cp->a2;
+       kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+-      kvm_riscv_vcpu_power_on(target_vcpu);
++      __kvm_riscv_vcpu_power_on(target_vcpu);
+-      return 0;
++out:
++      spin_unlock(&target_vcpu->arch.mp_state_lock);
++
++      return ret;
+ }
+ static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
+ {
+-      if (vcpu->arch.power_off)
+-              return SBI_ERR_FAILURE;
++      int ret = 0;
+-      kvm_riscv_vcpu_power_off(vcpu);
++      spin_lock(&vcpu->arch.mp_state_lock);
+-      return 0;
++      if (kvm_riscv_vcpu_stopped(vcpu)) {
++              ret = SBI_ERR_FAILURE;
++              goto out;
++      }
++
++      __kvm_riscv_vcpu_power_off(vcpu);
++
++out:
++      spin_unlock(&vcpu->arch.mp_state_lock);
++
++      return ret;
+ }
+ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+@@ -58,7 +76,7 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+       target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+       if (!target_vcpu)
+               return SBI_ERR_INVALID_PARAM;
+-      if (!target_vcpu->arch.power_off)
++      if (!kvm_riscv_vcpu_stopped(target_vcpu))
+               return SBI_HSM_STATE_STARTED;
+       else if (vcpu->stat.generic.blocking)
+               return SBI_HSM_STATE_SUSPENDED;
+@@ -71,14 +89,11 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ {
+       int ret = 0;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+-      struct kvm *kvm = vcpu->kvm;
+       unsigned long funcid = cp->a6;
+       switch (funcid) {
+       case SBI_EXT_HSM_HART_START:
+-              mutex_lock(&kvm->lock);
+               ret = kvm_sbi_hsm_vcpu_start(vcpu);
+-              mutex_unlock(&kvm->lock);
+               break;
+       case SBI_EXT_HSM_HART_STOP:
+               ret = kvm_sbi_hsm_vcpu_stop(vcpu);
+-- 
+2.39.5
+
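The locking pattern introduced by this patch, writers taking a small
dedicated lock around state transitions while readers do a single marked
load, can be modelled in userspace. Below is a minimal sketch under assumed
names; C11 atomics and a pthread mutex stand in for WRITE_ONCE()/READ_ONCE()
and the kernel spinlock (build with -pthread).

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative states, mirroring KVM_MP_STATE_RUNNABLE/STOPPED. */
    enum { MP_RUNNABLE, MP_STOPPED };

    struct vcpu {
            pthread_mutex_t mp_state_lock;
            atomic_int mp_state;
    };

    /* Writers lock, so compound transitions (state change plus the kick
     * or sleep request) stay mutually exclusive across harts. */
    static void vcpu_power_off(struct vcpu *v)
    {
            pthread_mutex_lock(&v->mp_state_lock);
            atomic_store(&v->mp_state, MP_STOPPED);  /* ~ WRITE_ONCE() */
            /* ...the kick/sleep request would be issued here... */
            pthread_mutex_unlock(&v->mp_state_lock);
    }

    /* Readers need no lock: a single marked load is always coherent. */
    static int vcpu_stopped(struct vcpu *v)
    {
            return atomic_load(&v->mp_state) == MP_STOPPED; /* ~ READ_ONCE() */
    }

    int main(void)
    {
            struct vcpu v = { PTHREAD_MUTEX_INITIALIZER, MP_RUNNABLE };

            vcpu_power_off(&v);
            printf("stopped=%d\n", vcpu_stopped(&v)); /* prints stopped=1 */
            return 0;
    }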
diff --git a/queue-6.6/series b/queue-6.6/series
index 405c492eadebde9cfa921117fdd9336b11f498e6..5a5950edd4eb32d7228d9fa7377b00d2afb9ad9c 100644 (file)
--- a/queue-6.6/series
@@ -53,3 +53,8 @@ perf-core-order-the-pmu-list-to-fix-warning-about-un.patch
 uprobes-reject-the-shared-zeropage-in-uprobe_write_o.patch
 io_uring-net-save-msg_control-for-compat.patch
 x86-cpu-fix-warm-boot-hang-regression-on-amd-sc1100-.patch
+phy-rockchip-naneng-combphy-compatible-reset-with-ol.patch
+riscv-kvm-introduce-mp_state_lock-to-avoid-lock-inve.patch
+riscv-kvm-fix-hart-suspend-status-check.patch
+riscv-kvm-fix-sbi-ipi-error-generation.patch
+riscv-kvm-fix-sbi-time-error-generation.patch