KVM: SVM: switch to raw spinlock for svm->ir_list_lock
author     Maxim Levitsky <mlevitsk@redhat.com>
           Thu, 30 Oct 2025 19:41:30 +0000 (15:41 -0400)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 4 Nov 2025 17:14:28 +0000 (09:14 -0800)
Use a raw spinlock for vcpu_svm.ir_list_lock as the lock can be taken
during schedule() via kvm_sched_out() => __avic_vcpu_put(), and "normal"
spinlocks are sleepable locks when PREEMPT_RT=y.
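
As a minimal sketch of the distinction (illustrative only; the lock and
function names below are hypothetical, not from this patch):

  /*
   * Under PREEMPT_RT, spinlock_t is backed by an rt_mutex and may
   * sleep, so acquiring one while the scheduler holds rq->__lock (a
   * raw lock) is an invalid wait context.  raw_spinlock_t always
   * spins with preemption disabled, so it stays legal from
   * sched-out paths such as __avic_vcpu_put().
   */
  #include <linux/spinlock.h>

  static DEFINE_RAW_SPINLOCK(demo_lock);        /* hypothetical */

  static void demo_touch_shared_state(void)
  {
          unsigned long flags;

          /* Never sleeps, even with PREEMPT_RT=y. */
          raw_spin_lock_irqsave(&demo_lock, flags);
          /* ... update state shared with vCPU load/put paths ... */
          raw_spin_unlock_irqrestore(&demo_lock, flags);
  }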

This fixes the following lockdep warning:

  =============================
  [ BUG: Invalid wait context ]
  6.12.0-146.1640_2124176644.el10.x86_64+debug #1 Not tainted
  -----------------------------
  qemu-kvm/38299 is trying to lock:
  ff11000239725600 (&svm->ir_list_lock){....}-{3:3}, at: __avic_vcpu_put+0xfd/0x300 [kvm_amd]
  other info that might help us debug this:
  context-{5:5}
  2 locks held by qemu-kvm/38299:
   #0: ff11000239723ba8 (&vcpu->mutex){+.+.}-{4:4}, at: kvm_vcpu_ioctl+0x240/0xe00 [kvm]
   #1: ff11000b906056d8 (&rq->__lock){-.-.}-{2:2}, at: raw_spin_rq_lock_nested+0x2e/0x130
  stack backtrace:
  CPU: 1 UID: 0 PID: 38299 Comm: qemu-kvm Kdump: loaded Not tainted 6.12.0-146.1640_2124176644.el10.x86_64+debug #1 PREEMPT(voluntary)
  Hardware name: AMD Corporation QUARTZ/QUARTZ, BIOS RQZ100AB 09/14/2023
  Call Trace:
   <TASK>
   dump_stack_lvl+0x6f/0xb0
   __lock_acquire+0x921/0xb80
   lock_acquire.part.0+0xbe/0x270
   _raw_spin_lock_irqsave+0x46/0x90
   __avic_vcpu_put+0xfd/0x300 [kvm_amd]
   svm_vcpu_put+0xfa/0x130 [kvm_amd]
   kvm_arch_vcpu_put+0x48c/0x790 [kvm]
   kvm_sched_out+0x161/0x1c0 [kvm]
   prepare_task_switch+0x36b/0xf60
   __schedule+0x4f7/0x1890
   schedule+0xd4/0x260
   xfer_to_guest_mode_handle_work+0x54/0xc0
   vcpu_run+0x69a/0xa70 [kvm]
   kvm_arch_vcpu_ioctl_run+0xdc0/0x17e0 [kvm]
   kvm_vcpu_ioctl+0x39f/0xe00 [kvm]

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://patch.msgid.link/20251030194130.307900-1-mlevitsk@redhat.com
[sean: massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 89864fee6e839c73c4f9c1896f58b34d65104af2..fef00546c88566c76592a9e8b9149efa6e0002d2 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -788,7 +788,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
        struct kvm_vcpu *vcpu = &svm->vcpu;
 
        INIT_LIST_HEAD(&svm->ir_list);
-       spin_lock_init(&svm->ir_list_lock);
+       raw_spin_lock_init(&svm->ir_list_lock);
 
        if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
                return 0;
@@ -816,9 +816,9 @@ static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
        if (!vcpu)
                return;
 
-       spin_lock_irqsave(&to_svm(vcpu)->ir_list_lock, flags);
+       raw_spin_lock_irqsave(&to_svm(vcpu)->ir_list_lock, flags);
        list_del(&irqfd->vcpu_list);
-       spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags);
+       raw_spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags);
 }
 
 int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
@@ -855,7 +855,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
                 * list of IRQs being posted to the vCPU, to ensure the IRTE
                 * isn't programmed with stale pCPU/IsRunning information.
                 */
-               guard(spinlock_irqsave)(&svm->ir_list_lock);
+               guard(raw_spinlock_irqsave)(&svm->ir_list_lock);
 
                /*
                 * Update the target pCPU for IOMMU doorbells if the vCPU is
@@ -972,7 +972,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
         * up-to-date entry information, or that this task will wait until
         * svm_ir_list_add() completes to set the new target pCPU.
         */
-       spin_lock_irqsave(&svm->ir_list_lock, flags);
+       raw_spin_lock_irqsave(&svm->ir_list_lock, flags);
 
        entry = svm->avic_physical_id_entry;
        WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
@@ -997,7 +997,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
 
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, action);
 
-       spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+       raw_spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1035,7 +1035,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
         * or that this task will wait until svm_ir_list_add() completes to
         * mark the vCPU as not running.
         */
-       spin_lock_irqsave(&svm->ir_list_lock, flags);
+       raw_spin_lock_irqsave(&svm->ir_list_lock, flags);
 
        avic_update_iommu_vcpu_affinity(vcpu, -1, action);
 
@@ -1059,7 +1059,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
 
        svm->avic_physical_id_entry = entry;
 
-       spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+       raw_spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
 void avic_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8c36ee0d67ef09336a0fda038756a6b40d4009a1..c856d8e0f95e7c1cd7a500bd7c7b9219394f9ab0 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -329,7 +329,7 @@ struct vcpu_svm {
         * back into remapped mode).
         */
        struct list_head ir_list;
-       spinlock_t ir_list_lock;
+       raw_spinlock_t ir_list_lock;
 
        struct vcpu_sev_es_state sev_es;
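
The one non-mechanical conversion above is the scoped guard in
avic_pi_update_irte(), where guard(spinlock_irqsave) becomes
guard(raw_spinlock_irqsave).  A hedged sketch of how the scoped guard
behaves (the names below are hypothetical, not from this patch):

  #include <linux/errno.h>
  #include <linux/spinlock.h>

  static DEFINE_RAW_SPINLOCK(demo_lock);        /* hypothetical */
  static int demo_state;                        /* hypothetical */

  static int demo_update(int val)
  {
          /* Acquired with IRQs saved; released automatically at scope exit. */
          guard(raw_spinlock_irqsave)(&demo_lock);

          if (val < 0)
                  return -EINVAL;        /* unlock happens here, too */

          demo_state = val;
          return 0;
  }

Because the guard drops the lock on every exit path, the early returns
in avic_pi_update_irte() need no explicit unlock calls.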