iommu/amd: move wait_on_sem() out of spinlock
author     Ankit Soni <Ankit.Soni@amd.com>
           Mon, 1 Dec 2025 14:39:40 +0000 (14:39 +0000)
committer  Joerg Roedel <joerg.roedel@amd.com>
           Sat, 10 Jan 2026 09:54:38 +0000 (10:54 +0100)
With iommu.strict=1, the existing completion wait path can cause soft
lockups in stressed environments, as wait_on_sem() busy-waits under the
spinlock with interrupts disabled.

Move the completion wait in iommu_completion_wait() out of the spinlock.
wait_on_sem() only polls the hardware-updated cmd_sem and does not require
iommu->lock, so holding the lock during the busy wait unnecessarily
increases contention and extends the time with interrupts disabled.

Signed-off-by: Ankit Soni <Ankit.Soni@amd.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
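
For illustration, the locking pattern the patch moves to can be sketched as
follows. This is a minimal sketch with hypothetical demo_* names, not the
actual driver code: the COMPLETION_WAIT command is queued under iommu->lock,
the lock is dropped, and only then does the caller busy-wait on cmd_sem, so
the polling loop no longer runs with interrupts disabled.

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define DEMO_LOOP_TIMEOUT	2000000

struct demo_iommu {
	raw_spinlock_t	lock;
	u64		*cmd_sem;	/* written by hardware on completion */
};

/* Stand-in for __iommu_queue_command_sync(); queueing details omitted. */
static int demo_queue_command(struct demo_iommu *iommu, u64 data)
{
	return 0;
}

/* Only polls the hardware-updated cmd_sem; needs no lock. */
static int demo_wait_on_sem(struct demo_iommu *iommu, u64 data)
{
	int i = 0;

	while ((__s64)(READ_ONCE(*iommu->cmd_sem) - data) < 0 &&
	       i < DEMO_LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}
	return (i == DEMO_LOOP_TIMEOUT) ? -EIO : 0;
}

static int demo_completion_wait(struct demo_iommu *iommu, u64 data)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = demo_queue_command(iommu, data);
	raw_spin_unlock_irqrestore(&iommu->lock, flags);
	if (ret)
		return ret;

	/* The busy wait now runs without the lock and with interrupts enabled. */
	return demo_wait_on_sem(iommu, data);
}
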
drivers/iommu/amd/iommu.c

index 5d45795c367a6be0acf32213556c2bd900d175b8..858d1669fe6cd82eac3f454ef37f72b6b979d243 100644
@@ -1185,7 +1185,12 @@ static int wait_on_sem(struct amd_iommu *iommu, u64 data)
 {
        int i = 0;
 
-       while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
+       /*
+        * cmd_sem holds a monotonically non-decreasing completion sequence
+        * number.
+        */
+       while ((__s64)(READ_ONCE(*iommu->cmd_sem) - data) < 0 &&
+              i < LOOP_TIMEOUT) {
                udelay(1);
                i += 1;
        }
@@ -1437,14 +1442,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
        raw_spin_lock_irqsave(&iommu->lock, flags);
 
        ret = __iommu_queue_command_sync(iommu, &cmd, false);
+       raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
        if (ret)
-               goto out_unlock;
+               return ret;
 
        ret = wait_on_sem(iommu, data);
 
-out_unlock:
-       raw_spin_unlock_irqrestore(&iommu->lock, flags);
-
        return ret;
 }
 
@@ -3121,13 +3125,18 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
        raw_spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command_sync(iommu, &cmd, true);
        if (ret)
-               goto out;
+               goto out_err;
        ret = __iommu_queue_command_sync(iommu, &cmd2, false);
        if (ret)
-               goto out;
+               goto out_err;
+       raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
        wait_on_sem(iommu, data);
-out:
+       return;
+
+out_err:
        raw_spin_unlock_irqrestore(&iommu->lock, flags);
+       return;
 }
 
 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
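
As a side note on the new polling condition in wait_on_sem(): casting the
difference of the two u64 sequence values to a signed type keeps the "is the
hardware still behind the requested sequence number?" test correct even if the
counter were ever to wrap, and it stops polling as soon as the semaphore has
advanced past the requested value instead of spinning until the timeout, as
the old '*iommu->cmd_sem != data' test could. A small standalone sketch (plain
userspace C with a hypothetical seq_behind() helper, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 while 'sem' is still behind 'data', treating the u64 values as a
 * wrapping sequence counter, mirroring (__s64)(*cmd_sem - data) < 0. */
static int seq_behind(uint64_t sem, uint64_t data)
{
	return (int64_t)(sem - data) < 0;
}

int main(void)
{
	printf("%d\n", seq_behind(5, 7));		/* 1: hardware not there yet */
	printf("%d\n", seq_behind(7, 7));		/* 0: target reached, stop polling */
	printf("%d\n", seq_behind(9, 7));		/* 0: already past the target */
	printf("%d\n", seq_behind(UINT64_MAX, 1));	/* 1: still behind across a wrap */
	return 0;
}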