From: Ankit Soni
Date: Mon, 1 Dec 2025 14:39:40 +0000 (+0000)
Subject: iommu/amd: move wait_on_sem() out of spinlock
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d2a0cac10597068567d336e85fa3cbdbe8ca62bf;p=thirdparty%2Fkernel%2Flinux.git

iommu/amd: move wait_on_sem() out of spinlock

With iommu.strict=1, the existing completion wait path can cause soft
lockups in a stressed environment, as wait_on_sem() busy-waits under
the spinlock with interrupts disabled.

Move the completion wait in iommu_completion_wait() out of the
spinlock. wait_on_sem() only polls the hardware-updated cmd_sem and
does not require iommu->lock, so holding the lock during the busy wait
unnecessarily increases contention and extends the time with
interrupts disabled.

Signed-off-by: Ankit Soni
Reviewed-by: Vasant Hegde
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 5d45795c367a6..858d1669fe6cd 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1185,7 +1185,12 @@ static int wait_on_sem(struct amd_iommu *iommu, u64 data)
 {
 	int i = 0;
 
-	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
+	/*
+	 * cmd_sem holds a monotonically non-decreasing completion sequence
+	 * number.
+	 */
+	while ((__s64)(READ_ONCE(*iommu->cmd_sem) - data) < 0 &&
+	       i < LOOP_TIMEOUT) {
 		udelay(1);
 		i += 1;
 	}
@@ -1437,14 +1442,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	raw_spin_lock_irqsave(&iommu->lock, flags);
 
 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	ret = wait_on_sem(iommu, data);
 
-out_unlock:
-	raw_spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return ret;
 }
 
@@ -3121,13 +3125,18 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
 	raw_spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command_sync(iommu, &cmd, true);
 	if (ret)
-		goto out;
+		goto out_err;
 
 	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
 	if (ret)
-		goto out;
+		goto out_err;
 
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+
 	wait_on_sem(iommu, data);
-out:
+	return;
+
+out_err:
 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+	return;
 }
 
 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
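
The signed-difference test introduced in wait_on_sem() above treats cmd_sem as
a monotonically increasing completion sequence number. The standalone C sketch
below is only an illustration of that idiom, not kernel code: the seq_reached()
helper and the main() driver are hypothetical names chosen for this example. It
shows why casting the unsigned difference to a signed type keeps the wait
condition correct even if the 64-bit counter wraps around.

#include <stdint.h>
#include <stdio.h>

/*
 * Wraparound-safe check: has the completion counter reached the target
 * sequence number?  This mirrors the (__s64)(*cmd_sem - data) < 0 test
 * in the patch: the signed difference is negative exactly while the
 * counter is still behind the target, even if the u64 counter wraps.
 */
static int seq_reached(uint64_t counter, uint64_t target)
{
	return (int64_t)(counter - target) >= 0;
}

int main(void)
{
	/* Counter already at or past the target: done waiting. */
	printf("%d\n", seq_reached(100, 90));           /* prints 1 */

	/* Counter still behind the target: keep polling. */
	printf("%d\n", seq_reached(90, 100));           /* prints 0 */

	/*
	 * Wraparound: the target was issued near UINT64_MAX and the
	 * counter has since wrapped to a small value.  A plain unsigned
	 * "counter >= target" test would wrongly say "not reached";
	 * the signed difference still gives the right answer.
	 */
	printf("%d\n", seq_reached(5, UINT64_MAX - 2)); /* prints 1 */

	return 0;
}

The same monotonicity is what makes it safe for iommu_completion_wait() to drop
iommu->lock before polling: the producer only ever advances the counter, so a
waiter sampling it without the lock can never observe it moving backwards, and
the READ_ONCE() added in the patch keeps the compiler from caching the load
across loop iterations.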