git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu/amd: serialize sequence allocation under concurrent TLB invalidations
author: Ankit Soni <Ankit.Soni@amd.com>
Thu, 22 Jan 2026 15:30:38 +0000 (15:30 +0000)
committer: Joerg Roedel <joerg.roedel@amd.com>
Tue, 3 Feb 2026 13:27:05 +0000 (14:27 +0100)
With concurrent TLB invalidations, completion wait randomly gets timed out
because cmd_sem_val was incremented outside the IOMMU spinlock, allowing
CMD_COMPL_WAIT commands to be queued out of sequence and breaking the
ordering assumption in wait_on_sem().
Move the cmd_sem_val increment under iommu->lock so completion sequence
allocation is serialized with command queuing.
Also drop the unnecessary trailing "return" statement at the end of the
void function iommu_flush_irt_and_complete().

Fixes: d2a0cac10597 ("iommu/amd: move wait_on_sem() out of spinlock")
Tested-by: Srikanth Aithal <sraithal@amd.com>
Reported-by: Srikanth Aithal <sraithal@amd.com>
Signed-off-by: Ankit Soni <Ankit.Soni@amd.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c

index cfcbad6c28ff9b1fe0bacf31c1a3ed88ea999061..c685d3771436a27c1fd026dab99f877b28f9658c 100644 (file)
@@ -752,7 +752,7 @@ struct amd_iommu {
 
        u32 flags;
        volatile u64 *cmd_sem;
-       atomic64_t cmd_sem_val;
+       u64 cmd_sem_val;
        /*
         * Track physical address to directly use it in build_completion_wait()
         * and avoid adding any special checks and handling for kdump.
index b1c344ed7dbda2c4f6e48a5d65917ed4cdc67ade..02c0c64c5f6bf5d12ae3e594b0c99ec020e41195 100644 (file)
@@ -1885,7 +1885,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
        iommu->pci_seg = pci_seg;
 
        raw_spin_lock_init(&iommu->lock);
-       atomic64_set(&iommu->cmd_sem_val, 0);
+       iommu->cmd_sem_val = 0;
 
        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
index 285ae635c32404430751cd330d528a6353f1b676..58be841d624e52de9007e47eeb86509a01ebc7c4 100644 (file)
@@ -1439,6 +1439,12 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
        return iommu_queue_command_sync(iommu, cmd, true);
 }
 
+static u64 get_cmdsem_val(struct amd_iommu *iommu)
+{
+       lockdep_assert_held(&iommu->lock);
+       return ++iommu->cmd_sem_val;
+}
+
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
@@ -1453,11 +1459,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
        if (!iommu->need_sync)
                return 0;
 
-       data = atomic64_inc_return(&iommu->cmd_sem_val);
-       build_completion_wait(&cmd, iommu, data);
-
        raw_spin_lock_irqsave(&iommu->lock, flags);
 
+       data = get_cmdsem_val(iommu);
+       build_completion_wait(&cmd, iommu, data);
+
        ret = __iommu_queue_command_sync(iommu, &cmd, false);
        raw_spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -3177,10 +3183,11 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
                return;
 
        build_inv_irt(&cmd, devid);
-       data = atomic64_inc_return(&iommu->cmd_sem_val);
-       build_completion_wait(&cmd2, iommu, data);
 
        raw_spin_lock_irqsave(&iommu->lock, flags);
+       data = get_cmdsem_val(iommu);
+       build_completion_wait(&cmd2, iommu, data);
+
        ret = __iommu_queue_command_sync(iommu, &cmd, true);
        if (ret)
                goto out_err;
@@ -3194,7 +3201,6 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
 
 out_err:
        raw_spin_unlock_irqrestore(&iommu->lock, flags);
-       return;
 }
 
 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)