git.ipfire.org Git - thirdparty/linux.git/commitdiff
dma-buf: Rename dma_buf_move_notify() to dma_buf_invalidate_mappings()
authorLeon Romanovsky <leonro@nvidia.com>
Sat, 24 Jan 2026 19:14:14 +0000 (21:14 +0200)
committerChristian König <christian.koenig@amd.com>
Tue, 27 Jan 2026 09:44:30 +0000 (10:44 +0100)
Along with renaming the .move_notify() callback, rename the corresponding
dma-buf core function. This makes the expected behavior clear to exporters
calling this function.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20260124-dmabuf-revoke-v5-2-f98fca917e96@nvidia.com
Signed-off-by: Christian König <christian.koenig@amd.com>
drivers/dma-buf/dma-buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/xe/xe_bo.c
drivers/iommu/iommufd/selftest.c
drivers/vfio/pci/vfio_pci_dmabuf.c
include/linux/dma-buf.h

index cc9b88214d9773f38f3a0b42e7d0da1544c81ca3..1c257607a6236045eb898c35cc8c01f8e7a52711 100644 (file)
@@ -981,7 +981,7 @@ dma_buf_pin_on_map(struct dma_buf_attachment *attach)
  * 3. Exporters must hold the dma-buf reservation lock when calling these
  *    functions:
  *
- *     - dma_buf_move_notify()
+ *     - dma_buf_invalidate_mappings()
  */
 
 /**
@@ -1323,14 +1323,14 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
 
 /**
- * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ * dma_buf_invalidate_mappings - notify attachments that DMA-buf is moving
  *
  * @dmabuf:    [in]    buffer which is moving
  *
  * Informs all attachments that they need to destroy and recreate all their
  * mappings.
  */
-void dma_buf_move_notify(struct dma_buf *dmabuf)
+void dma_buf_invalidate_mappings(struct dma_buf *dmabuf)
 {
        struct dma_buf_attachment *attach;
 
@@ -1340,7 +1340,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
                if (attach->importer_ops)
                        attach->importer_ops->invalidate_mappings(attach);
 }
-EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
+EXPORT_SYMBOL_NS_GPL(dma_buf_invalidate_mappings, "DMA_BUF");
 
 /**
  * DOC: cpu access
index e08f58de4b17f2c88933cd858c46100ce9d02d0a..f73dc99d188716e3fc1a771c5449c25ae539a825 100644 (file)
@@ -1270,7 +1270,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 
        if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
            old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
-               dma_buf_move_notify(abo->tbo.base.dma_buf);
+               dma_buf_invalidate_mappings(abo->tbo.base.dma_buf);
 
        /* move_notify is called before move happens */
        trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
index b0bd31d14bb97eaf41bcdb7638ce146189e6de57..94712b05edffddb861572f6202ddf6153026f885 100644 (file)
@@ -819,7 +819,7 @@ static int xe_bo_move_notify(struct xe_bo *bo,
 
        /* Don't call move_notify() for imported dma-bufs. */
        if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
-               dma_buf_move_notify(ttm_bo->base.dma_buf);
+               dma_buf_invalidate_mappings(ttm_bo->base.dma_buf);
 
        /*
         * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
index c4322fd26f93e56767f0f24453e6ea4803365dd2..fd47953db4a3f62946ec40f4352eef221fa159dd 100644 (file)
@@ -2073,7 +2073,7 @@ static int iommufd_test_dmabuf_revoke(struct iommufd_ucmd *ucmd, int fd,
        priv = dmabuf->priv;
        dma_resv_lock(dmabuf->resv, NULL);
        priv->revoked = revoked;
-       dma_buf_move_notify(dmabuf);
+       dma_buf_invalidate_mappings(dmabuf);
        dma_resv_unlock(dmabuf->resv);
 
 err_put:
index d4d0f7d08c53e2b9b756f00b0bd41b2cd3d5370d..362e3d1498175deaad410055aafe118373d9d7c3 100644 (file)
@@ -320,7 +320,7 @@ void vfio_pci_dma_buf_move(struct vfio_pci_core_device *vdev, bool revoked)
                if (priv->revoked != revoked) {
                        dma_resv_lock(priv->dmabuf->resv, NULL);
                        priv->revoked = revoked;
-                       dma_buf_move_notify(priv->dmabuf);
+                       dma_buf_invalidate_mappings(priv->dmabuf);
                        dma_resv_unlock(priv->dmabuf->resv);
                }
                fput(priv->dmabuf->file);
@@ -341,7 +341,7 @@ void vfio_pci_dma_buf_cleanup(struct vfio_pci_core_device *vdev)
                list_del_init(&priv->dmabufs_elm);
                priv->vdev = NULL;
                priv->revoked = true;
-               dma_buf_move_notify(priv->dmabuf);
+               dma_buf_invalidate_mappings(priv->dmabuf);
                dma_resv_unlock(priv->dmabuf->resv);
                vfio_device_put_registration(&vdev->vdev);
                fput(priv->dmabuf->file);
index d9ee4499b37dca048214da6d9087be808e2058a8..d0470af8887e9b22901c114f811b1f0b41eb1d25 100644 (file)
@@ -588,7 +588,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                                        enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
                                enum dma_data_direction);
-void dma_buf_move_notify(struct dma_buf *dma_buf);
+void dma_buf_invalidate_mappings(struct dma_buf *dma_buf);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,