git.ipfire.org Git - thirdparty/linux.git/commitdiff
dma-buf: Rename .move_notify() callback to a clearer identifier
author Leon Romanovsky <leonro@nvidia.com>
Sat, 24 Jan 2026 19:14:13 +0000 (21:14 +0200)
committer Christian König <christian.koenig@amd.com>
Tue, 27 Jan 2026 09:43:55 +0000 (10:43 +0100)
Rename the .move_notify() callback to .invalidate_mappings() to make its
purpose explicit and highlight that it is responsible for invalidating
existing mappings.

Suggested-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Link: https://lore.kernel.org/r/20260124-dmabuf-revoke-v5-1-f98fca917e96@nvidia.com
Signed-off-by: Christian König <christian.koenig@amd.com>
drivers/dma-buf/dma-buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/virtio/virtgpu_prime.c
drivers/gpu/drm/xe/tests/xe_dma_buf.c
drivers/gpu/drm/xe/xe_dma_buf.c
drivers/infiniband/core/umem_dmabuf.c
drivers/infiniband/hw/mlx5/mr.c
drivers/iommu/iommufd/pages.c
include/linux/dma-buf.h

index 77555096e4c780e17e9b9043653c0dc3606095dd..cc9b88214d9773f38f3a0b42e7d0da1544c81ca3 100644 (file)
@@ -1017,7 +1017,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);
 
-       if (WARN_ON(importer_ops && !importer_ops->move_notify))
+       if (WARN_ON(importer_ops && !importer_ops->invalidate_mappings))
                return ERR_PTR(-EINVAL);
 
        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
@@ -1130,7 +1130,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
  *
  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
  * any mapping of @attach again and inform the importer through
- * &dma_buf_attach_ops.move_notify.
+ * &dma_buf_attach_ops.invalidate_mappings.
  */
 void dma_buf_unpin(struct dma_buf_attachment *attach)
 {
@@ -1338,7 +1338,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
 
        list_for_each_entry(attach, &dmabuf->attachments, node)
                if (attach->importer_ops)
-                       attach->importer_ops->move_notify(attach);
+                       attach->importer_ops->invalidate_mappings(attach);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
 
index e22cfa7c6d32f286de94c6e0947c20db41894b68..863454148b281bb3196a61b7097148a9111509e4 100644 (file)
@@ -450,7 +450,7 @@ error:
 }
 
 /**
- * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ * amdgpu_dma_buf_move_notify - &attach.invalidate_mappings implementation
  *
  * @attach: the DMA-buf attachment
  *
@@ -521,7 +521,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 
 static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
        .allow_peer2peer = true,
-       .move_notify = amdgpu_dma_buf_move_notify
+       .invalidate_mappings = amdgpu_dma_buf_move_notify
 };
 
 /**
index ce49282198cbf6aa31751248f4b3f30c74494d1c..19c78dd2ca7795533af6b58d9bbf9440f58f58b8 100644 (file)
@@ -288,7 +288,7 @@ static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 
 static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
        .allow_peer2peer = true,
-       .move_notify = virtgpu_dma_buf_move_notify
+       .invalidate_mappings = virtgpu_dma_buf_move_notify
 };
 
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
index 5df98de5ba3c8078f084c24a34327f05a7a16f45..1f2cca5c2f8101076c40cf7eb7814437600efb06 100644 (file)
@@ -23,7 +23,7 @@ static bool p2p_enabled(struct dma_buf_test_params *params)
 static bool is_dynamic(struct dma_buf_test_params *params)
 {
        return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
-               params->attach_ops->move_notify;
+               params->attach_ops->invalidate_mappings;
 }
 
 static void check_residency(struct kunit *test, struct xe_bo *exported,
@@ -60,7 +60,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
 
        /*
         * Evict exporter. Evicting the exported bo will
-        * evict also the imported bo through the move_notify() functionality if
+        * evict also the imported bo through the invalidate_mappings() functionality if
         * importer is on a different device. If they're on the same device,
         * the exporter and the importer should be the same bo.
         */
@@ -198,7 +198,7 @@ out:
 
 static const struct dma_buf_attach_ops nop2p_attach_ops = {
        .allow_peer2peer = false,
-       .move_notify = xe_dma_buf_move_notify
+       .invalidate_mappings = xe_dma_buf_move_notify
 };
 
 /*
index 54e42960daadc0f4db95aba2bfbab228fa9f82f2..2e167b29d0c95a3cfa9cb7d976e2a0fd022a29c9 100644 (file)
@@ -287,7 +287,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
 
 static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
        .allow_peer2peer = true,
-       .move_notify = xe_dma_buf_move_notify
+       .invalidate_mappings = xe_dma_buf_move_notify
 };
 
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
index 0ec2e4120cc94bb0ba74cad6cd5e56a010f26b7f..d77a739cfe7a3110271ab643d5bcf5cf55ed28c2 100644 (file)
@@ -129,7 +129,7 @@ ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
        if (check_add_overflow(offset, (unsigned long)size, &end))
                return ret;
 
-       if (unlikely(!ops || !ops->move_notify))
+       if (unlikely(!ops || !ops->invalidate_mappings))
                return ret;
 
        dmabuf = dma_buf_get(fd);
@@ -195,7 +195,7 @@ ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
 
 static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
        .allow_peer2peer = true,
-       .move_notify = ib_umem_dmabuf_unsupported_move_notify,
+       .invalidate_mappings = ib_umem_dmabuf_unsupported_move_notify,
 };
 
 struct ib_umem_dmabuf *
index 325fa04cbe8ae36aeec0f5c9b91bf6530807ebdd..97099d3b16884ab3bb7d3fa3508c860b9a386e68 100644 (file)
@@ -1620,7 +1620,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
 
 static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
        .allow_peer2peer = 1,
-       .move_notify = mlx5_ib_dmabuf_invalidate_cb,
+       .invalidate_mappings = mlx5_ib_dmabuf_invalidate_cb,
 };
 
 static struct ib_mr *
index dbe51ecb9a20f81efdb4211223c0360a403fee29..76f900fa1687857d4fa37efff140c26fd97b4d53 100644 (file)
@@ -1451,7 +1451,7 @@ static void iopt_revoke_notify(struct dma_buf_attachment *attach)
 
 static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
        .allow_peer2peer = true,
-       .move_notify = iopt_revoke_notify,
+       .invalidate_mappings = iopt_revoke_notify,
 };
 
 /*
index 91f4939db89be08bc9703519b2e1f8afe9f4a1fe..d9ee4499b37dca048214da6d9087be808e2058a8 100644 (file)
@@ -407,7 +407,7 @@ struct dma_buf {
         *   through the device.
         *
         * - Dynamic importers should set fences for any access that they can't
-        *   disable immediately from their &dma_buf_attach_ops.move_notify
+        *   disable immediately from their &dma_buf_attach_ops.invalidate_mappings
         *   callback.
         *
         * IMPORTANT:
@@ -446,7 +446,7 @@ struct dma_buf_attach_ops {
        bool allow_peer2peer;
 
        /**
-        * @move_notify: [optional] notification that the DMA-buf is moving
+        * @invalidate_mappings: [optional] notification that the DMA-buf is moving
         *
         * If this callback is provided the framework can avoid pinning the
         * backing store while mappings exists.
@@ -463,7 +463,7 @@ struct dma_buf_attach_ops {
         * New mappings can be created after this callback returns, and will
         * point to the new location of the DMA-buf.
         */
-       void (*move_notify)(struct dma_buf_attachment *attach);
+       void (*invalidate_mappings)(struct dma_buf_attachment *attach);
 };
 
 /**