git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/vmwgfx: Add basic support for external buffers
author: Zack Rusin <zack.rusin@broadcom.com>
Mon, 22 Jul 2024 18:41:16 +0000 (14:41 -0400)
committer: Zack Rusin <zack.rusin@broadcom.com>
Thu, 25 Jul 2024 02:21:53 +0000 (22:21 -0400)
Make vmwgfx go through the dma-buf interface to map/unmap imported
buffers. The driver used to try to directly manipulate external
buffers, assuming that everything that was coming to it had to live
in cpu accessible memory. While technically true because what's in the
vms is controlled by us, it's semantically completely broken.

Fix importing of external buffers by forwarding all memory access
requests to the importer.

Tested by the vmw_prime basic_vgem test.

Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240722184313.181318-5-zack.rusin@broadcom.com
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c

index 07185c108218fc3b66f7daf620eaaec617138298..b9857f37ca1ac6f81b6285270a63c4de9a0b6f2e 100644 (file)
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /*
- * Copyright 2021-2023 VMware, Inc.
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -78,6 +79,59 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
        return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
 }
 
+static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+       struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+       int ret;
+
+       if (obj->import_attach) {
+               ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+               if (!ret) {
+                       if (drm_WARN_ON(obj->dev, map->is_iomem)) {
+                               dma_buf_vunmap(obj->import_attach->dmabuf, map);
+                               return -EIO;
+                       }
+               }
+       } else {
+               ret = ttm_bo_vmap(bo, map);
+       }
+
+       return ret;
+}
+
+static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+       if (obj->import_attach)
+               dma_buf_vunmap(obj->import_attach->dmabuf, map);
+       else
+               drm_gem_ttm_vunmap(obj, map);
+}
+
+static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+       int ret;
+
+       if (obj->import_attach) {
+               /*
+                * Reset both vm_ops and vm_private_data, so we don't end up with
+                * vm_ops pointing to our implementation if the dma-buf backend
+                * doesn't set those fields.
+                */
+               vma->vm_private_data = NULL;
+               vma->vm_ops = NULL;
+
+               ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+               /* Drop the reference drm_gem_mmap_obj() acquired.*/
+               if (!ret)
+                       drm_gem_object_put(obj);
+
+               return ret;
+       }
+
+       return drm_gem_ttm_mmap(obj, vma);
+}
+
 static const struct vm_operations_struct vmw_vm_ops = {
        .pfn_mkwrite = vmw_bo_vm_mkwrite,
        .page_mkwrite = vmw_bo_vm_mkwrite,
@@ -94,9 +148,9 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
        .pin = vmw_gem_object_pin,
        .unpin = vmw_gem_object_unpin,
        .get_sg_table = vmw_gem_object_get_sg_table,
-       .vmap = drm_gem_ttm_vmap,
-       .vunmap = drm_gem_ttm_vunmap,
-       .mmap = drm_gem_ttm_mmap,
+       .vmap = vmw_gem_vmap,
+       .vunmap = vmw_gem_vunmap,
+       .mmap = vmw_gem_mmap,
        .vm_ops = &vmw_vm_ops,
 };