From: Chen-Yu Tsai
Date: Wed, 11 Mar 2026 09:49:26 +0000 (+0800)
Subject: drm/gem-dma: Support dedicated DMA device for allocation and mapping
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=a9da24732aaa80d631bffc8a1390836d4b896690;p=thirdparty%2Flinux.git

drm/gem-dma: Support dedicated DMA device for allocation and mapping

Support for a dedicated DMA device for prime imports was added in
commit 143ec8d3f939 ("drm/prime: Support dedicated DMA device for
dma-buf imports"). This allowed a DRM driver to provide a dedicated
DMA device when its own underlying device was not capable of DMA, for
example when it is a USB device (the original target) or a virtual
device.

The latter case is common on embedded SoCs, on which the display
pipeline is composed of various fixed-function blocks, and the DRM
device is simply a made-up device: an address space managing the
routing between the blocks, or whichever block the implementor thought
made sense at the time. The point is that the chosen device is often
not the actual device doing the DMA. Various drivers have used
workarounds or reimplemented the GEM DMA helpers to get the DMA
addresses and IOMMUs to work correctly.

Add support for the dedicated DMA device to the GEM DMA helpers. No
driver currently uses both the GEM DMA helpers and
drm_dev_set_dma_dev(), so no existing users should be affected.

Reviewed-by: Thomas Zimmermann
Reviewed-by: AngeloGioacchino Del Regno
Link: https://patch.msgid.link/20260311094929.3393338-3-wenst@chromium.org
Signed-off-by: Chen-Yu Tsai
---

diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index ecb9746f4da86..70f83e4644766 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -146,12 +146,13 @@ struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
 		return dma_obj;
 
 	if (dma_obj->map_noncoherent) {
-		dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
+		dma_obj->vaddr = dma_alloc_noncoherent(drm_dev_dma_dev(drm),
+						       size,
 						       &dma_obj->dma_addr,
 						       DMA_TO_DEVICE,
 						       GFP_KERNEL | __GFP_NOWARN);
 	} else {
-		dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
+		dma_obj->vaddr = dma_alloc_wc(drm_dev_dma_dev(drm), size,
 					      &dma_obj->dma_addr,
 					      GFP_KERNEL | __GFP_NOWARN);
 	}
@@ -236,12 +237,14 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
 		drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
 	} else if (dma_obj->vaddr) {
 		if (dma_obj->map_noncoherent)
-			dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
+			dma_free_noncoherent(drm_dev_dma_dev(gem_obj->dev),
+					     dma_obj->base.size,
 					     dma_obj->vaddr, dma_obj->dma_addr,
 					     DMA_TO_DEVICE);
 		else
-			dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
-				    dma_obj->vaddr, dma_obj->dma_addr);
+			dma_free_wc(drm_dev_dma_dev(gem_obj->dev),
+				    dma_obj->base.size, dma_obj->vaddr,
+				    dma_obj->dma_addr);
 	}
 
 	drm_gem_object_release(gem_obj);
@@ -432,7 +435,7 @@ struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
 	if (!sgt)
 		return ERR_PTR(-ENOMEM);
 
-	ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
+	ret = dma_get_sgtable(drm_dev_dma_dev(obj->dev), sgt, dma_obj->vaddr,
 			      dma_obj->dma_addr, obj->size);
 	if (ret < 0)
 		goto out;
@@ -539,12 +542,12 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
 	if (dma_obj->map_noncoherent) {
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
-		ret = dma_mmap_pages(dma_obj->base.dev->dev,
+		ret = dma_mmap_pages(drm_dev_dma_dev(dma_obj->base.dev),
 				     vma, vma->vm_end - vma->vm_start,
 				     virt_to_page(dma_obj->vaddr));
 	} else {
-		ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
-				  dma_obj->dma_addr,
+		ret = dma_mmap_wc(drm_dev_dma_dev(dma_obj->base.dev), vma,
+				  dma_obj->vaddr, dma_obj->dma_addr,
 				  vma->vm_end - vma->vm_start);
 	}
 	if (ret)
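
Usage sketch (not part of the patch): with this change in place, a driver
whose drm_device hangs off a virtual top-level device can point the GEM DMA
helpers at the block that actually masters the bus by calling
drm_dev_set_dma_dev() before registration. The names example_bind,
example_driver, virt_dev and scanout_dev below are hypothetical, chosen
only for illustration; drm_dev_alloc(), drm_dev_set_dma_dev() and
drm_dev_register() are the real DRM core APIs.

	#include <linux/err.h>
	#include <drm/drm_drv.h>

	/* Hypothetical driver structure, declared elsewhere. */
	extern const struct drm_driver example_driver;

	static int example_bind(struct device *virt_dev,
				struct device *scanout_dev)
	{
		struct drm_device *drm;

		drm = drm_dev_alloc(&example_driver, virt_dev);
		if (IS_ERR(drm))
			return PTR_ERR(drm);

		/*
		 * Route GEM DMA allocations and mappings (and prime
		 * imports) through the device that actually performs
		 * the DMA, so its DMA ops and IOMMU domain are used
		 * instead of the virtual device's.
		 */
		drm_dev_set_dma_dev(drm, scanout_dev);

		return drm_dev_register(drm, 0);
	}

After this, drm_gem_dma_create(), drm_gem_dma_free(),
drm_gem_dma_get_sg_table() and drm_gem_dma_mmap() all resolve
drm_dev_dma_dev() to scanout_dev rather than drm->dev.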