]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe: Drop preempt-fences when destroying imported dma-bufs.
authorThomas Hellström <thomas.hellstrom@linux.intel.com>
Wed, 17 Dec 2025 09:34:41 +0000 (10:34 +0100)
committerThomas Hellström <thomas.hellstrom@linux.intel.com>
Thu, 18 Dec 2025 17:12:16 +0000 (18:12 +0100)
When imported dma-bufs are destroyed, TTM is not fully
individualizing the dma-resv, but it *is* copying the fences that
need to be waited for before declaring idle. So in the case where
bo->resv != bo->_resv we can still drop the preempt-fences, but
make sure we do that on bo->_resv, which contains the fence-pointer
copy.

In the case where the copying fails, bo->_resv will typically not
contain any fence pointers at all, so there will be nothing to
drop. In that case, TTM would have ensured all fences that would
have been copied are signaled, including any remaining preempt
fences.

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Fixes: fa0af721bd1f ("drm/ttm: test private resv obj on release/destroy")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.16+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Tested-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251217093441.5073-1-thomas.hellstrom@linux.intel.com
(cherry picked from commit 425fe550fb513b567bd6d01f397d274092a9c274)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
drivers/gpu/drm/xe/xe_bo.c

index b0bd31d14bb97eaf41bcdb7638ce146189e6de57..bf4ee976b6805fd13990c87a33cce3c16569172d 100644 (file)
@@ -1527,7 +1527,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
         * always succeed here, as long as we hold the lru lock.
         */
        spin_lock(&ttm_bo->bdev->lru_lock);
-       locked = dma_resv_trylock(ttm_bo->base.resv);
+       locked = dma_resv_trylock(&ttm_bo->base._resv);
        spin_unlock(&ttm_bo->bdev->lru_lock);
        xe_assert(xe, locked);
 
@@ -1547,13 +1547,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
        bo = ttm_to_xe_bo(ttm_bo);
        xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
 
-       /*
-        * Corner case where TTM fails to allocate memory and this BOs resv
-        * still points the VMs resv
-        */
-       if (ttm_bo->base.resv != &ttm_bo->base._resv)
-               return;
-
        if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
                return;
 
@@ -1563,14 +1556,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
         * TODO: Don't do this for external bos once we scrub them after
         * unbind.
         */
-       dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
+       dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
                                DMA_RESV_USAGE_BOOKKEEP, fence) {
                if (xe_fence_is_xe_preempt(fence) &&
                    !dma_fence_is_signaled(fence)) {
                        if (!replacement)
                                replacement = dma_fence_get_stub();
 
-                       dma_resv_replace_fences(ttm_bo->base.resv,
+                       dma_resv_replace_fences(&ttm_bo->base._resv,
                                                fence->context,
                                                replacement,
                                                DMA_RESV_USAGE_BOOKKEEP);
@@ -1578,7 +1571,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
        }
        dma_fence_put(replacement);
 
-       dma_resv_unlock(ttm_bo->base.resv);
+       dma_resv_unlock(&ttm_bo->base._resv);
 }
 
 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)