drm/xe: Drop preempt-fences when destroying imported dma-bufs.
author     Thomas Hellström <thomas.hellstrom@linux.intel.com>
           Wed, 17 Dec 2025 09:34:41 +0000 (10:34 +0100)
committer  Thomas Hellström <thomas.hellstrom@linux.intel.com>
           Thu, 18 Dec 2025 16:36:43 +0000 (17:36 +0100)
When imported dma-bufs are destroyed, TTM does not fully
individualize the dma-resv, but it *does* copy the fences that
need to be waited for before the object can be declared idle. So
even in the case where bo->resv != &bo->_resv we can still drop
the preempt-fences, but we need to make sure we do that on
bo->_resv, which contains the fence-pointer copy.

If the copying fails, bo->_resv will typically not contain any
fence pointers at all, so there is nothing to drop. In that case,
TTM will have ensured that all fences that would have been copied
are signaled, including any remaining preempt fences.

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Fixes: fa0af721bd1f ("drm/ttm: test private resv obj on release/destroy")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.16+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Tested-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251217093441.5073-1-thomas.hellstrom@linux.intel.com
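
For reference, a condensed sketch of the release-notify path after this
change, assembled from the hunks below (not the literal function: helpers
such as xe_ttm_bo_lock_in_destructor() and xe_fence_is_xe_preempt() come
from the patched xe_bo.c, and the asserts and the TODO about external bos
are omitted):

/*
 * Sketch: drop unsignaled preempt fences from the object's private resv
 * (&ttm_bo->base._resv), which holds the fence-pointer copy even when an
 * imported dma-buf's bo->resv still points at the shared/imported resv.
 */
static void release_notify_sketch(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct dma_fence *replacement = NULL;

	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
		return;

	dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
				DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (xe_fence_is_xe_preempt(fence) &&
		    !dma_fence_is_signaled(fence)) {
			if (!replacement)
				replacement = dma_fence_get_stub();

			/* Replace every fence from this context with the
			 * already-signaled stub fence. */
			dma_resv_replace_fences(&ttm_bo->base._resv,
						fence->context,
						replacement,
						DMA_RESV_USAGE_BOOKKEEP);
		}
	}
	dma_fence_put(replacement);

	dma_resv_unlock(&ttm_bo->base._resv);
}
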
drivers/gpu/drm/xe/xe_bo.c

index 6280e6a013ff4e1d8158efe292a57c64cee40ca7..8b6474cd3eaf237a267cc0d89b381f21f9c0e61d 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1526,7 +1526,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
         * always succeed here, as long as we hold the lru lock.
         */
        spin_lock(&ttm_bo->bdev->lru_lock);
-       locked = dma_resv_trylock(ttm_bo->base.resv);
+       locked = dma_resv_trylock(&ttm_bo->base._resv);
        spin_unlock(&ttm_bo->bdev->lru_lock);
        xe_assert(xe, locked);
 
@@ -1546,13 +1546,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
        bo = ttm_to_xe_bo(ttm_bo);
        xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
 
-       /*
-        * Corner case where TTM fails to allocate memory and this BOs resv
-        * still points the VMs resv
-        */
-       if (ttm_bo->base.resv != &ttm_bo->base._resv)
-               return;
-
        if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
                return;
 
@@ -1562,14 +1555,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
         * TODO: Don't do this for external bos once we scrub them after
         * unbind.
         */
-       dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
+       dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
                                DMA_RESV_USAGE_BOOKKEEP, fence) {
                if (xe_fence_is_xe_preempt(fence) &&
                    !dma_fence_is_signaled(fence)) {
                        if (!replacement)
                                replacement = dma_fence_get_stub();
 
-                       dma_resv_replace_fences(ttm_bo->base.resv,
+                       dma_resv_replace_fences(&ttm_bo->base._resv,
                                                fence->context,
                                                replacement,
                                                DMA_RESV_USAGE_BOOKKEEP);
@@ -1577,7 +1570,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
        }
        dma_fence_put(replacement);
 
-       dma_resv_unlock(ttm_bo->base.resv);
+       dma_resv_unlock(&ttm_bo->base._resv);
 }
 
 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)