return 0;
}
+/**
+ * xe_bo_decompress - schedule in-place decompress and install fence
+ * @bo: buffer object (caller must hold the drm_exec/dma_resv reservations
+ *      for both the VM and the BO before calling)
+ *
+ * Schedules an in-place resolve via the migrate layer and installs the
+ * returned dma_fence into the BO reservation object with
+ * DMA_RESV_USAGE_KERNEL, so subsequent users of the BO wait for the
+ * resolve to complete.
+ *
+ * In preempt fence mode this operation interrupts hardware execution,
+ * which is expensive; page fault mode is recommended for better
+ * performance.
+ *
+ * The resolve path only runs for VRAM-backed buffers (currently dGPU-only);
+ * iGPU/system-memory objects fail the resource check below and return
+ * success without scheduling any work.
+ *
+ * Return: 0 on success (including the non-VRAM no-op case), negative errno
+ * on failure.
+ */
+int xe_bo_decompress(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ /*
+  * NOTE(review): always uses the root tile's migrate engine, even if the
+  * VRAM resource lives on a different tile — confirm this is intended on
+  * multi-tile parts.
+  */
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct dma_fence *decomp_fence = NULL;
+ struct ttm_operation_ctx op_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .gfp_retry_mayfail = false,
+ };
+ int err = 0;
+
+ /* Silently skip decompression for non-VRAM buffers (no resource yet,
+  * or system-memory placement): nothing to resolve there.
+  */
+ if (!bo->ttm.resource || !mem_type_is_vram(bo->ttm.resource->mem_type))
+ return 0;
+
+ /* Notify before scheduling resolve — presumably invalidates existing
+  * mappings/bindings; verify against xe_bo_move_notify().
+  * NOTE(review): if the reservation or resolve below fails after this
+  * notify succeeded, no rollback notify is issued — confirm whether one
+  * is required.
+  */
+ err = xe_bo_move_notify(bo, &op_ctx);
+ if (err)
+ return err;
+
+ /* Reserve the fence slot up front so dma_resv_add_fence() below
+  * cannot fail.
+  */
+ err = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (err)
+ return err;
+
+ /* Schedule the in-place decompression on the migrate engine */
+ decomp_fence = xe_migrate_resolve(tile->migrate,
+ bo,
+ bo->ttm.resource);
+
+ /* Assumes xe_migrate_resolve() returns ERR_PTR() on failure and never
+  * NULL — TODO confirm; a NULL return would crash dma_resv_add_fence().
+  */
+ if (IS_ERR(decomp_fence))
+ return PTR_ERR(decomp_fence);
+
+ /* Install as a kernel-usage fence (everything touching the BO waits on
+  * it), then drop our reference — the resv now holds its own.
+  */
+ dma_resv_add_fence(bo->ttm.base.resv, decomp_fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(decomp_fence);
+
+ return 0;
+}
+
/**
* xe_bo_lock() - Lock the buffer object's dma_resv object
* @bo: The struct xe_bo whose lock is to be taken
op->map.vma_flags |= XE_VMA_DUMPABLE;
if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
+ op->map.request_decompress = flags & DRM_XE_VM_BIND_FLAG_DECOMPRESS;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
}
static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
- bool res_evict, bool validate)
+ bool res_evict, bool validate, bool request_decompress)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
err = xe_bo_validate(bo, vm,
xe_vm_allow_vm_eviction(vm) &&
res_evict, exec);
+
+ if (err)
+ return err;
+
+ if (request_decompress)
+ err = xe_bo_decompress(bo);
}
return err;
err = vma_lock_and_validate(exec, op->map.vma,
res_evict,
!xe_vm_in_fault_mode(vm) ||
- op->map.immediate);
+ op->map.immediate,
+ op->map.request_decompress);
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.remap.unmap->va),
- res_evict, false);
+ res_evict, false, false);
if (!err && op->remap.prev)
err = vma_lock_and_validate(exec, op->remap.prev,
- res_evict, true);
+ res_evict, true, false);
if (!err && op->remap.next)
err = vma_lock_and_validate(exec, op->remap.next,
- res_evict, true);
+ res_evict, true, false);
break;
case DRM_GPUVA_OP_UNMAP:
err = check_ufence(gpuva_to_vma(op->base.unmap.va));
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.unmap.va),
- res_evict, false);
+ res_evict, false, false);
break;
case DRM_GPUVA_OP_PREFETCH:
{
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
- res_evict, false);
+ res_evict, false, false);
if (!err && !xe_vma_has_no_bo(vma))
err = xe_bo_migrate(xe_vma_bo(vma),
region_to_mem_type[region],
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
- DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET | \
+ DRM_XE_VM_BIND_FLAG_DECOMPRESS)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
bool is_cpu_addr_mirror = flags &
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
+ bool is_decompress = flags & DRM_XE_VM_BIND_FLAG_DECOMPRESS;
u16 pat_index = (*bind_ops)[i].pat_index;
u16 coh_mode;
bool comp_en;
XE_IOCTL_DBG(xe, obj_offset && (is_null ||
is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
- (is_null || is_cpu_addr_mirror)) ||
+ (is_decompress || is_null || is_cpu_addr_mirror)) ||
+ XE_IOCTL_DBG(xe, is_decompress &&
+ xe_pat_index_get_comp_en(xe, pat_index)) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_MAP &&
!is_null && !is_cpu_addr_mirror) ||
err = -EINVAL;
goto free_bind_ops;
}
+
+ if (is_decompress && (XE_IOCTL_DBG(xe, !xe_device_has_flat_ccs(xe)) ||
+ XE_IOCTL_DBG(xe, GRAPHICS_VER(xe) < 20) ||
+ XE_IOCTL_DBG(xe, !IS_DGFX(xe)))) {
+ err = -EOPNOTSUPP;
+ goto free_bind_ops;
+ }
}
return 0;