Starting with xe3p_lpg, when set, the L2 flush optimization feature
controls whether L2 is in Persistent or Transient mode by monitoring
media activity.

To enable L2 flush optimization, include the new feature flag
GUC_CTL_ENABLE_L2FLUSH_OPT for Novalake platforms when a media GT is
detected.
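
The UAPI checks further down rely on a device-level query,
xe_device_is_l2_flush_optimized(). Its definition is not part of this
hunk; a minimal sketch, assuming it simply mirrors the gating used for
the GuC flag below (Xe3p-class integrated parts with a media GT),
could look like:

/* Sketch only -- the real helper is defined elsewhere in the series. */
static inline bool xe_device_is_l2_flush_optimized(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	if (GRAPHICS_VER(xe) < 35 || IS_DGFX(xe))
		return false;

	/* The optimization is only engaged when a media GT is present. */
	for_each_gt(gt, xe, id)
		if (xe_gt_is_media_type(gt))
			return true;

	return false;
}
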
Tighten UAPI validation to restrict userptr, SVM and dma-buf mappings
to be either 2WAY or XA+1WAY.
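
All of the new checks encode the same acceptance rule. As a sketch of
that rule (pat_ok_for_l2flush_opt is a hypothetical name; PAT index 19
is assumed to be the XA + 1WAY entry, as the hunks below use it):

/*
 * Sketch of the rule enforced by the validation below: on
 * L2-flush-optimized devices, userptr, SVM and imported dma-buf
 * mappings must either be fully coherent (2WAY) or use the
 * XA + 1WAY PAT entry (index 19 here).
 */
static bool pat_ok_for_l2flush_opt(struct xe_device *xe, u16 pat_index)
{
	u16 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);

	return pat_index == 19 || coh_mode == XE_COH_2WAY;
}
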
V5(Thomas): logic correction
V4(MattA): Modify uapi doc and commit message
V3(MattA): check valid op and pat_index value
V2(MattA): validate dma-buf bos and madvise pat-index
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Acked-by: Michal Mrozek <michal.mrozek@intel.com>
Acked-by: Carl Zhang <carl.zhang@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patch.msgid.link/20260305121902.1892593-9-tejas.upadhyay@intel.com
Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
if (xe_guc_using_main_gamctrl_queues(guc))
flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;
+ if (GRAPHICS_VER(xe) >= 35 && !IS_DGFX(xe) && xe_gt_is_media_type(guc_to_gt(guc)))
+ flags |= GUC_CTL_ENABLE_L2FLUSH_OPT;
+
return flags;
}
#define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7)
#define GUC_CTL_MAIN_GAMCTRL_QUEUES BIT(9)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
+#define GUC_CTL_ENABLE_L2FLUSH_OPT BIT(15)
#define GUC_CTL_DEBUG 3
#define GUC_LOG_VERBOSITY REG_GENMASK(1, 0)
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, xe_device_is_l2_flush_optimized(xe) &&
+ (op == DRM_XE_VM_BIND_OP_MAP_USERPTR ||
+ is_cpu_addr_mirror) &&
+ (pat_index != 19 && coh_mode != XE_COH_2WAY)) ||
XE_IOCTL_DBG(xe, comp_en &&
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && comp_en))
return -EINVAL;
+ if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && xe_device_is_l2_flush_optimized(xe) &&
+ (pat_index != 19 && coh_mode != XE_COH_2WAY)))
+ return -EINVAL;
+
/* If a BO is protected it can only be mapped if the key is still valid */
if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start,
.range = args->range, };
struct xe_madvise_details details;
+ u16 pat_index, coh_mode;
struct xe_vm *vm;
struct drm_exec exec;
int err, attr_type;
if (err || !madvise_range.num_vmas)
goto madv_fini;
+ if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+ pat_index = array_index_nospec(args->pat_index.val, xe->pat.n_entries);
+ coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
+ if (XE_IOCTL_DBG(xe, madvise_range.has_svm_userptr_vmas &&
+ xe_device_is_l2_flush_optimized(xe) &&
+ (pat_index != 19 && coh_mode != XE_COH_2WAY))) {
+ err = -EINVAL;
+ goto madv_fini;
+ }
+ }
+
if (madvise_range.has_bo_vmas) {
if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
if (!check_bo_args_are_sane(vm, madvise_range.vmas,
if (!bo)
continue;
+
+ if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+ if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach &&
+ xe_device_is_l2_flush_optimized(xe) &&
+ (pat_index != 19 &&
+ coh_mode != XE_COH_2WAY))) {
+ err = -EINVAL;
+ goto err_fini;
+ }
+ }
+
err = drm_exec_lock_obj(&exec, &bo->ttm.base);
drm_exec_retry_on_contention(&exec);
if (err)
* incoherent GT access is possible.
*
* Note: For userptr and externally imported dma-buf the kernel expects
- * either 1WAY or 2WAY for the @pat_index.
+ * either 1WAY or 2WAY for the @pat_index. Starting from NVL-P, for
+ * userptr, SVM, madvise and externally imported dma-buf mappings the
+ * kernel expects either 2WAY, or 1WAY combined with XA, for the
+ * @pat_index.
*
* For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
* on the @pat_index. For such mappings there is no actual memory being