]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/xe3p_lpg: Restrict UAPI to enable L2 flush optimization
authorTejas Upadhyay <tejas.upadhyay@intel.com>
Thu, 5 Mar 2026 12:19:06 +0000 (17:49 +0530)
committerTejas Upadhyay <tejas.upadhyay@intel.com>
Mon, 23 Mar 2026 09:54:14 +0000 (15:24 +0530)
Starting with xe3p_lpg, when set, the L2 flush optimization
feature will control whether L2 is in Persistent or
Transient mode by monitoring media activity.

To enable the L2 flush optimization, include the new feature
flag GUC_CTL_ENABLE_L2FLUSH_OPT on Novalake platforms when
the GT is of media type.

Tighten UAPI validation to restrict userptr, SVM and
dma-buf mappings to be either 2WAY or XA+1WAY coherency.

V5(Thomas): logic correction
V4(MattA): Modify uapi doc and commit
V3(MattA): check valid op and pat_index value
V2(MattA): validate dma-buf bos and madvise pat-index

Acked-by: José Roberto de Souza <jose.souza@intel.com>
Acked-by: Michal Mrozek <michal.mrozek@intel.com>
Acked-by: Carl Zhang <carl.zhang@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patch.msgid.link/20260305121902.1892593-9-tejas.upadhyay@intel.com
Signed-off-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
drivers/gpu/drm/xe/xe_guc.c
drivers/gpu/drm/xe/xe_guc_fwif.h
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_madvise.c
include/uapi/drm/xe_drm.h

index 576f3d500390c37f684bb784c07ff908894767ec..ccebb437e37fd067a258dffc0ace7ab391af58f7 100644 (file)
@@ -98,6 +98,9 @@ static u32 guc_ctl_feature_flags(struct xe_guc *guc)
        if (xe_guc_using_main_gamctrl_queues(guc))
                flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;
 
+       if (GRAPHICS_VER(xe) >= 35 && !IS_DGFX(xe) && xe_gt_is_media_type(guc_to_gt(guc)))
+               flags |= GUC_CTL_ENABLE_L2FLUSH_OPT;
+
        return flags;
 }
 
index bb8f71d386114a23928a246890a0b0ac3e40fe02..b73fae063facdf97a4710253ee22bfbb7799ef36 100644 (file)
@@ -67,6 +67,7 @@ struct guc_update_exec_queue_policy {
 #define   GUC_CTL_ENABLE_PSMI_LOGGING  BIT(7)
 #define   GUC_CTL_MAIN_GAMCTRL_QUEUES  BIT(9)
 #define   GUC_CTL_DISABLE_SCHEDULER    BIT(14)
+#define   GUC_CTL_ENABLE_L2FLUSH_OPT   BIT(15)
 
 #define GUC_CTL_DEBUG                  3
 #define   GUC_LOG_VERBOSITY            REG_GENMASK(1, 0)
index c0d8f5db019d079dd00eabf129c87c2b024e1129..e2443628778647fe69216a5571a9332720d765be 100644 (file)
@@ -3492,6 +3492,10 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
                                 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
                    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
                                 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+                   XE_IOCTL_DBG(xe, xe_device_is_l2_flush_optimized(xe) &&
+                                (op == DRM_XE_VM_BIND_OP_MAP_USERPTR ||
+                                 is_cpu_addr_mirror) &&
+                                (pat_index != 19 && coh_mode != XE_COH_2WAY)) ||
                    XE_IOCTL_DBG(xe, comp_en &&
                                 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
                    XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
@@ -3633,6 +3637,10 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
        if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && comp_en))
                return -EINVAL;
 
+       if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && xe_device_is_l2_flush_optimized(xe) &&
+                        (pat_index != 19 && coh_mode != XE_COH_2WAY)))
+               return -EINVAL;
+
        /* If a BO is protected it can only be mapped if the key is still valid */
        if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
            op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
index 431be53be56fff646ac0021d86d152d2f333539a..e564b12c02d9ff969133fff0b75aaac00d56d38c 100644 (file)
@@ -419,6 +419,7 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
        struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start,
                                                         .range =  args->range, };
        struct xe_madvise_details details;
+       u16 pat_index, coh_mode;
        struct xe_vm *vm;
        struct drm_exec exec;
        int err, attr_type;
@@ -455,6 +456,17 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
        if (err || !madvise_range.num_vmas)
                goto madv_fini;
 
+       if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+               pat_index = array_index_nospec(args->pat_index.val, xe->pat.n_entries);
+               coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
+               if (XE_IOCTL_DBG(xe, madvise_range.has_svm_userptr_vmas &&
+                                xe_device_is_l2_flush_optimized(xe) &&
+                                (pat_index != 19 && coh_mode != XE_COH_2WAY))) {
+                       err = -EINVAL;
+                       goto madv_fini;
+               }
+       }
+
        if (madvise_range.has_bo_vmas) {
                if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
                        if (!check_bo_args_are_sane(vm, madvise_range.vmas,
@@ -472,6 +484,17 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
 
                                if (!bo)
                                        continue;
+
+                               if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+                                       if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach &&
+                                                        xe_device_is_l2_flush_optimized(xe) &&
+                                                        (pat_index != 19 &&
+                                                         coh_mode != XE_COH_2WAY))) {
+                                               err = -EINVAL;
+                                               goto err_fini;
+                                       }
+                               }
+
                                err = drm_exec_lock_obj(&exec, &bo->ttm.base);
                                drm_exec_retry_on_contention(&exec);
                                if (err)
index f8b2afb20540fca8fadf7dd10bc021c00fe37133..7014dde1c9c45b585ab182e84b6501971c910443 100644 (file)
@@ -1114,7 +1114,9 @@ struct drm_xe_vm_bind_op {
         * incoherent GT access is possible.
         *
         * Note: For userptr and externally imported dma-buf the kernel expects
-        * either 1WAY or 2WAY for the @pat_index.
+        * either 1WAY or 2WAY for the @pat_index. Starting from NVL-P, for
+        * userptr, svm, madvise and externally imported dma-buf the kernel expects
+        * either 2WAY or 1WAY and XA @pat_index.
         *
         * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
         * on the @pat_index. For such mappings there is no actual memory being