git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe: Add debugfs knobs to control long running workload timeslicing
author: Matthew Brost <matthew.brost@intel.com>
        Fri, 12 Dec 2025 18:28:43 +0000 (10:28 -0800)
committer: Matthew Brost <matthew.brost@intel.com>
        Mon, 15 Dec 2025 21:53:32 +0000 (13:53 -0800)
Add debugfs knobs to control timeslicing for long-running workloads,
allowing quick tuning of values when running benchmarks.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-4-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_debugfs.c
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_vm.c

index ad070055cef14a2ac51c28899437133e3c060f3b..0907868b32d67310c0fe8f8da257bf9a6a24ad89 100644 (file)
@@ -361,6 +361,74 @@ static const struct file_operations atomic_svm_timeslice_ms_fops = {
        .write = atomic_svm_timeslice_ms_set,
 };
 
+static ssize_t min_run_period_lr_ms_show(struct file *f, char __user *ubuf,
+                                        size_t size, loff_t *pos)
+{
+       struct xe_device *xe = file_inode(f)->i_private;
+       char buf[32];
+       int len = 0;
+
+       len = scnprintf(buf, sizeof(buf), "%d\n", xe->min_run_period_lr_ms);
+
+       return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t min_run_period_lr_ms_set(struct file *f, const char __user *ubuf,
+                                       size_t size, loff_t *pos)
+{
+       struct xe_device *xe = file_inode(f)->i_private;
+       u32 min_run_period_lr_ms;
+       ssize_t ret;
+
+       ret = kstrtouint_from_user(ubuf, size, 0, &min_run_period_lr_ms);
+       if (ret)
+               return ret;
+
+       xe->min_run_period_lr_ms = min_run_period_lr_ms;
+
+       return size;
+}
+
/* File operations backing the min_run_period_lr_ms debugfs knob. */
static const struct file_operations min_run_period_lr_ms_fops = {
	.owner = THIS_MODULE,
	.read = min_run_period_lr_ms_show,
	.write = min_run_period_lr_ms_set,
};
+
+static ssize_t min_run_period_pf_ms_show(struct file *f, char __user *ubuf,
+                                        size_t size, loff_t *pos)
+{
+       struct xe_device *xe = file_inode(f)->i_private;
+       char buf[32];
+       int len = 0;
+
+       len = scnprintf(buf, sizeof(buf), "%d\n", xe->min_run_period_pf_ms);
+
+       return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t min_run_period_pf_ms_set(struct file *f, const char __user *ubuf,
+                                       size_t size, loff_t *pos)
+{
+       struct xe_device *xe = file_inode(f)->i_private;
+       u32 min_run_period_pf_ms;
+       ssize_t ret;
+
+       ret = kstrtouint_from_user(ubuf, size, 0, &min_run_period_pf_ms);
+       if (ret)
+               return ret;
+
+       xe->min_run_period_pf_ms = min_run_period_pf_ms;
+
+       return size;
+}
+
/* File operations backing the min_run_period_pf_ms debugfs knob. */
static const struct file_operations min_run_period_pf_ms_fops = {
	.owner = THIS_MODULE,
	.read = min_run_period_pf_ms_show,
	.write = min_run_period_pf_ms_set,
};
+
 static ssize_t disable_late_binding_show(struct file *f, char __user *ubuf,
                                         size_t size, loff_t *pos)
 {
@@ -428,6 +496,12 @@ void xe_debugfs_register(struct xe_device *xe)
        debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
                            &atomic_svm_timeslice_ms_fops);
 
+       debugfs_create_file("min_run_period_lr_ms", 0600, root, xe,
+                           &min_run_period_lr_ms_fops);
+
+       debugfs_create_file("min_run_period_pf_ms", 0600, root, xe,
+                           &min_run_period_pf_ms_fops);
+
        debugfs_create_file("disable_late_binding", 0600, root, xe,
                            &disable_late_binding_fops);
 
index cdaa1c1e73f523fcf4aa15f02eb257bdd087dc0f..00afc84a8683eefb9500c6e752d5fc96629effa1 100644 (file)
@@ -460,6 +460,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
        xe->info.revid = pdev->revision;
        xe->info.force_execlist = xe_modparam.force_execlist;
        xe->atomic_svm_timeslice_ms = 5;
+       xe->min_run_period_lr_ms = 5;
 
        err = xe_irq_init(xe);
        if (err)
index 85700533db5299a242e5bac3e6ec036b0301b9b4..413ba4c8b62e6d50901ab4921ed3dd85bdbca0da 100644 (file)
@@ -627,6 +627,12 @@ struct xe_device {
        /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */
        u32 atomic_svm_timeslice_ms;
 
+       /** @min_run_period_lr_ms: LR VM (preempt fence mode) timeslice */
+       u32 min_run_period_lr_ms;
+
+       /** @min_run_period_pf_ms: LR VM (page fault mode) timeslice */
+       u32 min_run_period_pf_ms;
+
 #ifdef TEST_VM_OPS_ERROR
        /**
         * @vm_inject_error_position: inject errors at different places in VM
index 68dffbf9768085914d814f0e37e5f35cbd5246ca..95e22ff95ea8ff970b206b9caea2c8641a2a4b0d 100644 (file)
@@ -1509,9 +1509,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 
        INIT_LIST_HEAD(&vm->preempt.exec_queues);
        if (flags & XE_VM_FLAG_FAULT_MODE)
-               vm->preempt.min_run_period_ms = 0;
+               vm->preempt.min_run_period_ms = xe->min_run_period_pf_ms;
        else
-               vm->preempt.min_run_period_ms = 5;
+               vm->preempt.min_run_period_ms = xe->min_run_period_lr_ms;
 
        for_each_tile(tile, xe, id)
                xe_range_fence_tree_init(&vm->rftree[id]);