drm/xe: Implement xe_pagefault_handler
author    Matthew Brost <matthew.brost@intel.com>
          Fri, 31 Oct 2025 16:54:13 +0000 (09:54 -0700)
committer Matthew Brost <matthew.brost@intel.com>
          Tue, 4 Nov 2025 17:04:29 +0000 (09:04 -0800)
Enqueue (copy) the input struct xe_pagefault into a queue (i.e., into a
memory buffer) and schedule a worker to service it.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Tested-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patch.msgid.link/20251031165416.2871503-5-matthew.brost@intel.com
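
[Editor's note] The full check added below relies on CIRC_SPACE() from
<linux/circ_buf.h>, which assumes a power-of-two queue size and reserves one
byte so head can never catch tail. Since head and tail only advance in
whole-entry steps, the free space is always one byte short of a multiple of
the entry size, so the "<=" comparison in xe_pagefault_queue_full() is
exactly the "no room for one more entry" condition. A standalone sketch of
that arithmetic (the 64-byte entry and 4096-byte queue sizes are illustrative
assumptions, not values from this patch):

#include <assert.h>

/* Same definitions as <linux/circ_buf.h>; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	/* Illustrative sizes: 64-byte entries in a 4096-byte queue. */
	assert(CIRC_SPACE(0, 0, 4096) == 4095);     /* empty queue */
	assert(CIRC_SPACE(1024, 1088, 4096) == 63); /* 63 <= 64 -> full */
	return 0;
}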
drivers/gpu/drm/xe/xe_pagefault.c

index b1decad9b54cd2e90d35b63368029cb8818ff287..7b2ac01a558ebe24168a1c7a92fa24d74d6b9c90 100644 (file)
@@ -3,6 +3,8 @@
  * Copyright © 2025 Intel Corporation
  */
 
+#include <linux/circ_buf.h>
+
 #include <drm/drm_managed.h>
 
 #include "xe_device.h"
@@ -167,6 +169,14 @@ void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt)
                xe_pagefault_queue_reset(xe, gt, xe->usm.pf_queue + i);
 }
 
+static bool xe_pagefault_queue_full(struct xe_pagefault_queue *pf_queue)
+{
+       lockdep_assert_held(&pf_queue->lock);
+
+       return CIRC_SPACE(pf_queue->head, pf_queue->tail, pf_queue->size) <=
+               xe_pagefault_entry_size();
+}
+
 /**
  * xe_pagefault_handler() - Page fault handler
  * @xe: xe device instance
@@ -179,6 +189,24 @@ void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt)
  */
 int xe_pagefault_handler(struct xe_device *xe, struct xe_pagefault *pf)
 {
-       /* TODO - implement */
-       return 0;
+       struct xe_pagefault_queue *pf_queue = xe->usm.pf_queue +
+               (pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT);
+       unsigned long flags;
+       bool full;
+
+       spin_lock_irqsave(&pf_queue->lock, flags);
+       full = xe_pagefault_queue_full(pf_queue);
+       if (!full) {
+               memcpy(pf_queue->data + pf_queue->head, pf, sizeof(*pf));
+               pf_queue->head = (pf_queue->head + xe_pagefault_entry_size()) %
+                       pf_queue->size;
+               queue_work(xe->usm.pf_wq, &pf_queue->worker);
+       } else {
+               drm_warn(&xe->drm,
+                        "PageFault Queue (%d) full, shouldn't be possible\n",
+                        pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT);
+       }
+       spin_unlock_irqrestore(&pf_queue->lock, flags);
+
+       return full ? -ENOSPC : 0;
 }
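
[Editor's note] The other end of this queue is the worker scheduled via
queue_work(); the dequeue path is not part of this commit. A minimal
userspace sketch of the producer/consumer pattern the patch follows might
look like the following, where struct queue, ENTRY_SIZE, and both helpers
are assumptions standing in for xe_pagefault_queue,
xe_pagefault_entry_size(), and the real handler/worker:

#include <stdbool.h>
#include <string.h>

#define QUEUE_SIZE 4096	/* bytes; must be a power of two for circ_buf */
#define ENTRY_SIZE 64	/* bytes per fault entry; illustrative assumption */

/* Same definitions as <linux/circ_buf.h>. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

struct queue {
	char data[QUEUE_SIZE];
	int head;	/* producer index, in bytes, entry-size aligned */
	int tail;	/* consumer index, in bytes, entry-size aligned */
};

/* Producer side, mirroring xe_pagefault_handler(): copy the entry in,
 * advance head, and report failure when there is no room (the patch
 * warns and returns -ENOSPC in that case). */
static bool enqueue(struct queue *q, const void *entry)
{
	if (CIRC_SPACE(q->head, q->tail, QUEUE_SIZE) <= ENTRY_SIZE)
		return false;
	memcpy(q->data + q->head, entry, ENTRY_SIZE);
	q->head = (q->head + ENTRY_SIZE) % QUEUE_SIZE;
	return true;
}

/* Consumer side, roughly what the scheduled worker would do per entry;
 * the real worker is not part of this commit. */
static bool dequeue(struct queue *q, void *entry)
{
	if (!CIRC_CNT(q->head, q->tail, QUEUE_SIZE))
		return false;
	memcpy(entry, q->data + q->tail, ENTRY_SIZE);
	q->tail = (q->tail + ENTRY_SIZE) % QUEUE_SIZE;
	return true;
}

Both ends stay simple because, as in the patch, a single lock
(pf_queue->lock) is assumed to protect head and tail, so no memory barriers
are needed. The spin_lock_irqsave() form suggests faults can be delivered
from interrupt context, which is also why the handler defers the actual
servicing to a worker rather than doing it inline.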