--- /dev/null
+From 2a58b21adee3df10ca6f4491af965c4890d2d8e3 Mon Sep 17 00:00:00 2001
+From: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Date: Tue, 20 May 2025 19:54:45 +0530
+Subject: drm/xe/mocs: Initialize MOCS index early
+
+From: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+
+commit 2a58b21adee3df10ca6f4491af965c4890d2d8e3 upstream.
+
+MOCS uc_index is used even before it is initialized, in the following
+call stack:
+ guc_prepare_xfer()
+ __xe_guc_upload()
+ xe_guc_min_load_for_hwconfig()
+ xe_uc_init_hwconfig()
+ xe_gt_init_hwconfig()
+
+Do MOCS index initialization earlier in the device probe.
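+
+For reference, a rough sketch of what xe_mocs_init_early() does
+(approximate; see drivers/gpu/drm/xe/xe_mocs.c for the exact code). It only
+fills in the per-GT MOCS indices, so it is safe to run this early in probe,
+before the GuC upload path reads gt->mocs.uc_index:
+
+	void xe_mocs_init_early(struct xe_gt *gt)
+	{
+		struct xe_mocs_info table;
+
+		/*
+		 * Derive the platform MOCS table and cache the uncached /
+		 * write-back indices that later users (e.g. the GuC xfer
+		 * setup in the call stack above) read from gt->mocs.
+		 */
+		get_mocs_settings(gt_to_xe(gt), &table);
+		gt->mocs.uc_index = table.uc_index;
+		gt->mocs.wb_index = table.wb_index;
+	}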
+
+Signed-off-by: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Reviewed-by: Ravi Kumar Vodapalli <ravi.kumar.vodapalli@intel.com>
+Link: https://lore.kernel.org/r/20250520142445.2792824-1-balasubramani.vivekanandan@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 241cc827c0987d7173714fc5a95a7c8fc9bf15c0)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Stable-dep-of: 3155ac89251d ("drm/xe: Move page fault init after topology init")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_gt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -375,6 +375,8 @@ int xe_gt_init_early(struct xe_gt *gt)
+ if (err)
+ return err;
+
++ xe_mocs_init_early(gt);
++
+ return 0;
+ }
+
+@@ -592,8 +594,6 @@ int xe_gt_init(struct xe_gt *gt)
+ if (err)
+ return err;
+
+- xe_mocs_init_early(gt);
+-
+ err = xe_gt_sysfs_init(gt);
+ if (err)
+ return err;
--- /dev/null
+From 3155ac89251dcb5e35a3ec2f60a74a6ed22c56fd Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Thu, 10 Jul 2025 12:12:08 -0700
+Subject: drm/xe: Move page fault init after topology init
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit 3155ac89251dcb5e35a3ec2f60a74a6ed22c56fd upstream.
+
+We need the topology to determine the GT page fault queue size, so move
+page fault init after topology init.
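+
+For context, the sizing logic added by the commit in the Fixes: tag derives
+the queue depth from the fused-out topology (sketch only; the names below
+approximate the code in xe_gt_pagefault.c and may differ):
+
+	/* counts come from gt->fuse_topo, populated by topology init */
+	num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+				XE_MAX_EU_FUSE_BITS);
+	pf_queue->num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+
+If xe_gt_pagefault_init() runs before those fuse masks are populated, the
+queue is sized from all-zero topology data.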
+
+Cc: stable@vger.kernel.org
+Fixes: 3338e4f90c14 ("drm/xe: Use topology to determine page fault queue size")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
+Reviewed-by: Stuart Summers <stuart.summers@intel.com>
+Link: https://lore.kernel.org/r/20250710191208.1040215-1-matthew.brost@intel.com
+(cherry picked from commit beb72acb5b38dbe670d8eb752d1ad7a32f9c4119)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_gt.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -590,15 +590,15 @@ int xe_gt_init(struct xe_gt *gt)
+ if (err)
+ return err;
+
+- err = xe_gt_pagefault_init(gt);
++ err = xe_gt_sysfs_init(gt);
+ if (err)
+ return err;
+
+- err = xe_gt_sysfs_init(gt);
++ err = gt_fw_domain_init(gt);
+ if (err)
+ return err;
+
+- err = gt_fw_domain_init(gt);
++ err = xe_gt_pagefault_init(gt);
+ if (err)
+ return err;
+
--- /dev/null
+From e14fd98c6d66cb76694b12c05768e4f9e8c95664 Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Wed, 16 Jul 2025 10:38:48 -0700
+Subject: sched/ext: Prevent update_locked_rq() calls with NULL rq
+
+From: Breno Leitao <leitao@debian.org>
+
+commit e14fd98c6d66cb76694b12c05768e4f9e8c95664 upstream.
+
+Avoid invoking update_locked_rq() when the runqueue (rq) pointer is NULL
+in the SCX_CALL_OP and SCX_CALL_OP_RET macros.
+
+Previously, calling update_locked_rq(NULL) with preemption enabled could
+trigger the following warning:
+
+ BUG: using __this_cpu_write() in preemptible [00000000]
+
+This happens because __this_cpu_write() is unsafe to use in preemptible
+context.
+
+rq is NULL when an ops is invoked from an unlocked context. In such cases,
+there is no need to store any rq, since the value should already be NULL
+(unlocked). Ensure that update_locked_rq() is only called when rq is
+non-NULL, preventing __this_cpu_write() from being called in preemptible
+context.
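+
+For reference, update_locked_rq() (added by the commit in the Fixes: tag)
+boils down to a per-CPU write, which is why it must not run with preemption
+enabled (sketch only; see kernel/sched/ext.c for the exact code):
+
+	static DEFINE_PER_CPU(struct rq *, locked_rq);
+
+	static inline void update_locked_rq(struct rq *rq)
+	{
+		/* Sanity-check that a non-NULL rq is actually locked. */
+		if (rq)
+			lockdep_assert_rq_held(rq);
+		__this_cpu_write(locked_rq, rq);
+	}
+
+With rq == NULL and preemption enabled (the unlocked-ops case), the
+__this_cpu_write() above is what trips the preemptible-context check.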
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Fixes: 18853ba782bef ("sched_ext: Track currently locked rq")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Acked-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: stable@vger.kernel.org # v6.15
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -1149,7 +1149,8 @@ static inline struct rq *scx_locked_rq(v
+
+ #define SCX_CALL_OP(mask, op, rq, args...) \
+ do { \
+- update_locked_rq(rq); \
++ if (rq) \
++ update_locked_rq(rq); \
+ if (mask) { \
+ scx_kf_allow(mask); \
+ scx_ops.op(args); \
+@@ -1157,14 +1158,16 @@ do { \
+ } else { \
+ scx_ops.op(args); \
+ } \
+- update_locked_rq(NULL); \
++ if (rq) \
++ update_locked_rq(NULL); \
+ } while (0)
+
+ #define SCX_CALL_OP_RET(mask, op, rq, args...) \
+ ({ \
+ __typeof__(scx_ops.op(args)) __ret; \
+ \
+- update_locked_rq(rq); \
++ if (rq) \
++ update_locked_rq(rq); \
+ if (mask) { \
+ scx_kf_allow(mask); \
+ __ret = scx_ops.op(args); \
+@@ -1172,7 +1175,8 @@ do { \
+ } else { \
+ __ret = scx_ops.op(args); \
+ } \
+- update_locked_rq(NULL); \
++ if (rq) \
++ update_locked_rq(NULL); \
+ __ret; \
+ })
+
cifs-fix-the-smbd_response-slab-to-allow-usercopy.patch
cifs-fix-reading-into-an-iter_folioq-from-the-smbdirect-code.patch
sched-freezer-remove-unnecessary-warning-in-__thaw_task.patch
+sched-ext-prevent-update_locked_rq-calls-with-null-rq.patch
+drm-xe-mocs-initialize-mocs-index-early.patch
+drm-xe-move-page-fault-init-after-topology-init.patch