--- /dev/null
+From b017a0cea627fcbe158fc2c214fe893e18c4d0c4 Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@kernel.org>
+Date: Mon, 25 Mar 2024 16:35:21 +0000
+Subject: arm64/ptrace: Use saved floating point state type to determine SVE layout
+
+From: Mark Brown <broonie@kernel.org>
+
+commit b017a0cea627fcbe158fc2c214fe893e18c4d0c4 upstream.
+
+The SVE register sets have two different formats, one of which is a wrapped
+version of the standard FPSIMD register set and another with actual SVE
+register data. At present we check TIF_SVE to see if full SVE register
+state should be provided when reading the SVE regset but if we were in a
+syscall we may have saved only floating point registers even though that is
+set.
+
+Fix this and simplify the logic by checking and using the format which we
+recorded when deciding if we should use FPSIMD or SVE format.
+
+Fixes: 8c845e273104 ("arm64/sve: Leave SVE enabled on syscall if we don't context switch")
+Cc: <stable@vger.kernel.org> # 6.2.x
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Link: https://lore.kernel.org/r/20240325-arm64-ptrace-fp-type-v1-1-8dc846caf11f@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/ptrace.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -729,7 +729,6 @@ static void sve_init_header_from_task(st
+ {
+ unsigned int vq;
+ bool active;
+- bool fpsimd_only;
+ enum vec_type task_type;
+
+ memset(header, 0, sizeof(*header));
+@@ -745,12 +744,10 @@ static void sve_init_header_from_task(st
+ case ARM64_VEC_SVE:
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+- fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
+ break;
+ case ARM64_VEC_SME:
+ if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+- fpsimd_only = false;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+@@ -758,7 +755,7 @@ static void sve_init_header_from_task(st
+ }
+
+ if (active) {
+- if (fpsimd_only) {
++ if (target->thread.fp_type == FP_STATE_FPSIMD) {
+ header->flags |= SVE_PT_REGS_FPSIMD;
+ } else {
+ header->flags |= SVE_PT_REGS_SVE;
--- /dev/null
+From f7caddfd558e32db0ae944256e623a259538b357 Mon Sep 17 00:00:00 2001
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Date: Tue, 5 Mar 2024 11:14:43 +0530
+Subject: drm/i915/dp: Fix the computation for compressed_bpp for DISPLAY < 13
+
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+
+commit f7caddfd558e32db0ae944256e623a259538b357 upstream.
+
+For DISPLAY < 13, compressed bpp is chosen from a list of
+supported compressed bpps. Fix the condition to choose the
+appropriate compressed bpp from the list.
+
+Fixes: 1c56e9a39833 ("drm/i915/dp: Get optimal link config to have best compressed bpp")
+Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.7+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/10162
+Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240305054443.2489895-1-ankit.k.nautiyal@intel.com
+(cherry picked from commit 5a1da42b50f3594e18738885c2f23ed36629dd00)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1916,8 +1916,9 @@ icl_dsc_compute_link_config(struct intel
+ dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
+- if (valid_dsc_bpp[i] < dsc_min_bpp ||
+- valid_dsc_bpp[i] > dsc_max_bpp)
++ if (valid_dsc_bpp[i] < dsc_min_bpp)
++ continue;
++ if (valid_dsc_bpp[i] > dsc_max_bpp)
+ break;
+
+ ret = dsc_compute_link_config(intel_dp,
--- /dev/null
+From bc9a1ec01289e6e7259dc5030b413a9c6654a99a Mon Sep 17 00:00:00 2001
+From: Andi Shyti <andi.shyti@linux.intel.com>
+Date: Thu, 28 Mar 2024 08:34:03 +0100
+Subject: drm/i915/gt: Disable HW load balancing for CCS
+
+From: Andi Shyti <andi.shyti@linux.intel.com>
+
+commit bc9a1ec01289e6e7259dc5030b413a9c6654a99a upstream.
+
+The hardware should not dynamically balance the load between CCS
+engines. Wa_14019159160 recommends disabling it across all
+platforms.
+
+Fixes: d2eae8e98d59 ("drm/i915/dg2: Drop force_probe requirement")
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: <stable@vger.kernel.org> # v6.2+
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Acked-by: Michal Mrozek <michal.mrozek@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240328073409.674098-2-andi.shyti@linux.intel.com
+(cherry picked from commit f5d2904cf814f20b79e3e4c1b24a4ccc2411b7e0)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/intel_gt_regs.h | 1 +
+ drivers/gpu/drm/i915/gt/intel_workarounds.c | 23 +++++++++++++++++++++--
+ 2 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+@@ -1477,6 +1477,7 @@
+ #define ECOBITS_PPGTT_CACHE4B (0 << 8)
+
+ #define GEN12_RCU_MODE _MMIO(0x14800)
++#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
+ #define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+
+ #define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -51,7 +51,8 @@
+ * registers belonging to BCS, VCS or VECS should be implemented in
+ * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
+ * engine's MMIO range but that are part of of the common RCS/CCS reset domain
+- * should be implemented in general_render_compute_wa_init().
++ * should be implemented in general_render_compute_wa_init(). The settings
++ * about the CCS load balancing should be added in ccs_engine_wa_mode().
+ *
+ * - GT workarounds: the list of these WAs is applied whenever these registers
+ * revert to their default values: on GPU reset, suspend/resume [1]_, etc.
+@@ -2850,6 +2851,22 @@ add_render_compute_tuning_settings(struc
+ wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
+ }
+
++static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
++{
++ struct intel_gt *gt = engine->gt;
++
++ if (!IS_DG2(gt->i915))
++ return;
++
++ /*
++ * Wa_14019159160: This workaround, along with others, leads to
++ * significant challenges in utilizing load balancing among the
++ * CCS slices. Consequently, an architectural decision has been
++ * made to completely disable automatic CCS load balancing.
++ */
++ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
++}
++
+ /*
+ * The workarounds in this function apply to shared registers in
+ * the general render reset domain that aren't tied to a
+@@ -3000,8 +3017,10 @@ engine_init_workarounds(struct intel_eng
+ * to a single RCS/CCS engine's workaround list since
+ * they're reset as part of the general render domain reset.
+ */
+- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
++ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
+ general_render_compute_wa_init(engine, wal);
++ ccs_engine_wa_mode(engine, wal);
++ }
+
+ if (engine->class == COMPUTE_CLASS)
+ ccs_engine_wa_init(engine, wal);
--- /dev/null
+From ea315f98e5d6d3191b74beb0c3e5fc16081d517c Mon Sep 17 00:00:00 2001
+From: Andi Shyti <andi.shyti@linux.intel.com>
+Date: Thu, 28 Mar 2024 08:34:04 +0100
+Subject: drm/i915/gt: Do not generate the command streamer for all the CCS
+
+From: Andi Shyti <andi.shyti@linux.intel.com>
+
+commit ea315f98e5d6d3191b74beb0c3e5fc16081d517c upstream.
+
+We want a fixed load CCS balancing consisting in all slices
+sharing one single user engine. For this reason do not create the
+intel_engine_cs structure with its dedicated command streamer for
+CCS slices beyond the first.
+
+Fixes: d2eae8e98d59 ("drm/i915/dg2: Drop force_probe requirement")
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: <stable@vger.kernel.org> # v6.2+
+Acked-by: Michal Mrozek <michal.mrozek@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240328073409.674098-3-andi.shyti@linux.intel.com
+(cherry picked from commit c7a5aa4e57f88470313a8277eb299b221b86e3b1)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/intel_engine_cs.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -908,6 +908,23 @@ static intel_engine_mask_t init_engine_m
+ info->engine_mask &= ~BIT(GSC0);
+ }
+
++ /*
++ * Do not create the command streamer for CCS slices beyond the first.
++ * All the workload submitted to the first engine will be shared among
++ * all the slices.
++ *
++ * Once the user will be allowed to customize the CCS mode, then this
++ * check needs to be removed.
++ */
++ if (IS_DG2(gt->i915)) {
++ u8 first_ccs = __ffs(CCS_MASK(gt));
++
++ /* Mask off all the CCS engine */
++ info->engine_mask &= ~GENMASK(CCS3, CCS0);
++ /* Put back in the first CCS engine */
++ info->engine_mask |= BIT(_CCS(first_ccs));
++ }
++
+ return info->engine_mask;
+ }
+
--- /dev/null
+From 6db31251bb265813994bfb104eb4b4d0f44d64fb Mon Sep 17 00:00:00 2001
+From: Andi Shyti <andi.shyti@linux.intel.com>
+Date: Thu, 28 Mar 2024 08:34:05 +0100
+Subject: drm/i915/gt: Enable only one CCS for compute workload
+
+From: Andi Shyti <andi.shyti@linux.intel.com>
+
+commit 6db31251bb265813994bfb104eb4b4d0f44d64fb upstream.
+
+Enable only one CCS engine by default with all the compute slices
+allocated to it.
+
+While generating the list of UABI engines to be exposed to the
+user, exclude any additional CCS engines beyond the first
+instance.
+
+This change can be tested with igt i915_query.
+
+Fixes: d2eae8e98d59 ("drm/i915/dg2: Drop force_probe requirement")
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: <stable@vger.kernel.org> # v6.2+
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Acked-by: Michal Mrozek <michal.mrozek@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240328073409.674098-4-andi.shyti@linux.intel.com
+(cherry picked from commit 2bebae0112b117de7e8a7289277a4bd2403b9e17)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/Makefile | 1
+ drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c | 39 ++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h | 13 +++++++++
+ drivers/gpu/drm/i915/gt/intel_gt_regs.h | 5 +++
+ drivers/gpu/drm/i915/gt/intel_workarounds.c | 7 +++++
+ 5 files changed, 65 insertions(+)
+ create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+ create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
+
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -118,6 +118,7 @@ gt-y += \
+ gt/intel_ggtt_fencing.o \
+ gt/intel_gt.o \
+ gt/intel_gt_buffer_pool.o \
++ gt/intel_gt_ccs_mode.o \
+ gt/intel_gt_clock_utils.o \
+ gt/intel_gt_debugfs.o \
+ gt/intel_gt_engines_debugfs.o \
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+@@ -0,0 +1,39 @@
++// SPDX-License-Identifier: MIT
++/*
++ * Copyright © 2024 Intel Corporation
++ */
++
++#include "i915_drv.h"
++#include "intel_gt.h"
++#include "intel_gt_ccs_mode.h"
++#include "intel_gt_regs.h"
++
++void intel_gt_apply_ccs_mode(struct intel_gt *gt)
++{
++ int cslice;
++ u32 mode = 0;
++ int first_ccs = __ffs(CCS_MASK(gt));
++
++ if (!IS_DG2(gt->i915))
++ return;
++
++ /* Build the value for the fixed CCS load balancing */
++ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
++ if (CCS_MASK(gt) & BIT(cslice))
++ /*
++ * If available, assign the cslice
++ * to the first available engine...
++ */
++ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
++
++ else
++ /*
++ * ... otherwise, mark the cslice as
++ * unavailable if no CCS dispatches here
++ */
++ mode |= XEHP_CCS_MODE_CSLICE(cslice,
++ XEHP_CCS_MODE_CSLICE_MASK);
++ }
++
++ intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
++}
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: MIT */
++/*
++ * Copyright © 2024 Intel Corporation
++ */
++
++#ifndef __INTEL_GT_CCS_MODE_H__
++#define __INTEL_GT_CCS_MODE_H__
++
++struct intel_gt;
++
++void intel_gt_apply_ccs_mode(struct intel_gt *gt);
++
++#endif /* __INTEL_GT_CCS_MODE_H__ */
+--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+@@ -1480,6 +1480,11 @@
+ #define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
+ #define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+
++#define XEHP_CCS_MODE _MMIO(0x14804)
++#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
++#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
++#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
++
+ #define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
+ #define CHV_FGT_DISABLE_SS0 (1 << 10)
+ #define CHV_FGT_DISABLE_SS1 (1 << 11)
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -10,6 +10,7 @@
+ #include "intel_engine_regs.h"
+ #include "intel_gpu_commands.h"
+ #include "intel_gt.h"
++#include "intel_gt_ccs_mode.h"
+ #include "intel_gt_mcr.h"
+ #include "intel_gt_print.h"
+ #include "intel_gt_regs.h"
+@@ -2865,6 +2866,12 @@ static void ccs_engine_wa_mode(struct in
+ * made to completely disable automatic CCS load balancing.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
++
++ /*
++ * After having disabled automatic load balancing we need to
++ * assign all slices to a single CCS. We will call it CCS mode 1
++ */
++ intel_gt_apply_ccs_mode(gt);
+ }
+
+ /*
--- /dev/null
+From 51bc63392e96ca45d7be98bc43c180b174ffca09 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Tue, 2 Apr 2024 16:51:46 +0300
+Subject: drm/i915/mst: Limit MST+DSC to TGL+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 51bc63392e96ca45d7be98bc43c180b174ffca09 upstream.
+
+The MST code currently assumes that glk+ already supports MST+DSC,
+which is incorrect. We need to check for TGL+ actually. ICL does
+support SST+DSC, but supposedly it can't do MST+FEC which will
+also rule out MST+DSC.
+
+Note that a straight TGL+ check doesn't work here because DSC
+support can get fused out, so we do need to also check 'has_dsc'.
+
+Cc: stable@vger.kernel.org
+Fixes: d51f25eb479a ("drm/i915: Add DSC support to MST path")
+Reviewed-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240402135148.23011-6-ville.syrjala@linux.intel.com
+(cherry picked from commit c9c92f286dbdf872390ef3e74dbe5f0641e46f55)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_display_device.h | 1 +
+ drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_display_device.h
++++ b/drivers/gpu/drm/i915/display/intel_display_device.h
+@@ -47,6 +47,7 @@ struct drm_printer;
+ #define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
+ #define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
+ #define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
++#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
+ #define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
+ #define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
+ #define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -1338,7 +1338,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_c
+ return 0;
+ }
+
+- if (DISPLAY_VER(dev_priv) >= 10 &&
++ if (HAS_DSC_MST(dev_priv) &&
+ drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
+ /*
+ * TBD pass the connector BPC,
--- /dev/null
+From 99f855082f228cdcecd6ab768d3b8b505e0eb028 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Tue, 2 Apr 2024 16:51:47 +0300
+Subject: drm/i915/mst: Reject FEC+MST on ICL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 99f855082f228cdcecd6ab768d3b8b505e0eb028 upstream.
+
+ICL supposedly doesn't support FEC on MST. Reject it.
+
+Cc: stable@vger.kernel.org
+Fixes: d51f25eb479a ("drm/i915: Add DSC support to MST path")
+Reviewed-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240402135148.23011-7-ville.syrjala@linux.intel.com
+(cherry picked from commit b648ce2a28ba83c4fa67c61fcc5983e15e9d4afb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1422,7 +1422,8 @@ static bool intel_dp_source_supports_fec
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return true;
+
+- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
++ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
++ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
+ return true;
+
+ return false;
--- /dev/null
+From a00e7e3fb4b9b30a9f2286a6f892b6e781e560a8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Wed, 27 Mar 2024 10:11:34 +0100
+Subject: drm/xe: Rework rebinding
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit a00e7e3fb4b9b30a9f2286a6f892b6e781e560a8 upstream.
+
+Instead of handling the vm's rebind fence separately,
+which is error prone if they are not strictly ordered,
+attach rebind fences as kernel fences to the vm's resv.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240327091136.3271-3-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 5a091aff50b780ae29c7faf70a7a6c21c98a54c4)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_exec.c | 31 +++----------------------------
+ drivers/gpu/drm/xe/xe_pt.c | 2 +-
+ drivers/gpu/drm/xe/xe_vm.c | 27 +++++++++------------------
+ drivers/gpu/drm/xe/xe_vm.h | 2 +-
+ drivers/gpu/drm/xe/xe_vm_types.h | 3 ---
+ 5 files changed, 14 insertions(+), 51 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_exec.c
++++ b/drivers/gpu/drm/xe/xe_exec.c
+@@ -113,7 +113,6 @@ int xe_exec_ioctl(struct drm_device *dev
+ struct drm_exec *exec = &vm_exec.exec;
+ u32 i, num_syncs = 0, num_ufence = 0;
+ struct xe_sched_job *job;
+- struct dma_fence *rebind_fence;
+ struct xe_vm *vm;
+ bool write_locked, skip_retry = false;
+ ktime_t end = 0;
+@@ -256,35 +255,11 @@ retry:
+ * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
+ * VM mode only.
+ */
+- rebind_fence = xe_vm_rebind(vm, false);
+- if (IS_ERR(rebind_fence)) {
+- err = PTR_ERR(rebind_fence);
++ err = xe_vm_rebind(vm, false);
++ if (err)
+ goto err_put_job;
+- }
+-
+- /*
+- * We store the rebind_fence in the VM so subsequent execs don't get
+- * scheduled before the rebinds of userptrs / evicted BOs is complete.
+- */
+- if (rebind_fence) {
+- dma_fence_put(vm->rebind_fence);
+- vm->rebind_fence = rebind_fence;
+- }
+- if (vm->rebind_fence) {
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+- &vm->rebind_fence->flags)) {
+- dma_fence_put(vm->rebind_fence);
+- vm->rebind_fence = NULL;
+- } else {
+- dma_fence_get(vm->rebind_fence);
+- err = drm_sched_job_add_dependency(&job->drm,
+- vm->rebind_fence);
+- if (err)
+- goto err_put_job;
+- }
+- }
+
+- /* Wait behind munmap style rebinds */
++ /* Wait behind rebinds */
+ if (!xe_vm_in_lr_mode(vm)) {
+ err = drm_sched_job_add_resv_dependencies(&job->drm,
+ xe_vm_resv(vm),
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -1300,7 +1300,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, s
+ }
+
+ /* add shared fence now for pagetable delayed destroy */
+- dma_resv_add_fence(xe_vm_resv(vm), fence, !rebind &&
++ dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
+ last_munmap_rebind ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -520,7 +520,6 @@ static void preempt_rebind_work_func(str
+ {
+ struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
+ struct drm_exec exec;
+- struct dma_fence *rebind_fence;
+ unsigned int fence_count = 0;
+ LIST_HEAD(preempt_fences);
+ ktime_t end = 0;
+@@ -566,18 +565,11 @@ retry:
+ if (err)
+ goto out_unlock;
+
+- rebind_fence = xe_vm_rebind(vm, true);
+- if (IS_ERR(rebind_fence)) {
+- err = PTR_ERR(rebind_fence);
++ err = xe_vm_rebind(vm, true);
++ if (err)
+ goto out_unlock;
+- }
+
+- if (rebind_fence) {
+- dma_fence_wait(rebind_fence, false);
+- dma_fence_put(rebind_fence);
+- }
+-
+- /* Wait on munmap style VM unbinds */
++ /* Wait on rebinds and munmap style VM unbinds */
+ wait = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+@@ -771,14 +763,14 @@ xe_vm_bind_vma(struct xe_vma *vma, struc
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op);
+
+-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
++int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
+ {
+- struct dma_fence *fence = NULL;
++ struct dma_fence *fence;
+ struct xe_vma *vma, *next;
+
+ lockdep_assert_held(&vm->lock);
+ if (xe_vm_in_lr_mode(vm) && !rebind_worker)
+- return NULL;
++ return 0;
+
+ xe_vm_assert_held(vm);
+ list_for_each_entry_safe(vma, next, &vm->rebind_list,
+@@ -786,17 +778,17 @@ struct dma_fence *xe_vm_rebind(struct xe
+ xe_assert(vm->xe, vma->tile_present);
+
+ list_del_init(&vma->combined_links.rebind);
+- dma_fence_put(fence);
+ if (rebind_worker)
+ trace_xe_vma_rebind_worker(vma);
+ else
+ trace_xe_vma_rebind_exec(vma);
+ fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
+ if (IS_ERR(fence))
+- return fence;
++ return PTR_ERR(fence);
++ dma_fence_put(fence);
+ }
+
+- return fence;
++ return 0;
+ }
+
+ static void xe_vma_free(struct xe_vma *vma)
+@@ -1575,7 +1567,6 @@ static void vm_destroy_work_func(struct
+ XE_WARN_ON(vm->pt_root[id]);
+
+ trace_xe_vm_free(vm);
+- dma_fence_put(vm->rebind_fence);
+ kfree(vm);
+ }
+
+--- a/drivers/gpu/drm/xe/xe_vm.h
++++ b/drivers/gpu/drm/xe/xe_vm.h
+@@ -207,7 +207,7 @@ int __xe_vm_userptr_needs_repin(struct x
+
+ int xe_vm_userptr_check_repin(struct xe_vm *vm);
+
+-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
++int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+
+ int xe_vm_invalidate_vma(struct xe_vma *vma);
+
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -171,9 +171,6 @@ struct xe_vm {
+ */
+ struct list_head rebind_list;
+
+- /** @rebind_fence: rebind fence from execbuf */
+- struct dma_fence *rebind_fence;
+-
+ /**
+ * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
+ * from an irq context can be last put and the destroy needs to be able
--- /dev/null
+From 3c88b8f471ee9512bc4ef02bebafdc53fb7c5d9e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Wed, 27 Mar 2024 10:11:33 +0100
+Subject: drm/xe: Use ring ops TLB invalidation for rebinds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 3c88b8f471ee9512bc4ef02bebafdc53fb7c5d9e upstream.
+
+For each rebind we insert a GuC TLB invalidation and add a
+corresponding unordered TLB invalidation fence. This might
+add a huge number of TLB invalidation fences to wait for so
+rather than doing that, defer the TLB invalidation to the
+next ring ops for each affected exec queue. Since the TLB
+is invalidated on exec_queue switch, we need to invalidate
+once for each affected exec_queue.
+
+v2:
+- Simplify if-statements around the tlb_flush_seqno.
+ (Matthew Brost)
+- Add some comments and asserts.
+
+Fixes: 5387e865d90e ("drm/xe: Add TLB invalidation fence after rebinds issued from execs")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240327091136.3271-2-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 4fc4899e86f7afbd09f4bcb899f0fc57e0296e62)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_exec_queue_types.h | 5 +++++
+ drivers/gpu/drm/xe/xe_pt.c | 6 ++++--
+ drivers/gpu/drm/xe/xe_ring_ops.c | 11 ++++-------
+ drivers/gpu/drm/xe/xe_sched_job.c | 10 ++++++++++
+ drivers/gpu/drm/xe/xe_sched_job_types.h | 2 ++
+ drivers/gpu/drm/xe/xe_vm_types.h | 5 +++++
+ 6 files changed, 30 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
++++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
+@@ -159,6 +159,11 @@ struct xe_exec_queue {
+ const struct xe_ring_ops *ring_ops;
+ /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
+ struct drm_sched_entity *entity;
++ /**
++ * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed
++ * Protected by @vm's resv. Unused if @vm == NULL.
++ */
++ u64 tlb_flush_seqno;
+ /** @lrc: logical ring context for this exec queue */
+ struct xe_lrc lrc[];
+ };
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -1255,11 +1255,13 @@ __xe_pt_bind_vma(struct xe_tile *tile, s
+ * non-faulting LR, in particular on user-space batch buffer chaining,
+ * it needs to be done here.
+ */
+- if ((rebind && !xe_vm_in_lr_mode(vm) && !vm->batch_invalidate_tlb) ||
+- (!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
++ if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence)
+ return ERR_PTR(-ENOMEM);
++ } else if (rebind && !xe_vm_in_lr_mode(vm)) {
++ /* We bump also if batch_invalidate_tlb is true */
++ vm->tlb_flush_seqno++;
+ }
+
+ rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -227,10 +227,9 @@ static void __emit_job_gen12_simple(stru
+ {
+ u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ u32 ppgtt_flag = get_ppgtt_flag(job);
+- struct xe_vm *vm = job->q->vm;
+ struct xe_gt *gt = job->q->gt;
+
+- if (vm && vm->batch_invalidate_tlb) {
++ if (job->ring_ops_flush_tlb) {
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, true, dw, i);
+@@ -278,7 +277,6 @@ static void __emit_job_gen12_video(struc
+ struct xe_gt *gt = job->q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+- struct xe_vm *vm = job->q->vm;
+
+ dw[i++] = preparser_disable(true);
+
+@@ -290,13 +288,13 @@ static void __emit_job_gen12_video(struc
+ i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
+ }
+
+- if (vm && vm->batch_invalidate_tlb)
++ if (job->ring_ops_flush_tlb)
+ i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, true, dw, i);
+
+ dw[i++] = preparser_disable(false);
+
+- if (!vm || !vm->batch_invalidate_tlb)
++ if (!job->ring_ops_flush_tlb)
+ i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, dw, i);
+
+@@ -325,7 +323,6 @@ static void __emit_job_gen12_render_comp
+ struct xe_gt *gt = job->q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
+- struct xe_vm *vm = job->q->vm;
+ u32 mask_flags = 0;
+
+ dw[i++] = preparser_disable(true);
+@@ -335,7 +332,7 @@ static void __emit_job_gen12_render_comp
+ mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
+
+ /* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
+- i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);
++ i = emit_pipe_invalidate(mask_flags, job->ring_ops_flush_tlb, dw, i);
+
+ /* hsdes: 1809175790 */
+ if (has_aux_ccs(xe))
+--- a/drivers/gpu/drm/xe/xe_sched_job.c
++++ b/drivers/gpu/drm/xe/xe_sched_job.c
+@@ -250,6 +250,16 @@ bool xe_sched_job_completed(struct xe_sc
+
+ void xe_sched_job_arm(struct xe_sched_job *job)
+ {
++ struct xe_exec_queue *q = job->q;
++ struct xe_vm *vm = q->vm;
++
++ if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
++ (vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
++ xe_vm_assert_held(vm);
++ q->tlb_flush_seqno = vm->tlb_flush_seqno;
++ job->ring_ops_flush_tlb = true;
++ }
++
+ drm_sched_job_arm(&job->drm);
+ }
+
+--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
++++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
+@@ -39,6 +39,8 @@ struct xe_sched_job {
+ } user_fence;
+ /** @migrate_flush_flags: Additional flush flags for migration jobs */
+ u32 migrate_flush_flags;
++ /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
++ bool ring_ops_flush_tlb;
+ /** @batch_addr: batch buffer address of job */
+ u64 batch_addr[];
+ };
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -282,6 +282,11 @@ struct xe_vm {
+ bool capture_once;
+ } error_capture;
+
++ /**
++ * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
++ * protected by the vm resv.
++ */
++ u64 tlb_flush_seqno;
+ /** @batch_invalidate_tlb: Always invalidate TLB before batch start */
+ bool batch_invalidate_tlb;
+ /** @xef: XE file handle for tracking this VM's drm client */
--- /dev/null
+From 65291dcfcf8936e1b23cfd7718fdfde7cfaf7706 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Tue, 26 Mar 2024 15:32:08 +0100
+Subject: mm/secretmem: fix GUP-fast succeeding on secretmem folios
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 65291dcfcf8936e1b23cfd7718fdfde7cfaf7706 upstream.
+
+folio_is_secretmem() currently relies on secretmem folios being LRU
+folios, to save some cycles.
+
+However, folios might reside in a folio batch without the LRU flag set, or
+temporarily have their LRU flag cleared. Consequently, the LRU flag is
+unreliable for this purpose.
+
+In particular, this is the case when secretmem_fault() allocates a fresh
+page and calls filemap_add_folio()->folio_add_lru(). The folio might be
+added to the per-cpu folio batch and won't get the LRU flag set until the
+batch was drained using e.g., lru_add_drain().
+
+Consequently, folio_is_secretmem() might not detect secretmem folios and
+GUP-fast can succeed in grabbing a secretmem folio, crashing the kernel
+when we would later try reading/writing to the folio, because the folio
+has been unmapped from the directmap.
+
+Fix it by removing that unreliable check.
+
+Link: https://lkml.kernel.org/r/20240326143210.291116-2-david@redhat.com
+Fixes: 1507f51255c9 ("mm: introduce memfd_secret system call to create "secret" memory areas")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: xingwei lee <xrivendell7@gmail.com>
+Reported-by: yue sun <samsun1006219@gmail.com>
+Closes: https://lore.kernel.org/lkml/CABOYnLyevJeravW=QrH0JUPYEcDN160aZFb7kwndm-J2rmz0HQ@mail.gmail.com/
+Debugged-by: Miklos Szeredi <miklos@szeredi.hu>
+Tested-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/secretmem.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/secretmem.h
++++ b/include/linux/secretmem.h
+@@ -13,10 +13,10 @@ static inline bool folio_is_secretmem(st
+ /*
+ * Using folio_mapping() is quite slow because of the actual call
+ * instruction.
+- * We know that secretmem pages are not compound and LRU so we can
++ * We know that secretmem pages are not compound, so we can
+ * save a couple of cycles here.
+ */
+- if (folio_test_large(folio) || !folio_test_lru(folio))
++ if (folio_test_large(folio))
+ return false;
+
+ mapping = (struct address_space *)
--- /dev/null
+From d080a08b06b6266cc3e0e86c5acfd80db937cb6b Mon Sep 17 00:00:00 2001
+From: Samuel Holland <samuel.holland@sifive.com>
+Date: Mon, 11 Mar 2024 19:19:13 -0700
+Subject: riscv: Fix spurious errors from __get/put_kernel_nofault
+
+From: Samuel Holland <samuel.holland@sifive.com>
+
+commit d080a08b06b6266cc3e0e86c5acfd80db937cb6b upstream.
+
+These macros did not initialize __kr_err, so they could fail even if
+the access did not fault.
+
+Cc: stable@vger.kernel.org
+Fixes: d464118cdc41 ("riscv: implement __get_kernel_nofault and __put_user_nofault")
+Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
+Link: https://lore.kernel.org/r/20240312022030.320789-1-samuel.holland@sifive.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/include/asm/uaccess.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(vo
+
+ #define __get_kernel_nofault(dst, src, type, err_label) \
+ do { \
+- long __kr_err; \
++ long __kr_err = 0; \
+ \
+ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+ if (unlikely(__kr_err)) \
+@@ -328,7 +328,7 @@ do { \
+
+ #define __put_kernel_nofault(dst, src, type, err_label) \
+ do { \
+- long __kr_err; \
++ long __kr_err = 0; \
+ \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
+ if (unlikely(__kr_err)) \
--- /dev/null
+From d14fa1fcf69db9d070e75f1c4425211fa619dfc8 Mon Sep 17 00:00:00 2001
+From: Stefan O'Rear <sorear@fastmail.com>
+Date: Wed, 27 Mar 2024 02:12:58 -0400
+Subject: riscv: process: Fix kernel gp leakage
+
+From: Stefan O'Rear <sorear@fastmail.com>
+
+commit d14fa1fcf69db9d070e75f1c4425211fa619dfc8 upstream.
+
+childregs represents the registers which are active for the new thread
+in user context. For a kernel thread, childregs->gp is never used since
+the kernel gp is not touched by switch_to. For a user mode helper, the
+gp value can be observed in user space after execve or possibly by other
+means.
+
+[From the email thread]
+
+The /* Kernel thread */ comment is somewhat inaccurate in that it is also used
+for user_mode_helper threads, which exec a user process, e.g. /sbin/init or
+when /proc/sys/kernel/core_pattern is a pipe. Such threads do not have
+PF_KTHREAD set and are valid targets for ptrace etc. even before they exec.
+
+childregs is the *user* context during syscall execution and it is observable
+from userspace in at least five ways:
+
+1. kernel_execve does not currently clear integer registers, so the starting
+ register state for PID 1 and other user processes started by the kernel has
+ sp = user stack, gp = kernel __global_pointer$, all other integer registers
+ zeroed by the memset in the patch comment.
+
+ This is a bug in its own right, but I'm unwilling to bet that it is the only
+ way to exploit the issue addressed by this patch.
+
+2. ptrace(PTRACE_GETREGSET): you can PTRACE_ATTACH to a user_mode_helper thread
+ before it execs, but ptrace requires SIGSTOP to be delivered which can only
+ happen at user/kernel boundaries.
+
+3. /proc/*/task/*/syscall: this is perfectly happy to read pt_regs for
+ user_mode_helpers before the exec completes, but gp is not one of the
+ registers it returns.
+
+4. PERF_SAMPLE_REGS_USER: LOCKDOWN_PERF normally prevents access to kernel
+ addresses via PERF_SAMPLE_REGS_INTR, but due to this bug kernel addresses
+ are also exposed via PERF_SAMPLE_REGS_USER which is permitted under
+ LOCKDOWN_PERF. I have not attempted to write exploit code.
+
+5. Much of the tracing infrastructure allows access to user registers. I have
+ not attempted to determine which forms of tracing allow access to user
+ registers without already allowing access to kernel registers.
+
+Fixes: 7db91e57a0ac ("RISC-V: Task implementation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Stefan O'Rear <sorear@fastmail.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20240327061258.2370291-1-sorear@fastmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/process.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -27,8 +27,6 @@
+ #include <asm/vector.h>
+ #include <asm/cpufeature.h>
+
+-register unsigned long gp_in_global __asm__("gp");
+-
+ #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ #include <linux/stackprotector.h>
+ unsigned long __stack_chk_guard __read_mostly;
+@@ -207,7 +205,6 @@ int copy_thread(struct task_struct *p, c
+ if (unlikely(args->fn)) {
+ /* Kernel thread */
+ memset(childregs, 0, sizeof(struct pt_regs));
+- childregs->gp = gp_in_global;
+ /* Supervisor/Machine, irqs on: */
+ childregs->status = SR_PP | SR_PIE;
+
--- /dev/null
+From 378ca2d2ad410a1cd5690d06b46c5e2297f4c8c0 Mon Sep 17 00:00:00 2001
+From: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Date: Tue, 26 Mar 2024 18:12:13 +0100
+Subject: s390/entry: align system call table on 8 bytes
+
+From: Sumanth Korikkar <sumanthk@linux.ibm.com>
+
+commit 378ca2d2ad410a1cd5690d06b46c5e2297f4c8c0 upstream.
+
+Align system call table on 8 bytes. With sys_call_table entry size
+of 8 bytes that eliminates the possibility of a system call pointer
+crossing cache line boundary.
+
+Cc: stable@kernel.org
+Suggested-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/entry.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -653,6 +653,7 @@ SYM_DATA_START_LOCAL(daton_psw)
+ SYM_DATA_END(daton_psw)
+
+ .section .rodata, "a"
++ .balign 8
+ #define SYSCALL(esame,emu) .quad __s390x_ ## esame
+ SYM_DATA_START(sys_call_table)
+ #include "asm/syscall_table.h"
--- /dev/null
+From 176517c9310281d00dd3210ab4cc4d3cdc26b17e Mon Sep 17 00:00:00 2001
+From: Edward Liaw <edliaw@google.com>
+Date: Fri, 29 Mar 2024 18:58:10 +0000
+Subject: selftests/mm: include strings.h for ffsl
+
+From: Edward Liaw <edliaw@google.com>
+
+commit 176517c9310281d00dd3210ab4cc4d3cdc26b17e upstream.
+
+Got a compilation error on Android for ffsl after 91b80cc5b39f
+("selftests: mm: fix map_hugetlb failure on 64K page size systems")
+included vm_util.h.
+
+Link: https://lkml.kernel.org/r/20240329185814.16304-1-edliaw@google.com
+Fixes: af605d26a8f2 ("selftests/mm: merge util.h into vm_util.h")
+Signed-off-by: Edward Liaw <edliaw@google.com>
+Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/mm/vm_util.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/mm/vm_util.h
++++ b/tools/testing/selftests/mm/vm_util.h
+@@ -3,7 +3,7 @@
+ #include <stdbool.h>
+ #include <sys/mman.h>
+ #include <err.h>
+-#include <string.h> /* ffsl() */
++#include <strings.h> /* ffsl() */
+ #include <unistd.h> /* _SC_PAGESIZE */
+
+ #define BIT_ULL(nr) (1ULL << (nr))
perf-x86-intel-ds-don-t-clear-pebs_data_cfg-for-the-last-pebs-event.patch
aio-fix-null-ptr-deref-in-aio_complete-wakeup.patch
riscv-fix-vector-state-restore-in-rt_sigreturn.patch
+arm64-ptrace-use-saved-floating-point-state-type-to-determine-sve-layout.patch
+mm-secretmem-fix-gup-fast-succeeding-on-secretmem-folios.patch
+selftests-mm-include-strings.h-for-ffsl.patch
+s390-entry-align-system-call-table-on-8-bytes.patch
+riscv-fix-spurious-errors-from-__get-put_kernel_nofault.patch
+riscv-process-fix-kernel-gp-leakage.patch
+smb-client-fix-uaf-in-smb2_reconnect_server.patch
+smb-client-guarantee-refcounted-children-from-parent-session.patch
+smb-client-refresh-referral-without-acquiring-refpath_lock.patch
+smb-client-handle-dfs-tcons-in-cifs_construct_tcon.patch
+smb-client-serialise-cifs_construct_tcon-with-cifs_mount_mutex.patch
+smb3-retrying-on-failed-server-close.patch
+smb-client-fix-potential-uaf-in-cifs_debug_files_proc_show.patch
+smb-client-fix-potential-uaf-in-cifs_stats_proc_write.patch
+smb-client-fix-potential-uaf-in-cifs_stats_proc_show.patch
+smb-client-fix-potential-uaf-in-cifs_dump_full_key.patch
+smb-client-fix-potential-uaf-in-smb2_is_valid_oplock_break.patch
+smb-client-fix-potential-uaf-in-smb2_is_valid_lease_break.patch
+smb-client-fix-potential-uaf-in-is_valid_oplock_break.patch
+smb-client-fix-potential-uaf-in-smb2_is_network_name_deleted.patch
+smb-client-fix-potential-uaf-in-cifs_signal_cifsd_for_reconnect.patch
+drm-i915-mst-limit-mst-dsc-to-tgl.patch
+drm-i915-mst-reject-fec-mst-on-icl.patch
+drm-i915-dp-fix-the-computation-for-compressed_bpp-for-display-13.patch
+drm-i915-gt-disable-hw-load-balancing-for-ccs.patch
+drm-i915-gt-do-not-generate-the-command-streamer-for-all-the-ccs.patch
+drm-i915-gt-enable-only-one-ccs-for-compute-workload.patch
+drm-xe-use-ring-ops-tlb-invalidation-for-rebinds.patch
+drm-xe-rework-rebinding.patch
--- /dev/null
+From ca545b7f0823f19db0f1148d59bc5e1a56634502 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:33:53 -0300
+Subject: smb: client: fix potential UAF in cifs_debug_files_proc_show()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit ca545b7f0823f19db0f1148d59bc5e1a56634502 upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifs_debug.c | 2 ++
+ fs/smb/client/cifsglob.h | 10 ++++++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -250,6 +250,8 @@ static int cifs_debug_files_proc_show(st
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ spin_lock(&tcon->open_file_lock);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -2295,4 +2295,14 @@ struct smb2_compound_vars {
+ struct smb2_file_link_info link_info;
+ };
+
++static inline bool cifs_ses_exiting(struct cifs_ses *ses)
++{
++ bool ret;
++
++ spin_lock(&ses->ses_lock);
++ ret = ses->ses_status == SES_EXITING;
++ spin_unlock(&ses->ses_lock);
++ return ret;
++}
++
+ #endif /* _CIFS_GLOB_H */
--- /dev/null
+From 58acd1f497162e7d282077f816faa519487be045 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:33:54 -0300
+Subject: smb: client: fix potential UAF in cifs_dump_full_key()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 58acd1f497162e7d282077f816faa519487be045 upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/ioctl.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -247,7 +247,9 @@ static int cifs_dump_full_key(struct cif
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+- if (ses_it->Suid == out.session_id) {
++ spin_lock(&ses_it->ses_lock);
++ if (ses_it->ses_status != SES_EXITING &&
++ ses_it->Suid == out.session_id) {
+ ses = ses_it;
+ /*
+ * since we are using the session outside the crit
+@@ -255,9 +257,11 @@ static int cifs_dump_full_key(struct cif
+ * so increment its refcount
+ */
+ cifs_smb_ses_inc_refcount(ses);
++ spin_unlock(&ses_it->ses_lock);
+ found = true;
+ goto search_end;
+ }
++ spin_unlock(&ses_it->ses_lock);
+ }
+ }
+ search_end:
--- /dev/null
+From e0e50401cc3921c9eaf1b0e667db174519ea939f Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:34:04 -0300
+Subject: smb: client: fix potential UAF in cifs_signal_cifsd_for_reconnect()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit e0e50401cc3921c9eaf1b0e667db174519ea939f upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/connect.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -178,6 +178,8 @@ cifs_signal_cifsd_for_reconnect(struct T
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ spin_lock(&ses->chan_lock);
+ for (i = 0; i < ses->chan_count; i++) {
+ if (!ses->chans[i].server)
--- /dev/null
+From 0865ffefea197b437ba78b5dd8d8e256253efd65 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:33:56 -0300
+Subject: smb: client: fix potential UAF in cifs_stats_proc_show()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 0865ffefea197b437ba78b5dd8d8e256253efd65 upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifs_debug.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -739,6 +739,8 @@ static int cifs_stats_proc_show(struct s
+ }
+ #endif /* STATS2 */
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ i++;
+ seq_printf(m, "\n%d) %s", i, tcon->tree_name);
--- /dev/null
+From d3da25c5ac84430f89875ca7485a3828150a7e0a Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:33:55 -0300
+Subject: smb: client: fix potential UAF in cifs_stats_proc_write()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit d3da25c5ac84430f89875ca7485a3828150a7e0a upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifs_debug.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -658,6 +658,8 @@ static ssize_t cifs_stats_proc_write(str
+ }
+ #endif /* CONFIG_CIFS_STATS2 */
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ atomic_set(&tcon->num_smbs_sent, 0);
+ spin_lock(&tcon->stat_lock);
--- /dev/null
+From 69ccf040acddf33a3a85ec0f6b45ef84b0f7ec29 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:34:00 -0300
+Subject: smb: client: fix potential UAF in is_valid_oplock_break()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 69ccf040acddf33a3a85ec0f6b45ef84b0f7ec29 upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/misc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -484,6 +484,8 @@ is_valid_oplock_break(char *buffer, stru
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid != buf->Tid)
+ continue;
--- /dev/null
+From 63981561ffd2d4987807df4126f96a11e18b0c1d Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:34:02 -0300
+Subject: smb: client: fix potential UAF in smb2_is_network_name_deleted()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 63981561ffd2d4987807df4126f96a11e18b0c1d upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2ops.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -2480,6 +2480,8 @@ smb2_is_network_name_deleted(char *buf,
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
+ spin_lock(&tcon->tc_lock);
--- /dev/null
+From 705c76fbf726c7a2f6ff9143d4013b18daaaebf1 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:33:58 -0300
+Subject: smb: client: fix potential UAF in smb2_is_valid_lease_break()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 705c76fbf726c7a2f6ff9143d4013b18daaaebf1 upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2misc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -622,6 +622,8 @@ smb2_is_valid_lease_break(char *buffer,
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ spin_lock(&tcon->open_file_lock);
+ cifs_stats_inc(
--- /dev/null
+From 22863485a4626ec6ecf297f4cc0aef709bc862e4 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 2 Apr 2024 16:33:59 -0300
+Subject: smb: client: fix potential UAF in smb2_is_valid_oplock_break()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 22863485a4626ec6ecf297f4cc0aef709bc862e4 upstream.
+
+Skip sessions that are being teared down (status == SES_EXITING) to
+avoid UAF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2misc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -697,6 +697,8 @@ smb2_is_valid_oplock_break(char *buffer,
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+
+ spin_lock(&tcon->open_file_lock);
--- /dev/null
+From 24a9799aa8efecd0eb55a75e35f9d8e6400063aa Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 1 Apr 2024 14:13:10 -0300
+Subject: smb: client: fix UAF in smb2_reconnect_server()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 24a9799aa8efecd0eb55a75e35f9d8e6400063aa upstream.
+
+The UAF bug is due to smb2_reconnect_server() accessing a session that
+is already being teared down by another thread that is executing
+__cifs_put_smb_ses(). This can happen when (a) the client has
+connection to the server but no session or (b) another thread ends up
+setting @ses->ses_status again to something different than
+SES_EXITING.
+
+To fix this, we need to make sure to unconditionally set
+@ses->ses_status to SES_EXITING and prevent any other threads from
+setting a new status while we're still tearing it down.
+
+The following can be reproduced by adding some delay to right after
+the ipc is freed in __cifs_put_smb_ses() - which will give
+smb2_reconnect_server() worker a chance to run and then accessing
+@ses->ipc:
+
+kinit ...
+mount.cifs //srv/share /mnt/1 -o sec=krb5,nohandlecache,echo_interval=10
+[disconnect srv]
+ls /mnt/1 &>/dev/null
+sleep 30
+kdestroy
+[reconnect srv]
+sleep 10
+umount /mnt/1
+...
+CIFS: VFS: Verify user has a krb5 ticket and keyutils is installed
+CIFS: VFS: \\srv Send error in SessSetup = -126
+CIFS: VFS: Verify user has a krb5 ticket and keyutils is installed
+CIFS: VFS: \\srv Send error in SessSetup = -126
+general protection fault, probably for non-canonical address
+0x6b6b6b6b6b6b6b6b: 0000 [#1] PREEMPT SMP NOPTI
+CPU: 3 PID: 50 Comm: kworker/3:1 Not tainted 6.9.0-rc2 #1
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-1.fc39
+04/01/2014
+Workqueue: cifsiod smb2_reconnect_server [cifs]
+RIP: 0010:__list_del_entry_valid_or_report+0x33/0xf0
+Code: 4f 08 48 85 d2 74 42 48 85 c9 74 59 48 b8 00 01 00 00 00 00 ad
+de 48 39 c2 74 61 48 b8 22 01 00 00 00 00 74 69 <48> 8b 01 48 39 f8 75
+7b 48 8b 72 08 48 39 c6 0f 85 88 00 00 00 b8
+RSP: 0018:ffffc900001bfd70 EFLAGS: 00010a83
+RAX: dead000000000122 RBX: ffff88810da53838 RCX: 6b6b6b6b6b6b6b6b
+RDX: 6b6b6b6b6b6b6b6b RSI: ffffffffc02f6878 RDI: ffff88810da53800
+RBP: ffff88810da53800 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000001 R12: ffff88810c064000
+R13: 0000000000000001 R14: ffff88810c064000 R15: ffff8881039cc000
+FS: 0000000000000000(0000) GS:ffff888157c00000(0000)
+knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fe3728b1000 CR3: 000000010caa4000 CR4: 0000000000750ef0
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ ? die_addr+0x36/0x90
+ ? exc_general_protection+0x1c1/0x3f0
+ ? asm_exc_general_protection+0x26/0x30
+ ? __list_del_entry_valid_or_report+0x33/0xf0
+ __cifs_put_smb_ses+0x1ae/0x500 [cifs]
+ smb2_reconnect_server+0x4ed/0x710 [cifs]
+ process_one_work+0x205/0x6b0
+ worker_thread+0x191/0x360
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0xe2/0x110
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x34/0x50
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/connect.c | 85 +++++++++++++++++++-----------------------------
+ 1 file changed, 35 insertions(+), 50 deletions(-)
+
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -235,7 +235,13 @@ cifs_mark_tcp_ses_conns_for_reconnect(st
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+- /* check if iface is still active */
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
++ spin_unlock(&ses->ses_lock);
++
+ spin_lock(&ses->chan_lock);
+ if (cifs_ses_get_chan_index(ses, server) ==
+ CIFS_INVAL_CHAN_INDEX) {
+@@ -1966,31 +1972,6 @@ out:
+ return rc;
+ }
+
+-/**
+- * cifs_free_ipc - helper to release the session IPC tcon
+- * @ses: smb session to unmount the IPC from
+- *
+- * Needs to be called everytime a session is destroyed.
+- *
+- * On session close, the IPC is closed and the server must release all tcons of the session.
+- * No need to send a tree disconnect here.
+- *
+- * Besides, it will make the server to not close durable and resilient files on session close, as
+- * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
+- */
+-static int
+-cifs_free_ipc(struct cifs_ses *ses)
+-{
+- struct cifs_tcon *tcon = ses->tcon_ipc;
+-
+- if (tcon == NULL)
+- return 0;
+-
+- tconInfoFree(tcon);
+- ses->tcon_ipc = NULL;
+- return 0;
+-}
+-
+ static struct cifs_ses *
+ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+@@ -2022,48 +2003,52 @@ cifs_find_smb_ses(struct TCP_Server_Info
+ void __cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+ struct TCP_Server_Info *server = ses->server;
++ struct cifs_tcon *tcon;
+ unsigned int xid;
+ size_t i;
++ bool do_logoff;
+ int rc;
+
++ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_EXITING) {
++ cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
++ __func__, ses->Suid, ses->ses_count, ses->ses_status,
++ ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
++ if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
+ spin_unlock(&ses->ses_lock);
+- return;
+- }
+- spin_unlock(&ses->ses_lock);
+-
+- cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
+- cifs_dbg(FYI,
+- "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
+-
+- spin_lock(&cifs_tcp_ses_lock);
+- if (--ses->ses_count > 0) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
+- spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_GOOD)
+- ses->ses_status = SES_EXITING;
+- spin_unlock(&ses->ses_lock);
+- spin_unlock(&cifs_tcp_ses_lock);
+-
+ /* ses_count can never go negative */
+ WARN_ON(ses->ses_count < 0);
+
+- spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_EXITING && server->ops->logoff) {
+- spin_unlock(&ses->ses_lock);
+- cifs_free_ipc(ses);
++ spin_lock(&ses->chan_lock);
++ cifs_chan_clear_need_reconnect(ses, server);
++ spin_unlock(&ses->chan_lock);
++
++ do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
++ ses->ses_status = SES_EXITING;
++ tcon = ses->tcon_ipc;
++ ses->tcon_ipc = NULL;
++ spin_unlock(&ses->ses_lock);
++ spin_unlock(&cifs_tcp_ses_lock);
++
++ /*
++ * On session close, the IPC is closed and the server must release all
++ * tcons of the session. No need to send a tree disconnect here.
++ *
++ * Besides, it will make the server to not close durable and resilient
++ * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
++ * SMB2 LOGOFF Request.
++ */
++ tconInfoFree(tcon);
++ if (do_logoff) {
+ xid = get_xid();
+ rc = server->ops->logoff(xid, ses);
+ if (rc)
+ cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ __func__, rc);
+ _free_xid(xid);
+- } else {
+- spin_unlock(&ses->ses_lock);
+- cifs_free_ipc(ses);
+ }
+
+ spin_lock(&cifs_tcp_ses_lock);
--- /dev/null
+From 062a7f0ff46eb57aff526897bd2bebfdb1d3046a Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 1 Apr 2024 22:37:42 -0500
+Subject: smb: client: guarantee refcounted children from parent session
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 062a7f0ff46eb57aff526897bd2bebfdb1d3046a upstream.
+
+Avoid potential use-after-free bugs when walking DFS referrals,
+mounting and performing DFS failover by ensuring that all children
+from parent @tcon->ses are also refcounted. They're all needed across
+the entire DFS mount. Get rid of @tcon->dfs_ses_list while we're at
+it, too.
+
+Cc: stable@vger.kernel.org # 6.4+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404021527.ZlRkIxgv-lkp@intel.com/
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsglob.h | 2 -
+ fs/smb/client/cifsproto.h | 20 +++++++++---------
+ fs/smb/client/connect.c | 25 ++++++++++++++++++----
+ fs/smb/client/dfs.c | 51 +++++++++++++++++++++-------------------------
+ fs/smb/client/dfs.h | 37 ++++++++++++++++++++-------------
+ fs/smb/client/dfs_cache.c | 11 ---------
+ fs/smb/client/misc.c | 6 -----
+ 7 files changed, 78 insertions(+), 74 deletions(-)
+
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1267,7 +1267,6 @@ struct cifs_tcon {
+ struct cached_fids *cfids;
+ /* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+- struct list_head dfs_ses_list;
+ struct delayed_work dfs_cache_work;
+ #endif
+ struct delayed_work query_interfaces; /* query interfaces workqueue job */
+@@ -1788,7 +1787,6 @@ struct cifs_mount_ctx {
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- struct list_head dfs_ses_list;
+ };
+
+ static inline void __free_dfs_info_param(struct dfs_info3_param *param)
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -723,31 +723,31 @@ struct super_block *cifs_get_tcon_super(
+ void cifs_put_tcon_super(struct super_block *sb);
+ int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+
+-/* Put references of @ses and @ses->dfs_root_ses */
++/* Put references of @ses and its children */
+ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+- struct cifs_ses *rses = ses->dfs_root_ses;
++ struct cifs_ses *next;
+
+- __cifs_put_smb_ses(ses);
+- if (rses)
+- __cifs_put_smb_ses(rses);
++ do {
++ next = ses->dfs_root_ses;
++ __cifs_put_smb_ses(ses);
++ } while ((ses = next));
+ }
+
+-/* Get an active reference of @ses and @ses->dfs_root_ses.
++/* Get an active reference of @ses and its children.
+ *
+ * NOTE: make sure to call this function when incrementing reference count of
+ * @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
+ * will also get its reference count incremented.
+ *
+- * cifs_put_smb_ses() will put both references, so call it when you're done.
++ * cifs_put_smb_ses() will put all references, so call it when you're done.
+ */
+ static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
+ {
+ lockdep_assert_held(&cifs_tcp_ses_lock);
+
+- ses->ses_count++;
+- if (ses->dfs_root_ses)
+- ses->dfs_root_ses->ses_count++;
++ for (; ses; ses = ses->dfs_root_ses)
++ ses->ses_count++;
+ }
+
+ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1869,6 +1869,9 @@ static int match_session(struct cifs_ses
+ ctx->sectype != ses->sectype)
+ return 0;
+
++ if (ctx->dfs_root_ses != ses->dfs_root_ses)
++ return 0;
++
+ /*
+ * If an existing session is limited to less channels than
+ * requested, it should not be reused
+@@ -2361,9 +2364,9 @@ cifs_get_smb_ses(struct TCP_Server_Info
+ * need to lock before changing something in the session.
+ */
+ spin_lock(&cifs_tcp_ses_lock);
++ if (ctx->dfs_root_ses)
++ cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
+ ses->dfs_root_ses = ctx->dfs_root_ses;
+- if (ses->dfs_root_ses)
+- ses->dfs_root_ses->ses_count++;
+ list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+@@ -3312,6 +3315,9 @@ void cifs_mount_put_conns(struct cifs_mo
+ cifs_put_smb_ses(mnt_ctx->ses);
+ else if (mnt_ctx->server)
+ cifs_put_tcp_session(mnt_ctx->server, 0);
++ mnt_ctx->ses = NULL;
++ mnt_ctx->tcon = NULL;
++ mnt_ctx->server = NULL;
+ mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+ free_xid(mnt_ctx->xid);
+ }
+@@ -3590,8 +3596,6 @@ int cifs_mount(struct cifs_sb_info *cifs
+ bool isdfs;
+ int rc;
+
+- INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
+-
+ rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ if (rc)
+ goto error;
+@@ -3622,7 +3626,6 @@ out:
+ return rc;
+
+ error:
+- dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
+ cifs_mount_put_conns(&mnt_ctx);
+ return rc;
+ }
+@@ -3637,6 +3640,18 @@ int cifs_mount(struct cifs_sb_info *cifs
+ goto error;
+
+ rc = cifs_mount_get_tcon(&mnt_ctx);
++ if (!rc) {
++ /*
++ * Prevent superblock from being created with any missing
++ * connections.
++ */
++ if (WARN_ON(!mnt_ctx.server))
++ rc = -EHOSTDOWN;
++ else if (WARN_ON(!mnt_ctx.ses))
++ rc = -EACCES;
++ else if (WARN_ON(!mnt_ctx.tcon))
++ rc = -ENOENT;
++ }
+ if (rc)
+ goto error;
+
+--- a/fs/smb/client/dfs.c
++++ b/fs/smb/client/dfs.c
+@@ -66,33 +66,20 @@ static int get_session(struct cifs_mount
+ }
+
+ /*
+- * Track individual DFS referral servers used by new DFS mount.
+- *
+- * On success, their lifetime will be shared by final tcon (dfs_ses_list).
+- * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount().
++ * Get an active reference of @ses so that next call to cifs_put_tcon() won't
++ * release it as any new DFS referrals must go through its IPC tcon.
+ */
+-static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
++static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+- struct dfs_root_ses *root_ses;
+ struct cifs_ses *ses = mnt_ctx->ses;
+
+ if (ses) {
+- root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
+- if (!root_ses)
+- return -ENOMEM;
+-
+- INIT_LIST_HEAD(&root_ses->list);
+-
+ spin_lock(&cifs_tcp_ses_lock);
+ cifs_smb_ses_inc_refcount(ses);
+ spin_unlock(&cifs_tcp_ses_lock);
+- root_ses->ses = ses;
+- list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
+ }
+- /* Select new DFS referral server so that new referrals go through it */
+ ctx->dfs_root_ses = ses;
+- return 0;
+ }
+
+ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
+@@ -185,11 +172,8 @@ again:
+ continue;
+ }
+
+- if (is_refsrv) {
+- rc = add_root_smb_session(mnt_ctx);
+- if (rc)
+- goto out;
+- }
++ if (is_refsrv)
++ add_root_smb_session(mnt_ctx);
+
+ rc = ref_walk_advance(rw);
+ if (!rc) {
+@@ -232,6 +216,7 @@ static int __dfs_mount_share(struct cifs
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_tcon *tcon;
+ char *origin_fullpath;
++ bool new_tcon = true;
+ int rc;
+
+ origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
+@@ -239,6 +224,18 @@ static int __dfs_mount_share(struct cifs
+ return PTR_ERR(origin_fullpath);
+
+ rc = dfs_referral_walk(mnt_ctx);
++ if (!rc) {
++ /*
++ * Prevent superblock from being created with any missing
++ * connections.
++ */
++ if (WARN_ON(!mnt_ctx->server))
++ rc = -EHOSTDOWN;
++ else if (WARN_ON(!mnt_ctx->ses))
++ rc = -EACCES;
++ else if (WARN_ON(!mnt_ctx->tcon))
++ rc = -ENOENT;
++ }
+ if (rc)
+ goto out;
+
+@@ -247,15 +244,14 @@ static int __dfs_mount_share(struct cifs
+ if (!tcon->origin_fullpath) {
+ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
++ } else {
++ new_tcon = false;
+ }
+ spin_unlock(&tcon->tc_lock);
+
+- if (list_empty(&tcon->dfs_ses_list)) {
+- list_replace_init(&mnt_ctx->dfs_ses_list, &tcon->dfs_ses_list);
++ if (new_tcon) {
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
+- } else {
+- dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+ }
+
+ out:
+@@ -298,7 +294,6 @@ int dfs_mount_share(struct cifs_mount_ct
+ if (rc)
+ return rc;
+
+- ctx->dfs_root_ses = mnt_ctx->ses;
+ /*
+ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+ * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
+@@ -324,7 +319,9 @@ int dfs_mount_share(struct cifs_mount_ct
+
+ *isdfs = true;
+ add_root_smb_session(mnt_ctx);
+- return __dfs_mount_share(mnt_ctx);
++ rc = __dfs_mount_share(mnt_ctx);
++ dfs_put_root_smb_sessions(mnt_ctx);
++ return rc;
+ }
+
+ /* Update dfs referral path of superblock */
+--- a/fs/smb/client/dfs.h
++++ b/fs/smb/client/dfs.h
+@@ -7,7 +7,9 @@
+ #define _CIFS_DFS_H
+
+ #include "cifsglob.h"
++#include "cifsproto.h"
+ #include "fs_context.h"
++#include "dfs_cache.h"
+ #include "cifs_unicode.h"
+ #include <linux/namei.h>
+
+@@ -114,11 +116,6 @@ static inline void ref_walk_set_tgt_hint
+ ref_walk_tit(rw));
+ }
+
+-struct dfs_root_ses {
+- struct list_head list;
+- struct cifs_ses *ses;
+-};
+-
+ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
+ struct smb3_fs_context *ctx);
+ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+@@ -133,20 +130,32 @@ static inline int dfs_get_referral(struc
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++ struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses;
+
+- return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
++ return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), path, ref, tl);
+ }
+
+-static inline void dfs_put_root_smb_sessions(struct list_head *head)
+-{
+- struct dfs_root_ses *root, *tmp;
+-
+- list_for_each_entry_safe(root, tmp, head, list) {
+- list_del_init(&root->list);
+- cifs_put_smb_ses(root->ses);
+- kfree(root);
++/*
++ * cifs_get_smb_ses() already guarantees an active reference of
++ * @ses->dfs_root_ses when a new session is created, so we need to put extra
++ * references of all DFS root sessions that were used across the mount process
++ * in dfs_mount_share().
++ */
++static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
++{
++ const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++ struct cifs_ses *ses = ctx->dfs_root_ses;
++ struct cifs_ses *cur;
++
++ if (!ses)
++ return;
++
++ for (cur = ses; cur; cur = cur->dfs_root_ses) {
++ if (cur->dfs_root_ses)
++ cifs_put_smb_ses(cur->dfs_root_ses);
+ }
++ cifs_put_smb_ses(ses);
+ }
+
+ #endif /* _CIFS_DFS_H */
+--- a/fs/smb/client/dfs_cache.c
++++ b/fs/smb/client/dfs_cache.c
+@@ -1278,21 +1278,12 @@ int dfs_cache_remount_fs(struct cifs_sb_
+ void dfs_cache_refresh(struct work_struct *work)
+ {
+ struct TCP_Server_Info *server;
+- struct dfs_root_ses *rses;
+ struct cifs_tcon *tcon;
+ struct cifs_ses *ses;
+
+ tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+- ses = tcon->ses;
+- server = ses->server;
+
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+- mutex_unlock(&server->refpath_lock);
+-
+- list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+- ses = rses->ses;
++ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
+ server = ses->server;
+ mutex_lock(&server->refpath_lock);
+ if (server->leaf_fullpath)
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -141,9 +141,6 @@ tcon_info_alloc(bool dir_leases_enabled)
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
+ ret_buf->stats_from_time = ktime_get_real_seconds();
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+-#endif
+
+ return ret_buf;
+ }
+@@ -159,9 +156,6 @@ tconInfoFree(struct cifs_tcon *tcon)
+ atomic_dec(&tconInfoAllocCount);
+ kfree(tcon->nativeFileSystem);
+ kfree_sensitive(tcon->password);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
+-#endif
+ kfree(tcon->origin_fullpath);
+ kfree(tcon);
+ }
--- /dev/null
+From 4a5ba0e0bfe552ac7451f57e304f6343c3d87f89 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 1 Apr 2024 22:44:08 -0300
+Subject: smb: client: handle DFS tcons in cifs_construct_tcon()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 4a5ba0e0bfe552ac7451f57e304f6343c3d87f89 upstream.
+
+The tcons created by cifs_construct_tcon() on multiuser mounts must
+also be able to failover and refresh DFS referrals, so set the
+appropriate fields in order to get a full DFS tcon. They could be
+shared among different superblocks later, too.
+
+Cc: stable@vger.kernel.org # 6.4+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404021518.3Xu2VU4s-lkp@intel.com/
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/connect.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -3996,6 +3996,7 @@ cifs_construct_tcon(struct cifs_sb_info
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon = NULL;
+ struct smb3_fs_context *ctx;
++ char *origin_fullpath = NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+@@ -4019,6 +4020,7 @@ cifs_construct_tcon(struct cifs_sb_info
+ ctx->sign = master_tcon->ses->sign;
+ ctx->seal = master_tcon->seal;
+ ctx->witness = master_tcon->use_witness;
++ ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;
+
+ rc = cifs_set_vol_auth(ctx, master_tcon->ses);
+ if (rc) {
+@@ -4038,12 +4040,39 @@ cifs_construct_tcon(struct cifs_sb_info
+ goto out;
+ }
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ spin_lock(&master_tcon->tc_lock);
++ if (master_tcon->origin_fullpath) {
++ spin_unlock(&master_tcon->tc_lock);
++ origin_fullpath = dfs_get_path(cifs_sb, cifs_sb->ctx->source);
++ if (IS_ERR(origin_fullpath)) {
++ tcon = ERR_CAST(origin_fullpath);
++ origin_fullpath = NULL;
++ cifs_put_smb_ses(ses);
++ goto out;
++ }
++ } else {
++ spin_unlock(&master_tcon->tc_lock);
++ }
++#endif
++
+ tcon = cifs_get_tcon(ses, ctx);
+ if (IS_ERR(tcon)) {
+ cifs_put_smb_ses(ses);
+ goto out;
+ }
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ if (origin_fullpath) {
++ spin_lock(&tcon->tc_lock);
++ tcon->origin_fullpath = origin_fullpath;
++ spin_unlock(&tcon->tc_lock);
++ origin_fullpath = NULL;
++ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
++ dfs_cache_get_ttl() * HZ);
++ }
++#endif
++
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ if (cap_unix(ses))
+ reset_cifs_unix_caps(0, tcon, NULL, ctx);
+@@ -4052,6 +4081,7 @@ cifs_construct_tcon(struct cifs_sb_info
+ out:
+ kfree(ctx->username);
+ kfree_sensitive(ctx->password);
++ kfree(origin_fullpath);
+ kfree(ctx);
+
+ return tcon;
--- /dev/null
+From 0a05ad21d77a188d06481c36d6016805a881bcc0 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 1 Apr 2024 22:44:07 -0300
+Subject: smb: client: refresh referral without acquiring refpath_lock
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 0a05ad21d77a188d06481c36d6016805a881bcc0 upstream.
+
+Avoid refreshing DFS referral with refpath_lock acquired as the I/O
+could block for a while due to a potentially disconnected or slow DFS
+root server and then making other threads - that use same @server and
+don't require a DFS root server - unable to make any progress.
+
+Cc: stable@vger.kernel.org # 6.4+
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/dfs_cache.c | 44 ++++++++++++++++++++++++--------------------
+ 1 file changed, 24 insertions(+), 20 deletions(-)
+
+--- a/fs/smb/client/dfs_cache.c
++++ b/fs/smb/client/dfs_cache.c
+@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses
+ return ret;
+ }
+
+-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
++/* Refresh dfs referral of @ses and mark it for reconnect if needed */
++static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+ {
+ struct TCP_Server_Info *server = ses->server;
+ DFS_CACHE_TGT_LIST(old_tl);
+@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *pa
+ bool needs_refresh = false;
+ struct cache_entry *ce;
+ unsigned int xid;
++ char *path = NULL;
+ int rc = 0;
+
+ xid = get_xid();
+
++ mutex_lock(&server->refpath_lock);
++ if (server->leaf_fullpath) {
++ path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
++ if (!path)
++ rc = -ENOMEM;
++ }
++ mutex_unlock(&server->refpath_lock);
++ if (!path)
++ goto out;
++
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+@@ -1218,19 +1229,17 @@ out:
+ free_xid(xid);
+ dfs_cache_free_tgts(&old_tl);
+ dfs_cache_free_tgts(&new_tl);
+- return rc;
++ kfree(path);
+ }
+
+-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
++static inline void refresh_ses_referral(struct cifs_ses *ses)
+ {
+- struct TCP_Server_Info *server = tcon->ses->server;
+- struct cifs_ses *ses = tcon->ses;
++ __refresh_ses_referral(ses, false);
++}
+
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
+- mutex_unlock(&server->refpath_lock);
+- return 0;
++static inline void force_refresh_ses_referral(struct cifs_ses *ses)
++{
++ __refresh_ses_referral(ses, true);
+ }
+
+ /**
+@@ -1271,25 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_
+ */
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+
+- return refresh_tcon(tcon, true);
++ force_refresh_ses_referral(tcon->ses);
++ return 0;
+ }
+
+ /* Refresh all DFS referrals related to DFS tcon */
+ void dfs_cache_refresh(struct work_struct *work)
+ {
+- struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon;
+ struct cifs_ses *ses;
+
+ tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+
+- for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
+- server = ses->server;
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+- mutex_unlock(&server->refpath_lock);
+- }
++ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
++ refresh_ses_referral(ses);
+
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ atomic_read(&dfs_cache_ttl) * HZ);
--- /dev/null
+From 93cee45ccfebc62a3bb4cd622b89e00c8c7d8493 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 1 Apr 2024 22:44:09 -0300
+Subject: smb: client: serialise cifs_construct_tcon() with cifs_mount_mutex
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 93cee45ccfebc62a3bb4cd622b89e00c8c7d8493 upstream.
+
+Serialise cifs_construct_tcon() with cifs_mount_mutex to handle
+parallel mounts that may end up reusing the session and tcon created
+by it.
+
+Cc: stable@vger.kernel.org # 6.4+
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/connect.c | 13 ++++++++++++-
+ fs/smb/client/fs_context.c | 6 +++---
+ fs/smb/client/fs_context.h | 12 ++++++++++++
+ 3 files changed, 27 insertions(+), 4 deletions(-)
+
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -3989,7 +3989,7 @@ cifs_set_vol_auth(struct smb3_fs_context
+ }
+
+ static struct cifs_tcon *
+-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
++__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ {
+ int rc;
+ struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
+@@ -4087,6 +4087,17 @@ out:
+ return tcon;
+ }
+
++static struct cifs_tcon *
++cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
++{
++ struct cifs_tcon *ret;
++
++ cifs_mount_lock();
++ ret = __cifs_construct_tcon(cifs_sb, fsuid);
++ cifs_mount_unlock();
++ return ret;
++}
++
+ struct cifs_tcon *
+ cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
+ {
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -37,7 +37,7 @@
+ #include "rfc1002pdu.h"
+ #include "fs_context.h"
+
+-static DEFINE_MUTEX(cifs_mount_mutex);
++DEFINE_MUTEX(cifs_mount_mutex);
+
+ static const match_table_t cifs_smb_version_tokens = {
+ { Smb_1, SMB1_VERSION_STRING },
+@@ -753,9 +753,9 @@ static int smb3_get_tree(struct fs_conte
+
+ if (err)
+ return err;
+- mutex_lock(&cifs_mount_mutex);
++ cifs_mount_lock();
+ ret = smb3_get_tree_common(fc);
+- mutex_unlock(&cifs_mount_mutex);
++ cifs_mount_unlock();
+ return ret;
+ }
+
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -295,4 +295,16 @@ extern void smb3_update_mnt_flags(struct
+ #define MAX_CACHED_FIDS 16
+ extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+
++extern struct mutex cifs_mount_mutex;
++
++static inline void cifs_mount_lock(void)
++{
++ mutex_lock(&cifs_mount_mutex);
++}
++
++static inline void cifs_mount_unlock(void)
++{
++ mutex_unlock(&cifs_mount_mutex);
++}
++
+ #endif
--- /dev/null
+From 173217bd73365867378b5e75a86f0049e1069ee8 Mon Sep 17 00:00:00 2001
+From: Ritvik Budhiraja <rbudhiraja@microsoft.com>
+Date: Tue, 2 Apr 2024 14:01:28 -0500
+Subject: smb3: retrying on failed server close
+
+From: Ritvik Budhiraja <rbudhiraja@microsoft.com>
+
+commit 173217bd73365867378b5e75a86f0049e1069ee8 upstream.
+
+In the current implementation, CIFS close sends a close to the
+server and does not check for the success of the server close.
+This patch adds functionality to check for server close return
+status and retries in case of an EBUSY or EAGAIN error.
+
+This can help avoid handle leaks
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ritvik Budhiraja <rbudhiraja@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 6 ++--
+ fs/smb/client/cifsfs.c | 11 +++++++
+ fs/smb/client/cifsglob.h | 7 +++--
+ fs/smb/client/file.c | 63 ++++++++++++++++++++++++++++++++++++++++-----
+ fs/smb/client/smb1ops.c | 4 +-
+ fs/smb/client/smb2ops.c | 9 +++---
+ fs/smb/client/smb2pdu.c | 2 -
+ 7 files changed, 85 insertions(+), 17 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -417,6 +417,7 @@ smb2_close_cached_fid(struct kref *ref)
+ {
+ struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ refcount);
++ int rc;
+
+ spin_lock(&cfid->cfids->cfid_list_lock);
+ if (cfid->on_list) {
+@@ -430,9 +431,10 @@ smb2_close_cached_fid(struct kref *ref)
+ cfid->dentry = NULL;
+
+ if (cfid->is_open) {
+- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++ rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+- atomic_dec(&cfid->tcon->num_remote_opens);
++ if (rc != -EBUSY && rc != -EAGAIN)
++ atomic_dec(&cfid->tcon->num_remote_opens);
+ }
+
+ free_cached_dir(cfid);
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -160,6 +160,7 @@ struct workqueue_struct *decrypt_wq;
+ struct workqueue_struct *fileinfo_put_wq;
+ struct workqueue_struct *cifsoplockd_wq;
+ struct workqueue_struct *deferredclose_wq;
++struct workqueue_struct *serverclose_wq;
+ __u32 cifs_lock_secret;
+
+ /*
+@@ -1893,6 +1894,13 @@ init_cifs(void)
+ goto out_destroy_cifsoplockd_wq;
+ }
+
++ serverclose_wq = alloc_workqueue("serverclose",
++ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++ if (!serverclose_wq) {
++ rc = -ENOMEM;
++ goto out_destroy_serverclose_wq;
++ }
++
+ rc = cifs_init_inodecache();
+ if (rc)
+ goto out_destroy_deferredclose_wq;
+@@ -1967,6 +1975,8 @@ out_destroy_decrypt_wq:
+ destroy_workqueue(decrypt_wq);
+ out_destroy_cifsiod_wq:
+ destroy_workqueue(cifsiod_wq);
++out_destroy_serverclose_wq:
++ destroy_workqueue(serverclose_wq);
+ out_clean_proc:
+ cifs_proc_clean();
+ return rc;
+@@ -1996,6 +2006,7 @@ exit_cifs(void)
+ destroy_workqueue(cifsoplockd_wq);
+ destroy_workqueue(decrypt_wq);
+ destroy_workqueue(fileinfo_put_wq);
++ destroy_workqueue(serverclose_wq);
+ destroy_workqueue(cifsiod_wq);
+ cifs_proc_clean();
+ }
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -432,10 +432,10 @@ struct smb_version_operations {
+ /* set fid protocol-specific info */
+ void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
+ /* close a file */
+- void (*close)(const unsigned int, struct cifs_tcon *,
++ int (*close)(const unsigned int, struct cifs_tcon *,
+ struct cifs_fid *);
+ /* close a file, returning file attributes and timestamps */
+- void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
++ int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *pfile_info);
+ /* send a flush request to the server */
+ int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
+@@ -1423,6 +1423,7 @@ struct cifsFileInfo {
+ bool invalidHandle:1; /* file closed via session abend */
+ bool swapfile:1;
+ bool oplock_break_cancelled:1;
++ bool offload:1; /* offload final part of _put to a wq */
+ unsigned int oplock_epoch; /* epoch from the lease break */
+ __u32 oplock_level; /* oplock/lease level from the lease break */
+ int count;
+@@ -1431,6 +1432,7 @@ struct cifsFileInfo {
+ struct cifs_search_info srch_inf;
+ struct work_struct oplock_break; /* work for oplock breaks */
+ struct work_struct put; /* work for the final part of _put */
++ struct work_struct serverclose; /* work for serverclose */
+ struct delayed_work deferred;
+ bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
+ char *symlink_target;
+@@ -2087,6 +2089,7 @@ extern struct workqueue_struct *decrypt_
+ extern struct workqueue_struct *fileinfo_put_wq;
+ extern struct workqueue_struct *cifsoplockd_wq;
+ extern struct workqueue_struct *deferredclose_wq;
++extern struct workqueue_struct *serverclose_wq;
+ extern __u32 cifs_lock_secret;
+
+ extern mempool_t *cifs_mid_poolp;
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -459,6 +459,7 @@ cifs_down_write(struct rw_semaphore *sem
+ }
+
+ static void cifsFileInfo_put_work(struct work_struct *work);
++void serverclose_work(struct work_struct *work);
+
+ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ struct tcon_link *tlink, __u32 oplock,
+@@ -505,6 +506,7 @@ struct cifsFileInfo *cifs_new_fileinfo(s
+ cfile->tlink = cifs_get_tlink(tlink);
+ INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ INIT_WORK(&cfile->put, cifsFileInfo_put_work);
++ INIT_WORK(&cfile->serverclose, serverclose_work);
+ INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
+ mutex_init(&cfile->fh_mutex);
+ spin_lock_init(&cfile->file_info_lock);
+@@ -596,6 +598,40 @@ static void cifsFileInfo_put_work(struct
+ cifsFileInfo_put_final(cifs_file);
+ }
+
++void serverclose_work(struct work_struct *work)
++{
++ struct cifsFileInfo *cifs_file = container_of(work,
++ struct cifsFileInfo, serverclose);
++
++ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
++
++ struct TCP_Server_Info *server = tcon->ses->server;
++ int rc = 0;
++ int retries = 0;
++ int MAX_RETRIES = 4;
++
++ do {
++ if (server->ops->close_getattr)
++ rc = server->ops->close_getattr(0, tcon, cifs_file);
++ else if (server->ops->close)
++ rc = server->ops->close(0, tcon, &cifs_file->fid);
++
++ if (rc == -EBUSY || rc == -EAGAIN) {
++ retries++;
++ msleep(250);
++ }
++ } while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
++ );
++
++ if (retries == MAX_RETRIES)
++ pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
++
++ if (cifs_file->offload)
++ queue_work(fileinfo_put_wq, &cifs_file->put);
++ else
++ cifsFileInfo_put_final(cifs_file);
++}
++
+ /**
+ * cifsFileInfo_put - release a reference of file priv data
+ *
+@@ -636,10 +672,13 @@ void _cifsFileInfo_put(struct cifsFileIn
+ struct cifs_fid fid = {};
+ struct cifs_pending_open open;
+ bool oplock_break_cancelled;
++ bool serverclose_offloaded = false;
+
+ spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifsi->open_file_lock);
+ spin_lock(&cifs_file->file_info_lock);
++
++ cifs_file->offload = offload;
+ if (--cifs_file->count > 0) {
+ spin_unlock(&cifs_file->file_info_lock);
+ spin_unlock(&cifsi->open_file_lock);
+@@ -681,13 +720,20 @@ void _cifsFileInfo_put(struct cifsFileIn
+ if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int xid;
++ int rc = 0;
+
+ xid = get_xid();
+ if (server->ops->close_getattr)
+- server->ops->close_getattr(xid, tcon, cifs_file);
++ rc = server->ops->close_getattr(xid, tcon, cifs_file);
+ else if (server->ops->close)
+- server->ops->close(xid, tcon, &cifs_file->fid);
++ rc = server->ops->close(xid, tcon, &cifs_file->fid);
+ _free_xid(xid);
++
++ if (rc == -EBUSY || rc == -EAGAIN) {
++ // Server close failed, hence offloading it as an async op
++ queue_work(serverclose_wq, &cifs_file->serverclose);
++ serverclose_offloaded = true;
++ }
+ }
+
+ if (oplock_break_cancelled)
+@@ -695,10 +741,15 @@ void _cifsFileInfo_put(struct cifsFileIn
+
+ cifs_del_pending_open(&open);
+
+- if (offload)
+- queue_work(fileinfo_put_wq, &cifs_file->put);
+- else
+- cifsFileInfo_put_final(cifs_file);
++ // if serverclose has been offloaded to wq (on failure), it will
++ // handle offloading put as well. If serverclose not offloaded,
++ // we need to handle offloading put here.
++ if (!serverclose_offloaded) {
++ if (offload)
++ queue_work(fileinfo_put_wq, &cifs_file->put);
++ else
++ cifsFileInfo_put_final(cifs_file);
++ }
+ }
+
+ int cifs_open(struct inode *inode, struct file *file)
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -753,11 +753,11 @@ cifs_set_fid(struct cifsFileInfo *cfile,
+ cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+ }
+
+-static void
++static int
+ cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid *fid)
+ {
+- CIFSSMBClose(xid, tcon, fid->netfid);
++ return CIFSSMBClose(xid, tcon, fid->netfid);
+ }
+
+ static int
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -1411,14 +1411,14 @@ smb2_set_fid(struct cifsFileInfo *cfile,
+ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
+ }
+
+-static void
++static int
+ smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid *fid)
+ {
+- SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++ return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ }
+
+-static void
++static int
+ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile)
+ {
+@@ -1429,7 +1429,7 @@ smb2_close_getattr(const unsigned int xi
+ rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, &file_inf);
+ if (rc)
+- return;
++ return rc;
+
+ inode = d_inode(cfile->dentry);
+
+@@ -1458,6 +1458,7 @@ smb2_close_getattr(const unsigned int xi
+
+ /* End of file and Attributes should not have to be updated on close */
+ spin_unlock(&inode->i_lock);
++ return rc;
+ }
+
+ static int
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3606,9 +3606,9 @@ replay_again:
+ memcpy(&pbuf->network_open_info,
+ &rsp->network_open_info,
+ sizeof(pbuf->network_open_info));
++ atomic_dec(&tcon->num_remote_opens);
+ }
+
+- atomic_dec(&tcon->num_remote_opens);
+ close_exit:
+ SMB2_close_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp);