git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.3-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 6 May 2023 06:52:08 +0000 (15:52 +0900)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 6 May 2023 06:52:08 +0000 (15:52 +0900)
added patches:
acpi-video-remove-acpi_backlight-video-quirk-for-lenovo-thinkpad-w530.patch
drm-amd-pm-re-enable-the-gfx-imu-when-smu-resume.patch
i2c-omap-fix-standard-mode-false-ack-readings.patch
igc-read-before-write-to-srrctl-register.patch
iommu-amd-fix-guest-virtual-apic-table-root-pointer-configuration-in-irte.patch
ksmbd-block-asynchronous-requests-when-making-a-delay-on-session-setup.patch
ksmbd-call-rcu_barrier-in-ksmbd_server_exit.patch
ksmbd-destroy-expired-sessions.patch
ksmbd-fix-deadlock-in-ksmbd_find_crypto_ctx.patch
ksmbd-fix-memleak-in-session-setup.patch
ksmbd-fix-null-pointer-dereference-in-smb2_get_info_filesystem.patch
ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch
ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with-multichannel.patch
ksmbd-fix-racy-issue-under-cocurrent-smb2-tree-disconnect.patch
ksmbd-not-allow-guest-user-on-multichannel.patch
kvm-risc-v-retry-fault-if-vma_lookup-results-become-invalid.patch
kvm-x86-preserve-tdp-mmu-roots-until-they-are-explicitly-invalidated.patch
risc-v-align-sbi-probe-implementation-with-spec.patch
riscv-mm-remove-redundant-parameter-of-create_fdt_early_page_table.patch
thermal-intel-powerclamp-fix-null-pointer-access-issue.patch
tracing-fix-permissions-for-the-buffer_percent-file.patch

22 files changed:
queue-6.3/acpi-video-remove-acpi_backlight-video-quirk-for-lenovo-thinkpad-w530.patch [new file with mode: 0644]
queue-6.3/drm-amd-pm-re-enable-the-gfx-imu-when-smu-resume.patch [new file with mode: 0644]
queue-6.3/i2c-omap-fix-standard-mode-false-ack-readings.patch [new file with mode: 0644]
queue-6.3/igc-read-before-write-to-srrctl-register.patch [new file with mode: 0644]
queue-6.3/iommu-amd-fix-guest-virtual-apic-table-root-pointer-configuration-in-irte.patch [new file with mode: 0644]
queue-6.3/ksmbd-block-asynchronous-requests-when-making-a-delay-on-session-setup.patch [new file with mode: 0644]
queue-6.3/ksmbd-call-rcu_barrier-in-ksmbd_server_exit.patch [new file with mode: 0644]
queue-6.3/ksmbd-destroy-expired-sessions.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-deadlock-in-ksmbd_find_crypto_ctx.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-memleak-in-session-setup.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-null-pointer-dereference-in-smb2_get_info_filesystem.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with-multichannel.patch [new file with mode: 0644]
queue-6.3/ksmbd-fix-racy-issue-under-cocurrent-smb2-tree-disconnect.patch [new file with mode: 0644]
queue-6.3/ksmbd-not-allow-guest-user-on-multichannel.patch [new file with mode: 0644]
queue-6.3/kvm-risc-v-retry-fault-if-vma_lookup-results-become-invalid.patch [new file with mode: 0644]
queue-6.3/kvm-x86-preserve-tdp-mmu-roots-until-they-are-explicitly-invalidated.patch [new file with mode: 0644]
queue-6.3/risc-v-align-sbi-probe-implementation-with-spec.patch [new file with mode: 0644]
queue-6.3/riscv-mm-remove-redundant-parameter-of-create_fdt_early_page_table.patch [new file with mode: 0644]
queue-6.3/series
queue-6.3/thermal-intel-powerclamp-fix-null-pointer-access-issue.patch [new file with mode: 0644]
queue-6.3/tracing-fix-permissions-for-the-buffer_percent-file.patch [new file with mode: 0644]

diff --git a/queue-6.3/acpi-video-remove-acpi_backlight-video-quirk-for-lenovo-thinkpad-w530.patch b/queue-6.3/acpi-video-remove-acpi_backlight-video-quirk-for-lenovo-thinkpad-w530.patch
new file mode 100644 (file)
index 0000000..e86fa79
--- /dev/null
@@ -0,0 +1,59 @@
+From 3db66620ea90b0fd4134b31eabfec16d7b07d7e3 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Fri, 28 Apr 2023 15:23:50 +0200
+Subject: ACPI: video: Remove acpi_backlight=video quirk for Lenovo ThinkPad W530
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 3db66620ea90b0fd4134b31eabfec16d7b07d7e3 upstream.
+
+Remove the acpi_backlight=video quirk for Lenovo ThinkPad W530.
+
+This was intended to help users of the (unsupported) Nvidia binary driver,
+but this has been reported to cause backlight control issues for users
+who have the gfx configured in hybrid (dual-GPU) mode, so drop this.
+
+The Nvidia binary driver should call acpi_video_register_backlight()
+when necessary and this has been reported to Nvidia.
+
+Until this is fixed Nvidia binary driver users can work around this by
+passing "acpi_backlight=video" on the kernel commandline (with the latest
+6.1.y or newer stable series, kernels < 6.1.y don't need this).
+
+Fixes: a5b2781dcab2 ("ACPI: video: Add acpi_backlight=video quirk for Lenovo ThinkPad W530")
+Reported-by: Русев Путин <rockeraliexpress@gmail.com>
+Link: https://lore.kernel.org/linux-acpi/CAK4BXn0ngZRmzx1bodAF8nmYj0PWdUXzPGHofRrsyZj8MBpcVA@mail.gmail.com/
+Cc: 6.1+ <stable@vger.kernel.org> # 6.1+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/video_detect.c |   14 --------------
+ 1 file changed, 14 deletions(-)
+
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -300,20 +300,6 @@ static const struct dmi_system_id video_
+       },
+       /*
+-       * Older models with nvidia GPU which need acpi_video backlight
+-       * control and where the old nvidia binary driver series does not
+-       * call acpi_video_register_backlight().
+-       */
+-      {
+-       .callback = video_detect_force_video,
+-       /* ThinkPad W530 */
+-       .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-              DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
+-              },
+-      },
+-
+-      /*
+        * These models have a working acpi_video backlight control, and using
+        * native backlight causes a regression where backlight does not work
+        * when userspace is not handling brightness key events. Disable
diff --git a/queue-6.3/drm-amd-pm-re-enable-the-gfx-imu-when-smu-resume.patch b/queue-6.3/drm-amd-pm-re-enable-the-gfx-imu-when-smu-resume.patch
new file mode 100644 (file)
index 0000000..c23feec
--- /dev/null
@@ -0,0 +1,91 @@
+From f7f28f268b861c29dd18086bb636abedf0ff59ff Mon Sep 17 00:00:00 2001
+From: Tim Huang <tim.huang@amd.com>
+Date: Wed, 22 Mar 2023 14:39:16 +0800
+Subject: drm/amd/pm: re-enable the gfx imu when smu resume
+
+From: Tim Huang <tim.huang@amd.com>
+
+commit f7f28f268b861c29dd18086bb636abedf0ff59ff upstream.
+
+If the gfx imu is powered off during suspend, then
+it needs to be re-enabled on resume.
+
+Signed-off-by: Tim Huang <tim.huang@amd.com>
+Reviewed-by: Yifan Zhang <yifan1.zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c |   40 +++++++++++++++++++++---------
+ 1 file changed, 28 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -161,10 +161,15 @@ int smu_get_dpm_freq_range(struct smu_co
+ int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
+ {
+-      if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
+-              return -EOPNOTSUPP;
++      int ret = 0;
++      struct amdgpu_device *adev = smu->adev;
+-      return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
++      if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
++              ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
++              if (ret)
++                      dev_err(adev->dev, "Failed to enable gfx imu!\n");
++      }
++      return ret;
+ }
+ static u32 smu_get_mclk(void *handle, bool low)
+@@ -195,6 +200,19 @@ static u32 smu_get_sclk(void *handle, bo
+       return clk_freq * 100;
+ }
++static int smu_set_gfx_imu_enable(struct smu_context *smu)
++{
++      struct amdgpu_device *adev = smu->adev;
++
++      if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
++              return 0;
++
++      if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
++              return 0;
++
++      return smu_set_gfx_power_up_by_imu(smu);
++}
++
+ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+                                 bool enable)
+ {
+@@ -1390,15 +1408,9 @@ static int smu_hw_init(void *handle)
+       }
+       if (smu->is_apu) {
+-              if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
+-                              likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+-                      ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+-                      if (ret) {
+-                              dev_err(adev->dev, "Failed to Enable gfx imu!\n");
+-                              return ret;
+-                      }
+-              }
+-
++              ret = smu_set_gfx_imu_enable(smu);
++              if (ret)
++                      return ret;
+               smu_dpm_set_vcn_enable(smu, true);
+               smu_dpm_set_jpeg_enable(smu, true);
+               smu_set_gfx_cgpg(smu, true);
+@@ -1675,6 +1687,10 @@ static int smu_resume(void *handle)
+               return ret;
+       }
++      ret = smu_set_gfx_imu_enable(smu);
++      if (ret)
++              return ret;
++
+       smu_set_gfx_cgpg(smu, true);
+       smu->disable_uclk_switch = 0;
diff --git a/queue-6.3/i2c-omap-fix-standard-mode-false-ack-readings.patch b/queue-6.3/i2c-omap-fix-standard-mode-false-ack-readings.patch
new file mode 100644 (file)
index 0000000..fb8ac37
--- /dev/null
@@ -0,0 +1,37 @@
+From c770657bd2611b077ec1e7b1fe6aa92f249399bd Mon Sep 17 00:00:00 2001
+From: Reid Tonking <reidt@ti.com>
+Date: Wed, 26 Apr 2023 14:49:56 -0500
+Subject: i2c: omap: Fix standard mode false ACK readings
+
+From: Reid Tonking <reidt@ti.com>
+
+commit c770657bd2611b077ec1e7b1fe6aa92f249399bd upstream.
+
+In standard mode, rare false ACK responses were appearing with the
+i2cdetect tool. This was happening because the NACK interrupt triggered the
+ISR thread before the register access interrupt was ready. Removing the
+NACK interrupt's ability to trigger the ISR thread lets the register access
+ready interrupt do this instead.
+
+Cc: <stable@vger.kernel.org> # v3.7+
+Fixes: 3b2f8f82dad7 ("i2c: omap: switch to threaded IRQ support")
+Signed-off-by: Reid Tonking <reidt@ti.com>
+Acked-by: Vignesh Raghavendra <vigneshr@ti.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-omap.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
+       u16 stat;
+       stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+-      mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
++      mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
+       if (stat & mask)
+               ret = IRQ_WAKE_THREAD;
diff --git a/queue-6.3/igc-read-before-write-to-srrctl-register.patch b/queue-6.3/igc-read-before-write-to-srrctl-register.patch
new file mode 100644 (file)
index 0000000..9b937bb
--- /dev/null
@@ -0,0 +1,89 @@
+From 3ce29c17dc847bf4245e16aad78a7617afa96297 Mon Sep 17 00:00:00 2001
+From: Song Yoong Siang <yoong.siang.song@intel.com>
+Date: Tue, 2 May 2023 08:48:06 -0700
+Subject: igc: read before write to SRRCTL register
+
+From: Song Yoong Siang <yoong.siang.song@intel.com>
+
+commit 3ce29c17dc847bf4245e16aad78a7617afa96297 upstream.
+
+The igc_configure_rx_ring() function will be called as part of XDP program
+setup. If Rx hardware timestamping is enabled prior to XDP program setup,
+this timestamp enablement will be overwritten when the buffer size is
+written into the SRRCTL register.
+
+Thus, this commit reads the register value before writing to the SRRCTL
+register. This commit is tested using the xdp_hw_metadata bpf selftest
+tool. The tool enables Rx hardware timestamping and then attaches an XDP
+program to the igc driver. It displays the hardware timestamp of UDP packets
+with port number 9092. Below are the details of the test steps and results.
+
+Command on DUT:
+  sudo ./xdp_hw_metadata <interface name>
+
+Command on Link Partner:
+  echo -n skb | nc -u -q1 <destination IPv4 addr> 9092
+
+Result before this patch:
+  skb hwtstamp is not found!
+
+Result after this patch:
+  found skb hwtstamp = 1677800973.642836757
+
+Optionally, read PHC to confirm the values obtained are almost the same:
+Command:
+  sudo ./testptp -d /dev/ptp0 -g
+Result:
+  clock time: 1677800973.913598978 or Fri Mar  3 07:49:33 2023
+
+Fixes: fc9df2a0b520 ("igc: Enable RX via AF_XDP zero-copy")
+Cc: <stable@vger.kernel.org> # 5.14+
+Signed-off-by: Song Yoong Siang <yoong.siang.song@intel.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/igc/igc_base.h |   11 ++++++++---
+ drivers/net/ethernet/intel/igc/igc_main.c |    7 +++++--
+ 2 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/intel/igc/igc_base.h
++++ b/drivers/net/ethernet/intel/igc/igc_base.h
+@@ -87,8 +87,13 @@ union igc_adv_rx_desc {
+ #define IGC_RXDCTL_SWFLUSH            0x04000000 /* Receive Software Flush */
+ /* SRRCTL bit definitions */
+-#define IGC_SRRCTL_BSIZEPKT_SHIFT             10 /* Shift _right_ */
+-#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT         2  /* Shift _left_ */
+-#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF        0x02000000
++#define IGC_SRRCTL_BSIZEPKT_MASK      GENMASK(6, 0)
++#define IGC_SRRCTL_BSIZEPKT(x)                FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
++                                      (x) / 1024) /* in 1 KB resolution */
++#define IGC_SRRCTL_BSIZEHDR_MASK      GENMASK(13, 8)
++#define IGC_SRRCTL_BSIZEHDR(x)                FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \
++                                      (x) / 64) /* in 64 bytes resolution */
++#define IGC_SRRCTL_DESCTYPE_MASK      GENMASK(27, 25)
++#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF        FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1)
+ #endif /* _IGC_BASE_H */
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -641,8 +641,11 @@ static void igc_configure_rx_ring(struct
+       else
+               buf_size = IGC_RXBUFFER_2048;
+-      srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+-      srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
++      srrctl = rd32(IGC_SRRCTL(reg_idx));
++      srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
++                  IGC_SRRCTL_DESCTYPE_MASK);
++      srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
++      srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
+       srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+       wr32(IGC_SRRCTL(reg_idx), srrctl);
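
The core of the change above is switching from a blind write to a read-modify-write of SRRCTL, so bits owned by other code paths (such as the timestamp enable configured before the XDP program was attached) survive. As a rough illustration only, here is a small stand-alone C sketch of that pattern; the mask values and names below are hypothetical, not the real igc register layout:

/*
 * Illustrative sketch only (not part of the patch): why a read-modify-write
 * preserves bits that another code path already set in the same register.
 * The field layout and names here are made up for the demonstration.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BSIZEPKT_MASK  0x0000007Fu  /* hypothetical buffer-size field */
#define DEMO_TIMESTAMP_EN   0x40000000u  /* hypothetical bit owned elsewhere */

static uint32_t demo_srrctl;             /* stands in for the hardware register */

static void blind_write(uint32_t buf_kb)
{
	demo_srrctl = buf_kb & DEMO_BSIZEPKT_MASK;   /* clobbers DEMO_TIMESTAMP_EN */
}

static void read_modify_write(uint32_t buf_kb)
{
	uint32_t v = demo_srrctl;                    /* read the current value */

	v &= ~DEMO_BSIZEPKT_MASK;                    /* clear only the owned field */
	v |= buf_kb & DEMO_BSIZEPKT_MASK;            /* program the new buffer size */
	demo_srrctl = v;                             /* unrelated bits survive */
}

int main(void)
{
	demo_srrctl = DEMO_TIMESTAMP_EN;             /* timestamping already enabled */
	blind_write(2);
	printf("blind write:       timestamp bit %s\n",
	       (demo_srrctl & DEMO_TIMESTAMP_EN) ? "kept" : "lost");

	demo_srrctl = DEMO_TIMESTAMP_EN;
	read_modify_write(2);
	printf("read-modify-write: timestamp bit %s\n",
	       (demo_srrctl & DEMO_TIMESTAMP_EN) ? "kept" : "lost");
	return 0;
}
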
diff --git a/queue-6.3/iommu-amd-fix-guest-virtual-apic-table-root-pointer-configuration-in-irte.patch b/queue-6.3/iommu-amd-fix-guest-virtual-apic-table-root-pointer-configuration-in-irte.patch
new file mode 100644 (file)
index 0000000..17a4664
--- /dev/null
@@ -0,0 +1,50 @@
+From ccc62b827775915a9b82db42a29813d04f92df7a Mon Sep 17 00:00:00 2001
+From: Kishon Vijay Abraham I <kvijayab@amd.com>
+Date: Wed, 5 Apr 2023 13:03:17 +0000
+Subject: iommu/amd: Fix "Guest Virtual APIC Table Root Pointer" configuration in IRTE
+
+From: Kishon Vijay Abraham I <kvijayab@amd.com>
+
+commit ccc62b827775915a9b82db42a29813d04f92df7a upstream.
+
+commit b9c6ff94e43a ("iommu/amd: Re-factor guest virtual APIC
+(de-)activation code") while refactoring guest virtual APIC
+activation/de-activation code, stored information for activate/de-activate
+in "struct amd_ir_data". It used a 32-bit integer data type for storing the
+"Guest Virtual APIC Table Root Pointer" (ga_root_ptr), though the
+"ga_root_ptr" is actually a 40-bit field in IRTE (Interrupt Remapping
+Table Entry).
+
+This causes interrupts from PCIe devices to not reach the guest in the case
+of PCIe passthrough with SME (Secure Memory Encryption) enabled, as the _SME_
+bit in the "ga_root_ptr" is lost before writing it to the IRTE.
+
+Fix it by using a 64-bit data type for storing the "ga_root_ptr". While at
+it, also change the data type of "ga_tag" to u32 in order to match
+the IOMMU spec.
+
+Fixes: b9c6ff94e43a ("iommu/amd: Re-factor guest virtual APIC (de-)activation code")
+Cc: stable@vger.kernel.org # v5.4+
+Reported-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Signed-off-by: Kishon Vijay Abraham I <kvijayab@amd.com>
+Link: https://lore.kernel.org/r/20230405130317.9351-1-kvijayab@amd.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/amd/amd_iommu_types.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -1001,8 +1001,8 @@ struct amd_ir_data {
+        */
+       struct irq_cfg *cfg;
+       int ga_vector;
+-      int ga_root_ptr;
+-      int ga_tag;
++      u64 ga_root_ptr;
++      u32 ga_tag;
+ };
+ struct amd_irte_ops {
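
The mechanism of the bug is a plain integer truncation: the hardware field is 40 bits wide, but the old struct member was a 32-bit int, so any bits above bit 31 (for example an encryption-related bit landing in the upper part of the pointer) were silently dropped before the IRTE was written. A stand-alone sketch of that truncation, using a hypothetical address rather than real IOMMU data:

/*
 * Illustrative sketch only (not part of the patch): a 40-bit field value
 * loses its upper bits when stored in a 32-bit int. The address and bit
 * position below are hypothetical, chosen just to land above bit 31.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical table address with a high bit set (e.g. an encryption bit) */
	uint64_t table_addr = (1ULL << 47) | 0x12345000ULL;
	uint64_t field      = table_addr >> 12;      /* 40-bit "root pointer" value */

	int      old_member = (int)field;            /* old type of ga_root_ptr */
	uint64_t new_member = field;                 /* fixed type of ga_root_ptr */

	printf("field value : 0x%010" PRIx64 "\n", field);
	printf("via int     : 0x%010" PRIx64 "  (upper bits lost)\n",
	       (uint64_t)(uint32_t)old_member);
	printf("via u64     : 0x%010" PRIx64 "\n", new_member);
	return 0;
}
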
diff --git a/queue-6.3/ksmbd-block-asynchronous-requests-when-making-a-delay-on-session-setup.patch b/queue-6.3/ksmbd-block-asynchronous-requests-when-making-a-delay-on-session-setup.patch
new file mode 100644 (file)
index 0000000..12750ec
--- /dev/null
@@ -0,0 +1,38 @@
+From b096d97f47326b1e2dbdef1c91fab69ffda54d17 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:43:30 +0900
+Subject: ksmbd: block asynchronous requests when making a delay on session setup
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit b096d97f47326b1e2dbdef1c91fab69ffda54d17 upstream.
+
+ksmbd makes a delay of 5 seconds on session setup to avoid dictionary
+attacks. But the 5 second delay can be bypassed by using asynchronous
+requests. This patch blocks all requests on the current connection while
+making a delay on session setup failure.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20482
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1874,8 +1874,11 @@ out_err:
+                               try_delay = true;
+                       sess->state = SMB2_SESSION_EXPIRED;
+-                      if (try_delay)
++                      if (try_delay) {
++                              ksmbd_conn_set_need_reconnect(conn);
+                               ssleep(5);
++                              ksmbd_conn_set_need_negotiate(conn);
++                      }
+               }
+       }
diff --git a/queue-6.3/ksmbd-call-rcu_barrier-in-ksmbd_server_exit.patch b/queue-6.3/ksmbd-call-rcu_barrier-in-ksmbd_server_exit.patch
new file mode 100644 (file)
index 0000000..476ee5a
--- /dev/null
@@ -0,0 +1,34 @@
+From eb307d09fe15844fdaebeb8cc8c9b9e925430aa5 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:51:51 +0900
+Subject: ksmbd: call rcu_barrier() in ksmbd_server_exit()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit eb307d09fe15844fdaebeb8cc8c9b9e925430aa5 upstream.
+
+This bug is triggered by a race between closing a connection
+and rmmod. In ksmbd, rcu_barrier() is not called at module unload time,
+so nothing prevents ksmbd from getting unloaded while it still has RCU
+callbacks pending. This can lead to unintended execution of kernel
+code locally and can be used to defeat protections such as Kernel Lockdown.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20477
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/server.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -606,6 +606,7 @@ err_unregister:
+ static void __exit ksmbd_server_exit(void)
+ {
+       ksmbd_server_shutdown();
++      rcu_barrier();
+       ksmbd_release_inode_hash();
+ }
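
The one-line fix relies on the standard call_rcu()/rcu_barrier() contract: rcu_barrier() blocks until every RCU callback already queued has run, so none of them can fire after the module text is freed. A minimal, hedged sketch of that generic pattern follows; the names are hypothetical and not ksmbd's actual data structures:

/*
 * Illustrative sketch only (not part of the patch): the generic pattern that
 * motivates calling rcu_barrier() in a module's exit path.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_obj {
	struct rcu_head rcu;
	int value;
};

static struct demo_obj *demo;

static void demo_free_rcu(struct rcu_head *head)
{
	/* runs after a grace period; must not outlive the module text */
	kfree(container_of(head, struct demo_obj, rcu));
}

static int __init demo_init(void)
{
	demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	return demo ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	if (demo)
		call_rcu(&demo->rcu, demo_free_rcu);
	/*
	 * Without this, the callback above could still be pending when the
	 * module is unloaded, leaving RCU with a pointer into freed code.
	 */
	rcu_barrier();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
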
diff --git a/queue-6.3/ksmbd-destroy-expired-sessions.patch b/queue-6.3/ksmbd-destroy-expired-sessions.patch
new file mode 100644 (file)
index 0000000..915ba80
--- /dev/null
@@ -0,0 +1,181 @@
+From ea174a91893956450510945a0c5d1a10b5323656 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:42:21 +0900
+Subject: ksmbd: destroy expired sessions
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit ea174a91893956450510945a0c5d1a10b5323656 upstream.
+
+A client can indefinitely send smb2 session setup requests with
+the SessionId set to 0, thus indefinitely spawning new sessions
+and causing unbounded memory usage. This patch limits the number
+of sessions by using an expiry timeout and the session state.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20478
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/mgmt/user_session.c |   68 +++++++++++++++++++++++--------------------
+ fs/ksmbd/mgmt/user_session.h |    1 
+ fs/ksmbd/smb2pdu.c           |    1 
+ fs/ksmbd/smb2pdu.h           |    2 +
+ 4 files changed, 41 insertions(+), 31 deletions(-)
+
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -165,70 +165,73 @@ static struct ksmbd_session *__session_l
+       struct ksmbd_session *sess;
+       hash_for_each_possible(sessions_table, sess, hlist, id) {
+-              if (id == sess->id)
++              if (id == sess->id) {
++                      sess->last_active = jiffies;
+                       return sess;
++              }
+       }
+       return NULL;
+ }
++static void ksmbd_expire_session(struct ksmbd_conn *conn)
++{
++      unsigned long id;
++      struct ksmbd_session *sess;
++
++      xa_for_each(&conn->sessions, id, sess) {
++              if (sess->state != SMB2_SESSION_VALID ||
++                  time_after(jiffies,
++                             sess->last_active + SMB2_SESSION_TIMEOUT)) {
++                      xa_erase(&conn->sessions, sess->id);
++                      ksmbd_session_destroy(sess);
++                      continue;
++              }
++      }
++}
++
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+                          struct ksmbd_session *sess)
+ {
+       sess->dialect = conn->dialect;
+       memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++      ksmbd_expire_session(conn);
+       return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+-static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+       struct channel *chann;
+       chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+       if (!chann)
+-              return -ENOENT;
++              return;
+       kfree(chann);
+-
+-      return 0;
+ }
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ {
+       struct ksmbd_session *sess;
++      unsigned long id;
+-      if (conn->binding) {
+-              int bkt;
+-
+-              down_write(&sessions_table_lock);
+-              hash_for_each(sessions_table, bkt, sess, hlist) {
+-                      if (!ksmbd_chann_del(conn, sess)) {
+-                              up_write(&sessions_table_lock);
+-                              goto sess_destroy;
+-                      }
+-              }
+-              up_write(&sessions_table_lock);
+-      } else {
+-              unsigned long id;
+-
+-              xa_for_each(&conn->sessions, id, sess) {
+-                      if (!ksmbd_chann_del(conn, sess))
+-                              goto sess_destroy;
++      xa_for_each(&conn->sessions, id, sess) {
++              ksmbd_chann_del(conn, sess);
++              if (xa_empty(&sess->ksmbd_chann_list)) {
++                      xa_erase(&conn->sessions, sess->id);
++                      ksmbd_session_destroy(sess);
+               }
+       }
+-
+-      return;
+-
+-sess_destroy:
+-      if (xa_empty(&sess->ksmbd_chann_list)) {
+-              xa_erase(&conn->sessions, sess->id);
+-              ksmbd_session_destroy(sess);
+-      }
+ }
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+                                          unsigned long long id)
+ {
+-      return xa_load(&conn->sessions, id);
++      struct ksmbd_session *sess;
++
++      sess = xa_load(&conn->sessions, id);
++      if (sess)
++              sess->last_active = jiffies;
++      return sess;
+ }
+ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+@@ -237,6 +240,8 @@ struct ksmbd_session *ksmbd_session_look
+       down_read(&sessions_table_lock);
+       sess = __session_lookup(id);
++      if (sess)
++              sess->last_active = jiffies;
+       up_read(&sessions_table_lock);
+       return sess;
+@@ -315,6 +320,7 @@ static struct ksmbd_session *__session_c
+       if (ksmbd_init_file_table(&sess->file_table))
+               goto error;
++      sess->last_active = jiffies;
+       sess->state = SMB2_SESSION_IN_PROGRESS;
+       set_session_flag(sess, protocol);
+       xa_init(&sess->tree_conns);
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -59,6 +59,7 @@ struct ksmbd_session {
+       __u8                            smb3signingkey[SMB3_SIGN_KEY_SIZE];
+       struct ksmbd_file_table         file_table;
++      unsigned long                   last_active;
+ };
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1873,6 +1873,7 @@ out_err:
+                       if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+                               try_delay = true;
++                      sess->last_active = jiffies;
+                       sess->state = SMB2_SESSION_EXPIRED;
+                       if (try_delay) {
+                               ksmbd_conn_set_need_reconnect(conn);
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -61,6 +61,8 @@ struct preauth_integrity_info {
+ #define SMB2_SESSION_IN_PROGRESS      BIT(0)
+ #define SMB2_SESSION_VALID            BIT(1)
++#define SMB2_SESSION_TIMEOUT          (10 * HZ)
++
+ struct create_durable_req_v2 {
+       struct create_context ccontext;
+       __u8   Name[8];
diff --git a/queue-6.3/ksmbd-fix-deadlock-in-ksmbd_find_crypto_ctx.patch b/queue-6.3/ksmbd-fix-deadlock-in-ksmbd_find_crypto_ctx.patch
new file mode 100644 (file)
index 0000000..c6f5722
--- /dev/null
@@ -0,0 +1,74 @@
+From 7b4323373d844954bb76e0e9f39c4e5fc785fa7b Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:44:14 +0900
+Subject: ksmbd: fix deadlock in ksmbd_find_crypto_ctx()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 7b4323373d844954bb76e0e9f39c4e5fc785fa7b upstream.
+
+The deadlock is triggered by sending multiple concurrent session setup
+requests. When getting a ctx for crypto, it should be released before being
+reused. Holding several ctxs at once causes a deadlock while waiting for one
+to be released, due to the limited number of ctxs.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20591
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/auth.c |   19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/fs/ksmbd/auth.c
++++ b/fs/ksmbd/auth.c
+@@ -221,22 +221,22 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn
+ {
+       char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+       char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
+-      struct ksmbd_crypto_ctx *ctx;
++      struct ksmbd_crypto_ctx *ctx = NULL;
+       char *construct = NULL;
+       int rc, len;
+-      ctx = ksmbd_crypto_ctx_find_hmacmd5();
+-      if (!ctx) {
+-              ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+-              return -ENOMEM;
+-      }
+-
+       rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
+       if (rc) {
+               ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
+               goto out;
+       }
++      ctx = ksmbd_crypto_ctx_find_hmacmd5();
++      if (!ctx) {
++              ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++              return -ENOMEM;
++      }
++
+       rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+                                ntlmv2_hash,
+                                CIFS_HMAC_MD5_HASH_SIZE);
+@@ -272,6 +272,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn
+               ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+               goto out;
+       }
++      ksmbd_release_crypto_ctx(ctx);
++      ctx = NULL;
+       rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
+       if (rc) {
+@@ -282,7 +284,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn
+       if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+               rc = -EINVAL;
+ out:
+-      ksmbd_release_crypto_ctx(ctx);
++      if (ctx)
++              ksmbd_release_crypto_ctx(ctx);
+       kfree(construct);
+       return rc;
+ }
diff --git a/queue-6.3/ksmbd-fix-memleak-in-session-setup.patch b/queue-6.3/ksmbd-fix-memleak-in-session-setup.patch
new file mode 100644 (file)
index 0000000..2366871
--- /dev/null
@@ -0,0 +1,37 @@
+From 6d7cb549c2ca20e1f07593f15e936fd54b763028 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:26:45 +0900
+Subject: ksmbd: fix memleak in session setup
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 6d7cb549c2ca20e1f07593f15e936fd54b763028 upstream.
+
+If a client sends a session setup request with an unknown NTLMSSP message type,
+a session that does not include a channel can be created. This causes a
+session memleak, because ksmbd_sessions_deregister() does not destroy a
+session if it has no channel. This patch returns an error response if the
+client sends a request with an unknown NTLMSSP message type.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20593
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1794,6 +1794,10 @@ int smb2_sess_setup(struct ksmbd_work *w
+                               }
+                               kfree(sess->Preauth_HashValue);
+                               sess->Preauth_HashValue = NULL;
++                      } else {
++                              pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
++                                              le32_to_cpu(negblob->MessageType));
++                              rc = -EINVAL;
+                       }
+               } else {
+                       /* TODO: need one more negotiation */
diff --git a/queue-6.3/ksmbd-fix-null-pointer-dereference-in-smb2_get_info_filesystem.patch b/queue-6.3/ksmbd-fix-null-pointer-dereference-in-smb2_get_info_filesystem.patch
new file mode 100644 (file)
index 0000000..0b2c993
--- /dev/null
@@ -0,0 +1,33 @@
+From 3ac00a2ab69b34189942afa9e862d5170cdcb018 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:38:33 +0900
+Subject: ksmbd: fix NULL pointer dereference in smb2_get_info_filesystem()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 3ac00a2ab69b34189942afa9e862d5170cdcb018 upstream.
+
+If share is , share->path is NULL and it causes a NULL pointer
+dereference issue.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20479
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -4908,6 +4908,9 @@ static int smb2_get_info_filesystem(stru
+       int rc = 0, len;
+       int fs_infoclass_size = 0;
++      if (!share->path)
++              return -EIO;
++
+       rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+       if (rc) {
+               pr_err("cannot create vfs path\n");
diff --git a/queue-6.3/ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch b/queue-6.3/ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch
new file mode 100644 (file)
index 0000000..7e6e1e0
--- /dev/null
@@ -0,0 +1,384 @@
+From f5c779b7ddbda30866cf2a27c63e34158f858c73 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 16:45:00 +0900
+Subject: ksmbd: fix racy issue from session setup and logoff
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit f5c779b7ddbda30866cf2a27c63e34158f858c73 upstream.
+
+This racy issue is triggered by sending concurrent session setup and
+logoff requests. This patch does not set the connection status to
+KSMBD_SESS_GOOD if the state is KSMBD_SESS_NEED_RECONNECT in session setup,
+and re-looks up the session in logoff to validate whether it has been deleted.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20481, ZDI-CAN-20590, ZDI-CAN-20596
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/connection.c        |   14 ++++----
+ fs/ksmbd/connection.h        |   39 ++++++++++++++-----------
+ fs/ksmbd/mgmt/user_session.c |    1 
+ fs/ksmbd/server.c            |    3 +
+ fs/ksmbd/smb2pdu.c           |   67 +++++++++++++++++++++++++++----------------
+ fs/ksmbd/transport_tcp.c     |    2 -
+ 6 files changed, 77 insertions(+), 49 deletions(-)
+
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -56,7 +56,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+               return NULL;
+       conn->need_neg = true;
+-      conn->status = KSMBD_SESS_NEW;
++      ksmbd_conn_set_new(conn);
+       conn->local_nls = load_nls("utf8");
+       if (!conn->local_nls)
+               conn->local_nls = load_nls_default();
+@@ -147,12 +147,12 @@ int ksmbd_conn_try_dequeue_request(struc
+       return ret;
+ }
+-static void ksmbd_conn_lock(struct ksmbd_conn *conn)
++void ksmbd_conn_lock(struct ksmbd_conn *conn)
+ {
+       mutex_lock(&conn->srv_mutex);
+ }
+-static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
++void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+ {
+       mutex_unlock(&conn->srv_mutex);
+ }
+@@ -243,7 +243,7 @@ bool ksmbd_conn_alive(struct ksmbd_conn
+       if (!ksmbd_server_running())
+               return false;
+-      if (conn->status == KSMBD_SESS_EXITING)
++      if (ksmbd_conn_exiting(conn))
+               return false;
+       if (kthread_should_stop())
+@@ -303,7 +303,7 @@ int ksmbd_conn_handler_loop(void *p)
+               pdu_size = get_rfc1002_len(hdr_buf);
+               ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+-              if (conn->status == KSMBD_SESS_GOOD)
++              if (ksmbd_conn_good(conn))
+                       max_allowed_pdu_size =
+                               SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
+               else
+@@ -312,7 +312,7 @@ int ksmbd_conn_handler_loop(void *p)
+               if (pdu_size > max_allowed_pdu_size) {
+                       pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
+                                       pdu_size, max_allowed_pdu_size,
+-                                      conn->status);
++                                      READ_ONCE(conn->status));
+                       break;
+               }
+@@ -416,7 +416,7 @@ again:
+               if (task)
+                       ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+                                   task->comm, task_pid_nr(task));
+-              conn->status = KSMBD_SESS_EXITING;
++              ksmbd_conn_set_exiting(conn);
+               if (t->ops->shutdown) {
+                       read_unlock(&conn_list_lock);
+                       t->ops->shutdown(t);
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -162,6 +162,8 @@ void ksmbd_conn_init_server_callbacks(st
+ int ksmbd_conn_handler_loop(void *p);
+ int ksmbd_conn_transport_init(void);
+ void ksmbd_conn_transport_destroy(void);
++void ksmbd_conn_lock(struct ksmbd_conn *conn);
++void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+ /*
+  * WARNING
+@@ -169,43 +171,48 @@ void ksmbd_conn_transport_destroy(void);
+  * This is a hack. We will move status to a proper place once we land
+  * a multi-sessions support.
+  */
+-static inline bool ksmbd_conn_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_GOOD;
++      return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
+ }
+-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
++      return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
++      return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+ }
+-static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
++static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_EXITING;
++      return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
+-static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_GOOD;
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+ }
+-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
++      WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
+ }
+-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_NEED_RECONNECT;
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
+-static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_EXITING;
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
++}
++
++static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
++{
++      WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+ }
+ #endif /* __CONNECTION_H__ */
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -315,6 +315,7 @@ static struct ksmbd_session *__session_c
+       if (ksmbd_init_file_table(&sess->file_table))
+               goto error;
++      sess->state = SMB2_SESSION_IN_PROGRESS;
+       set_session_flag(sess, protocol);
+       xa_init(&sess->tree_conns);
+       xa_init(&sess->ksmbd_chann_list);
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -93,7 +93,8 @@ static inline int check_conn_state(struc
+ {
+       struct smb_hdr *rsp_hdr;
+-      if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {
++      if (ksmbd_conn_exiting(work->conn) ||
++          ksmbd_conn_need_reconnect(work->conn)) {
+               rsp_hdr = work->response_buf;
+               rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+               return 1;
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -248,7 +248,7 @@ int init_smb2_neg_rsp(struct ksmbd_work
+       rsp = smb2_get_msg(work->response_buf);
+-      WARN_ON(ksmbd_conn_good(work));
++      WARN_ON(ksmbd_conn_good(conn));
+       rsp->StructureSize = cpu_to_le16(65);
+       ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+@@ -277,7 +277,7 @@ int init_smb2_neg_rsp(struct ksmbd_work
+               rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+       conn->use_spnego = true;
+-      ksmbd_conn_set_need_negotiate(work);
++      ksmbd_conn_set_need_negotiate(conn);
+       return 0;
+ }
+@@ -561,7 +561,7 @@ int smb2_check_user_session(struct ksmbd
+           cmd == SMB2_SESSION_SETUP_HE)
+               return 0;
+-      if (!ksmbd_conn_good(work))
++      if (!ksmbd_conn_good(conn))
+               return -EINVAL;
+       sess_id = le64_to_cpu(req_hdr->SessionId);
+@@ -594,7 +594,7 @@ static void destroy_previous_session(str
+       prev_sess->state = SMB2_SESSION_EXPIRED;
+       xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+-              chann->conn->status = KSMBD_SESS_EXITING;
++              ksmbd_conn_set_exiting(chann->conn);
+ }
+ /**
+@@ -1079,7 +1079,7 @@ int smb2_handle_negotiate(struct ksmbd_w
+       ksmbd_debug(SMB, "Received negotiate request\n");
+       conn->need_neg = false;
+-      if (ksmbd_conn_good(work)) {
++      if (ksmbd_conn_good(conn)) {
+               pr_err("conn->tcp_status is already in CifsGood State\n");
+               work->send_no_response = 1;
+               return rc;
+@@ -1233,7 +1233,7 @@ int smb2_handle_negotiate(struct ksmbd_w
+       }
+       conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-      ksmbd_conn_set_need_negotiate(work);
++      ksmbd_conn_set_need_negotiate(conn);
+ err_out:
+       if (rc < 0)
+@@ -1656,6 +1656,7 @@ int smb2_sess_setup(struct ksmbd_work *w
+       rsp->SecurityBufferLength = 0;
+       inc_rfc1001_len(work->response_buf, 9);
++      ksmbd_conn_lock(conn);
+       if (!req->hdr.SessionId) {
+               sess = ksmbd_smb2_session_create();
+               if (!sess) {
+@@ -1703,6 +1704,12 @@ int smb2_sess_setup(struct ksmbd_work *w
+                       goto out_err;
+               }
++              if (ksmbd_conn_need_reconnect(conn)) {
++                      rc = -EFAULT;
++                      sess = NULL;
++                      goto out_err;
++              }
++
+               if (ksmbd_session_lookup(conn, sess_id)) {
+                       rc = -EACCES;
+                       goto out_err;
+@@ -1727,12 +1734,20 @@ int smb2_sess_setup(struct ksmbd_work *w
+                       rc = -ENOENT;
+                       goto out_err;
+               }
++
++              if (sess->state == SMB2_SESSION_EXPIRED) {
++                      rc = -EFAULT;
++                      goto out_err;
++              }
++
++              if (ksmbd_conn_need_reconnect(conn)) {
++                      rc = -EFAULT;
++                      sess = NULL;
++                      goto out_err;
++              }
+       }
+       work->sess = sess;
+-      if (sess->state == SMB2_SESSION_EXPIRED)
+-              sess->state = SMB2_SESSION_IN_PROGRESS;
+-
+       negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+       negblob_len = le16_to_cpu(req->SecurityBufferLength);
+       if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+@@ -1762,8 +1777,10 @@ int smb2_sess_setup(struct ksmbd_work *w
+                               goto out_err;
+                       }
+-                      ksmbd_conn_set_good(work);
+-                      sess->state = SMB2_SESSION_VALID;
++                      if (!ksmbd_conn_need_reconnect(conn)) {
++                              ksmbd_conn_set_good(conn);
++                              sess->state = SMB2_SESSION_VALID;
++                      }
+                       kfree(sess->Preauth_HashValue);
+                       sess->Preauth_HashValue = NULL;
+               } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+@@ -1785,8 +1802,10 @@ int smb2_sess_setup(struct ksmbd_work *w
+                               if (rc)
+                                       goto out_err;
+-                              ksmbd_conn_set_good(work);
+-                              sess->state = SMB2_SESSION_VALID;
++                              if (!ksmbd_conn_need_reconnect(conn)) {
++                                      ksmbd_conn_set_good(conn);
++                                      sess->state = SMB2_SESSION_VALID;
++                              }
+                               if (conn->binding) {
+                                       struct preauth_session *preauth_sess;
+@@ -1854,14 +1873,13 @@ out_err:
+                       if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+                               try_delay = true;
+-                      xa_erase(&conn->sessions, sess->id);
+-                      ksmbd_session_destroy(sess);
+-                      work->sess = NULL;
++                      sess->state = SMB2_SESSION_EXPIRED;
+                       if (try_delay)
+                               ssleep(5);
+               }
+       }
++      ksmbd_conn_unlock(conn);
+       return rc;
+ }
+@@ -2086,21 +2104,24 @@ int smb2_session_logoff(struct ksmbd_wor
+ {
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+-      struct ksmbd_session *sess = work->sess;
++      struct ksmbd_session *sess;
++      struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+       rsp->StructureSize = cpu_to_le16(4);
+       inc_rfc1001_len(work->response_buf, 4);
+       ksmbd_debug(SMB, "request\n");
+-      /* setting CifsExiting here may race with start_tcp_sess */
+-      ksmbd_conn_set_need_reconnect(work);
++      ksmbd_conn_set_need_reconnect(conn);
+       ksmbd_close_session_fds(work);
+       ksmbd_conn_wait_idle(conn);
++      /*
++       * Re-lookup session to validate if session is deleted
++       * while waiting request complete
++       */
++      sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
+       if (ksmbd_tree_conn_session_logoff(sess)) {
+-              struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+               smb2_set_err_rsp(work);
+@@ -2112,9 +2133,7 @@ int smb2_session_logoff(struct ksmbd_wor
+       ksmbd_free_user(sess->user);
+       sess->user = NULL;
+-
+-      /* let start_tcp_sess free connection info now */
+-      ksmbd_conn_set_need_negotiate(work);
++      ksmbd_conn_set_need_negotiate(conn);
+       return 0;
+ }
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -333,7 +333,7 @@ static int ksmbd_tcp_readv(struct tcp_tr
+               if (length == -EINTR) {
+                       total_read = -ESHUTDOWN;
+                       break;
+-              } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
++              } else if (ksmbd_conn_need_reconnect(conn)) {
+                       total_read = -EAGAIN;
+                       break;
+               } else if (length == -ERESTARTSYS || length == -EAGAIN) {
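
A recurring detail in the diff above is that every access to conn->status now goes through READ_ONCE()/WRITE_ONCE(). Loosely, these annotate that the field is read and written concurrently without a lock, forcing a single non-torn access and preventing the compiler from caching or re-reading the value. A tiny hedged sketch of the same accessor style, with hypothetical struct and enum names:

/*
 * Illustrative sketch only (not part of the patch): status accessors built
 * on READ_ONCE()/WRITE_ONCE() for a field touched by several threads without
 * a lock.
 */
#include <linux/compiler.h>
#include <linux/types.h>

enum demo_status { DEMO_NEW, DEMO_GOOD, DEMO_NEED_RECONNECT };

struct demo_conn {
	int status;	/* written by one context, polled by others */
};

static inline bool demo_conn_good(struct demo_conn *c)
{
	/* one non-torn load; the compiler may not re-read or cache it */
	return READ_ONCE(c->status) == DEMO_GOOD;
}

static inline void demo_conn_set_need_reconnect(struct demo_conn *c)
{
	WRITE_ONCE(c->status, DEMO_NEED_RECONNECT);
}
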
diff --git a/queue-6.3/ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with-multichannel.patch b/queue-6.3/ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with-multichannel.patch
new file mode 100644 (file)
index 0000000..d748422
--- /dev/null
@@ -0,0 +1,398 @@
+From abcc506a9a71976a8b4c9bf3ee6efd13229c1e19 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 14:03:40 +0900
+Subject: ksmbd: fix racy issue from smb2 close and logoff with multichannel
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit abcc506a9a71976a8b4c9bf3ee6efd13229c1e19 upstream.
+
+When an smb client sends concurrent smb2 close and logoff requests
+over a multichannel connection, it can cause a racy issue. The logoff request
+frees the tcon and can cause UAF issues in smb2 close. When receiving a logoff
+request with multichannel, ksmbd should wait until all remaining requests
+complete, as well as the ones in the current connection, and then mark the
+session as expired.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20796 ZDI-CAN-20595
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/connection.c        |   54 ++++++++++++++++++++++++++++++++-----------
+ fs/ksmbd/connection.h        |   19 ++++++++++++---
+ fs/ksmbd/mgmt/tree_connect.c |    3 ++
+ fs/ksmbd/mgmt/user_session.c |   36 +++++++++++++++++++++++-----
+ fs/ksmbd/smb2pdu.c           |   21 ++++++++--------
+ 5 files changed, 101 insertions(+), 32 deletions(-)
+
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
+ static struct ksmbd_conn_ops default_conn_ops;
+ LIST_HEAD(conn_list);
+-DEFINE_RWLOCK(conn_list_lock);
++DECLARE_RWSEM(conn_list_lock);
+ /**
+  * ksmbd_conn_free() - free resources of the connection instance
+@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
+  */
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+-      write_lock(&conn_list_lock);
++      down_write(&conn_list_lock);
+       list_del(&conn->conns_list);
+-      write_unlock(&conn_list_lock);
++      up_write(&conn_list_lock);
+       xa_destroy(&conn->sessions);
+       kvfree(conn->request_buf);
+@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+       spin_lock_init(&conn->llist_lock);
+       INIT_LIST_HEAD(&conn->lock_list);
+-      write_lock(&conn_list_lock);
++      down_write(&conn_list_lock);
+       list_add(&conn->conns_list, &conn_list);
+-      write_unlock(&conn_list_lock);
++      up_write(&conn_list_lock);
+       return conn;
+ }
+@@ -95,7 +95,7 @@ bool ksmbd_conn_lookup_dialect(struct ks
+       struct ksmbd_conn *t;
+       bool ret = false;
+-      read_lock(&conn_list_lock);
++      down_read(&conn_list_lock);
+       list_for_each_entry(t, &conn_list, conns_list) {
+               if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+                       continue;
+@@ -103,7 +103,7 @@ bool ksmbd_conn_lookup_dialect(struct ks
+               ret = true;
+               break;
+       }
+-      read_unlock(&conn_list_lock);
++      up_read(&conn_list_lock);
+       return ret;
+ }
+@@ -157,9 +157,37 @@ void ksmbd_conn_unlock(struct ksmbd_conn
+       mutex_unlock(&conn->srv_mutex);
+ }
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
++      struct ksmbd_conn *conn;
++
++      down_read(&conn_list_lock);
++      list_for_each_entry(conn, &conn_list, conns_list) {
++              if (conn->binding || xa_load(&conn->sessions, sess_id))
++                      WRITE_ONCE(conn->status, status);
++      }
++      up_read(&conn_list_lock);
++}
++
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++{
++      struct ksmbd_conn *bind_conn;
++
+       wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++
++      down_read(&conn_list_lock);
++      list_for_each_entry(bind_conn, &conn_list, conns_list) {
++              if (bind_conn == conn)
++                      continue;
++
++              if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
++                  !ksmbd_conn_releasing(bind_conn) &&
++                  atomic_read(&bind_conn->req_running)) {
++                      wait_event(bind_conn->req_running_q,
++                              atomic_read(&bind_conn->req_running) == 0);
++              }
++      }
++      up_read(&conn_list_lock);
+ }
+ int ksmbd_conn_write(struct ksmbd_work *work)
+@@ -360,10 +388,10 @@ int ksmbd_conn_handler_loop(void *p)
+       }
+ out:
++      ksmbd_conn_set_releasing(conn);
+       /* Wait till all reference dropped to the Server object*/
+       wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+-
+       if (IS_ENABLED(CONFIG_UNICODE))
+               utf8_unload(conn->um);
+       unload_nls(conn->local_nls);
+@@ -407,7 +435,7 @@ static void stop_sessions(void)
+       struct ksmbd_transport *t;
+ again:
+-      read_lock(&conn_list_lock);
++      down_read(&conn_list_lock);
+       list_for_each_entry(conn, &conn_list, conns_list) {
+               struct task_struct *task;
+@@ -418,12 +446,12 @@ again:
+                                   task->comm, task_pid_nr(task));
+               ksmbd_conn_set_exiting(conn);
+               if (t->ops->shutdown) {
+-                      read_unlock(&conn_list_lock);
++                      up_read(&conn_list_lock);
+                       t->ops->shutdown(t);
+-                      read_lock(&conn_list_lock);
++                      down_read(&conn_list_lock);
+               }
+       }
+-      read_unlock(&conn_list_lock);
++      up_read(&conn_list_lock);
+       if (!list_empty(&conn_list)) {
+               schedule_timeout_interruptible(HZ / 10); /* 100ms */
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -26,7 +26,8 @@ enum {
+       KSMBD_SESS_GOOD,
+       KSMBD_SESS_EXITING,
+       KSMBD_SESS_NEED_RECONNECT,
+-      KSMBD_SESS_NEED_NEGOTIATE
++      KSMBD_SESS_NEED_NEGOTIATE,
++      KSMBD_SESS_RELEASING
+ };
+ struct ksmbd_stats {
+@@ -140,10 +141,10 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_PEER_SOCKADDR(c)    ((struct sockaddr *)&((c)->peer_addr))
+ extern struct list_head conn_list;
+-extern rwlock_t conn_list_lock;
++extern struct rw_semaphore conn_list_lock;
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+@@ -191,6 +192,11 @@ static inline bool ksmbd_conn_exiting(st
+       return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
++static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
++{
++      return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
++}
++
+ static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+       WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+@@ -215,4 +221,11 @@ static inline void ksmbd_conn_set_exitin
+ {
+       WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+ }
++
++static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
++{
++      WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+ #endif /* __CONNECTION_H__ */
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -137,6 +137,9 @@ int ksmbd_tree_conn_session_logoff(struc
+       struct ksmbd_tree_connect *tc;
+       unsigned long id;
++      if (!sess)
++              return -EINVAL;
++
+       xa_for_each(&sess->tree_conns, id, tc)
+               ret |= ksmbd_tree_conn_disconnect(sess, tc);
+       xa_destroy(&sess->tree_conns);
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -144,10 +144,6 @@ void ksmbd_session_destroy(struct ksmbd_
+       if (!sess)
+               return;
+-      down_write(&sessions_table_lock);
+-      hash_del(&sess->hlist);
+-      up_write(&sessions_table_lock);
+-
+       if (sess->user)
+               ksmbd_free_user(sess->user);
+@@ -178,15 +174,18 @@ static void ksmbd_expire_session(struct
+       unsigned long id;
+       struct ksmbd_session *sess;
++      down_write(&sessions_table_lock);
+       xa_for_each(&conn->sessions, id, sess) {
+               if (sess->state != SMB2_SESSION_VALID ||
+                   time_after(jiffies,
+                              sess->last_active + SMB2_SESSION_TIMEOUT)) {
+                       xa_erase(&conn->sessions, sess->id);
++                      hash_del(&sess->hlist);
+                       ksmbd_session_destroy(sess);
+                       continue;
+               }
+       }
++      up_write(&sessions_table_lock);
+ }
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+@@ -198,15 +197,16 @@ int ksmbd_session_register(struct ksmbd_
+       return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+-static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+       struct channel *chann;
+       chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+       if (!chann)
+-              return;
++              return -ENOENT;
+       kfree(chann);
++      return 0;
+ }
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+@@ -214,13 +214,37 @@ void ksmbd_sessions_deregister(struct ks
+       struct ksmbd_session *sess;
+       unsigned long id;
++      down_write(&sessions_table_lock);
++      if (conn->binding) {
++              int bkt;
++              struct hlist_node *tmp;
++
++              hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
++                      if (!ksmbd_chann_del(conn, sess) &&
++                          xa_empty(&sess->ksmbd_chann_list)) {
++                              hash_del(&sess->hlist);
++                              ksmbd_session_destroy(sess);
++                      }
++              }
++      }
++
+       xa_for_each(&conn->sessions, id, sess) {
++              unsigned long chann_id;
++              struct channel *chann;
++
++              xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
++                      if (chann->conn != conn)
++                              ksmbd_conn_set_exiting(chann->conn);
++              }
++
+               ksmbd_chann_del(conn, sess);
+               if (xa_empty(&sess->ksmbd_chann_list)) {
+                       xa_erase(&conn->sessions, sess->id);
++                      hash_del(&sess->hlist);
+                       ksmbd_session_destroy(sess);
+               }
+       }
++      up_write(&sessions_table_lock);
+ }
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -2110,21 +2110,22 @@ int smb2_session_logoff(struct ksmbd_wor
+       struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+       struct ksmbd_session *sess;
+       struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
++      u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+       rsp->StructureSize = cpu_to_le16(4);
+       inc_rfc1001_len(work->response_buf, 4);
+       ksmbd_debug(SMB, "request\n");
+-      ksmbd_conn_set_need_reconnect(conn);
++      ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+       ksmbd_close_session_fds(work);
+-      ksmbd_conn_wait_idle(conn);
++      ksmbd_conn_wait_idle(conn, sess_id);
+       /*
+        * Re-lookup session to validate if session is deleted
+        * while waiting request complete
+        */
+-      sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
++      sess = ksmbd_session_lookup_all(conn, sess_id);
+       if (ksmbd_tree_conn_session_logoff(sess)) {
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+@@ -2137,7 +2138,7 @@ int smb2_session_logoff(struct ksmbd_wor
+       ksmbd_free_user(sess->user);
+       sess->user = NULL;
+-      ksmbd_conn_set_need_negotiate(conn);
++      ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+       return 0;
+ }
+@@ -6969,7 +6970,7 @@ int smb2_lock(struct ksmbd_work *work)
+               nolock = 1;
+               /* check locks in connection list */
+-              read_lock(&conn_list_lock);
++              down_read(&conn_list_lock);
+               list_for_each_entry(conn, &conn_list, conns_list) {
+                       spin_lock(&conn->llist_lock);
+                       list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+@@ -6986,7 +6987,7 @@ int smb2_lock(struct ksmbd_work *work)
+                                               list_del(&cmp_lock->flist);
+                                               list_del(&cmp_lock->clist);
+                                               spin_unlock(&conn->llist_lock);
+-                                              read_unlock(&conn_list_lock);
++                                              up_read(&conn_list_lock);
+                                               locks_free_lock(cmp_lock->fl);
+                                               kfree(cmp_lock);
+@@ -7008,7 +7009,7 @@ int smb2_lock(struct ksmbd_work *work)
+                                   cmp_lock->start > smb_lock->start &&
+                                   cmp_lock->start < smb_lock->end) {
+                                       spin_unlock(&conn->llist_lock);
+-                                      read_unlock(&conn_list_lock);
++                                      up_read(&conn_list_lock);
+                                       pr_err("previous lock conflict with zero byte lock range\n");
+                                       goto out;
+                               }
+@@ -7017,7 +7018,7 @@ int smb2_lock(struct ksmbd_work *work)
+                                   smb_lock->start > cmp_lock->start &&
+                                   smb_lock->start < cmp_lock->end) {
+                                       spin_unlock(&conn->llist_lock);
+-                                      read_unlock(&conn_list_lock);
++                                      up_read(&conn_list_lock);
+                                       pr_err("current lock conflict with zero byte lock range\n");
+                                       goto out;
+                               }
+@@ -7028,14 +7029,14 @@ int smb2_lock(struct ksmbd_work *work)
+                                     cmp_lock->end >= smb_lock->end)) &&
+                                   !cmp_lock->zero_len && !smb_lock->zero_len) {
+                                       spin_unlock(&conn->llist_lock);
+-                                      read_unlock(&conn_list_lock);
++                                      up_read(&conn_list_lock);
+                                       pr_err("Not allow lock operation on exclusive lock range\n");
+                                       goto out;
+                               }
+                       }
+                       spin_unlock(&conn->llist_lock);
+               }
+-              read_unlock(&conn_list_lock);
++              up_read(&conn_list_lock);
+ out_check_cl:
+               if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+                       pr_err("Try to unlock nolocked range\n");
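The hunks above switch conn_list_lock from an rwlock_t to an rw_semaphore (the usual reason for such a conversion is that the read side needs to be able to sleep), while stop_sessions() keeps dropping the lock around t->ops->shutdown() and re-taking it before continuing the walk. Below is a minimal userspace sketch of that drop-and-reacquire structure only; a pthread rwlock stands in for the kernel lock, and shutdown_conn() plus the fixed list are illustrative, not ksmbd code (a pthread rwlock, unlike the kernel constraint being worked around, could legally be held across the blocking call):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct conn {
        int id;
        struct conn *next;
    };

    static struct conn conns[3] = {
        { .id = 1, .next = &conns[1] },
        { .id = 2, .next = &conns[2] },
        { .id = 3, .next = NULL },
    };
    static struct conn *conn_list = &conns[0];
    static pthread_rwlock_t conn_list_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Stand-in for t->ops->shutdown(): assumed to block, so the sketch
     * mirrors the patch and never calls it with the list lock held. */
    static void shutdown_conn(struct conn *c)
    {
        printf("shutting down conn %d\n", c->id);
        usleep(1000);
    }

    int main(void)
    {
        pthread_rwlock_rdlock(&conn_list_lock);
        for (struct conn *c = conn_list; c; c = c->next) {
            /* Drop the lock around the blocking call, as the
             * stop_sessions() hunk does, then re-take it. */
            pthread_rwlock_unlock(&conn_list_lock);
            shutdown_conn(c);
            pthread_rwlock_rdlock(&conn_list_lock);
        }
        pthread_rwlock_unlock(&conn_list_lock);
        return 0;
    }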
diff --git a/queue-6.3/ksmbd-fix-racy-issue-under-cocurrent-smb2-tree-disconnect.patch b/queue-6.3/ksmbd-fix-racy-issue-under-cocurrent-smb2-tree-disconnect.patch
new file mode 100644 (file)
index 0000000..9a37eb5
--- /dev/null
@@ -0,0 +1,78 @@
+From 30210947a343b6b3ca13adc9bfc88e1543e16dd5 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:51:12 +0900
+Subject: ksmbd: fix racy issue under cocurrent smb2 tree disconnect
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 30210947a343b6b3ca13adc9bfc88e1543e16dd5 upstream.
+
+There is a UAF issue under concurrent smb2 tree disconnect.
+This patch introduces a TREE_CONN_EXPIRE flag for tcon to avoid concurrent
+access.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20592
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/mgmt/tree_connect.c |   10 +++++++++-
+ fs/ksmbd/mgmt/tree_connect.h |    3 +++
+ fs/ksmbd/smb2pdu.c           |    3 ++-
+ 3 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -109,7 +109,15 @@ int ksmbd_tree_conn_disconnect(struct ks
+ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+                                                 unsigned int id)
+ {
+-      return xa_load(&sess->tree_conns, id);
++      struct ksmbd_tree_connect *tcon;
++
++      tcon = xa_load(&sess->tree_conns, id);
++      if (tcon) {
++              if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
++                      tcon = NULL;
++      }
++
++      return tcon;
+ }
+ struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+--- a/fs/ksmbd/mgmt/tree_connect.h
++++ b/fs/ksmbd/mgmt/tree_connect.h
+@@ -14,6 +14,8 @@ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
++#define TREE_CONN_EXPIRE              1
++
+ struct ksmbd_tree_connect {
+       int                             id;
+@@ -25,6 +27,7 @@ struct ksmbd_tree_connect {
+       int                             maximal_access;
+       bool                            posix_extensions;
++      unsigned long                   status;
+ };
+ struct ksmbd_tree_conn_status {
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -2048,11 +2048,12 @@ int smb2_tree_disconnect(struct ksmbd_wo
+       ksmbd_debug(SMB, "request\n");
+-      if (!tcon) {
++      if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
+               struct smb2_tree_disconnect_req *req =
+                       smb2_get_msg(work->request_buf);
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
++
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+               smb2_set_err_rsp(work);
+               return 0;
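The change above makes smb2_tree_disconnect() claim the tree connection with test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status), so only one of two concurrent disconnects proceeds with teardown, and ksmbd_tree_conn_lookup() now treats an expired tcon as absent. A small standalone sketch of that claim-once idea, with C11 atomics standing in for the kernel bit ops; claim_expire() and lookup() are illustrative names, not ksmbd functions:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* In the patch, TREE_CONN_EXPIRE (1) is a bit number handed to
     * test_and_set_bit()/test_bit(); here it becomes a mask for fetch_or. */
    #define TREE_CONN_EXPIRE       1
    #define TREE_CONN_EXPIRE_MASK  (1UL << TREE_CONN_EXPIRE)

    struct tree_connect {
        int id;
        atomic_ulong status;            /* stands in for tcon->status */
    };

    /* Emulates test_and_set_bit(): returns true if the bit was already set. */
    static bool claim_expire(struct tree_connect *tcon)
    {
        return atomic_fetch_or(&tcon->status, TREE_CONN_EXPIRE_MASK) &
               TREE_CONN_EXPIRE_MASK;
    }

    /* Emulates the check added to ksmbd_tree_conn_lookup(). */
    static struct tree_connect *lookup(struct tree_connect *tcon)
    {
        if (atomic_load(&tcon->status) & TREE_CONN_EXPIRE_MASK)
            return NULL;                /* expired: treat as already gone */
        return tcon;
    }

    int main(void)
    {
        struct tree_connect tcon = { .id = 7 };

        /* Only the first disconnect wins the bit and may tear the tcon
         * down; a second (concurrent) disconnect sees it set and bails,
         * and lookups stop returning the expired tree connection. */
        printf("first disconnect proceeds: %s\n", claim_expire(&tcon) ? "no" : "yes");
        printf("second disconnect proceeds: %s\n", claim_expire(&tcon) ? "no" : "yes");
        printf("lookup still finds tcon: %s\n", lookup(&tcon) ? "yes" : "no");
        return 0;
    }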
diff --git a/queue-6.3/ksmbd-not-allow-guest-user-on-multichannel.patch b/queue-6.3/ksmbd-not-allow-guest-user-on-multichannel.patch
new file mode 100644 (file)
index 0000000..aec96a3
--- /dev/null
@@ -0,0 +1,61 @@
+From 3353ab2df5f68dab7da8d5ebb427a2d265a1f2b2 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 3 May 2023 08:45:08 +0900
+Subject: ksmbd: not allow guest user on multichannel
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 3353ab2df5f68dab7da8d5ebb427a2d265a1f2b2 upstream.
+
+This patch returns STATUS_NOT_SUPPORTED if the binding session is a guest session.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20480
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1459,7 +1459,7 @@ static int ntlm_authenticate(struct ksmb
+                * Reuse session if anonymous try to connect
+                * on reauthetication.
+                */
+-              if (ksmbd_anonymous_user(user)) {
++              if (conn->binding == false && ksmbd_anonymous_user(user)) {
+                       ksmbd_free_user(user);
+                       return 0;
+               }
+@@ -1473,7 +1473,7 @@ static int ntlm_authenticate(struct ksmb
+               sess->user = user;
+       }
+-      if (user_guest(sess->user)) {
++      if (conn->binding == false && user_guest(sess->user)) {
+               rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
+       } else {
+               struct authenticate_message *authblob;
+@@ -1708,6 +1708,11 @@ int smb2_sess_setup(struct ksmbd_work *w
+                       goto out_err;
+               }
++              if (user_guest(sess->user)) {
++                      rc = -EOPNOTSUPP;
++                      goto out_err;
++              }
++
+               conn->binding = true;
+       } else if ((conn->dialect < SMB30_PROT_ID ||
+                   server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+@@ -1820,6 +1825,8 @@ out_err:
+               rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
+       else if (rc == -ENOMEM)
+               rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++      else if (rc == -EOPNOTSUPP)
++              rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+       else if (rc)
+               rsp->hdr.Status = STATUS_LOGON_FAILURE;
diff --git a/queue-6.3/kvm-risc-v-retry-fault-if-vma_lookup-results-become-invalid.patch b/queue-6.3/kvm-risc-v-retry-fault-if-vma_lookup-results-become-invalid.patch
new file mode 100644 (file)
index 0000000..a489c55
--- /dev/null
@@ -0,0 +1,87 @@
+From 2ed90cb0938a45b12eb947af062d12c7af0067b3 Mon Sep 17 00:00:00 2001
+From: David Matlack <dmatlack@google.com>
+Date: Fri, 17 Mar 2023 14:11:06 -0700
+Subject: KVM: RISC-V: Retry fault if vma_lookup() results become invalid
+
+From: David Matlack <dmatlack@google.com>
+
+commit 2ed90cb0938a45b12eb947af062d12c7af0067b3 upstream.
+
+Read mmu_invalidate_seq before dropping the mmap_lock so that KVM can
+detect if the results of vma_lookup() (e.g. vma_shift) become stale
+before it acquires kvm->mmu_lock. This fixes a theoretical bug where a
+VMA could be changed by userspace after vma_lookup() and before KVM
+reads the mmu_invalidate_seq, causing KVM to install page table entries
+based on a (possibly) no-longer-valid vma_shift.
+
+Re-order the MMU cache top-up to earlier in user_mem_abort() so that it
+is not done after KVM has read mmu_invalidate_seq (i.e. so as to avoid
+inducing spurious fault retries).
+
+It's unlikely that any sane userspace currently modifies VMAs in such a
+way as to trigger this race. And even with directed testing I was unable
+to reproduce it. But a sufficiently motivated host userspace might be
+able to exploit this race.
+
+Note KVM/ARM had the same bug and was fixed in a separate, near
+identical patch (see Link).
+
+Link: https://lore.kernel.org/kvm/20230313235454.2964067-1-dmatlack@google.com/
+Fixes: 9955371cc014 ("RISC-V: KVM: Implement MMU notifiers")
+Cc: stable@vger.kernel.org
+Signed-off-by: David Matlack <dmatlack@google.com>
+Tested-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Anup Patel <anup@brainfault.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kvm/mmu.c |   25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+--- a/arch/riscv/kvm/mmu.c
++++ b/arch/riscv/kvm/mmu.c
+@@ -628,6 +628,13 @@ int kvm_riscv_gstage_map(struct kvm_vcpu
+                       !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
+       unsigned long vma_pagesize, mmu_seq;
++      /* We need minimum second+third level pages */
++      ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
++      if (ret) {
++              kvm_err("Failed to topup G-stage cache\n");
++              return ret;
++      }
++
+       mmap_read_lock(current->mm);
+       vma = vma_lookup(current->mm, hva);
+@@ -648,6 +655,15 @@ int kvm_riscv_gstage_map(struct kvm_vcpu
+       if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
+               gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
++      /*
++       * Read mmu_invalidate_seq so that KVM can detect if the results of
++       * vma_lookup() or gfn_to_pfn_prot() become stale priort to acquiring
++       * kvm->mmu_lock.
++       *
++       * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
++       * with the smp_wmb() in kvm_mmu_invalidate_end().
++       */
++      mmu_seq = kvm->mmu_invalidate_seq;
+       mmap_read_unlock(current->mm);
+       if (vma_pagesize != PUD_SIZE &&
+@@ -657,15 +673,6 @@ int kvm_riscv_gstage_map(struct kvm_vcpu
+               return -EFAULT;
+       }
+-      /* We need minimum second+third level pages */
+-      ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+-      if (ret) {
+-              kvm_err("Failed to topup G-stage cache\n");
+-              return ret;
+-      }
+-
+-      mmu_seq = kvm->mmu_invalidate_seq;
+-
+       hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
+       if (hfn == KVM_PFN_ERR_HWPOISON) {
+               send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
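The reordering above boils down to a snapshot-and-recheck pattern: read the invalidation sequence count while still holding the lock that keeps the looked-up state stable, then compare it again under the destination lock and retry the fault on mismatch. Below is a simplified single-threaded sketch of that pattern, with plain mutexes standing in for mmap_lock/mmu_lock and none of the kernel's memory-barrier subtleties; lookup_and_install() is an illustrative name, not a KVM function:

    #include <pthread.h>
    #include <stdio.h>

    static unsigned long invalidate_seq;    /* bumped whenever mappings change */
    static pthread_mutex_t src_lock  = PTHREAD_MUTEX_INITIALIZER;  /* "mmap_lock" */
    static pthread_mutex_t dest_lock = PTHREAD_MUTEX_INITIALIZER;  /* "mmu_lock"  */

    static int lookup_and_install(void)
    {
        unsigned long seq;

        /* Snapshot the sequence count *before* dropping the source lock,
         * as the patch moves the mmu_invalidate_seq read inside the
         * mmap_read_lock()ed section. */
        pthread_mutex_lock(&src_lock);
        /* ... vma_lookup()-style work would happen here ... */
        seq = invalidate_seq;
        pthread_mutex_unlock(&src_lock);

        /* Recheck under the destination lock; a changed count means the
         * lookup result may be stale and the fault must be retried. */
        pthread_mutex_lock(&dest_lock);
        if (seq != invalidate_seq) {
            pthread_mutex_unlock(&dest_lock);
            return -1;
        }
        /* ... install page-table entries here ... */
        pthread_mutex_unlock(&dest_lock);
        return 0;
    }

    int main(void)
    {
        printf("install %s\n", lookup_and_install() ? "retried" : "succeeded");
        return 0;
    }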
diff --git a/queue-6.3/kvm-x86-preserve-tdp-mmu-roots-until-they-are-explicitly-invalidated.patch b/queue-6.3/kvm-x86-preserve-tdp-mmu-roots-until-they-are-explicitly-invalidated.patch
new file mode 100644 (file)
index 0000000..3f00f74
--- /dev/null
@@ -0,0 +1,257 @@
+From edbdb43fc96b11b3bfa531be306a1993d9fe89ec Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 26 Apr 2023 15:03:23 -0700
+Subject: KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit edbdb43fc96b11b3bfa531be306a1993d9fe89ec upstream.
+
+Preserve TDP MMU roots until they are explicitly invalidated by gifting
+the TDP MMU itself a reference to a root when it is allocated.  Keeping a
+reference in the TDP MMU fixes a flaw where the TDP MMU exhibits terrible
+performance, and can potentially even soft-hang a vCPU, if a vCPU
+frequently unloads its roots, e.g. when KVM is emulating SMI+RSM.
+
+When KVM emulates something that invalidates _all_ TLB entries, e.g. SMI
+and RSM, KVM unloads all of the vCPUs roots (KVM keeps a small per-vCPU
+cache of previous roots).  Unloading roots is a simple way to ensure KVM
+flushes and synchronizes all roots for the vCPU, as KVM flushes and syncs
+when allocating a "new" root (from the vCPU's perspective).
+
+In the shadow MMU, KVM keeps track of all shadow pages, roots included, in
+a per-VM hash table.  Unloading a shadow MMU root just wipes it from the
+per-vCPU cache; the root is still tracked in the per-VM hash table.  When
+KVM loads a "new" root for the vCPU, KVM will find the old, unloaded root
+in the per-VM hash table.
+
+Unlike the shadow MMU, the TDP MMU doesn't track "inactive" roots in a
+per-VM structure, where "active" in this case means a root is either
+in-use or cached as a previous root by at least one vCPU.  When a TDP MMU
+root becomes inactive, i.e. the last vCPU reference to the root is put,
+KVM immediately frees the root (asterisk on "immediately" as the actual
+freeing may be done by a worker, but for all intents and purposes the root
+is gone).
+
+The TDP MMU behavior is especially problematic for 1-vCPU setups, as
+unloading all roots effectively frees all roots.  The issue is mitigated
+to some degree in multi-vCPU setups as a different vCPU usually holds a
+reference to an unloaded root and thus keeps the root alive, allowing the
+vCPU to reuse its old root after unloading (with a flush+sync).
+
+The TDP MMU flaw has been known for some time, as until very recently,
+KVM's handling of CR0.WP also triggered unloading of all roots.  The
+CR0.WP toggling scenario was eventually addressed by not unloading roots
+when _only_ CR0.WP is toggled, but such an approach doesn't Just Work
+for emulating SMM as KVM must emulate a full TLB flush on entry and exit
+to/from SMM.  Given that the shadow MMU plays nice with unloading roots
+at will, teaching the TDP MMU to do the same is far less complex than
+modifying KVM to track which roots need to be flushed before reuse.
+
+Note, preserving all possible TDP MMU roots is not a concern with respect
+to memory consumption.  Now that the role for direct MMUs doesn't include
+information about the guest, e.g. CR0.PG, CR0.WP, CR4.SMEP, etc., there
+are _at most_ six possible roots (where "guest_mode" here means L2):
+
+  1. 4-level !SMM !guest_mode
+  2. 4-level  SMM !guest_mode
+  3. 5-level !SMM !guest_mode
+  4. 5-level  SMM !guest_mode
+  5. 4-level !SMM guest_mode
+  6. 5-level !SMM guest_mode
+
+And because each vCPU can track 4 valid roots, a VM can already have all
+6 root combinations live at any given time.  Not to mention that, in
+practice, no sane VMM will advertise different guest.MAXPHYADDR values
+across vCPUs, i.e. KVM won't ever use both 4-level and 5-level roots for
+a single VM.  Furthermore, the vast majority of modern hypervisors will
+utilize EPT/NPT when available, thus the guest_mode=%true cases are also
+unlikely to be utilized.
+
+Reported-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
+Link: https://lore.kernel.org/all/959c5bce-beb5-b463-7158-33fc4a4f910c@linux.microsoft.com
+Link: https://lkml.kernel.org/r/20220209170020.1775368-1-pbonzini%40redhat.com
+Link: https://lore.kernel.org/all/20230322013731.102955-1-minipli@grsecurity.net
+Link: https://lore.kernel.org/all/000000000000a0bc2b05f9dd7fab@google.com
+Link: https://lore.kernel.org/all/000000000000eca0b905fa0f7756@google.com
+Cc: Ben Gardon <bgardon@google.com>
+Cc: David Matlack <dmatlack@google.com>
+Cc: stable@vger.kernel.org
+Tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20230426220323.3079789-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/tdp_mmu.c |  121 ++++++++++++++++++++-------------------------
+ 1 file changed, 56 insertions(+), 65 deletions(-)
+
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -40,7 +40,17 @@ static __always_inline bool kvm_lockdep_
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ {
+-      /* Also waits for any queued work items.  */
++      /*
++       * Invalidate all roots, which besides the obvious, schedules all roots
++       * for zapping and thus puts the TDP MMU's reference to each root, i.e.
++       * ultimately frees all roots.
++       */
++      kvm_tdp_mmu_invalidate_all_roots(kvm);
++
++      /*
++       * Destroying a workqueue also first flushes the workqueue, i.e. no
++       * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
++       */
+       destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+       WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+@@ -116,16 +126,6 @@ static void tdp_mmu_schedule_zap_root(st
+       queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+ }
+-static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
+-{
+-      union kvm_mmu_page_role role = page->role;
+-      role.invalid = true;
+-
+-      /* No need to use cmpxchg, only the invalid bit can change.  */
+-      role.word = xchg(&page->role.word, role.word);
+-      return role.invalid;
+-}
+-
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+                         bool shared)
+ {
+@@ -134,45 +134,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kv
+       if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+               return;
+-      WARN_ON(!is_tdp_mmu_page(root));
+-
+       /*
+-       * The root now has refcount=0.  It is valid, but readers already
+-       * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
+-       * rejects it.  This remains true for the rest of the execution
+-       * of this function, because readers visit valid roots only
+-       * (except for tdp_mmu_zap_root_work(), which however
+-       * does not acquire any reference itself).
+-       *
+-       * Even though there are flows that need to visit all roots for
+-       * correctness, they all take mmu_lock for write, so they cannot yet
+-       * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
+-       * since the root still has refcount=0.
+-       *
+-       * However, tdp_mmu_zap_root can yield, and writers do not expect to
+-       * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
+-       * So the root temporarily gets an extra reference, going to refcount=1
+-       * while staying invalid.  Readers still cannot acquire any reference;
+-       * but writers are now allowed to run if tdp_mmu_zap_root yields and
+-       * they might take an extra reference if they themselves yield.
+-       * Therefore, when the reference is given back by the worker,
+-       * there is no guarantee that the refcount is still 1.  If not, whoever
+-       * puts the last reference will free the page, but they will not have to
+-       * zap the root because a root cannot go from invalid to valid.
++       * The TDP MMU itself holds a reference to each root until the root is
++       * explicitly invalidated, i.e. the final reference should be never be
++       * put for a valid root.
+        */
+-      if (!kvm_tdp_root_mark_invalid(root)) {
+-              refcount_set(&root->tdp_mmu_root_count, 1);
+-
+-              /*
+-               * Zapping the root in a worker is not just "nice to have";
+-               * it is required because kvm_tdp_mmu_invalidate_all_roots()
+-               * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
+-               * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
+-               * might return with some roots not zapped yet.
+-               */
+-              tdp_mmu_schedule_zap_root(kvm, root);
+-              return;
+-      }
++      KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
+       spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+       list_del_rcu(&root->link);
+@@ -320,7 +287,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(stru
+       root = tdp_mmu_alloc_sp(vcpu);
+       tdp_mmu_init_sp(root, NULL, 0, role);
+-      refcount_set(&root->tdp_mmu_root_count, 1);
++      /*
++       * TDP MMU roots are kept until they are explicitly invalidated, either
++       * by a memslot update or by the destruction of the VM.  Initialize the
++       * refcount to two; one reference for the vCPU, and one reference for
++       * the TDP MMU itself, which is held until the root is invalidated and
++       * is ultimately put by tdp_mmu_zap_root_work().
++       */
++      refcount_set(&root->tdp_mmu_root_count, 2);
+       spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+       list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
+@@ -1022,32 +996,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(s
+ /*
+  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
+  * is about to be zapped, e.g. in response to a memslots update.  The actual
+- * zapping is performed asynchronously, so a reference is taken on all roots.
+- * Using a separate workqueue makes it easy to ensure that the destruction is
+- * performed before the "fast zap" completes, without keeping a separate list
+- * of invalidated roots; the list is effectively the list of work items in
+- * the workqueue.
+- *
+- * Get a reference even if the root is already invalid, the asynchronous worker
+- * assumes it was gifted a reference to the root it processes.  Because mmu_lock
+- * is held for write, it should be impossible to observe a root with zero refcount,
+- * i.e. the list of roots cannot be stale.
++ * zapping is performed asynchronously.  Using a separate workqueue makes it
++ * easy to ensure that the destruction is performed before the "fast zap"
++ * completes, without keeping a separate list of invalidated roots; the list is
++ * effectively the list of work items in the workqueue.
+  *
+- * This has essentially the same effect for the TDP MMU
+- * as updating mmu_valid_gen does for the shadow MMU.
++ * Note, the asynchronous worker is gifted the TDP MMU's reference.
++ * See kvm_tdp_mmu_get_vcpu_root_hpa().
+  */
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+ {
+       struct kvm_mmu_page *root;
+-      lockdep_assert_held_write(&kvm->mmu_lock);
+-      list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+-              if (!root->role.invalid &&
+-                  !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
++      /*
++       * mmu_lock must be held for write to ensure that a root doesn't become
++       * invalid while there are active readers (invalidating a root while
++       * there are active readers may or may not be problematic in practice,
++       * but it's uncharted territory and not supported).
++       *
++       * Waive the assertion if there are no users of @kvm, i.e. the VM is
++       * being destroyed after all references have been put, or if no vCPUs
++       * have been created (which means there are no roots), i.e. the VM is
++       * being destroyed in an error path of KVM_CREATE_VM.
++       */
++      if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
++          refcount_read(&kvm->users_count) && kvm->created_vcpus)
++              lockdep_assert_held_write(&kvm->mmu_lock);
++
++      /*
++       * As above, mmu_lock isn't held when destroying the VM!  There can't
++       * be other references to @kvm, i.e. nothing else can invalidate roots
++       * or be consuming roots, but walking the list of roots does need to be
++       * guarded against roots being deleted by the asynchronous zap worker.
++       */
++      rcu_read_lock();
++
++      list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
++              if (!root->role.invalid) {
+                       root->role.invalid = true;
+                       tdp_mmu_schedule_zap_root(kvm, root);
+               }
+       }
++
++      rcu_read_unlock();
+ }
+ /*
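The heart of the change above is the reference-count bookkeeping: a root is now born with a count of two, one for the vCPU that asked for it and one owned by the TDP MMU itself, so a vCPU dropping its reference no longer frees the root; only explicit invalidation puts the owner's reference. A tiny standalone sketch of that owner-reference idiom follows; root_alloc()/root_put()/invalidate_root() are illustrative stand-ins, not the KVM functions:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct root {
        atomic_int refcount;
        int invalid;
    };

    static void root_put(struct root *r)
    {
        /* atomic_fetch_sub() returns the old value: 1 means we just
         * dropped the last reference. */
        if (atomic_fetch_sub(&r->refcount, 1) == 1) {
            printf("root freed\n");
            free(r);
        }
    }

    /* One reference for the vCPU-style user, one held by the MMU itself
     * until the root is explicitly invalidated (refcount starts at 2). */
    static struct root *root_alloc(void)
    {
        struct root *r = calloc(1, sizeof(*r));

        if (!r)
            abort();
        atomic_store(&r->refcount, 2);
        return r;
    }

    static void invalidate_root(struct root *r)
    {
        if (!r->invalid) {
            r->invalid = 1;
            root_put(r);            /* drop the owner's reference */
        }
    }

    int main(void)
    {
        struct root *r = root_alloc();

        root_put(r);                /* user unloads its root: NOT freed */
        printf("after user put: refcount=%d\n", atomic_load(&r->refcount));
        invalidate_root(r);         /* explicit invalidation frees it */
        return 0;
    }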
diff --git a/queue-6.3/risc-v-align-sbi-probe-implementation-with-spec.patch b/queue-6.3/risc-v-align-sbi-probe-implementation-with-spec.patch
new file mode 100644 (file)
index 0000000..8b7c6f5
--- /dev/null
@@ -0,0 +1,144 @@
+From 41cad8284d5e6bf1d49d3c10a6b52ee1ae866a20 Mon Sep 17 00:00:00 2001
+From: Andrew Jones <ajones@ventanamicro.com>
+Date: Thu, 27 Apr 2023 18:36:26 +0200
+Subject: RISC-V: Align SBI probe implementation with spec
+
+From: Andrew Jones <ajones@ventanamicro.com>
+
+commit 41cad8284d5e6bf1d49d3c10a6b52ee1ae866a20 upstream.
+
+sbi_probe_extension() is specified with "Returns 0 if the given SBI
+extension ID (EID) is not available, or 1 if it is available unless
+defined as any other non-zero value by the implementation."
+Additionally, sbiret.value is a long. Fix the implementation to
+ensure any nonzero long value is considered a success, rather
+than only positive int values.
+
+Fixes: b9dcd9e41587 ("RISC-V: Add basic support for SBI v0.2")
+Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230427163626.101042-1-ajones@ventanamicro.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/include/asm/sbi.h        |    2 +-
+ arch/riscv/kernel/cpu_ops.c         |    2 +-
+ arch/riscv/kernel/sbi.c             |   17 ++++++++---------
+ arch/riscv/kvm/main.c               |    2 +-
+ drivers/cpuidle/cpuidle-riscv-sbi.c |    2 +-
+ drivers/perf/riscv_pmu_sbi.c        |    2 +-
+ 6 files changed, 13 insertions(+), 14 deletions(-)
+
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -296,7 +296,7 @@ int sbi_remote_hfence_vvma_asid(const st
+                               unsigned long start,
+                               unsigned long size,
+                               unsigned long asid);
+-int sbi_probe_extension(int ext);
++long sbi_probe_extension(int ext);
+ /* Check if current SBI specification version is 0.1 or not */
+ static inline int sbi_spec_is_0_1(void)
+--- a/arch/riscv/kernel/cpu_ops.c
++++ b/arch/riscv/kernel/cpu_ops.c
+@@ -27,7 +27,7 @@ const struct cpu_operations cpu_ops_spin
+ void __init cpu_set_ops(int cpuid)
+ {
+ #if IS_ENABLED(CONFIG_RISCV_SBI)
+-      if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
++      if (sbi_probe_extension(SBI_EXT_HSM)) {
+               if (!cpuid)
+                       pr_info("SBI HSM extension detected\n");
+               cpu_ops[cpuid] = &cpu_ops_sbi;
+--- a/arch/riscv/kernel/sbi.c
++++ b/arch/riscv/kernel/sbi.c
+@@ -581,19 +581,18 @@ static void sbi_srst_power_off(void)
+  * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
+  * @extid: The extension ID to be probed.
+  *
+- * Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
++ * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
+  */
+-int sbi_probe_extension(int extid)
++long sbi_probe_extension(int extid)
+ {
+       struct sbiret ret;
+       ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
+                       0, 0, 0, 0, 0);
+       if (!ret.error)
+-              if (ret.value)
+-                      return ret.value;
++              return ret.value;
+-      return -ENOTSUPP;
++      return 0;
+ }
+ EXPORT_SYMBOL(sbi_probe_extension);
+@@ -665,26 +664,26 @@ void __init sbi_init(void)
+       if (!sbi_spec_is_0_1()) {
+               pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
+                       sbi_get_firmware_id(), sbi_get_firmware_version());
+-              if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
++              if (sbi_probe_extension(SBI_EXT_TIME)) {
+                       __sbi_set_timer = __sbi_set_timer_v02;
+                       pr_info("SBI TIME extension detected\n");
+               } else {
+                       __sbi_set_timer = __sbi_set_timer_v01;
+               }
+-              if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
++              if (sbi_probe_extension(SBI_EXT_IPI)) {
+                       __sbi_send_ipi  = __sbi_send_ipi_v02;
+                       pr_info("SBI IPI extension detected\n");
+               } else {
+                       __sbi_send_ipi  = __sbi_send_ipi_v01;
+               }
+-              if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
++              if (sbi_probe_extension(SBI_EXT_RFENCE)) {
+                       __sbi_rfence    = __sbi_rfence_v02;
+                       pr_info("SBI RFENCE extension detected\n");
+               } else {
+                       __sbi_rfence    = __sbi_rfence_v01;
+               }
+               if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
+-                  (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
++                  sbi_probe_extension(SBI_EXT_SRST)) {
+                       pr_info("SBI SRST extension detected\n");
+                       pm_power_off = sbi_srst_power_off;
+                       sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
+--- a/arch/riscv/kvm/main.c
++++ b/arch/riscv/kvm/main.c
+@@ -75,7 +75,7 @@ static int __init riscv_kvm_init(void)
+               return -ENODEV;
+       }
+-      if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
++      if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
+               kvm_info("require SBI RFENCE extension\n");
+               return -ENODEV;
+       }
+--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
+@@ -613,7 +613,7 @@ static int __init sbi_cpuidle_init(void)
+        * 2) SBI HSM extension is available
+        */
+       if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+-          sbi_probe_extension(SBI_EXT_HSM) <= 0) {
++          !sbi_probe_extension(SBI_EXT_HSM)) {
+               pr_info("HSM suspend not available\n");
+               return 0;
+       }
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -924,7 +924,7 @@ static int __init pmu_sbi_devinit(void)
+       struct platform_device *pdev;
+       if (sbi_spec_version < sbi_mk_version(0, 3) ||
+-          sbi_probe_extension(SBI_EXT_PMU) <= 0) {
++          !sbi_probe_extension(SBI_EXT_PMU)) {
+               return 0;
+       }
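The gist of the fix above: sbi_probe_extension() now returns the raw long from the SBI call (0 means the extension is absent, any nonzero value means it is present), and every caller switches from "> 0" to a plain nonzero test. The standalone sketch below shows why the old "int return plus > 0" idiom can misclassify values the spec permits; the sample values are hypothetical, chosen only to exercise sign and truncation:

    #include <limits.h>
    #include <stdio.h>

    /* Hypothetical probe results an SBI implementation could return: the
     * spec only requires "nonzero" for a supported extension. */
    static const long probe_results[] = { 0, 1, -1, LONG_MIN };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(probe_results) / sizeof(probe_results[0]); i++) {
            long v = probe_results[i];

            /* Old-style handling the patch removes. */
            int truncated = (int)v;          /* old int return type  */
            int old_check = truncated > 0;   /* old "> 0" comparison */

            /* New-style check: any nonzero long means "available". */
            int new_check = v != 0;

            printf("value=%ld old=%s new=%s\n", v,
                   old_check ? "available" : "absent",
                   new_check ? "available" : "absent");
        }
        return 0;
    }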
diff --git a/queue-6.3/riscv-mm-remove-redundant-parameter-of-create_fdt_early_page_table.patch b/queue-6.3/riscv-mm-remove-redundant-parameter-of-create_fdt_early_page_table.patch
new file mode 100644 (file)
index 0000000..4140453
--- /dev/null
@@ -0,0 +1,47 @@
+From e4ef93edd4e0b022529303db1915766ff9de450e Mon Sep 17 00:00:00 2001
+From: Song Shuai <suagrfillet@gmail.com>
+Date: Wed, 26 Apr 2023 18:00:09 +0800
+Subject: riscv: mm: remove redundant parameter of create_fdt_early_page_table
+
+From: Song Shuai <suagrfillet@gmail.com>
+
+commit e4ef93edd4e0b022529303db1915766ff9de450e upstream.
+
+create_fdt_early_page_table() explicitly uses early_pg_dir for
+32-bit fdt mapping and the pgdir parameter is redundant here.
+So remove it and update its caller.
+
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Song Shuai <suagrfillet@gmail.com>
+Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
+Fixes: ef69d2559fe9 ("riscv: Move early dtb mapping into the fixmap region")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230426100009.685435-1-suagrfillet@gmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/mm/init.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -843,8 +843,7 @@ static void __init create_kernel_page_ta
+  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+  * entry.
+  */
+-static void __init create_fdt_early_page_table(pgd_t *pgdir,
+-                                             uintptr_t fix_fdt_va,
++static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
+                                              uintptr_t dtb_pa)
+ {
+       uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+@@ -1034,8 +1033,7 @@ asmlinkage void __init setup_vm(uintptr_
+       create_kernel_page_table(early_pg_dir, true);
+       /* Setup early mapping for FDT early scan */
+-      create_fdt_early_page_table(early_pg_dir,
+-                                  __fix_to_virt(FIX_FDT), dtb_pa);
++      create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
+       /*
+        * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
index 37a02cc2e2602987d6fac506f2d5d9ca779b78de..2f877ea319fa6dad5d632f6d5efa5ec9f889b163 100644 (file)
@@ -61,3 +61,24 @@ kvm-arm64-use-config_lock-to-protect-data-ordered-against-kvm_run.patch
 kvm-arm64-use-config_lock-to-protect-vgic-state.patch
 kvm-arm64-vgic-don-t-acquire-its_lock-before-config_lock.patch
 relayfs-fix-out-of-bounds-access-in-relay_file_read.patch
+kvm-risc-v-retry-fault-if-vma_lookup-results-become-invalid.patch
+kvm-x86-preserve-tdp-mmu-roots-until-they-are-explicitly-invalidated.patch
+ksmbd-fix-racy-issue-under-cocurrent-smb2-tree-disconnect.patch
+ksmbd-call-rcu_barrier-in-ksmbd_server_exit.patch
+ksmbd-fix-null-pointer-dereference-in-smb2_get_info_filesystem.patch
+ksmbd-fix-memleak-in-session-setup.patch
+ksmbd-not-allow-guest-user-on-multichannel.patch
+ksmbd-fix-deadlock-in-ksmbd_find_crypto_ctx.patch
+ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch
+ksmbd-block-asynchronous-requests-when-making-a-delay-on-session-setup.patch
+ksmbd-destroy-expired-sessions.patch
+ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with-multichannel.patch
+acpi-video-remove-acpi_backlight-video-quirk-for-lenovo-thinkpad-w530.patch
+igc-read-before-write-to-srrctl-register.patch
+i2c-omap-fix-standard-mode-false-ack-readings.patch
+riscv-mm-remove-redundant-parameter-of-create_fdt_early_page_table.patch
+thermal-intel-powerclamp-fix-null-pointer-access-issue.patch
+tracing-fix-permissions-for-the-buffer_percent-file.patch
+drm-amd-pm-re-enable-the-gfx-imu-when-smu-resume.patch
+iommu-amd-fix-guest-virtual-apic-table-root-pointer-configuration-in-irte.patch
+risc-v-align-sbi-probe-implementation-with-spec.patch
diff --git a/queue-6.3/thermal-intel-powerclamp-fix-null-pointer-access-issue.patch b/queue-6.3/thermal-intel-powerclamp-fix-null-pointer-access-issue.patch
new file mode 100644 (file)
index 0000000..97a9f91
--- /dev/null
@@ -0,0 +1,52 @@
+From b5d68f84f4c62c78bc3d004911d80da5aa22df8b Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Wed, 3 May 2023 16:38:50 -0700
+Subject: thermal: intel: powerclamp: Fix NULL pointer access issue
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit b5d68f84f4c62c78bc3d004911d80da5aa22df8b upstream.
+
+If cur_state for the powerclamp cooling device is set to the default
+minimum state of 0 without first being set to a value > 0, this results
+in a NULL pointer access.
+
+This NULL pointer access happens in the powercap core idle-inject
+function idle_inject_set_duration() as there is no NULL check for
+idle_inject_device pointer. This pointer must be allocated by calling
+idle_inject_register() or idle_inject_register_full().
+
+In the function powerclamp_set_cur_state(), idle_inject_device pointer
+is allocated only when the cur_state > 0. But setting 0 without changing
+to any other state, idle_inject_set_duration() will be called with a
+NULL idle_inject_device pointer.
+
+To address this, just return from powerclamp_set_cur_state() if the
+current cooling device state is the same as the last one. Since the
+power-up default cooling device state is 0, changing the state to 0
+again here will return without calling idle_inject_set_duration().
+
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Fixes: 8526eb7fc75a ("thermal: intel: powerclamp: Use powercap idle-inject feature")
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=217386
+Tested-by: Risto A. Paju <teknohog@iki.fi>
+Cc: 6.3+ <stable@kernel.org> # 6.3+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/intel/intel_powerclamp.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -703,6 +703,10 @@ static int powerclamp_set_cur_state(stru
+       new_target_ratio = clamp(new_target_ratio, 0UL,
+                               (unsigned long) (max_idle - 1));
++
++      if (powerclamp_data.target_ratio == new_target_ratio)
++              goto exit_set;
++
+       if (!powerclamp_data.target_ratio && new_target_ratio > 0) {
+               pr_info("Start idle injection to reduce power\n");
+               powerclamp_data.target_ratio = new_target_ratio;
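The guard added above makes powerclamp_set_cur_state() return before touching the idle-inject machinery whenever the requested state equals the current one, so setting the power-up default of 0 again never reaches idle_inject_set_duration() with an unallocated device. A minimal sketch of that early-return guard; set_cur_state(), the structure layout, and the allocation point are illustrative, and stopping injection on a real transition back to 0 is omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct idle_inject_device { unsigned int duration; };

    static struct idle_inject_device *ii_dev;  /* allocated only once clamping starts */
    static unsigned long cur_ratio;            /* like powerclamp_data.target_ratio */

    static int set_cur_state(unsigned long new_ratio)
    {
        /* The fix: bail out when nothing changes, so "set 0 while already
         * at the default 0" never reaches the NULL device below. */
        if (new_ratio == cur_ratio)
            return 0;

        if (cur_ratio == 0 && new_ratio > 0) {
            ii_dev = calloc(1, sizeof(*ii_dev));   /* "start idle injection" */
            if (!ii_dev)
                return -1;
        }

        ii_dev->duration = (unsigned int)new_ratio; /* idle_inject_set_duration() stand-in */
        cur_ratio = new_ratio;
        return 0;
    }

    int main(void)
    {
        set_cur_state(0);   /* would dereference NULL without the guard */
        set_cur_state(10);  /* first real clamp allocates the device */
        printf("duration now %u\n", ii_dev->duration);
        free(ii_dev);
        return 0;
    }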
diff --git a/queue-6.3/tracing-fix-permissions-for-the-buffer_percent-file.patch b/queue-6.3/tracing-fix-permissions-for-the-buffer_percent-file.patch
new file mode 100644 (file)
index 0000000..28c3281
--- /dev/null
@@ -0,0 +1,37 @@
+From 4f94559f40ad06d627c0fdfc3319cec778a2845b Mon Sep 17 00:00:00 2001
+From: Ondrej Mosnacek <omosnace@redhat.com>
+Date: Wed, 3 May 2023 16:01:14 +0200
+Subject: tracing: Fix permissions for the buffer_percent file
+
+From: Ondrej Mosnacek <omosnace@redhat.com>
+
+commit 4f94559f40ad06d627c0fdfc3319cec778a2845b upstream.
+
+This file defines both read and write operations, yet it is being
+created as read-only. This means that it can't be written to without the
+CAP_DAC_OVERRIDE capability. Fix the permissions to allow root to write
+to it without the need to override DAC perms.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230503140114.3280002-1-omosnace@redhat.com
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Fixes: 03329f993978 ("tracing: Add tracefs file buffer_percentage")
+Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9658,7 +9658,7 @@ init_tracer_tracefs(struct trace_array *
+       tr->buffer_percent = 50;
+-      trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
++      trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
+                       tr, &buffer_percent_fops);
+       create_trace_options_dir(tr);
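The one-line change above only alters the mode the tracefs file is created with; the fops already supported writes. The sketch below reproduces the user-visible symptom with an ordinary file: created read-only it rejects write opens from an unprivileged process (root still succeeds via CAP_DAC_OVERRIDE, which is exactly what the commit message wants to stop requiring), while an owner-writable mode allows the write. The path and the 0444/0644 modes are illustrative, not the kernel's TRACE_MODE_* values; run as a non-root user to see the failure:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/tmp/buffer_percent_demo";
        int fd;

        /* Create the file read-only, the way the old mode did. */
        fd = open(path, O_CREAT | O_TRUNC | O_RDONLY, 0444);
        if (fd >= 0)
            close(fd);

        /* Writing now needs CAP_DAC_OVERRIDE, even for the owner. */
        fd = open(path, O_WRONLY);
        printf("0444 write open: %s\n", fd < 0 ? strerror(errno) : "ok");
        if (fd >= 0)
            close(fd);

        /* An owner-writable mode lets the owner write without extra caps. */
        chmod(path, 0644);
        fd = open(path, O_WRONLY);
        printf("0644 write open: %s\n", fd < 0 ? strerror(errno) : "ok");
        if (fd >= 0)
            close(fd);

        unlink(path);
        return 0;
    }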