--- /dev/null
+From 77e92e9971dad05e318c0c76f585d29361229c04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 20:25:00 +0530
+Subject: 8250: microchip: pci1xxxx: Add PCIe Hot reset disable support for Rev
+ C0 and later devices
+
+From: Rengarajan S <rengarajan.s@microchip.com>
+
+[ Upstream commit c40b91e38eb8d4489def095d62ab476d45871323 ]
+
+Systems that issue PCIe hot reset requests during a suspend/resume
+cycle cause PCI1XXXX device revisions prior to C0 to get its UART
+configuration registers reset to hardware default values. This results
+in device inaccessibility and data transfer failures. Starting with
+Revision C0, support was added in the device hardware (via the Hot
+Reset Disable Bit) to allow resetting only the PCIe interface and its
+associated logic, but preserving the UART configuration during a hot
+reset. This patch enables the hot reset disable feature during suspend/
+resume for C0 and later revisions of the device.
+
+Signed-off-by: Rengarajan S <rengarajan.s@microchip.com>
+Reviewed-by: Jiri Slaby <jirislaby@kernel.org>
+Link: https://lore.kernel.org/r/20250425145500.29036-1-rengarajan.s@microchip.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/8250_pci1xxxx.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
+index f462b3d1c104c..d6b01e015a96b 100644
+--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
++++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
+@@ -115,6 +115,7 @@
+
+ #define UART_RESET_REG 0x94
+ #define UART_RESET_D3_RESET_DISABLE BIT(16)
++#define UART_RESET_HOT_RESET_DISABLE BIT(17)
+
+ #define UART_BURST_STATUS_REG 0x9C
+ #define UART_TX_BURST_FIFO 0xA0
+@@ -620,6 +621,10 @@ static int pci1xxxx_suspend(struct device *dev)
+ }
+
+ data = readl(p + UART_RESET_REG);
++
++ if (priv->dev_rev >= 0xC0)
++ data |= UART_RESET_HOT_RESET_DISABLE;
++
+ writel(data | UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
+
+ if (wakeup)
+@@ -647,7 +652,12 @@ static int pci1xxxx_resume(struct device *dev)
+ }
+
+ data = readl(p + UART_RESET_REG);
++
++ if (priv->dev_rev >= 0xC0)
++ data &= ~UART_RESET_HOT_RESET_DISABLE;
++
+ writel(data & ~UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
++
+ iounmap(p);
+
+ for (i = 0; i < priv->nr; i++) {
+--
+2.39.5
+
--- /dev/null
+From 91a96965b18ae2c1d03c0dcd87be35f4e0efad0e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Feb 2025 09:46:19 +0100
+Subject: accel/ivpu: Add debugfs interface for setting HWS priority bands
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit 320323d2e5456df9d6236ac1ce9c030b1a74aa5b ]
+
+Add debugfs interface to modify following priority bands properties:
+ * grace period
+ * process grace period
+ * process quantum
+
+This allows for the adjustment of hardware scheduling algorithm parameters
+for each existing priority band, facilitating validation and fine-tuning.
+
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250204084622.2422544-4-jacek.lawrynowicz@linux.intel.com
+Stable-dep-of: a47e36dc5d90 ("accel/ivpu: Trigger device recovery on engine reset/resume failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_debugfs.c | 84 +++++++++++++++++++++++++++++++
+ drivers/accel/ivpu/ivpu_hw.c | 21 ++++++++
+ drivers/accel/ivpu/ivpu_hw.h | 5 ++
+ drivers/accel/ivpu/ivpu_jsm_msg.c | 29 ++++-------
+ 4 files changed, 121 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
+index 05a0d99ce95c4..1edf6e5644026 100644
+--- a/drivers/accel/ivpu/ivpu_debugfs.c
++++ b/drivers/accel/ivpu/ivpu_debugfs.c
+@@ -423,6 +423,88 @@ static int dct_active_set(void *data, u64 active_percent)
+
+ DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+
++static int priority_bands_show(struct seq_file *s, void *v)
++{
++ struct ivpu_device *vdev = s->private;
++ struct ivpu_hw_info *hw = vdev->hw;
++
++ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
++ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
++ switch (band) {
++ case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
++ seq_puts(s, "Idle: ");
++ break;
++
++ case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
++ seq_puts(s, "Normal: ");
++ break;
++
++ case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
++ seq_puts(s, "Focus: ");
++ break;
++
++ case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
++ seq_puts(s, "Realtime: ");
++ break;
++ }
++
++ seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
++ hw->hws.grace_period[band], hw->hws.process_grace_period[band],
++ hw->hws.process_quantum[band]);
++ }
++
++ return 0;
++}
++
++static int priority_bands_fops_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, priority_bands_show, inode->i_private);
++}
++
++static ssize_t
++priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
++{
++ struct seq_file *s = file->private_data;
++ struct ivpu_device *vdev = s->private;
++ char buf[64];
++ u32 grace_period;
++ u32 process_grace_period;
++ u32 process_quantum;
++ u32 band;
++ int ret;
++
++ if (size >= sizeof(buf))
++ return -EINVAL;
++
++ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
++ if (ret < 0)
++ return ret;
++
++ buf[size] = '\0';
++ ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
++ &process_quantum);
++ if (ret != 4)
++ return -EINVAL;
++
++ if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
++ return -EINVAL;
++
++ vdev->hw->hws.grace_period[band] = grace_period;
++ vdev->hw->hws.process_grace_period[band] = process_grace_period;
++ vdev->hw->hws.process_quantum[band] = process_quantum;
++
++ return size;
++}
++
++static const struct file_operations ivpu_hws_priority_bands_fops = {
++ .owner = THIS_MODULE,
++ .open = priority_bands_fops_open,
++ .write = priority_bands_fops_write,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
+ void ivpu_debugfs_init(struct ivpu_device *vdev)
+ {
+ struct dentry *debugfs_root = vdev->drm.debugfs_root;
+@@ -445,6 +527,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
+ &fw_trace_hw_comp_mask_fops);
+ debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
+ &fw_trace_level_fops);
++ debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
++ &ivpu_hws_priority_bands_fops);
+
+ debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
+ &ivpu_reset_engine_fops);
+diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
+index 1214f155afa11..37ef8ce642109 100644
+--- a/drivers/accel/ivpu/ivpu_hw.c
++++ b/drivers/accel/ivpu/ivpu_hw.c
+@@ -110,6 +110,26 @@ static void timeouts_init(struct ivpu_device *vdev)
+ }
+ }
+
++static void priority_bands_init(struct ivpu_device *vdev)
++{
++ /* Idle */
++ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
++ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
++ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
++ /* Normal */
++ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
++ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
++ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
++ /* Focus */
++ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
++ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
++ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
++ /* Realtime */
++ vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
++ vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
++ vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
++}
++
+ static void memory_ranges_init(struct ivpu_device *vdev)
+ {
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+@@ -248,6 +268,7 @@ int ivpu_hw_init(struct ivpu_device *vdev)
+ {
+ ivpu_hw_btrs_info_init(vdev);
+ ivpu_hw_btrs_freq_ratios_init(vdev);
++ priority_bands_init(vdev);
+ memory_ranges_init(vdev);
+ platform_init(vdev);
+ wa_init(vdev);
+diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
+index 1e85306bcd065..1c016b99a0fdd 100644
+--- a/drivers/accel/ivpu/ivpu_hw.h
++++ b/drivers/accel/ivpu/ivpu_hw.h
+@@ -45,6 +45,11 @@ struct ivpu_hw_info {
+ u8 pn_ratio;
+ u32 profiling_freq;
+ } pll;
++ struct {
++ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
++ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
++ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
++ } hws;
+ u32 tile_fuse;
+ u32 sku;
+ u16 config;
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index 33d597b2a7f53..21018feb45978 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -7,6 +7,7 @@
+ #include "ivpu_hw.h"
+ #include "ivpu_ipc.h"
+ #include "ivpu_jsm_msg.h"
++#include "vpu_jsm_api.h"
+
+ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
+ {
+@@ -409,26 +410,18 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
+ {
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
+ struct vpu_jsm_msg resp;
++ struct ivpu_hw_info *hw = vdev->hw;
++ struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
++ &req.payload.hws_priority_band_setup;
+ int ret;
+
+- /* Idle */
+- req.payload.hws_priority_band_setup.grace_period[0] = 0;
+- req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
+- req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
+- /* Normal */
+- req.payload.hws_priority_band_setup.grace_period[1] = 50000;
+- req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
+- req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
+- /* Focus */
+- req.payload.hws_priority_band_setup.grace_period[2] = 50000;
+- req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
+- req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
+- /* Realtime */
+- req.payload.hws_priority_band_setup.grace_period[3] = 0;
+- req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
+- req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
+-
+- req.payload.hws_priority_band_setup.normal_band_percentage = 10;
++ for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
++ band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
++ setup->grace_period[band] = hw->hws.grace_period[band];
++ setup->process_grace_period[band] = hw->hws.process_grace_period[band];
++ setup->process_quantum[band] = hw->hws.process_quantum[band];
++ }
++ setup->normal_band_percentage = 10;
+
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+--
+2.39.5
+
--- /dev/null
+From f965f126a2ace27a176048a8162bb5095508349d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Sep 2024 21:53:16 +0200
+Subject: accel/ivpu: Do not fail on cmdq if failed to allocate preemption
+ buffers
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit 08eb99ce911d3ea202f79b42b96cd6e8498f7f69 ]
+
+Allow to proceed with job command queue creation even if preemption
+buffers failed to be allocated, print warning that preemption on such
+command queue will be disabled.
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240930195322.461209-26-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Stable-dep-of: a47e36dc5d90 ("accel/ivpu: Trigger device recovery on engine reset/resume failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_job.c | 27 ++++++++++++++++-----------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 27121c66e48f8..58d64a221a1e0 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -60,6 +60,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
+
+ err_free_primary:
+ ivpu_bo_free(cmdq->primary_preempt_buf);
++ cmdq->primary_preempt_buf = NULL;
+ return -ENOMEM;
+ }
+
+@@ -69,10 +70,10 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+- drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
+- drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
+- ivpu_bo_free(cmdq->primary_preempt_buf);
+- ivpu_bo_free(cmdq->secondary_preempt_buf);
++ if (cmdq->primary_preempt_buf)
++ ivpu_bo_free(cmdq->primary_preempt_buf);
++ if (cmdq->secondary_preempt_buf)
++ ivpu_bo_free(cmdq->secondary_preempt_buf);
+ }
+
+ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+@@ -98,12 +99,10 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+- goto err_free_cmdq_mem;
++ ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
+
+ return cmdq;
+
+-err_free_cmdq_mem:
+- ivpu_bo_free(cmdq->mem);
+ err_erase_xa:
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+ err_free_cmdq:
+@@ -363,10 +362,16 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW &&
+ (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
+- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
+- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
+- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
+- entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
++ if (cmdq->primary_preempt_buf) {
++ entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
++ entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
++ }
++
++ if (cmdq->secondary_preempt_buf) {
++ entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
++ entry->secondary_preempt_buf_size =
++ ivpu_bo_size(cmdq->secondary_preempt_buf);
++ }
+ }
+
+ wmb(); /* Ensure that tail is updated after filling entry */
+--
+2.39.5
+
--- /dev/null
+From b9dfb88147ed68b416c308ab724c798bd9bdcffc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2024 16:58:13 +0200
+Subject: accel/ivpu: Make command queue ID allocated on XArray
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit 76ad741ec7349bb1112f3a0ff27adf1ca75cf025 ]
+
+Use XArray for dynamic command queue ID allocations instead of fixed
+ones. This is required by upcoming changes to UAPI that will allow to
+manage command queues by user space instead of having predefined number
+of queues in a context.
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241017145817.121590-8-jacek.lawrynowicz@linux.intel.com
+Stable-dep-of: a47e36dc5d90 ("accel/ivpu: Trigger device recovery on engine reset/resume failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_drv.c | 6 +++
+ drivers/accel/ivpu/ivpu_drv.h | 7 ++-
+ drivers/accel/ivpu/ivpu_job.c | 91 ++++++++++++++++++-----------------
+ drivers/accel/ivpu/ivpu_job.h | 2 +
+ 4 files changed, 60 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 67d56a944d549..00208c4a65807 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -102,6 +102,8 @@ static void file_priv_release(struct kref *ref)
+ pm_runtime_get_sync(vdev->drm.dev);
+ mutex_lock(&vdev->context_list_lock);
+ file_priv_unbind(vdev, file_priv);
++ drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
++ xa_destroy(&file_priv->cmdq_xa);
+ mutex_unlock(&vdev->context_list_lock);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+
+@@ -261,6 +263,10 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
+ file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+
++ xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
++ file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
++ file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
++
+ mutex_unlock(&vdev->context_list_lock);
+ drm_dev_exit(idx);
+
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 4519c93fb377c..f2ba3ed8b3fc5 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -53,6 +53,9 @@
+ #define IVPU_NUM_PRIORITIES 4
+ #define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
+
++#define IVPU_CMDQ_MIN_ID 1
++#define IVPU_CMDQ_MAX_ID 255
++
+ #define IVPU_PLATFORM_SILICON 0
+ #define IVPU_PLATFORM_SIMICS 2
+ #define IVPU_PLATFORM_FPGA 3
+@@ -171,13 +174,15 @@ struct ivpu_file_priv {
+ struct kref ref;
+ struct ivpu_device *vdev;
+ struct mutex lock; /* Protects cmdq */
+- struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
++ struct xarray cmdq_xa;
+ struct ivpu_mmu_context ctx;
+ struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
+ struct list_head ms_instance_list;
+ struct ivpu_bo *ms_info_bo;
+ struct xa_limit job_limit;
+ u32 job_id_next;
++ struct xa_limit cmdq_limit;
++ u32 cmdq_id_next;
+ bool has_mmu_faults;
+ bool bound;
+ bool aborted;
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index ed3f60d809bc0..5eaf219170eee 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -93,9 +93,16 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+ goto err_free_cmdq;
+ }
+
++ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
++ &file_priv->cmdq_id_next, GFP_KERNEL);
++ if (ret < 0) {
++ ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
++ goto err_erase_db_xa;
++ }
++
+ cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!cmdq->mem)
+- goto err_erase_xa;
++ goto err_erase_cmdq_xa;
+
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+@@ -103,7 +110,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+
+ return cmdq;
+
+-err_erase_xa:
++err_erase_cmdq_xa:
++ xa_erase(&file_priv->cmdq_xa, cmdq->id);
++err_erase_db_xa:
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+ err_free_cmdq:
+ kfree(cmdq);
+@@ -127,13 +136,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
+ struct ivpu_device *vdev = file_priv->vdev;
+ int ret;
+
+- ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
++ ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
+ task_pid_nr(current), engine,
+ cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ if (ret)
+ return ret;
+
+- ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
++ ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
+ priority);
+ if (ret)
+ return ret;
+@@ -147,20 +156,21 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
+ int ret;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+- ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
++ ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
+ cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+ else
+ ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
+ cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
+
+ if (!ret)
+- ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
++ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
++ cmdq->db_id, cmdq->id, file_priv->ctx.id);
+
+ return ret;
+ }
+
+ static int
+-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
++ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
+ {
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct vpu_job_queue_header *jobq_header;
+@@ -176,13 +186,13 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
+
+ cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ jobq_header = &cmdq->jobq->header;
+- jobq_header->engine_idx = engine;
++ jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
+ jobq_header->head = 0;
+ jobq_header->tail = 0;
+ wmb(); /* Flush WC buffer for jobq->header */
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+- ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
++ ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
+ if (ret)
+ return ret;
+ }
+@@ -209,9 +219,9 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ cmdq->db_registered = false;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+- ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
++ ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
+ if (!ret)
+- ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
++ ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+ }
+
+ ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
+@@ -221,51 +231,46 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ return 0;
+ }
+
+-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
+- u8 priority)
++static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
+ {
+- struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
++ struct ivpu_cmdq *cmdq;
++ unsigned long cmdq_id;
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
++ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++ if (cmdq->priority == priority)
++ break;
++
+ if (!cmdq) {
+ cmdq = ivpu_cmdq_alloc(file_priv);
+ if (!cmdq)
+ return NULL;
+- file_priv->cmdq[priority] = cmdq;
++ cmdq->priority = priority;
+ }
+
+- ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
++ ret = ivpu_cmdq_init(file_priv, cmdq, priority);
+ if (ret)
+ return NULL;
+
+ return cmdq;
+ }
+
+-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u8 priority)
++void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+ {
+- struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
++ struct ivpu_cmdq *cmdq;
++ unsigned long cmdq_id;
+
+ lockdep_assert_held(&file_priv->lock);
+
+- if (cmdq) {
+- file_priv->cmdq[priority] = NULL;
++ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
++ xa_erase(&file_priv->cmdq_xa, cmdq_id);
+ ivpu_cmdq_fini(file_priv, cmdq);
+ ivpu_cmdq_free(file_priv, cmdq);
+ }
+ }
+
+-void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+-{
+- u8 priority;
+-
+- lockdep_assert_held(&file_priv->lock);
+-
+- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
+- ivpu_cmdq_release_locked(file_priv, priority);
+-}
+-
+ /*
+ * Mark the doorbell as unregistered
+ * This function needs to be called when the VPU hardware is restarted
+@@ -274,16 +279,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+ */
+ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
+ {
+- u8 priority;
++ struct ivpu_cmdq *cmdq;
++ unsigned long cmdq_id;
+
+ mutex_lock(&file_priv->lock);
+
+- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
+- struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
+-
+- if (cmdq)
+- cmdq->db_registered = false;
+- }
++ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++ cmdq->db_registered = false;
+
+ mutex_unlock(&file_priv->lock);
+ }
+@@ -303,12 +305,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
+
+ static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
+ {
+- u8 priority;
++ struct ivpu_cmdq *cmdq;
++ unsigned long cmdq_id;
+
+- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
+- if (file_priv->cmdq[priority])
+- ivpu_cmdq_fini(file_priv, file_priv->cmdq[priority]);
+- }
++ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++ ivpu_cmdq_fini(file_priv, cmdq);
+ }
+
+ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
+@@ -335,8 +336,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+
+ /* Check if there is space left in job queue */
+ if (next_entry == header->head) {
+- ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
+- job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
++ ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
++ job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
+ return -EBUSY;
+ }
+
+@@ -550,7 +551,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
+ mutex_lock(&vdev->submitted_jobs_lock);
+ mutex_lock(&file_priv->lock);
+
+- cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
++ cmdq = ivpu_cmdq_acquire(file_priv, priority);
+ if (!cmdq) {
+ ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
+ file_priv->ctx.id, job->engine_idx, priority);
+diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
+index 0ae77f0638fad..af1ed039569cd 100644
+--- a/drivers/accel/ivpu/ivpu_job.h
++++ b/drivers/accel/ivpu/ivpu_job.h
+@@ -28,8 +28,10 @@ struct ivpu_cmdq {
+ struct ivpu_bo *secondary_preempt_buf;
+ struct ivpu_bo *mem;
+ u32 entry_count;
++ u32 id;
+ u32 db_id;
+ bool db_registered;
++ u8 priority;
+ };
+
+ /**
+--
+2.39.5
+
--- /dev/null
+From daa91bd97fb9118deb4ca795778122111e8d8932 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2024 16:58:09 +0200
+Subject: accel/ivpu: Remove copy engine support
+
+From: Andrzej Kacprowski <Andrzej.Kacprowski@intel.com>
+
+[ Upstream commit 94b2a2c0e7cba3f163609dbd94120ee533ad2a07 ]
+
+Copy engine was deprecated by the FW and is no longer supported.
+Compute engine includes all copy engine functionality and should be used
+instead.
+
+This change does not affect user space as the copy engine was never
+used outside of a couple of tests.
+
+Signed-off-by: Andrzej Kacprowski <Andrzej.Kacprowski@intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241017145817.121590-4-jacek.lawrynowicz@linux.intel.com
+Stable-dep-of: a47e36dc5d90 ("accel/ivpu: Trigger device recovery on engine reset/resume failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_drv.h | 5 +---
+ drivers/accel/ivpu/ivpu_job.c | 43 +++++++++++--------------------
+ drivers/accel/ivpu/ivpu_jsm_msg.c | 8 +++---
+ include/uapi/drm/ivpu_accel.h | 6 +----
+ 4 files changed, 21 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 1fe6a3bd4e36b..4519c93fb377c 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -50,11 +50,8 @@
+ #define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
+ #define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+
+-#define IVPU_NUM_ENGINES 2
+ #define IVPU_NUM_PRIORITIES 4
+-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
+-
+-#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
++#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
+
+ #define IVPU_PLATFORM_SILICON 0
+ #define IVPU_PLATFORM_SIMICS 2
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 58d64a221a1e0..ed3f60d809bc0 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -224,8 +224,7 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
+ u8 priority)
+ {
+- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
++ struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+@@ -234,7 +233,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16
+ cmdq = ivpu_cmdq_alloc(file_priv);
+ if (!cmdq)
+ return NULL;
+- file_priv->cmdq[cmdq_idx] = cmdq;
++ file_priv->cmdq[priority] = cmdq;
+ }
+
+ ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
+@@ -244,15 +243,14 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16
+ return cmdq;
+ }
+
+-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
++static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u8 priority)
+ {
+- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
++ struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (cmdq) {
+- file_priv->cmdq[cmdq_idx] = NULL;
++ file_priv->cmdq[priority] = NULL;
+ ivpu_cmdq_fini(file_priv, cmdq);
+ ivpu_cmdq_free(file_priv, cmdq);
+ }
+@@ -260,14 +258,12 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
+
+ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+ {
+- u16 engine;
+ u8 priority;
+
+ lockdep_assert_held(&file_priv->lock);
+
+- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
+- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
+- ivpu_cmdq_release_locked(file_priv, engine, priority);
++ for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
++ ivpu_cmdq_release_locked(file_priv, priority);
+ }
+
+ /*
+@@ -278,19 +274,15 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+ */
+ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
+ {
+- u16 engine;
+ u8 priority;
+
+ mutex_lock(&file_priv->lock);
+
+- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
+- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
+- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
++ for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
++ struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
+
+- if (cmdq)
+- cmdq->db_registered = false;
+- }
++ if (cmdq)
++ cmdq->db_registered = false;
+ }
+
+ mutex_unlock(&file_priv->lock);
+@@ -311,16 +303,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
+
+ static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
+ {
+- u16 engine;
+ u8 priority;
+
+- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
+- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
+- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+-
+- if (file_priv->cmdq[cmdq_idx])
+- ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
+- }
++ for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
++ if (file_priv->cmdq[priority])
++ ivpu_cmdq_fini(file_priv, file_priv->cmdq[priority]);
+ }
+ }
+
+@@ -703,7 +690,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+ int idx, ret;
+ u8 priority;
+
+- if (params->engine > DRM_IVPU_ENGINE_COPY)
++ if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index ae91ad24d10d8..33d597b2a7f53 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -132,7 +132,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
+ struct vpu_jsm_msg resp;
+ int ret;
+
+- if (engine > VPU_ENGINE_COPY)
++ if (engine != VPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ req.payload.query_engine_hb.engine_idx = engine;
+@@ -155,7 +155,7 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
+ struct vpu_jsm_msg resp;
+ int ret;
+
+- if (engine > VPU_ENGINE_COPY)
++ if (engine != VPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ req.payload.engine_reset.engine_idx = engine;
+@@ -174,7 +174,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
+ struct vpu_jsm_msg resp;
+ int ret;
+
+- if (engine > VPU_ENGINE_COPY)
++ if (engine != VPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ req.payload.engine_preempt.engine_idx = engine;
+@@ -346,7 +346,7 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
+ struct vpu_jsm_msg resp;
+ int ret;
+
+- if (engine >= VPU_ENGINE_NB)
++ if (engine != VPU_ENGINE_COMPUTE)
+ return -EINVAL;
+
+ req.payload.hws_resume_engine.engine_idx = engine;
+diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
+index 13001da141c33..4b261eb705bc0 100644
+--- a/include/uapi/drm/ivpu_accel.h
++++ b/include/uapi/drm/ivpu_accel.h
+@@ -261,7 +261,7 @@ struct drm_ivpu_bo_info {
+
+ /* drm_ivpu_submit engines */
+ #define DRM_IVPU_ENGINE_COMPUTE 0
+-#define DRM_IVPU_ENGINE_COPY 1
++#define DRM_IVPU_ENGINE_COPY 1 /* Deprecated */
+
+ /**
+ * struct drm_ivpu_submit - Submit commands to the VPU
+@@ -292,10 +292,6 @@ struct drm_ivpu_submit {
+ * %DRM_IVPU_ENGINE_COMPUTE:
+ *
+ * Performs Deep Learning Neural Compute Inference Operations
+- *
+- * %DRM_IVPU_ENGINE_COPY:
+- *
+- * Performs memory copy operations to/from system memory allocated for VPU
+ */
+ __u32 engine;
+
+--
+2.39.5
+
--- /dev/null
+From 7346aefe8b75201d55efb76529d6c45f747ff660 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2025 18:32:24 +0100
+Subject: accel/ivpu: Separate DB ID and CMDQ ID allocations from CMDQ
+ allocation
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit 950942b4813f8c44dbec683fdb140cf4a238516b ]
+
+Move doorbell ID and command queue ID XArray allocations from command
+queue memory allocation function. This will allow ID allocations to be
+done without the need for actual memory allocation.
+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-2-maciej.falkowski@linux.intel.com
+Stable-dep-of: a47e36dc5d90 ("accel/ivpu: Trigger device recovery on engine reset/resume failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_job.c | 88 +++++++++++++++++++++++++----------
+ 1 file changed, 64 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index 5eaf219170eee..e57acae3b42ef 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -86,23 +86,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+ if (!cmdq)
+ return NULL;
+
+- ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+- GFP_KERNEL);
+- if (ret < 0) {
+- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
+- goto err_free_cmdq;
+- }
+-
+- ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+- &file_priv->cmdq_id_next, GFP_KERNEL);
+- if (ret < 0) {
+- ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
+- goto err_erase_db_xa;
+- }
+-
+ cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!cmdq->mem)
+- goto err_erase_cmdq_xa;
++ goto err_free_cmdq;
+
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+@@ -110,10 +96,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
+
+ return cmdq;
+
+-err_erase_cmdq_xa:
+- xa_erase(&file_priv->cmdq_xa, cmdq->id);
+-err_erase_db_xa:
+- xa_erase(&vdev->db_xa, cmdq->db_id);
+ err_free_cmdq:
+ kfree(cmdq);
+ return NULL;
+@@ -231,30 +213,88 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
+ return 0;
+ }
+
++static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id)
++{
++ int ret;
++ u32 id;
++
++ ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL);
++ if (ret < 0)
++ return ret;
++
++ *db_id = id;
++ return 0;
++}
++
++static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id)
++{
++ int ret;
++ u32 id;
++
++ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit,
++ &file_priv->cmdq_id_next, GFP_KERNEL);
++ if (ret < 0)
++ return ret;
++
++ *cmdq_id = id;
++ return 0;
++}
++
+ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
+ {
++ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq;
+- unsigned long cmdq_id;
++ unsigned long id;
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
++ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
+ if (cmdq->priority == priority)
+ break;
+
+ if (!cmdq) {
+ cmdq = ivpu_cmdq_alloc(file_priv);
+- if (!cmdq)
++ if (!cmdq) {
++ ivpu_err(vdev, "Failed to allocate command queue\n");
+ return NULL;
++ }
++
++ ret = ivpu_db_id_alloc(vdev, &cmdq->db_id);
++ if (ret) {
++ ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret);
++ goto err_free_cmdq;
++ }
++
++ ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id);
++ if (ret) {
++ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
++ goto err_erase_db_id;
++ }
++
+ cmdq->priority = priority;
++ ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL));
++ if (ret) {
++ ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret);
++ goto err_erase_cmdq_id;
++ }
+ }
+
+ ret = ivpu_cmdq_init(file_priv, cmdq, priority);
+- if (ret)
+- return NULL;
++ if (ret) {
++ ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret);
++ goto err_free_cmdq;
++ }
+
+ return cmdq;
++
++err_erase_cmdq_id:
++ xa_erase(&file_priv->cmdq_xa, cmdq->id);
++err_erase_db_id:
++ xa_erase(&vdev->db_xa, cmdq->db_id);
++err_free_cmdq:
++ ivpu_cmdq_free(file_priv, cmdq);
++ return NULL;
+ }
+
+ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
+--
+2.39.5
+
--- /dev/null
+From aadafda2f29ac5bd5ae4df51fe06b8131e58ea15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 17:42:53 +0200
+Subject: accel/ivpu: Trigger device recovery on engine reset/resume failure
+
+From: Karol Wachowski <karol.wachowski@intel.com>
+
+[ Upstream commit a47e36dc5d90dc664cac87304c17d50f1595d634 ]
+
+Trigger full device recovery when the driver fails to restore device state
+via engine reset and resume operations. This is necessary because, even if
+submissions from a faulty context are blocked, the NPU may still process
+previously submitted faulty jobs if the engine reset fails to abort them.
+Such jobs can continue to generate faults and occupy device resources.
+When engine reset is ineffective, the only way to recover is to perform
+a full device recovery.
+
+Fixes: dad945c27a42 ("accel/ivpu: Add handling of VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW")
+Cc: stable@vger.kernel.org # v6.15+
+Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
+Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://lore.kernel.org/r/20250528154253.500556-1-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ivpu/ivpu_job.c | 6 ++++--
+ drivers/accel/ivpu/ivpu_jsm_msg.c | 9 +++++++--
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index e57acae3b42ef..e631098718b15 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -849,7 +849,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
+ unsigned long id;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+- ivpu_jsm_reset_engine(vdev, 0);
++ if (ivpu_jsm_reset_engine(vdev, 0))
++ return;
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+@@ -865,7 +866,8 @@ void ivpu_context_abort_thread_handler(struct work_struct *work)
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+- ivpu_jsm_hws_resume_engine(vdev, 0);
++ if (ivpu_jsm_hws_resume_engine(vdev, 0))
++ return;
+ /*
+ * In hardware scheduling mode NPU already has stopped processing jobs
+ * and won't send us any further notifications, thus we have to free job related resources
+diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
+index 21018feb45978..7c08308d5725d 100644
+--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
++++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
+@@ -7,6 +7,7 @@
+ #include "ivpu_hw.h"
+ #include "ivpu_ipc.h"
+ #include "ivpu_jsm_msg.h"
++#include "ivpu_pm.h"
+ #include "vpu_jsm_api.h"
+
+ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
+@@ -163,8 +164,10 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+- if (ret)
++ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
++ ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
++ }
+
+ return ret;
+ }
+@@ -354,8 +357,10 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+- if (ret)
++ if (ret) {
+ ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
++ ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
++ }
+
+ return ret;
+ }
+--
+2.39.5
+
--- /dev/null
+From 6fc4a8016b9483de5bc7568c3ad188c536f17537 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Jun 2025 21:13:55 -0700
+Subject: af_unix: Don't leave consecutive consumed OOB skbs.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 32ca245464e1479bfea8592b9db227fdc1641705 ]
+
+Jann Horn reported a use-after-free in unix_stream_read_generic().
+
+The following sequences reproduce the issue:
+
+ $ python3
+ from socket import *
+ s1, s2 = socketpair(AF_UNIX, SOCK_STREAM)
+ s1.send(b'x', MSG_OOB)
+ s2.recv(1, MSG_OOB) # leave a consumed OOB skb
+ s1.send(b'y', MSG_OOB)
+ s2.recv(1, MSG_OOB) # leave a consumed OOB skb
+ s1.send(b'z', MSG_OOB)
+ s2.recv(1) # recv 'z' illegally
+ s2.recv(1, MSG_OOB) # access 'z' skb (use-after-free)
+
+Even though a user reads OOB data, the skb holding the data stays on
+the recv queue to mark the OOB boundary and break the next recv().
+
+After the last send() in the scenario above, the sk2's recv queue has
+2 leading consumed OOB skbs and 1 real OOB skb.
+
+Then, the following happens during the next recv() without MSG_OOB
+
+ 1. unix_stream_read_generic() peeks the first consumed OOB skb
+ 2. manage_oob() returns the next consumed OOB skb
+ 3. unix_stream_read_generic() fetches the next not-yet-consumed OOB skb
+ 4. unix_stream_read_generic() reads and frees the OOB skb
+
+, and the last recv(MSG_OOB) triggers KASAN splat.
+
+The 3. above occurs because of the SO_PEEK_OFF code, which does not
+expect unix_skb_len(skb) to be 0, but this is true for such consumed
+OOB skbs.
+
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ ...
+ }
+
+In addition to this use-after-free, there is another issue that
+ioctl(SIOCATMARK) does not function properly with consecutive consumed
+OOB skbs.
+
+So, nothing good comes out of such a situation.
+
+Instead of complicating manage_oob(), ioctl() handling, and the next
+ECONNRESET fix by introducing a loop for consecutive consumed OOB skbs,
+let's not leave such consecutive OOB unnecessarily.
+
+Now, while receiving an OOB skb in unix_stream_recv_urg(), if its
+previous skb is a consumed OOB skb, it is freed.
+
+[0]:
+BUG: KASAN: slab-use-after-free in unix_stream_read_actor (net/unix/af_unix.c:3027)
+Read of size 4 at addr ffff888106ef2904 by task python3/315
+
+CPU: 2 UID: 0 PID: 315 Comm: python3 Not tainted 6.16.0-rc1-00407-gec315832f6f9 #8 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-4.fc42 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl (lib/dump_stack.c:122)
+ print_report (mm/kasan/report.c:409 mm/kasan/report.c:521)
+ kasan_report (mm/kasan/report.c:636)
+ unix_stream_read_actor (net/unix/af_unix.c:3027)
+ unix_stream_read_generic (net/unix/af_unix.c:2708 net/unix/af_unix.c:2847)
+ unix_stream_recvmsg (net/unix/af_unix.c:3048)
+ sock_recvmsg (net/socket.c:1063 (discriminator 20) net/socket.c:1085 (discriminator 20))
+ __sys_recvfrom (net/socket.c:2278)
+ __x64_sys_recvfrom (net/socket.c:2291 (discriminator 1) net/socket.c:2287 (discriminator 1) net/socket.c:2287 (discriminator 1))
+ do_syscall_64 (arch/x86/entry/syscall_64.c:63 (discriminator 1) arch/x86/entry/syscall_64.c:94 (discriminator 1))
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+RIP: 0033:0x7f8911fcea06
+Code: 5d e8 41 8b 93 08 03 00 00 59 5e 48 83 f8 fc 75 19 83 e2 39 83 fa 08 75 11 e8 26 ff ff ff 66 0f 1f 44 00 00 48 8b 45 10 0f 05 <48> 8b 5d f8 c9 c3 0f 1f 40 00 f3 0f 1e fa 55 48 89 e5 48 83 ec 08
+RSP: 002b:00007fffdb0dccb0 EFLAGS: 00000202 ORIG_RAX: 000000000000002d
+RAX: ffffffffffffffda RBX: 00007fffdb0dcdc8 RCX: 00007f8911fcea06
+RDX: 0000000000000001 RSI: 00007f8911a5e060 RDI: 0000000000000006
+RBP: 00007fffdb0dccd0 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000001 R11: 0000000000000202 R12: 00007f89119a7d20
+R13: ffffffffc4653600 R14: 0000000000000000 R15: 0000000000000000
+ </TASK>
+
+Allocated by task 315:
+ kasan_save_stack (mm/kasan/common.c:48)
+ kasan_save_track (mm/kasan/common.c:60 (discriminator 1) mm/kasan/common.c:69 (discriminator 1))
+ __kasan_slab_alloc (mm/kasan/common.c:348)
+ kmem_cache_alloc_node_noprof (./include/linux/kasan.h:250 mm/slub.c:4148 mm/slub.c:4197 mm/slub.c:4249)
+ __alloc_skb (net/core/skbuff.c:660 (discriminator 4))
+ alloc_skb_with_frags (./include/linux/skbuff.h:1336 net/core/skbuff.c:6668)
+ sock_alloc_send_pskb (net/core/sock.c:2993)
+ unix_stream_sendmsg (./include/net/sock.h:1847 net/unix/af_unix.c:2256 net/unix/af_unix.c:2418)
+ __sys_sendto (net/socket.c:712 (discriminator 20) net/socket.c:727 (discriminator 20) net/socket.c:2226 (discriminator 20))
+ __x64_sys_sendto (net/socket.c:2233 (discriminator 1) net/socket.c:2229 (discriminator 1) net/socket.c:2229 (discriminator 1))
+ do_syscall_64 (arch/x86/entry/syscall_64.c:63 (discriminator 1) arch/x86/entry/syscall_64.c:94 (discriminator 1))
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+
+Freed by task 315:
+ kasan_save_stack (mm/kasan/common.c:48)
+ kasan_save_track (mm/kasan/common.c:60 (discriminator 1) mm/kasan/common.c:69 (discriminator 1))
+ kasan_save_free_info (mm/kasan/generic.c:579 (discriminator 1))
+ __kasan_slab_free (mm/kasan/common.c:271)
+ kmem_cache_free (mm/slub.c:4643 (discriminator 3) mm/slub.c:4745 (discriminator 3))
+ unix_stream_read_generic (net/unix/af_unix.c:3010)
+ unix_stream_recvmsg (net/unix/af_unix.c:3048)
+ sock_recvmsg (net/socket.c:1063 (discriminator 20) net/socket.c:1085 (discriminator 20))
+ __sys_recvfrom (net/socket.c:2278)
+ __x64_sys_recvfrom (net/socket.c:2291 (discriminator 1) net/socket.c:2287 (discriminator 1) net/socket.c:2287 (discriminator 1))
+ do_syscall_64 (arch/x86/entry/syscall_64.c:63 (discriminator 1) arch/x86/entry/syscall_64.c:94 (discriminator 1))
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+
+The buggy address belongs to the object at ffff888106ef28c0
+ which belongs to the cache skbuff_head_cache of size 224
+The buggy address is located 68 bytes inside of
+ freed 224-byte region [ffff888106ef28c0, ffff888106ef29a0)
+
+The buggy address belongs to the physical page:
+page: refcount:0 mapcount:0 mapping:0000000000000000 index:0xffff888106ef3cc0 pfn:0x106ef2
+head: order:1 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
+flags: 0x200000000000040(head|node=0|zone=2)
+page_type: f5(slab)
+raw: 0200000000000040 ffff8881001d28c0 ffffea000422fe00 0000000000000004
+raw: ffff888106ef3cc0 0000000080190010 00000000f5000000 0000000000000000
+head: 0200000000000040 ffff8881001d28c0 ffffea000422fe00 0000000000000004
+head: ffff888106ef3cc0 0000000080190010 00000000f5000000 0000000000000000
+head: 0200000000000001 ffffea00041bbc81 00000000ffffffff 00000000ffffffff
+head: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff888106ef2800: 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc fc
+ ffff888106ef2880: fc fc fc fc fc fc fc fc fa fb fb fb fb fb fb fb
+>ffff888106ef2900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff888106ef2980: fb fb fb fb fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff888106ef2a00: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+
+Fixes: 314001f0bf92 ("af_unix: Add OOB support")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Reviewed-by: Jann Horn <jannh@google.com>
+Link: https://patch.msgid.link/20250619041457.1132791-2-kuni1840@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 6b17623004439..2dfd3b70a7178 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2613,11 +2613,11 @@ struct unix_stream_read_state {
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ {
++ struct sk_buff *oob_skb, *read_skb = NULL;
+ struct socket *sock = state->socket;
+ struct sock *sk = sock->sk;
+ struct unix_sock *u = unix_sk(sk);
+ int chunk = 1;
+- struct sk_buff *oob_skb;
+
+ mutex_lock(&u->iolock);
+ unix_state_lock(sk);
+@@ -2632,9 +2632,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+
+ oob_skb = u->oob_skb;
+
+- if (!(state->flags & MSG_PEEK))
++ if (!(state->flags & MSG_PEEK)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+
++ if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
++ !unix_skb_len(oob_skb->prev)) {
++ read_skb = oob_skb->prev;
++ __skb_unlink(read_skb, &sk->sk_receive_queue);
++ }
++ }
++
+ spin_unlock(&sk->sk_receive_queue.lock);
+ unix_state_unlock(sk);
+
+@@ -2645,6 +2652,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+
+ mutex_unlock(&u->iolock);
+
++ consume_skb(read_skb);
++
+ if (chunk < 0)
+ return -EFAULT;
+
+--
+2.39.5
+
--- /dev/null
+From b21a7396c5120381d24c5a970ffc586b46b597db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 May 2025 11:08:13 +0530
+Subject: ALSA: hda: Add new pci id for AMD GPU display HD audio controller
+
+From: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+
+[ Upstream commit ab72bfce7647522e01a181e3600c3d14ff5c143e ]
+
+Add new pci id for AMD GPU display HD audio controller(device id- 0xab40).
+
+Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patch.msgid.link/20250529053838.2350071-1-Vijendar.Mukunda@amd.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_intel.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 1872c8b750537..d4e325b785332 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2727,6 +2727,9 @@ static const struct pci_device_id azx_ids[] = {
+ { PCI_VDEVICE(ATI, 0xab38),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
++ { PCI_VDEVICE(ATI, 0xab40),
++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
++ AZX_DCAPS_PM_RUNTIME },
+ /* GLENFLY */
+ { PCI_DEVICE(PCI_VENDOR_ID_GLENFLY, PCI_ANY_ID),
+ .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+--
+2.39.5
+
--- /dev/null
+From 354142cde35d87d2381de10d5b965a56ab8bdd80 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 May 2025 16:13:09 +0200
+Subject: ALSA: hda: Ignore unsol events for cards being shut down
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cezary Rojewski <cezary.rojewski@intel.com>
+
+[ Upstream commit 3f100f524e75586537e337b34d18c8d604b398e7 ]
+
+For the classic snd_hda_intel driver, codec->card and bus->card point to
+the exact same thing. When snd_card_disconnect() fires, bus->shutdown is
+set thanks to azx_dev_disconnect(). card->shutdown is already set when
+that happens but both provide basically the same functionality.
+
+For the DSP snd_soc_avs driver where multiple codecs are located on
+multiple cards, bus->shutdown 'shortcut' is not sufficient. One codec
+card may be unregistered while other codecs are still operational.
+Proper check in form of card->shutdown must be used to verify whether
+the codec's card is being shut down.
+
+Reviewed-by: Amadeusz Sławiński <amadeuszx.slawinski@linux.intel.com>
+Signed-off-by: Cezary Rojewski <cezary.rojewski@intel.com>
+Link: https://patch.msgid.link/20250530141309.2943404-1-cezary.rojewski@intel.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_bind.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 90633970b59f7..f8f1b1f6b1382 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -44,7 +44,7 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
+ struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+
+ /* ignore unsol events during shutdown */
+- if (codec->bus->shutdown)
++ if (codec->card->shutdown || codec->bus->shutdown)
+ return;
+
+ /* ignore unsol events during system suspend/resume */
+--
+2.39.5
+
--- /dev/null
+From 8d66de777bc56ecf4bfff3d3dd2eb5fc45412028 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 May 2025 12:26:56 -0500
+Subject: ALSA: usb-audio: Add a quirk for Lenovo Thinkpad Thunderbolt 3 dock
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 4919353c7789b8047e06a9b2b943f775a8f72883 ]
+
+The audio controller in the Lenovo Thinkpad Thunderbolt 3 dock doesn't
+support reading the sampling rate.
+
+Add a quirk for it.
+
+Suggested-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Link: https://patch.msgid.link/20250527172657.1972565-1-superm1@kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c7387081577cd..0da4ee9757c01 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2282,6 +2282,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DISABLE_AUTOSUSPEND),
+ DEVICE_FLG(0x17aa, 0x104d, /* Lenovo ThinkStation P620 Internal Speaker + Front Headset */
+ QUIRK_FLAG_DISABLE_AUTOSUSPEND),
++ DEVICE_FLG(0x17ef, 0x3083, /* Lenovo TBT3 dock */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1852, 0x5062, /* Luxman D-08u */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x1852, 0x5065, /* Luxman DA-06 */
+--
+2.39.5
+
--- /dev/null
+From ad9d11d80a1278e9c418698f12889d96fbf5fa76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 May 2025 18:06:28 +0800
+Subject: amd/amdkfd: fix a kfd_process ref leak
+
+From: Yifan Zhang <yifan1.zhang@amd.com>
+
+[ Upstream commit 90237b16ec1d7afa16e2173cc9a664377214cdd9 ]
+
+This patch is to fix a kfd_process ref leak.
+
+Signed-off-by: Yifan Zhang <yifan1.zhang@amd.com>
+Reviewed-by: Philip Yang <Philip.Yang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index ea37922492093..6798510c4a707 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1315,6 +1315,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
+ user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ if (unlikely(user_gpu_id == -EINVAL)) {
+ WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
++ kfd_unref_process(p);
+ return;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From c0e5f51954e2603a441fd3f0c649c9248823d615 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Mar 2025 19:51:29 +0800
+Subject: ASoC: codec: wcd9335: Convert to GPIO descriptors
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit d5099bc1b56417733f4cccf10c61ee74dadd5562 ]
+
+of_gpio.h is deprecated, update the driver to use GPIO descriptors.
+- Use dev_gpiod_get to get GPIO descriptor.
+- Use gpiod_set_value to configure output value.
+
+With legacy of_gpio API, the driver set gpio value 0 to assert reset,
+and 1 to deassert reset. And the reset-gpios use GPIO_ACTIVE_LOW flag in
+DTS, so set GPIOD_OUT_LOW when getting GPIO descriptors, and set value 1 means
+output low, set value 0 means output high with gpiod API.
+
+The in-tree DTS files have the right polarity set up already so we can
+expect this to "just work"
+
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Link: https://patch.msgid.link/20250324-wcd-gpiod-v2-3-773f67ce3b56@nxp.com
+Reviewed-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 9079db287fc3 ("ASoC: codecs: wcd9335: Fix missing free of regulator supplies")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wcd9335.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
+index 373a31ddccb2d..db6a1facf8a92 100644
+--- a/sound/soc/codecs/wcd9335.c
++++ b/sound/soc/codecs/wcd9335.c
+@@ -17,7 +17,7 @@
+ #include <sound/soc.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc-dapm.h>
+-#include <linux/of_gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <sound/tlv.h>
+@@ -329,7 +329,7 @@ struct wcd9335_codec {
+ int comp_enabled[COMPANDER_MAX];
+
+ int intr1;
+- int reset_gpio;
++ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY];
+
+ unsigned int rx_port_value[WCD9335_RX_MAX];
+@@ -4973,12 +4973,11 @@ static const struct regmap_irq_chip wcd9335_regmap_irq1_chip = {
+ static int wcd9335_parse_dt(struct wcd9335_codec *wcd)
+ {
+ struct device *dev = wcd->dev;
+- struct device_node *np = dev->of_node;
+ int ret;
+
+- wcd->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+- if (wcd->reset_gpio < 0)
+- return dev_err_probe(dev, wcd->reset_gpio, "Reset GPIO missing from DT\n");
++ wcd->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
++ if (IS_ERR(wcd->reset_gpio))
++ return dev_err_probe(dev, PTR_ERR(wcd->reset_gpio), "Reset GPIO missing from DT\n");
+
+ wcd->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(wcd->mclk))
+@@ -5021,9 +5020,9 @@ static int wcd9335_power_on_reset(struct wcd9335_codec *wcd)
+ */
+ usleep_range(600, 650);
+
+- gpio_direction_output(wcd->reset_gpio, 0);
++ gpiod_set_value(wcd->reset_gpio, 1);
+ msleep(20);
+- gpio_set_value(wcd->reset_gpio, 1);
++ gpiod_set_value(wcd->reset_gpio, 0);
+ msleep(20);
+
+ return 0;
+--
+2.39.5
+
--- /dev/null
+From eb121d8860da745c5a394d6564679e676c3ab80f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 May 2025 11:47:01 +0200
+Subject: ASoC: codecs: wcd9335: Fix missing free of regulator supplies
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit 9079db287fc3e38e040b0edeb0a25770bb679c8e ]
+
+Driver gets and enables all regulator supplies in probe path
+(wcd9335_parse_dt() and wcd9335_power_on_reset()), but does not cleanup
+in final error paths and in unbind (missing remove() callback). This
+leads to leaked memory and unbalanced regulator enable count during
+probe errors or unbind.
+
+Fix this by converting entire code into devm_regulator_bulk_get_enable()
+which also greatly simplifies the code.
+
+Fixes: 20aedafdf492 ("ASoC: wcd9335: add support to wcd9335 codec")
+Cc: stable@vger.kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://patch.msgid.link/20250526-b4-b4-asoc-wcd9395-vdd-px-fixes-v1-1-0b8a2993b7d3@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wcd9335.c | 25 +++++++------------------
+ 1 file changed, 7 insertions(+), 18 deletions(-)
+
+diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
+index db6a1facf8a92..1375ac571fbf3 100644
+--- a/sound/soc/codecs/wcd9335.c
++++ b/sound/soc/codecs/wcd9335.c
+@@ -330,7 +330,6 @@ struct wcd9335_codec {
+
+ int intr1;
+ struct gpio_desc *reset_gpio;
+- struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY];
+
+ unsigned int rx_port_value[WCD9335_RX_MAX];
+ unsigned int tx_port_value[WCD9335_TX_MAX];
+@@ -353,6 +352,10 @@ struct wcd9335_irq {
+ char *name;
+ };
+
++static const char * const wcd9335_supplies[] = {
++ "vdd-buck", "vdd-buck-sido", "vdd-tx", "vdd-rx", "vdd-io",
++};
++
+ static const struct wcd9335_slim_ch wcd9335_tx_chs[WCD9335_TX_MAX] = {
+ WCD9335_SLIM_TX_CH(0),
+ WCD9335_SLIM_TX_CH(1),
+@@ -4987,30 +4990,16 @@ static int wcd9335_parse_dt(struct wcd9335_codec *wcd)
+ if (IS_ERR(wcd->native_clk))
+ return dev_err_probe(dev, PTR_ERR(wcd->native_clk), "slimbus clock not found\n");
+
+- wcd->supplies[0].supply = "vdd-buck";
+- wcd->supplies[1].supply = "vdd-buck-sido";
+- wcd->supplies[2].supply = "vdd-tx";
+- wcd->supplies[3].supply = "vdd-rx";
+- wcd->supplies[4].supply = "vdd-io";
+-
+- ret = regulator_bulk_get(dev, WCD9335_MAX_SUPPLY, wcd->supplies);
++ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(wcd9335_supplies),
++ wcd9335_supplies);
+ if (ret)
+- return dev_err_probe(dev, ret, "Failed to get supplies\n");
++ return dev_err_probe(dev, ret, "Failed to get and enable supplies\n");
+
+ return 0;
+ }
+
+ static int wcd9335_power_on_reset(struct wcd9335_codec *wcd)
+ {
+- struct device *dev = wcd->dev;
+- int ret;
+-
+- ret = regulator_bulk_enable(WCD9335_MAX_SUPPLY, wcd->supplies);
+- if (ret) {
+- dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+- return ret;
+- }
+-
+ /*
+ * For WCD9335, it takes about 600us for the Vout_A and
+ * Vout_D to be ready after BUCK_SIDO is powered up.
+--
+2.39.5
+
--- /dev/null
+From f5c7066e2672a1c0a7a3edcc685f7be0addfcb30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Jun 2025 16:58:51 +0800
+Subject: ASoC: rt1320: fix speaker noise when volume bar is 100%
+
+From: Shuming Fan <shumingf@realtek.com>
+
+[ Upstream commit 9adf2de86611ac108d07e769a699556d87f052e2 ]
+
+This patch updates the settings to fix the speaker noise.
+
+Signed-off-by: Shuming Fan <shumingf@realtek.com>
+Link: https://patch.msgid.link/20250602085851.4081886-1-shumingf@realtek.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/rt1320-sdw.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c
+index f2d194e76a947..8755a63478d79 100644
+--- a/sound/soc/codecs/rt1320-sdw.c
++++ b/sound/soc/codecs/rt1320-sdw.c
+@@ -2085,7 +2085,7 @@ static const struct reg_sequence rt1320_vc_patch_code_write[] = {
+ { 0x3fc2bfc0, 0x03 },
+ { 0x0000d486, 0x43 },
+ { SDW_SDCA_CTL(FUNC_NUM_AMP, RT1320_SDCA_ENT_PDE23, RT1320_SDCA_CTL_REQ_POWER_STATE, 0), 0x00 },
+- { 0x1000db00, 0x04 },
++ { 0x1000db00, 0x07 },
+ { 0x1000db01, 0x00 },
+ { 0x1000db02, 0x11 },
+ { 0x1000db03, 0x00 },
+@@ -2106,6 +2106,21 @@ static const struct reg_sequence rt1320_vc_patch_code_write[] = {
+ { 0x1000db12, 0x00 },
+ { 0x1000db13, 0x00 },
+ { 0x1000db14, 0x45 },
++ { 0x1000db15, 0x0d },
++ { 0x1000db16, 0x01 },
++ { 0x1000db17, 0x00 },
++ { 0x1000db18, 0x00 },
++ { 0x1000db19, 0xbf },
++ { 0x1000db1a, 0x13 },
++ { 0x1000db1b, 0x09 },
++ { 0x1000db1c, 0x00 },
++ { 0x1000db1d, 0x00 },
++ { 0x1000db1e, 0x00 },
++ { 0x1000db1f, 0x12 },
++ { 0x1000db20, 0x09 },
++ { 0x1000db21, 0x00 },
++ { 0x1000db22, 0x00 },
++ { 0x1000db23, 0x00 },
+ { 0x0000d540, 0x01 },
+ { 0x0000c081, 0xfc },
+ { 0x0000f01e, 0x80 },
+--
+2.39.5
+
--- /dev/null
+From 69f7923570142a36bba6fab5d491d646daca68de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 May 2025 13:15:59 +0800
+Subject: bcache: fix NULL pointer in cache_set_flush()
+
+From: Linggang Zeng <linggang.zeng@easystack.cn>
+
+[ Upstream commit 1e46ed947ec658f89f1a910d880cd05e42d3763e ]
+
+1. LINE#1794 - LINE#1887 is some codes about function of
+ bch_cache_set_alloc().
+2. LINE#2078 - LINE#2142 is some codes about function of
+ register_cache_set().
+3. register_cache_set() will call bch_cache_set_alloc() in LINE#2098.
+
+ 1794 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ 1795 {
+ ...
+ 1860 if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
+ 1861 mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
+ 1862 mempool_init_kmalloc_pool(&c->bio_meta, 2,
+ 1863 sizeof(struct bbio) + sizeof(struct bio_vec) *
+ 1864 bucket_pages(c)) ||
+ 1865 mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
+ 1866 bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
+ 1867 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
+ 1868 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
+ 1869 !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
+ 1870 WQ_MEM_RECLAIM, 0)) ||
+ 1871 bch_journal_alloc(c) ||
+ 1872 bch_btree_cache_alloc(c) ||
+ 1873 bch_open_buckets_alloc(c) ||
+ 1874 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
+ 1875 goto err;
+ ^^^^^^^^
+ 1876
+ ...
+ 1883 return c;
+ 1884 err:
+ 1885 bch_cache_set_unregister(c);
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ 1886 return NULL;
+ 1887 }
+ ...
+ 2078 static const char *register_cache_set(struct cache *ca)
+ 2079 {
+ ...
+ 2098 c = bch_cache_set_alloc(&ca->sb);
+ 2099 if (!c)
+ 2100 return err;
+ ^^^^^^^^^^
+ ...
+ 2128 ca->set = c;
+ 2129 ca->set->cache[ca->sb.nr_this_dev] = ca;
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ...
+ 2138 return NULL;
+ 2139 err:
+ 2140 bch_cache_set_unregister(c);
+ 2141 return err;
+ 2142 }
+
+(1) If LINE#1860 - LINE#1874 is true, then do 'goto err'(LINE#1875) and
+ call bch_cache_set_unregister()(LINE#1885).
+(2) As (1) return NULL(LINE#1886), LINE#2098 - LINE#2100 would return.
+(3) As (2) has returned, LINE#2128 - LINE#2129 would *not* assign the
+ value to c->cache[], which means that c->cache[] is NULL.
+
+LINE#1624 - LINE#1665 is some codes about function of cache_set_flush().
+As (1), in LINE#1885 call
+bch_cache_set_unregister()
+---> bch_cache_set_stop()
+ ---> closure_queue()
+ -.-> cache_set_flush() (as below LINE#1624)
+
+ 1624 static void cache_set_flush(struct closure *cl)
+ 1625 {
+ ...
+ 1654 for_each_cache(ca, c, i)
+ 1655 if (ca->alloc_thread)
+ ^^
+ 1656 kthread_stop(ca->alloc_thread);
+ ...
+ 1665 }
+
+(4) In LINE#1655, ca is NULL (see (3)) in cache_set_flush(), so the
+ kernel crash occurred as below:
+[ 846.712887] bcache: register_cache() error drbd6: cannot allocate memory
+[ 846.713242] bcache: register_bcache() error : failed to register device
+[ 846.713336] bcache: cache_set_free() Cache set 2f84bdc1-498a-4f2f-98a7-01946bf54287 unregistered
+[ 846.713768] BUG: unable to handle kernel NULL pointer dereference at 00000000000009f8
+[ 846.714790] PGD 0 P4D 0
+[ 846.715129] Oops: 0000 [#1] SMP PTI
+[ 846.715472] CPU: 19 PID: 5057 Comm: kworker/19:16 Kdump: loaded Tainted: G OE --------- - - 4.18.0-147.5.1.el8_1.5es.3.x86_64 #1
+[ 846.716082] Hardware name: ESPAN GI-25212/X11DPL-i, BIOS 2.1 06/15/2018
+[ 846.716451] Workqueue: events cache_set_flush [bcache]
+[ 846.716808] RIP: 0010:cache_set_flush+0xc9/0x1b0 [bcache]
+[ 846.717155] Code: 00 4c 89 a5 b0 03 00 00 48 8b 85 68 f6 ff ff a8 08 0f 84 88 00 00 00 31 db 66 83 bd 3c f7 ff ff 00 48 8b 85 48 ff ff ff 74 28 <48> 8b b8 f8 09 00 00 48 85 ff 74 05 e8 b6 58 a2 e1 0f b7 95 3c f7
+[ 846.718026] RSP: 0018:ffffb56dcf85fe70 EFLAGS: 00010202
+[ 846.718372] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+[ 846.718725] RDX: 0000000000000001 RSI: 0000000040000001 RDI: 0000000000000000
+[ 846.719076] RBP: ffffa0ccc0f20df8 R08: ffffa0ce1fedb118 R09: 000073746e657665
+[ 846.719428] R10: 8080808080808080 R11: 0000000000000000 R12: ffffa0ce1fee8700
+[ 846.719779] R13: ffffa0ccc0f211a8 R14: ffffa0cd1b902840 R15: ffffa0ccc0f20e00
+[ 846.720132] FS: 0000000000000000(0000) GS:ffffa0ce1fec0000(0000) knlGS:0000000000000000
+[ 846.720726] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 846.721073] CR2: 00000000000009f8 CR3: 00000008ba00a005 CR4: 00000000007606e0
+[ 846.721426] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 846.721778] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 846.722131] PKRU: 55555554
+[ 846.722467] Call Trace:
+[ 846.722814] process_one_work+0x1a7/0x3b0
+[ 846.723157] worker_thread+0x30/0x390
+[ 846.723501] ? create_worker+0x1a0/0x1a0
+[ 846.723844] kthread+0x112/0x130
+[ 846.724184] ? kthread_flush_work_fn+0x10/0x10
+[ 846.724535] ret_from_fork+0x35/0x40
+
+Now, check whether that ca is NULL in LINE#1655 to fix the issue.
+
+Signed-off-by: Linggang Zeng <linggang.zeng@easystack.cn>
+Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
+Signed-off-by: Coly Li <colyli@kernel.org>
+Link: https://lore.kernel.org/r/20250527051601.74407-2-colyli@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/bcache/super.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index e42f1400cea9d..f5171167819b5 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1733,7 +1733,12 @@ static CLOSURE_CALLBACK(cache_set_flush)
+ mutex_unlock(&b->write_lock);
+ }
+
+- if (ca->alloc_thread)
++ /*
++ * If the register_cache_set() call to bch_cache_set_alloc() failed,
++ * ca has not been assigned a value and return error.
++ * So we need check ca is not NULL during bch_cache_set_unregister().
++ */
++ if (ca && ca->alloc_thread)
+ kthread_stop(ca->alloc_thread);
+
+ if (c->journal.cur) {
+--
+2.39.5
+
--- /dev/null
+From 01a3eb5bbf128564ec487250dd0aba1de4c651fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 13:53:41 +1030
+Subject: btrfs: factor out nocow ordered extent and extent map generation into
+ a helper
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit 10326fdcb3ace2f2dcbc8b9fc50b87e5cab93345 ]
+
+Currently we're doing all the ordered extent and extent map generation
+inside a while() loop of run_delalloc_nocow(). This makes it pretty
+hard to read, nor doing proper error handling.
+
+So move that part of code into a helper, nocow_one_range().
+
+This should not change anything, but there is a tiny timing change where
+btrfs_dec_nocow_writers() is only called after nocow_one_range() helper
+exits.
+
+This timing change is small, and makes error handling easier, thus
+should be fine.
+
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 1f2889f5594a ("btrfs: fix qgroup reservation leak on failure to allocate ordered extent")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 122 +++++++++++++++++++++++------------------------
+ 1 file changed, 61 insertions(+), 61 deletions(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1ab5b0c1b9b76..65517efd3433e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2055,6 +2055,63 @@ static void cleanup_dirty_folios(struct btrfs_inode *inode,
+ mapping_set_error(mapping, error);
+ }
+
++static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
++ struct extent_state **cached,
++ struct can_nocow_file_extent_args *nocow_args,
++ u64 file_pos, bool is_prealloc)
++{
++ struct btrfs_ordered_extent *ordered;
++ u64 len = nocow_args->file_extent.num_bytes;
++ u64 end = file_pos + len - 1;
++ int ret = 0;
++
++ lock_extent(&inode->io_tree, file_pos, end, cached);
++
++ if (is_prealloc) {
++ struct extent_map *em;
++
++ em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
++ BTRFS_ORDERED_PREALLOC);
++ if (IS_ERR(em)) {
++ unlock_extent(&inode->io_tree, file_pos, end, cached);
++ return PTR_ERR(em);
++ }
++ free_extent_map(em);
++ }
++
++ ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
++ is_prealloc
++ ? (1 << BTRFS_ORDERED_PREALLOC)
++ : (1 << BTRFS_ORDERED_NOCOW));
++ if (IS_ERR(ordered)) {
++ if (is_prealloc)
++ btrfs_drop_extent_map_range(inode, file_pos, end, false);
++ unlock_extent(&inode->io_tree, file_pos, end, cached);
++ return PTR_ERR(ordered);
++ }
++
++ if (btrfs_is_data_reloc_root(inode->root))
++ /*
++ * Errors are handled later, as we must prevent
++ * extent_clear_unlock_delalloc() in error handler from freeing
++ * metadata of the created ordered extent.
++ */
++ ret = btrfs_reloc_clone_csums(ordered);
++ btrfs_put_ordered_extent(ordered);
++
++ extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
++ EXTENT_LOCKED | EXTENT_DELALLOC |
++ EXTENT_CLEAR_DATA_RESV,
++ PAGE_UNLOCK | PAGE_SET_ORDERED);
++
++ /*
++ * btrfs_reloc_clone_csums() error, now we're OK to call error handler,
++ * as metadata for created ordered extent will only be freed by
++ * btrfs_finish_ordered_io().
++ */
++ return ret;
++}
++
+ /*
+ * when nowcow writeback call back. This checks for snapshots or COW copies
+ * of the extents that exist in the file, and COWs the file as required.
+@@ -2099,15 +2156,12 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+
+ while (cur_offset <= end) {
+ struct btrfs_block_group *nocow_bg = NULL;
+- struct btrfs_ordered_extent *ordered;
+ struct btrfs_key found_key;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf;
+ struct extent_state *cached_state = NULL;
+ u64 extent_end;
+- u64 nocow_end;
+ int extent_type;
+- bool is_prealloc;
+
+ ret = btrfs_lookup_file_extent(NULL, root, path, ino,
+ cur_offset, 0);
+@@ -2242,67 +2296,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ }
+ }
+
+- nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
+- lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
+-
+- is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
+- if (is_prealloc) {
+- struct extent_map *em;
+-
+- em = btrfs_create_io_em(inode, cur_offset,
+- &nocow_args.file_extent,
+- BTRFS_ORDERED_PREALLOC);
+- if (IS_ERR(em)) {
+- unlock_extent(&inode->io_tree, cur_offset,
+- nocow_end, &cached_state);
+- btrfs_dec_nocow_writers(nocow_bg);
+- ret = PTR_ERR(em);
+- goto error;
+- }
+- free_extent_map(em);
+- }
+-
+- ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
+- &nocow_args.file_extent,
+- is_prealloc
+- ? (1 << BTRFS_ORDERED_PREALLOC)
+- : (1 << BTRFS_ORDERED_NOCOW));
++ ret = nocow_one_range(inode, locked_folio, &cached_state,
++ &nocow_args, cur_offset,
++ extent_type == BTRFS_FILE_EXTENT_PREALLOC);
+ btrfs_dec_nocow_writers(nocow_bg);
+- if (IS_ERR(ordered)) {
+- if (is_prealloc) {
+- btrfs_drop_extent_map_range(inode, cur_offset,
+- nocow_end, false);
+- }
+- unlock_extent(&inode->io_tree, cur_offset,
+- nocow_end, &cached_state);
+- ret = PTR_ERR(ordered);
++ if (ret < 0)
+ goto error;
+- }
+-
+- if (btrfs_is_data_reloc_root(root))
+- /*
+- * Error handled later, as we must prevent
+- * extent_clear_unlock_delalloc() in error handler
+- * from freeing metadata of created ordered extent.
+- */
+- ret = btrfs_reloc_clone_csums(ordered);
+- btrfs_put_ordered_extent(ordered);
+-
+- extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
+- locked_folio, &cached_state,
+- EXTENT_LOCKED | EXTENT_DELALLOC |
+- EXTENT_CLEAR_DATA_RESV,
+- PAGE_UNLOCK | PAGE_SET_ORDERED);
+-
+ cur_offset = extent_end;
+-
+- /*
+- * btrfs_reloc_clone_csums() error, now we're OK to call error
+- * handler, as metadata for created ordered extent will only
+- * be freed by btrfs_finish_ordered_io().
+- */
+- if (ret)
+- goto error;
+ }
+ btrfs_release_path(path);
+
+--
+2.39.5
+
--- /dev/null
+From 3687c495dba70c8dba8e75b876647783befa9880 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 13:05:36 +0100
+Subject: btrfs: fix qgroup reservation leak on failure to allocate ordered
+ extent
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 1f2889f5594a2bc4c6a52634c4a51b93e785def5 ]
+
+If we fail to allocate an ordered extent for a COW write we end up leaking
+a qgroup data reservation since we called btrfs_qgroup_release_data() but
+we didn't call btrfs_qgroup_free_refroot() (which would happen when
+running the respective data delayed ref created by ordered extent
+completion or when finishing the ordered extent in case an error happened).
+
+So make sure we call btrfs_qgroup_free_refroot() if we fail to allocate an
+ordered extent for a COW write.
+
+Fixes: 7dbeaad0af7d ("btrfs: change timing for qgroup reserved space for ordered extents to fix reserved space leak")
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Boris Burkov <boris@bur.io>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ordered-data.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 7baee52cac184..880f9553d79d3 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -153,9 +153,10 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ struct btrfs_ordered_extent *entry;
+ int ret;
+ u64 qgroup_rsv = 0;
++ const bool is_nocow = (flags &
++ ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)));
+
+- if (flags &
+- ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC))) {
++ if (is_nocow) {
+ /* For nocow write, we can release the qgroup rsv right now */
+ ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
+ if (ret < 0)
+@@ -170,8 +171,13 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ return ERR_PTR(ret);
+ }
+ entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
+- if (!entry)
++ if (!entry) {
++ if (!is_nocow)
++ btrfs_qgroup_free_refroot(inode->root->fs_info,
++ btrfs_root_id(inode->root),
++ qgroup_rsv, BTRFS_QGROUP_RSV_DATA);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ entry->file_offset = file_offset;
+ entry->num_bytes = num_bytes;
+--
+2.39.5
+
--- /dev/null
+From 963e30dc0214a2d7c007ffb36428cc8a54b835c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Jun 2025 09:18:43 +0930
+Subject: btrfs: handle csum tree error with rescue=ibadroots correctly
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit 547e836661554dcfa15c212a3821664e85b4191a ]
+
+[BUG]
+There is syzbot based reproducer that can crash the kernel, with the
+following call trace: (With some debug output added)
+
+ DEBUG: rescue=ibadroots parsed
+ BTRFS: device fsid 14d642db-7b15-43e4-81e6-4b8fac6a25f8 devid 1 transid 8 /dev/loop0 (7:0) scanned by repro (1010)
+ BTRFS info (device loop0): first mount of filesystem 14d642db-7b15-43e4-81e6-4b8fac6a25f8
+ BTRFS info (device loop0): using blake2b (blake2b-256-generic) checksum algorithm
+ BTRFS info (device loop0): using free-space-tree
+ BTRFS warning (device loop0): checksum verify failed on logical 5312512 mirror 1 wanted 0xb043382657aede36608fd3386d6b001692ff406164733d94e2d9a180412c6003 found 0x810ceb2bacb7f0f9eb2bf3b2b15c02af867cb35ad450898169f3b1f0bd818651 level 0
+ DEBUG: read tree root path failed for tree csum, ret=-5
+ BTRFS warning (device loop0): checksum verify failed on logical 5328896 mirror 1 wanted 0x51be4e8b303da58e6340226815b70e3a93592dac3f30dd510c7517454de8567a found 0x51be4e8b303da58e634022a315b70e3a93592dac3f30dd510c7517454de8567a level 0
+ BTRFS warning (device loop0): checksum verify failed on logical 5292032 mirror 1 wanted 0x1924ccd683be9efc2fa98582ef58760e3848e9043db8649ee382681e220cdee4 found 0x0cb6184f6e8799d9f8cb335dccd1d1832da1071d12290dab3b85b587ecacca6e level 0
+ process 'repro' launched './file2' with NULL argv: empty string added
+ DEBUG: no csum root, idatacsums=0 ibadroots=134217728
+ Oops: general protection fault, probably for non-canonical address 0xdffffc0000000041: 0000 [#1] SMP KASAN NOPTI
+ KASAN: null-ptr-deref in range [0x0000000000000208-0x000000000000020f]
+ CPU: 5 UID: 0 PID: 1010 Comm: repro Tainted: G OE 6.15.0-custom+ #249 PREEMPT(full)
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS unknown 02/02/2022
+ RIP: 0010:btrfs_lookup_csum+0x93/0x3d0 [btrfs]
+ Call Trace:
+ <TASK>
+ btrfs_lookup_bio_sums+0x47a/0xdf0 [btrfs]
+ btrfs_submit_bbio+0x43e/0x1a80 [btrfs]
+ submit_one_bio+0xde/0x160 [btrfs]
+ btrfs_readahead+0x498/0x6a0 [btrfs]
+ read_pages+0x1c3/0xb20
+ page_cache_ra_order+0x4b5/0xc20
+ filemap_get_pages+0x2d3/0x19e0
+ filemap_read+0x314/0xde0
+ __kernel_read+0x35b/0x900
+ bprm_execve+0x62e/0x1140
+ do_execveat_common.isra.0+0x3fc/0x520
+ __x64_sys_execveat+0xdc/0x130
+ do_syscall_64+0x54/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ ---[ end trace 0000000000000000 ]---
+
+[CAUSE]
+Firstly the fs has a corrupted csum tree root, thus to mount the fs we
+have to go "ro,rescue=ibadroots" mount option.
+
+Normally with that mount option, a bad csum tree root should set
+BTRFS_FS_STATE_NO_DATA_CSUMS flag, so that any future data read will
+ignore csum search.
+
+But in this particular case, we have the following call trace that
+caused NULL csum root, but not setting BTRFS_FS_STATE_NO_DATA_CSUMS:
+
+load_global_roots_objectid():
+
+ ret = btrfs_search_slot();
+ /* Succeeded */
+ btrfs_item_key_to_cpu()
+ found = true;
+ /* We found the root item for csum tree. */
+ root = read_tree_root_path();
+ if (IS_ERR(root)) {
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
+ /*
+ * Since we have rescue=ibadroots mount option,
+ * @ret is still 0.
+ */
+ break;
+ if (!found || ret) {
+ /* @found is true, @ret is 0, error handling for csum
+ * tree is skipped.
+ */
+ }
+
+This means we completely skipped setting BTRFS_FS_STATE_NO_DATA_CSUMS if
+the csum tree is corrupted, which results in unexpected csum lookups later.
+
+[FIX]
+If read_tree_root_path() failed, always populate @ret to the error
+number.
+
+As at the end of the function, we need @ret to determine if we need to
+do the extra error handling for csum tree.
+
+Fixes: abed4aaae4f7 ("btrfs: track the csum, extent, and free space trees in a rb tree")
+Reported-by: Zhiyu Zhang <zhiyuzhang999@gmail.com>
+Reported-by: Longxing Li <coregee2000@gmail.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 147c50ef912ac..96282bf28b19c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2168,8 +2168,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root,
+ found = true;
+ root = read_tree_root_path(tree_root, path, &key);
+ if (IS_ERR(root)) {
+- if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
+- ret = PTR_ERR(root);
++ ret = PTR_ERR(root);
+ break;
+ }
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+--
+2.39.5
+
--- /dev/null
+From f8e9b8675b75f8d62d7e941ac576a82069ac55a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 17:55:41 +0200
+Subject: btrfs: use unsigned types for constants defined as bit shifts
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit 05a6ec865d091fe8244657df8063f74e704d1711 ]
+
+The unsigned type is a recommended practice (CWE-190, CWE-194) for bit
+shifts to avoid problems with potential unwanted sign extensions.
+Although there are no such cases in btrfs codebase, follow the
+recommendation.
+
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 1f2889f5594a ("btrfs: fix qgroup reservation leak on failure to allocate ordered extent")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/backref.h | 4 ++--
+ fs/btrfs/direct-io.c | 4 ++--
+ fs/btrfs/extent_io.h | 2 +-
+ fs/btrfs/inode.c | 12 ++++++------
+ fs/btrfs/ordered-data.c | 4 ++--
+ fs/btrfs/raid56.c | 5 ++---
+ fs/btrfs/tests/extent-io-tests.c | 6 +++---
+ fs/btrfs/zstd.c | 2 +-
+ 8 files changed, 19 insertions(+), 20 deletions(-)
+
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index e8c22cccb5c13..7dfcc9351bce5 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -427,8 +427,8 @@ struct btrfs_backref_node *btrfs_backref_alloc_node(
+ struct btrfs_backref_edge *btrfs_backref_alloc_edge(
+ struct btrfs_backref_cache *cache);
+
+-#define LINK_LOWER (1 << 0)
+-#define LINK_UPPER (1 << 1)
++#define LINK_LOWER (1U << 0)
++#define LINK_UPPER (1U << 1)
+
+ void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
+ struct btrfs_backref_node *lower,
+diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
+index bd38df5647e35..71984d7db839b 100644
+--- a/fs/btrfs/direct-io.c
++++ b/fs/btrfs/direct-io.c
+@@ -151,8 +151,8 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
+ }
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, file_extent,
+- (1 << type) |
+- (1 << BTRFS_ORDERED_DIRECT));
++ (1U << type) |
++ (1U << BTRFS_ORDERED_DIRECT));
+ if (IS_ERR(ordered)) {
+ if (em) {
+ free_extent_map(em);
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index fcb60837d7dc6..039a73731135a 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -79,7 +79,7 @@ enum {
+ * single word in a bitmap may straddle two pages in the extent buffer.
+ */
+ #define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
+-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
++#define BYTE_MASK ((1U << BITS_PER_BYTE) - 1)
+ #define BITMAP_FIRST_BYTE_MASK(start) \
+ ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
+ #define BITMAP_LAST_BYTE_MASK(nbits) \
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 65517efd3433e..94664c1822930 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1249,7 +1249,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ free_extent_map(em);
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+- 1 << BTRFS_ORDERED_COMPRESSED);
++ 1U << BTRFS_ORDERED_COMPRESSED);
+ if (IS_ERR(ordered)) {
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = PTR_ERR(ordered);
+@@ -1484,7 +1484,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ free_extent_map(em);
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+- 1 << BTRFS_ORDERED_REGULAR);
++ 1U << BTRFS_ORDERED_REGULAR);
+ if (IS_ERR(ordered)) {
+ unlock_extent(&inode->io_tree, start,
+ start + ram_size - 1, &cached);
+@@ -2081,8 +2081,8 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
+
+ ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
+ is_prealloc
+- ? (1 << BTRFS_ORDERED_PREALLOC)
+- : (1 << BTRFS_ORDERED_NOCOW));
++ ? (1U << BTRFS_ORDERED_PREALLOC)
++ : (1U << BTRFS_ORDERED_NOCOW));
+ if (IS_ERR(ordered)) {
+ if (is_prealloc)
+ btrfs_drop_extent_map_range(inode, file_pos, end, false);
+@@ -9683,8 +9683,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ free_extent_map(em);
+
+ ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
+- (1 << BTRFS_ORDERED_ENCODED) |
+- (1 << BTRFS_ORDERED_COMPRESSED));
++ (1U << BTRFS_ORDERED_ENCODED) |
++ (1U << BTRFS_ORDERED_COMPRESSED));
+ if (IS_ERR(ordered)) {
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = PTR_ERR(ordered);
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 4ed11b089ea95..7baee52cac184 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -155,7 +155,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ u64 qgroup_rsv = 0;
+
+ if (flags &
+- ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
++ ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC))) {
+ /* For nocow write, we can release the qgroup rsv right now */
+ ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
+ if (ret < 0)
+@@ -253,7 +253,7 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
+ * @disk_bytenr: Offset of extent on disk.
+ * @disk_num_bytes: Size of extent on disk.
+ * @offset: Offset into unencoded data where file data starts.
+- * @flags: Flags specifying type of extent (1 << BTRFS_ORDERED_*).
++ * @flags: Flags specifying type of extent (1U << BTRFS_ORDERED_*).
+ * @compress_type: Compression algorithm used for data.
+ *
+ * Most of these parameters correspond to &struct btrfs_file_extent_item. The
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 39bec672df0cc..8afadf994b8c8 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -200,8 +200,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+ struct btrfs_stripe_hash_table *x;
+ struct btrfs_stripe_hash *cur;
+ struct btrfs_stripe_hash *h;
+- int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
+- int i;
++ unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS;
+
+ if (info->stripe_hash_table)
+ return 0;
+@@ -222,7 +221,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+
+ h = table->table;
+
+- for (i = 0; i < num_entries; i++) {
++ for (unsigned int i = 0; i < num_entries; i++) {
+ cur = h + i;
+ INIT_LIST_HEAD(&cur->hash_list);
+ spin_lock_init(&cur->lock);
+diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
+index 0a2dbfaaf49e2..de226209220fe 100644
+--- a/fs/btrfs/tests/extent-io-tests.c
++++ b/fs/btrfs/tests/extent-io-tests.c
+@@ -14,9 +14,9 @@
+ #include "../disk-io.h"
+ #include "../btrfs_inode.h"
+
+-#define PROCESS_UNLOCK (1 << 0)
+-#define PROCESS_RELEASE (1 << 1)
+-#define PROCESS_TEST_LOCKED (1 << 2)
++#define PROCESS_UNLOCK (1U << 0)
++#define PROCESS_RELEASE (1U << 1)
++#define PROCESS_TEST_LOCKED (1U << 2)
+
+ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
+ unsigned long flags)
+diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
+index 866607fd3e588..c9ea37fabf659 100644
+--- a/fs/btrfs/zstd.c
++++ b/fs/btrfs/zstd.c
+@@ -24,7 +24,7 @@
+ #include "super.h"
+
+ #define ZSTD_BTRFS_MAX_WINDOWLOG 17
+-#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
++#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
+ #define ZSTD_BTRFS_DEFAULT_LEVEL 3
+ #define ZSTD_BTRFS_MAX_LEVEL 15
+ /* 307s to avoid pathologically clashing with transaction commit */
+--
+2.39.5
+
--- /dev/null
+From 1f725ce532d47eabb6be888ffac5a866b226b36e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 12:32:04 +0300
+Subject: ceph: fix possible integer overflow in ceph_zero_objects()
+
+From: Dmitry Kandybka <d.kandybka@gmail.com>
+
+[ Upstream commit 0abd87942e0c93964e93224836944712feba1d91 ]
+
+In 'ceph_zero_objects', promote 'object_size' to 'u64' to avoid possible
+integer overflow.
+
+Compile tested only.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Signed-off-by: Dmitry Kandybka <d.kandybka@gmail.com>
+Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 851d70200c6b8..a7254cab44cc2 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2616,7 +2616,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
+ s32 stripe_unit = ci->i_layout.stripe_unit;
+ s32 stripe_count = ci->i_layout.stripe_count;
+ s32 object_size = ci->i_layout.object_size;
+- u64 object_set_size = object_size * stripe_count;
++ u64 object_set_size = (u64) object_size * stripe_count;
+ u64 nearly, t;
+
+ /* round offset up to next period boundary */
+--
+2.39.5
+
--- /dev/null
+From 0571f3a3b295107413ccb056b53cd584a3a421c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 Nov 2024 17:58:31 +0100
+Subject: cifs: Correctly set SMB1 SessionKey field in Session Setup Request
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+[ Upstream commit 89381c72d52094988e11d23ef24a00066a0fa458 ]
+
+[MS-CIFS] specification in section 2.2.4.53.1 where is described
+SMB_COM_SESSION_SETUP_ANDX Request, for SessionKey field says:
+
+ The client MUST set this field to be equal to the SessionKey field in
+ the SMB_COM_NEGOTIATE Response for this SMB connection.
+
+Linux SMB client currently set this field to zero. This is working fine
+against Windows NT SMB servers thanks to [MS-CIFS] product behavior <94>:
+
+ Windows NT Server ignores the client's SessionKey.
+
+For compatibility with [MS-CIFS], set this SessionKey field in Session
+Setup Request to value retrieved from Negotiate response.
+
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsglob.h | 1 +
+ fs/smb/client/cifspdu.h | 6 +++---
+ fs/smb/client/cifssmb.c | 1 +
+ fs/smb/client/sess.c | 1 +
+ 4 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index e0faee22be07e..d573740e54a1a 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -739,6 +739,7 @@ struct TCP_Server_Info {
+ char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+ __u32 sequence_number; /* for signing, protected by srv_mutex */
+ __u32 reconnect_instance; /* incremented on each reconnect */
++ __le32 session_key_id; /* retrieved from negotiate response and send in session setup request */
+ struct session_key session_key;
+ unsigned long lstrp; /* when we got last response from this server */
+ struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index 28f8ca470770d..688a26aeef3b4 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -557,7 +557,7 @@ typedef union smb_com_session_setup_andx {
+ __le16 MaxBufferSize;
+ __le16 MaxMpxCount;
+ __le16 VcNumber;
+- __u32 SessionKey;
++ __le32 SessionKey;
+ __le16 SecurityBlobLength;
+ __u32 Reserved;
+ __le32 Capabilities; /* see below */
+@@ -576,7 +576,7 @@ typedef union smb_com_session_setup_andx {
+ __le16 MaxBufferSize;
+ __le16 MaxMpxCount;
+ __le16 VcNumber;
+- __u32 SessionKey;
++ __le32 SessionKey;
+ __le16 CaseInsensitivePasswordLength; /* ASCII password len */
+ __le16 CaseSensitivePasswordLength; /* Unicode password length*/
+ __u32 Reserved; /* see below */
+@@ -614,7 +614,7 @@ typedef union smb_com_session_setup_andx {
+ __le16 MaxBufferSize;
+ __le16 MaxMpxCount;
+ __le16 VcNumber;
+- __u32 SessionKey;
++ __le32 SessionKey;
+ __le16 PasswordLength;
+ __u32 Reserved; /* encrypt key len and offset */
+ __le16 ByteCount;
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index cf8d9de2298fc..d6ba55d4720d2 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -481,6 +481,7 @@ CIFSSMBNegotiate(const unsigned int xid,
+ server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
+ cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
+ server->capabilities = le32_to_cpu(pSMBr->Capabilities);
++ server->session_key_id = pSMBr->SessionKey;
+ server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
+ server->timeAdj *= 60;
+
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 10d82d0dc6a9e..830516a9e03b0 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -658,6 +658,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
+ USHRT_MAX));
+ pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
+ pSMB->req.VcNumber = cpu_to_le16(1);
++ pSMB->req.SessionKey = server->session_key_id;
+
+ /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+
+--
+2.39.5
+
--- /dev/null
+From b6ca56eee9b84b60524af4831b4f8402023d7363 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Dec 2024 16:06:22 +0100
+Subject: cifs: Fix cifs_query_path_info() for Windows NT servers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+[ Upstream commit a3e771afbb3bce91c8296828304903e7348003fe ]
+
+For TRANS2 QUERY_PATH_INFO request when the path does not exist, the
+Windows NT SMB server returns error response STATUS_OBJECT_NAME_NOT_FOUND
+or ERRDOS/ERRbadfile without the SMBFLG_RESPONSE flag set. Similarly it
+returns STATUS_DELETE_PENDING when the file is being deleted. And looks
+like that any error response from TRANS2 QUERY_PATH_INFO does not have
+SMBFLG_RESPONSE flag set.
+
+So relax check in check_smb_hdr() for detecting if the packet is response
+for this special case.
+
+This change fixes stat() operation against Windows NT SMB servers and also
+all operations which depends on -ENOENT result from stat like creat() or
+mkdir().
+
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/misc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 4373dd64b66d4..5122f3895dfc2 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -323,6 +323,14 @@ check_smb_hdr(struct smb_hdr *smb)
+ if (smb->Command == SMB_COM_LOCKING_ANDX)
+ return 0;
+
++ /*
++ * Windows NT server returns error response (e.g. STATUS_DELETE_PENDING
++ * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other)
++ * for some TRANS2 requests without the RESPONSE flag set in header.
++ */
++ if (smb->Command == SMB_COM_TRANSACTION2 && smb->Status.CifsError != 0)
++ return 0;
++
+ cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
+ get_mid(smb));
+ return 1;
+--
+2.39.5
+
--- /dev/null
+From e6bda7f1a385b55084290059d42c1f1049c14d3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Oct 2024 19:24:29 +0200
+Subject: cifs: Fix encoding of SMB1 Session Setup NTLMSSP Request in
+ non-UNICODE mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+[ Upstream commit 6510ef4230b68c960309e0c1d6eb3e32eb785142 ]
+
+SMB1 Session Setup NTLMSSP Request in non-UNICODE mode is similar to
+UNICODE mode, just strings are encoded in ASCII and not in UTF-16.
+
+With this change it is possible to setup SMB1 session with NTLM
+authentication in non-UNICODE mode with Windows SMB server.
+
+This change fixes mounting SMB1 servers with -o nounicode mount option
+together with -o sec=ntlmssp mount option (which is the default sec=).
+
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/sess.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 830516a9e03b0..8be7c4d2d9d62 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -1715,22 +1715,22 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
+ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+
+ capabilities = cifs_ssetup_hdr(ses, server, pSMB);
+- if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
+- cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
+- return -ENOSYS;
+- }
+-
+ pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ capabilities |= CAP_EXTENDED_SECURITY;
+ pSMB->req.Capabilities |= cpu_to_le32(capabilities);
+
+ bcc_ptr = sess_data->iov[2].iov_base;
+- /* unicode strings must be word aligned */
+- if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+- *bcc_ptr = 0;
+- bcc_ptr++;
++
++ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
++ /* unicode strings must be word aligned */
++ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
++ *bcc_ptr = 0;
++ bcc_ptr++;
++ }
++ unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
++ } else {
++ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ }
+- unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+
+ sess_data->iov[2].iov_len = (long) bcc_ptr -
+ (long) sess_data->iov[2].iov_base;
+--
+2.39.5
+
--- /dev/null
+From 7395b52ffa067bf52c84a51fb93a8c8cd9083149 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Mar 2025 11:58:47 +0000
+Subject: coresight: Only check bottom two claim bits
+
+From: James Clark <james.clark@linaro.org>
+
+[ Upstream commit a4e65842e1142aa18ef36113fbd81d614eaefe5a ]
+
+The use of the whole register and == could break the claim mechanism if
+any of the other bits are used in the future. The referenced doc "PSCI -
+ARM DEN 0022D" also says to only read and clear the bottom two bits.
+
+Use FIELD_GET() to extract only the relevant part.
+
+Reviewed-by: Leo Yan <leo.yan@arm.com>
+Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
+Signed-off-by: James Clark <james.clark@linaro.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/20250325-james-coresight-claim-tags-v4-2-dfbd3822b2e5@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwtracing/coresight/coresight-core.c | 3 ++-
+ drivers/hwtracing/coresight/coresight-priv.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index c7e35a431ab00..b7941d8abbfe7 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -97,7 +97,8 @@ coresight_find_out_connection(struct coresight_device *src_dev,
+
+ static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
+ {
+- return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
++ return FIELD_GET(CORESIGHT_CLAIM_MASK,
++ csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR));
+ }
+
+ static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
+diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
+index 05f891ca6b5c9..cc7ff1e36ef42 100644
+--- a/drivers/hwtracing/coresight/coresight-priv.h
++++ b/drivers/hwtracing/coresight/coresight-priv.h
+@@ -35,6 +35,7 @@ extern const struct device_type coresight_dev_type[];
+ * Coresight device CLAIM protocol.
+ * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
+ */
++#define CORESIGHT_CLAIM_MASK GENMASK(1, 0)
+ #define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
+
+ #define TIMEOUT_US 100
+--
+2.39.5
+
--- /dev/null
+From e8430c539b0cc960854278ee8a5d1569daf0ba98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 May 2025 17:06:58 +0200
+Subject: cxl/region: Add a dev_err() on missing target list entries
+
+From: Robert Richter <rrichter@amd.com>
+
+[ Upstream commit d90acdf49e18029cfe4194475c45ef143657737a ]
+
+Broken target lists are hard to discover as the driver fails at a
+later initialization stage. Add an error message for this.
+
+Example log messages:
+
+ cxl_mem mem1: failed to find endpoint6:0000:e0:01.3 in target list of decoder1.1
+ cxl_port endpoint6: failed to register decoder6.0: -6
+ cxl_port endpoint6: probe: 0
+
+Signed-off-by: Robert Richter <rrichter@amd.com>
+Reviewed-by: Gregory Price <gourry@gourry.net>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Alison Schofield <alison.schofield@intel.com>
+Reviewed-by: "Fabio M. De Francesco" <fabio.m.de.francesco@linux.intel.com>
+Tested-by: Gregory Price <gourry@gourry.net>
+Acked-by: Dan Williams <dan.j.williams@intel.com>
+Link: https://patch.msgid.link/20250509150700.2817697-14-rrichter@amd.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cxl/core/region.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index a0d6e8d7f42c8..f5429666822f0 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1781,6 +1781,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
+ }
+ put_device(dev);
+
++ if (rc)
++ dev_err(port->uport_dev,
++ "failed to find %s:%s in target list of %s\n",
++ dev_name(&port->dev),
++ dev_name(port->parent_dport->dport_dev),
++ dev_name(&cxlsd->cxld.dev));
++
+ return rc;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From a1b26f34bb5dd99ed50c38cca16520edf9ad24d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 May 2025 21:10:10 -0400
+Subject: dm vdo indexer: don't read request structure after enqueuing
+
+From: Matthew Sakai <msakai@redhat.com>
+
+[ Upstream commit 3da732687d72078e52cc7f334a482383e84ca156 ]
+
+The function get_volume_page_protected may place a request on
+a queue for another thread to process asynchronously. When this
+happens, the volume should not read the request from the original
+thread. This can not currently cause problems, due to the way
+request processing is handled, but it is not safe in general.
+
+Reviewed-by: Ken Raeburn <raeburn@redhat.com>
+Signed-off-by: Matthew Sakai <msakai@redhat.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-vdo/indexer/volume.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
+index 655453bb276be..425b3a74f4dba 100644
+--- a/drivers/md/dm-vdo/indexer/volume.c
++++ b/drivers/md/dm-vdo/indexer/volume.c
+@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ u32 physical_page, struct cached_page **page_ptr)
+ {
+ struct cached_page *page;
++ unsigned int zone_number = request->zone_number;
+
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page != NULL) {
+- if (request->zone_number == 0) {
++ if (zone_number == 0) {
+ /* Only one zone is allowed to update the LRU. */
+ make_page_most_recent(&volume->page_cache, page);
+ }
+@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ }
+
+ /* Prepare to enqueue a read for the page. */
+- end_pending_search(&volume->page_cache, request->zone_number);
++ end_pending_search(&volume->page_cache, zone_number);
+ mutex_lock(&volume->read_threads_mutex);
+
+ /*
+@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ * the order does not matter for correctness as it does below.
+ */
+ mutex_unlock(&volume->read_threads_mutex);
+- begin_pending_search(&volume->page_cache, physical_page,
+- request->zone_number);
++ begin_pending_search(&volume->page_cache, physical_page, zone_number);
+ return UDS_QUEUED;
+ }
+
+@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
+ * "search pending" state in careful order so no other thread can mess with the data before
+ * the caller gets to look at it.
+ */
+- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
++ begin_pending_search(&volume->page_cache, physical_page, zone_number);
+ mutex_unlock(&volume->read_threads_mutex);
+ *page_ptr = page;
+ return UDS_SUCCESS;
+@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
+ {
+ int result;
+ struct cached_page *page = NULL;
++ unsigned int zone_number = request->zone_number;
+ u32 physical_page = map_to_physical_page(volume->geometry, chapter,
+ index_page_number);
+
+@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
+ * invalidation by the reader thread, before the reader thread has noticed that the
+ * invalidate_counter has been incremented.
+ */
+- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
++ begin_pending_search(&volume->page_cache, physical_page, zone_number);
+
+ result = get_volume_page_protected(volume, request, physical_page, &page);
+ if (result != UDS_SUCCESS) {
+- end_pending_search(&volume->page_cache, request->zone_number);
++ end_pending_search(&volume->page_cache, zone_number);
+ return result;
+ }
+
+ result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
+ &request->record_name,
+ record_page_number);
+- end_pending_search(&volume->page_cache, request->zone_number);
++ end_pending_search(&volume->page_cache, zone_number);
+ return result;
+ }
+
+@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
+ {
+ struct cached_page *record_page;
+ struct index_geometry *geometry = volume->geometry;
++ unsigned int zone_number = request->zone_number;
+ int result;
+ u32 physical_page, page_number;
+
+@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
+ * invalidation by the reader thread, before the reader thread has noticed that the
+ * invalidate_counter has been incremented.
+ */
+- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
++ begin_pending_search(&volume->page_cache, physical_page, zone_number);
+
+ result = get_volume_page_protected(volume, request, physical_page, &record_page);
+ if (result != UDS_SUCCESS) {
+- end_pending_search(&volume->page_cache, request->zone_number);
++ end_pending_search(&volume->page_cache, zone_number);
+ return result;
+ }
+
+@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
+ &request->record_name, geometry, &request->old_metadata))
+ *found = true;
+
+- end_pending_search(&volume->page_cache, request->zone_number);
++ end_pending_search(&volume->page_cache, zone_number);
+ return UDS_SUCCESS;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From ea87478540d59019627e1ce22331b7db1bc15170 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 May 2025 08:03:04 +0800
+Subject: dmaengine: idxd: Check availability of workqueue allocated by idxd wq
+ driver before using
+
+From: Yi Sun <yi.sun@intel.com>
+
+[ Upstream commit 17502e7d7b7113346296f6758324798d536c31fd ]
+
+Running IDXD workloads in a container with the /dev directory mounted can
+trigger a call trace or even a kernel panic when the parent process of the
+container is terminated.
+
+This issue occurs because, under certain configurations, Docker does not
+properly propagate the mount replica back to the original mount point.
+
+In this case, when the user driver detaches, the WQ is destroyed but it
+still calls destroy_workqueue() attempting to completes all pending work.
+It's necessary to check wq->wq and skip the drain if it no longer exists.
+
+Signed-off-by: Yi Sun <yi.sun@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+
+Link: https://lore.kernel.org/r/20250509000304.1402863-1-yi.sun@intel.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/cdev.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 19a58c4ecef3f..8b27bd545685a 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -354,7 +354,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+ set_bit(h, evl->bmap);
+ h = (h + 1) % size;
+ }
+- drain_workqueue(wq->wq);
++ if (wq->wq)
++ drain_workqueue(wq->wq);
++
+ mutex_unlock(&evl->lock);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 2cc1a8e96312487a44207a9edb9c8f68de5499de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 20:21:01 +0200
+Subject: dmaengine: xilinx_dma: Set dma_device directions
+
+From: Thomas Gessler <thomas.gessler@brueckmann-gmbh.de>
+
+[ Upstream commit 7e01511443c30a55a5ae78d3debd46d4d872517e ]
+
+Coalesce the direction bits from the enabled TX and/or RX channels into
+the directions bit mask of dma_device. Without this mask set,
+dma_get_slave_caps() in the DMAEngine fails, which prevents the driver
+from being used with an IIO DMAEngine buffer.
+
+Signed-off-by: Thomas Gessler <thomas.gessler@brueckmann-gmbh.de>
+Reviewed-by: Suraj Gupta <suraj.gupta2@amd.com>
+Tested-by: Folker Schwesinger <dev@folker-schwesinger.de>
+Link: https://lore.kernel.org/r/20250507182101.909010-1-thomas.gessler@brueckmann-gmbh.de
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 5eb51ae93e89d..aa59b62cd83fb 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2906,6 +2906,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ return -EINVAL;
+ }
+
++ xdev->common.directions |= chan->direction;
++
+ /* Request the interrupt */
+ chan->irq = of_irq_get(node, chan->tdest);
+ if (chan->irq < 0)
+--
+2.39.5
+
--- /dev/null
+From 03b2aaed7fe44b87abdc2e05121c418808fe07b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 May 2025 11:13:52 -0400
+Subject: drm/amdgpu: seq64 memory unmap uses uninterruptible lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Philip Yang <Philip.Yang@amd.com>
+
+[ Upstream commit a359288ccb4dd8edb086e7de8fdf6e36f544c922 ]
+
+When unmapping and freeing seq64 memory on drm node close (to free the
+vm), if a signal is pending, taking the vm lock fails, leaking the seq64
+va mapping, and dmesg then reports the error "still active bo inside vm".
+
+Change to use uninterruptible lock fix the mapping leaking and no dmesg
+error log.
+
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+index e22cb2b5cd926..dba8051b8c14b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+@@ -133,7 +133,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
+
+ vm = &fpriv->vm;
+
+- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
++ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+--
+2.39.5
+
--- /dev/null
+From e8ed4921903bc1837c71f1844a7d0e30828d93de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 May 2025 21:22:15 +0200
+Subject: drm/i915/gem: Allow EXEC_CAPTURE on recoverable contexts on DG1
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 25eeba495b2fc16037647c1a51bcdf6fc157af5c ]
+
+The intel-media-driver is currently broken on DG1 because
+it uses EXEC_CAPTURE with recoverable contexts. Relax the
+check to allow that.
+
+I've also submitted a fix for the intel-media-driver:
+https://github.com/intel/media-driver/pull/1920
+
+Cc: stable@vger.kernel.org # v6.0+
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Testcase: igt/gem_exec_capture/capture-invisible
+Fixes: 71b1669ea9bd ("drm/i915/uapi: tweak error capture on recoverable contexts")
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20250411144313.11660-2-ville.syrjala@linux.intel.com
+(cherry picked from commit d6e020819612a4a06207af858e0978be4d3e3140)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Stable-dep-of: ed5915cfce2a ("Revert "drm/i915/gem: Allow EXEC_CAPTURE on recoverable contexts on DG1"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index a3b83cfe17267..841438301d802 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2014,7 +2014,7 @@ static int eb_capture_stage(struct i915_execbuffer *eb)
+ continue;
+
+ if (i915_gem_context_is_recoverable(eb->gem_context) &&
+- (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
++ GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 10))
+ return -EINVAL;
+
+ for_each_batch_create_order(eb, j) {
+--
+2.39.5
+
--- /dev/null
+From 7ee40ae59d9e4cde69bb0a1d371e85deb8005a0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 May 2025 10:07:13 +0800
+Subject: drm/scheduler: signal scheduled fence when kill job
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lin.Cao <lincao12@amd.com>
+
+[ Upstream commit 471db2c2d4f80ee94225a1ef246e4f5011733e50 ]
+
+When an entity from application B is killed, drm_sched_entity_kill()
+removes all jobs belonging to that entity through
+drm_sched_entity_kill_jobs_work(). If application A's job depends on a
+scheduled fence from application B's job, and that fence is not properly
+signaled during the killing process, application A's dependency cannot be
+cleared.
+
+This leads to application A hanging indefinitely while waiting for a
+dependency that will never be resolved. Fix this issue by ensuring that
+scheduled fences are properly signaled when an entity is killed, allowing
+dependent applications to continue execution.
+
+Signed-off-by: Lin.Cao <lincao12@amd.com>
+Reviewed-by: Philipp Stanner <phasta@kernel.org>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://lore.kernel.org/r/20250515020713.1110476-1-lincao12@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/scheduler/sched_entity.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index 002057be0d84a..c9c50e3b18a23 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -189,6 +189,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+ {
+ struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+
++ drm_sched_fence_scheduled(job->s_fence, NULL);
+ drm_sched_fence_finished(job->s_fence, -ESRCH);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+--
+2.39.5
+
--- /dev/null
+From 37a5b83b74aaf720dd8374553c554660c3a24a2f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:29 +0800
+Subject: ext4: don't explicit update times in ext4_fallocate()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 73ae756ecdfa9684446134590eef32b0f067249c ]
+
+After commit 'ad5cd4f4ee4d ("ext4: fix fallocate to use file_modified to
+update permissions consistently"), we can update mtime and ctime
+appropriately through file_modified() when doing zero range, collapse
+rage, insert range and punch hole, hence there is no need to explicit
+update times in those paths, just drop them.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-3-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 5 -----
+ fs/ext4/inode.c | 1 -
+ 2 files changed, 6 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index b16d72275e105..43da9906b9240 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4675,8 +4675,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ goto out_mutex;
+ }
+
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+-
+ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ flags);
+ filemap_invalidate_unlock(mapping);
+@@ -4700,7 +4698,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ goto out_mutex;
+ }
+
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ if (new_size)
+ ext4_update_inode_size(inode, new_size);
+ ret = ext4_mark_inode_dirty(handle, inode);
+@@ -5431,7 +5428,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+@@ -5541,7 +5537,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ /* Expand file to avoid data loss if there is error while shifting */
+ inode->i_size += len;
+ EXT4_I(inode)->i_disksize += len;
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+ goto out_stop;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f769f5cb6deb7..e4b6ab28d7055 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4113,7 +4113,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2))
+ ret = ret2;
+--
+2.39.5
+
--- /dev/null
+From 5da2388d9c7e32de7786856cb29667414dc6c242 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:35 +0800
+Subject: ext4: factor out ext4_do_fallocate()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit fd2f764826df5489b849a8937b5a093aae5b1816 ]
+
+Now the real job of normal fallocate are open coded in ext4_fallocate(),
+factor out a new helper ext4_do_fallocate() to do the real job, like
+others functions (e.g. ext4_zero_range()) in ext4_fallocate() do, this
+can make the code more clear, no functional changes.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-9-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 125 ++++++++++++++++++++++------------------------
+ 1 file changed, 60 insertions(+), 65 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 961e7b634401d..eb58f7a1ab5aa 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4690,6 +4690,58 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ return ret;
+ }
+
++static long ext4_do_fallocate(struct file *file, loff_t offset,
++ loff_t len, int mode)
++{
++ struct inode *inode = file_inode(file);
++ loff_t end = offset + len;
++ loff_t new_size = 0;
++ ext4_lblk_t start_lblk, len_lblk;
++ int ret;
++
++ trace_ext4_fallocate_enter(inode, offset, len, mode);
++
++ start_lblk = offset >> inode->i_blkbits;
++ len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
++
++ inode_lock(inode);
++
++ /* We only support preallocation for extent-based files only. */
++ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
++
++ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
++ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
++ new_size = end;
++ ret = inode_newsize_ok(inode, new_size);
++ if (ret)
++ goto out;
++ }
++
++ /* Wait all existing dio workers, newcomers will block on i_rwsem */
++ inode_dio_wait(inode);
++
++ ret = file_modified(file);
++ if (ret)
++ goto out;
++
++ ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
++ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++ if (ret)
++ goto out;
++
++ if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
++ ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
++ EXT4_I(inode)->i_sync_tid);
++ }
++out:
++ inode_unlock(inode);
++ trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
++ return ret;
++}
++
+ /*
+ * preallocate space for a file. This implements ext4's fallocate file
+ * operation, which gets called from sys_fallocate system call.
+@@ -4700,12 +4752,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ struct inode *inode = file_inode(file);
+- loff_t new_size = 0;
+- unsigned int max_blocks;
+- int ret = 0;
+- int flags;
+- ext4_lblk_t lblk;
+- unsigned int blkbits = inode->i_blkbits;
++ int ret;
+
+ /*
+ * Encrypted inodes can't handle collapse range or insert
+@@ -4727,71 +4774,19 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ ret = ext4_convert_inline_data(inode);
+ inode_unlock(inode);
+ if (ret)
+- goto exit;
++ return ret;
+
+- if (mode & FALLOC_FL_PUNCH_HOLE) {
++ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = ext4_punch_hole(file, offset, len);
+- goto exit;
+- }
+-
+- if (mode & FALLOC_FL_COLLAPSE_RANGE) {
++ else if (mode & FALLOC_FL_COLLAPSE_RANGE)
+ ret = ext4_collapse_range(file, offset, len);
+- goto exit;
+- }
+-
+- if (mode & FALLOC_FL_INSERT_RANGE) {
++ else if (mode & FALLOC_FL_INSERT_RANGE)
+ ret = ext4_insert_range(file, offset, len);
+- goto exit;
+- }
+-
+- if (mode & FALLOC_FL_ZERO_RANGE) {
++ else if (mode & FALLOC_FL_ZERO_RANGE)
+ ret = ext4_zero_range(file, offset, len, mode);
+- goto exit;
+- }
+- trace_ext4_fallocate_enter(inode, offset, len, mode);
+- lblk = offset >> blkbits;
+-
+- max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
+- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+-
+- inode_lock(inode);
+-
+- /*
+- * We only support preallocation for extent-based files only
+- */
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
+-
+- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+- (offset + len > inode->i_size ||
+- offset + len > EXT4_I(inode)->i_disksize)) {
+- new_size = offset + len;
+- ret = inode_newsize_ok(inode, new_size);
+- if (ret)
+- goto out;
+- }
+-
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- goto out;
+-
+- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
+- if (ret)
+- goto out;
++ else
++ ret = ext4_do_fallocate(file, offset, len, mode);
+
+- if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+- ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
+- EXT4_I(inode)->i_sync_tid);
+- }
+-out:
+- inode_unlock(inode);
+- trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+-exit:
+ return ret;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 4c07f89a4b4fe53f0fb66b6c7e3563e935470200 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 09:20:07 +0800
+Subject: ext4: fix incorrect punch max_end
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 29ec9bed2395061350249ae356fb300dd82a78e7 ]
+
+For the extents based inodes, the maxbytes should be sb->s_maxbytes
+instead of sbi->s_bitmap_maxbytes. Additionally, for the calculation of
+max_end, the -sb->s_blocksize operation is necessary only for
+indirect-block based inodes. Correct the maxbytes and max_end value to
+correct the behavior of punch hole.
+
+Fixes: 2da376228a24 ("ext4: limit length to bitmap_maxbytes - blocksize in punch_hole")
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Link: https://patch.msgid.link/20250506012009.3896990-2-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/inode.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ca98f04fcf556..fe1d19d920a96 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3992,7 +3992,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ ext4_lblk_t start_lblk, end_lblk;
+- loff_t max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
++ loff_t max_end = sb->s_maxbytes;
+ loff_t end = offset + length;
+ handle_t *handle;
+ unsigned int credits;
+@@ -4001,14 +4001,20 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ trace_ext4_punch_hole(inode, offset, length, 0);
+ WARN_ON_ONCE(!inode_is_locked(inode));
+
++ /*
++ * For indirect-block based inodes, make sure that the hole within
++ * one block before last range.
++ */
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
++
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+ return 0;
+
+ /*
+ * If the hole extends beyond i_size, set the hole to end after
+- * the page that contains i_size, and also make sure that the hole
+- * within one block before last range.
++ * the page that contains i_size.
+ */
+ if (end > inode->i_size)
+ end = round_up(inode->i_size, PAGE_SIZE);
+--
+2.39.5
+
--- /dev/null
+From 3f74e7916f808513bf4f644d15c7ae6b55db81cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:37 +0800
+Subject: ext4: move out common parts into ext4_fallocate()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 2890e5e0f49e10f3dadc5f7b7ea434e3e77e12a6 ]
+
+Currently, all zeroing ranges, punch holes, collapse ranges, and insert
+ranges first wait for all existing direct I/O workers to complete, and
+then they acquire the mapping's invalidate lock before performing the
+actual work. These common components are nearly identical, so we can
+simplify the code by factoring them out into the ext4_fallocate().
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-11-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 124 ++++++++++++++++------------------------------
+ fs/ext4/inode.c | 25 ++--------
+ 2 files changed, 45 insertions(+), 104 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 30d412b62d9ed..51b9533416e04 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4569,7 +4569,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ loff_t len, int mode)
+ {
+ struct inode *inode = file_inode(file);
+- struct address_space *mapping = file->f_mapping;
+ handle_t *handle = NULL;
+ loff_t new_size = 0;
+ loff_t end = offset + len;
+@@ -4593,23 +4592,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ return ret;
+ }
+
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released
+- * from page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+-
+ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+ /* Preallocate the range including the unaligned edges */
+ if (!IS_ALIGNED(offset | end, blocksize)) {
+@@ -4619,17 +4601,17 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
+ new_size, flags);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+ }
+
+ ret = ext4_update_disksize_before_punch(inode, offset, len);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ /* Now release the pages and zero block aligned part of pages */
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ /* Zero range excluding the unaligned edges */
+ start_lblk = EXT4_B_TO_LBLK(inode, offset);
+@@ -4641,11 +4623,11 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
+ new_size, flags);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+ }
+ /* Finish zeroing out if it doesn't contain partial block */
+ if (IS_ALIGNED(offset | end, blocksize))
+- goto out_invalidate_lock;
++ return ret;
+
+ /*
+ * In worst case we have to writeout two nonadjacent unwritten
+@@ -4658,7 +4640,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(inode->i_sb, ret);
+- goto out_invalidate_lock;
++ return ret;
+ }
+
+ /* Zero out partial block at the edges of the range */
+@@ -4678,8 +4660,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+@@ -4712,13 +4692,6 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
+ goto out;
+ }
+
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- goto out;
+-
+ ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
+ if (ret)
+@@ -4743,6 +4716,7 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
+ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ struct inode *inode = file_inode(file);
++ struct address_space *mapping = file->f_mapping;
+ int ret;
+
+ /*
+@@ -4766,6 +4740,29 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ if (ret)
+ goto out_inode_lock;
+
++ /* Wait all existing dio workers, newcomers will block on i_rwsem */
++ inode_dio_wait(inode);
++
++ ret = file_modified(file);
++ if (ret)
++ return ret;
++
++ if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
++ ret = ext4_do_fallocate(file, offset, len, mode);
++ goto out_inode_lock;
++ }
++
++ /*
++ * Follow-up operations will drop page cache, hold invalidate lock
++ * to prevent page faults from reinstantiating pages we have
++ * released from page cache.
++ */
++ filemap_invalidate_lock(mapping);
++
++ ret = ext4_break_layouts(inode);
++ if (ret)
++ goto out_invalidate_lock;
++
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = ext4_punch_hole(file, offset, len);
+ else if (mode & FALLOC_FL_COLLAPSE_RANGE)
+@@ -4775,7 +4772,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ else if (mode & FALLOC_FL_ZERO_RANGE)
+ ret = ext4_zero_range(file, offset, len, mode);
+ else
+- ret = ext4_do_fallocate(file, offset, len, mode);
++ ret = -EOPNOTSUPP;
++
++out_invalidate_lock:
++ filemap_invalidate_unlock(mapping);
+ out_inode_lock:
+ inode_unlock(inode);
+ return ret;
+@@ -5297,23 +5297,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ if (end >= inode->i_size)
+ return -EINVAL;
+
+- /* Wait for existing dio to complete */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released from
+- * page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+-
+ /*
+ * Write tail of the last page before removed range and data that
+ * will be shifted since they will get removed from the page cache
+@@ -5327,16 +5310,15 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ if (!ret)
+ ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+- if (IS_ERR(handle)) {
+- ret = PTR_ERR(handle);
+- goto out_invalidate_lock;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
+ start_lblk = offset >> inode->i_blkbits;
+@@ -5375,8 +5357,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+@@ -5417,23 +5397,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ if (len > inode->i_sb->s_maxbytes - inode->i_size)
+ return -EFBIG;
+
+- /* Wait for existing dio to complete */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released from
+- * page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+-
+ /*
+ * Write out all dirty pages. Need to round down to align start offset
+ * to page size boundary for page size > block size.
+@@ -5441,16 +5404,15 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ start = round_down(offset, PAGE_SIZE);
+ ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+- if (IS_ERR(handle)) {
+- ret = PTR_ERR(handle);
+- goto out_invalidate_lock;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
+ /* Expand file to avoid data loss if there is error while shifting */
+@@ -5521,8 +5483,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 6f0b1b0bd1af8..ca98f04fcf556 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3992,7 +3992,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ ext4_lblk_t start_lblk, end_lblk;
+- struct address_space *mapping = inode->i_mapping;
+ loff_t max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
+ loff_t end = offset + length;
+ handle_t *handle;
+@@ -4027,31 +4026,15 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ return ret;
+ }
+
+- /* Wait all existing dio workers, newcomers will block on i_rwsem */
+- inode_dio_wait(inode);
+-
+- ret = file_modified(file);
+- if (ret)
+- return ret;
+-
+- /*
+- * Prevent page faults from reinstantiating pages we have released from
+- * page cache.
+- */
+- filemap_invalidate_lock(mapping);
+-
+- ret = ext4_break_layouts(inode);
+- if (ret)
+- goto out_invalidate_lock;
+
+ ret = ext4_update_disksize_before_punch(inode, offset, length);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ /* Now release the pages and zero block aligned part of pages*/
+ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
+ if (ret)
+- goto out_invalidate_lock;
++ return ret;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ credits = ext4_writepage_trans_blocks(inode);
+@@ -4061,7 +4044,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(sb, ret);
+- goto out_invalidate_lock;
++ return ret;
+ }
+
+ ret = ext4_zero_partial_blocks(handle, inode, offset, length);
+@@ -4106,8 +4089,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ ext4_handle_sync(handle);
+ out_handle:
+ ext4_journal_stop(handle);
+-out_invalidate_lock:
+- filemap_invalidate_unlock(mapping);
+ return ret;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From d2f78acc707114b100293aaaa6888b34db0a71bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:36 +0800
+Subject: ext4: move out inode_lock into ext4_fallocate()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit ea3f17efd36b56c5839289716ba83eaa85893590 ]
+
+Currently, all five sub-functions of ext4_fallocate() acquire the
+inode's i_rwsem at the beginning and release it before exiting. This
+process can be simplified by factoring out the management of i_rwsem
+into the ext4_fallocate() function.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-10-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 90 +++++++++++++++--------------------------------
+ fs/ext4/inode.c | 13 +++----
+ 2 files changed, 33 insertions(+), 70 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index eb58f7a1ab5aa..30d412b62d9ed 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4579,23 +4579,18 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ int ret, flags, credits;
+
+ trace_ext4_zero_range(inode, offset, len, mode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+- inode_lock(inode);
+-
+- /*
+- * Indirect files do not support unwritten extents
+- */
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
++ /* Indirect files do not support unwritten extents */
++ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
++ return -EOPNOTSUPP;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
+ new_size = end;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+- goto out;
++ return ret;
+ }
+
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+@@ -4603,7 +4598,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released
+@@ -4685,8 +4680,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+@@ -4700,12 +4693,11 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
+ int ret;
+
+ trace_ext4_fallocate_enter(inode, offset, len, mode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ start_lblk = offset >> inode->i_blkbits;
+ len_lblk = EXT4_MAX_BLOCKS(len, offset, inode->i_blkbits);
+
+- inode_lock(inode);
+-
+ /* We only support preallocation for extent-based files only. */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+@@ -4737,7 +4729,6 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
+ EXT4_I(inode)->i_sync_tid);
+ }
+ out:
+- inode_unlock(inode);
+ trace_ext4_fallocate_exit(inode, offset, len_lblk, ret);
+ return ret;
+ }
+@@ -4772,9 +4763,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+
+ inode_lock(inode);
+ ret = ext4_convert_inline_data(inode);
+- inode_unlock(inode);
+ if (ret)
+- return ret;
++ goto out_inode_lock;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = ext4_punch_hole(file, offset, len);
+@@ -4786,7 +4776,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ ret = ext4_zero_range(file, offset, len, mode);
+ else
+ ret = ext4_do_fallocate(file, offset, len, mode);
+-
++out_inode_lock:
++ inode_unlock(inode);
+ return ret;
+ }
+
+@@ -5291,36 +5282,27 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ int ret;
+
+ trace_ext4_collapse_range(inode, offset, len);
+-
+- inode_lock(inode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
+-
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ return -EOPNOTSUPP;
+ /* Collapse range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
+- ret = -EINVAL;
+- goto out;
+- }
+-
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
++ return -EINVAL;
+ /*
+ * There is no need to overlap collapse range with EOF, in which case
+ * it is effectively a truncate operation
+ */
+- if (end >= inode->i_size) {
+- ret = -EINVAL;
+- goto out;
+- }
++ if (end >= inode->i_size)
++ return -EINVAL;
+
+ /* Wait for existing dio to complete */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5395,8 +5377,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+@@ -5422,39 +5402,27 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ loff_t start;
+
+ trace_ext4_insert_range(inode, offset, len);
+-
+- inode_lock(inode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* Currently just for extent based files */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- ret = -EOPNOTSUPP;
+- goto out;
+- }
+-
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ return -EOPNOTSUPP;
+ /* Insert range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
+- ret = -EINVAL;
+- goto out;
+- }
+-
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
++ return -EINVAL;
+ /* Offset must be less than i_size */
+- if (offset >= inode->i_size) {
+- ret = -EINVAL;
+- goto out;
+- }
+-
++ if (offset >= inode->i_size)
++ return -EINVAL;
+ /* Check whether the maximum file size would be exceeded */
+- if (len > inode->i_sb->s_maxbytes - inode->i_size) {
+- ret = -EFBIG;
+- goto out;
+- }
++ if (len > inode->i_sb->s_maxbytes - inode->i_size)
++ return -EFBIG;
+
+ /* Wait for existing dio to complete */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5555,8 +5523,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bb68c851b33ad..6f0b1b0bd1af8 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3997,15 +3997,14 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ loff_t end = offset + length;
+ handle_t *handle;
+ unsigned int credits;
+- int ret = 0;
++ int ret;
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+-
+- inode_lock(inode);
++ WARN_ON_ONCE(!inode_is_locked(inode));
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+- goto out;
++ return 0;
+
+ /*
+ * If the hole extends beyond i_size, set the hole to end after
+@@ -4025,7 +4024,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+- goto out;
++ return ret;
+ }
+
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+@@ -4033,7 +4032,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+
+ ret = file_modified(file);
+ if (ret)
+- goto out;
++ return ret;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -4109,8 +4108,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ ext4_journal_stop(handle);
+ out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out:
+- inode_unlock(inode);
+ return ret;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From a2a5dcfb39092ada8d900a25835ec584aa77fc2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:33 +0800
+Subject: ext4: refactor ext4_collapse_range()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 162e3c5ad1672ef41dccfb28ad198c704b8aa9e7 ]
+
+Simplify ext4_collapse_range() and align its code style with that of
+ext4_zero_range() and ext4_punch_hole(). Refactor it by: a) renaming
+variables, b) removing redundant input parameter checks and moving
+the remaining checks under i_rwsem in preparation for future
+refactoring, and c) renaming the three stale error tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-7-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 103 +++++++++++++++++++++-------------------------
+ 1 file changed, 48 insertions(+), 55 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 00c7a03cc7c6e..54fbeba3a929d 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5288,43 +5288,36 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ struct address_space *mapping = inode->i_mapping;
+- ext4_lblk_t punch_start, punch_stop;
++ loff_t end = offset + len;
++ ext4_lblk_t start_lblk, end_lblk;
+ handle_t *handle;
+ unsigned int credits;
+- loff_t new_size, ioffset;
++ loff_t start, new_size;
+ int ret;
+
+- /*
+- * We need to test this early because xfstests assumes that a
+- * collapse range of (0, 1) will return EOPNOTSUPP if the file
+- * system does not support collapse range.
+- */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- return -EOPNOTSUPP;
++ trace_ext4_collapse_range(inode, offset, len);
+
+- /* Collapse range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
+- return -EINVAL;
++ inode_lock(inode);
+
+- trace_ext4_collapse_range(inode, offset, len);
++ /* Currently just for extent based files */
++ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+
+- punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+- punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
++ /* Collapse range works only on fs cluster size aligned regions. */
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- inode_lock(inode);
+ /*
+ * There is no need to overlap collapse range with EOF, in which case
+ * it is effectively a truncate operation
+ */
+- if (offset + len >= inode->i_size) {
++ if (end >= inode->i_size) {
+ ret = -EINVAL;
+- goto out_mutex;
+- }
+-
+- /* Currently just for extent based files */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- ret = -EOPNOTSUPP;
+- goto out_mutex;
++ goto out;
+ }
+
+ /* Wait for existing dio to complete */
+@@ -5332,7 +5325,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5342,55 +5335,52 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+- goto out_mmap;
++ goto out_invalidate_lock;
+
+ /*
++ * Write tail of the last page before removed range and data that
++ * will be shifted since they will get removed from the page cache
++ * below. We are also protected from pages becoming dirty by
++ * i_rwsem and invalidate_lock.
+ * Need to round down offset to be aligned with page size boundary
+ * for page size > block size.
+ */
+- ioffset = round_down(offset, PAGE_SIZE);
+- /*
+- * Write tail of the last page before removed range since it will get
+- * removed from the page cache below.
+- */
+- ret = filemap_write_and_wait_range(mapping, ioffset, offset);
+- if (ret)
+- goto out_mmap;
+- /*
+- * Write data that will be shifted to preserve them when discarding
+- * page cache below. We are also protected from pages becoming dirty
+- * by i_rwsem and invalidate_lock.
+- */
+- ret = filemap_write_and_wait_range(mapping, offset + len,
+- LLONG_MAX);
++ start = round_down(offset, PAGE_SIZE);
++ ret = filemap_write_and_wait_range(mapping, start, offset);
++ if (!ret)
++ ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
+ if (ret)
+- goto out_mmap;
+- truncate_pagecache(inode, ioffset);
++ goto out_invalidate_lock;
++
++ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- goto out_mmap;
++ goto out_invalidate_lock;
+ }
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
++ start_lblk = offset >> inode->i_blkbits;
++ end_lblk = (offset + len) >> inode->i_blkbits;
++
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+- ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
++ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
+
+- ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
++ ret = ext4_ext_remove_space(inode, start_lblk, end_lblk - 1);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
++ goto out_handle;
+ }
+ ext4_discard_preallocations(inode);
+
+- ret = ext4_ext_shift_extents(inode, handle, punch_stop,
+- punch_stop - punch_start, SHIFT_LEFT);
++ ret = ext4_ext_shift_extents(inode, handle, end_lblk,
++ end_lblk - start_lblk, SHIFT_LEFT);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
++ goto out_handle;
+ }
+
+ new_size = inode->i_size - len;
+@@ -5398,16 +5388,19 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ EXT4_I(inode)->i_disksize = new_size;
+
+ up_write(&EXT4_I(inode)->i_data_sem);
+- if (IS_SYNC(inode))
+- ext4_handle_sync(handle);
+ ret = ext4_mark_inode_dirty(handle, inode);
++ if (ret)
++ goto out_handle;
++
+ ext4_update_inode_fsync_trans(handle, inode, 1);
++ if (IS_SYNC(inode))
++ ext4_handle_sync(handle);
+
+-out_stop:
++out_handle:
+ ext4_journal_stop(handle);
+-out_mmap:
++out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out_mutex:
++out:
+ inode_unlock(inode);
+ return ret;
+ }
+--
+2.39.5
+
--- /dev/null
+From 91763fcc230f877b527273e437f05b2a446a4b8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:34 +0800
+Subject: ext4: refactor ext4_insert_range()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 49425504376c335c68f7be54ae7c32312afd9475 ]
+
+Simplify ext4_insert_range() and align its code style with that of
+ext4_collapse_range(). Refactor it by: a) renaming variables, b)
+removing redundant input parameter checks and moving the remaining
+checks under i_rwsem in preparation for future refactoring, and c)
+renaming the three stale error tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-8-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 101 ++++++++++++++++++++++------------------------
+ 1 file changed, 48 insertions(+), 53 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 54fbeba3a929d..961e7b634401d 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5421,45 +5421,37 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ handle_t *handle;
+ struct ext4_ext_path *path;
+ struct ext4_extent *extent;
+- ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
++ ext4_lblk_t start_lblk, len_lblk, ee_start_lblk = 0;
+ unsigned int credits, ee_len;
+- int ret = 0, depth, split_flag = 0;
+- loff_t ioffset;
+-
+- /*
+- * We need to test this early because xfstests assumes that an
+- * insert range of (0, 1) will return EOPNOTSUPP if the file
+- * system does not support insert range.
+- */
+- if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- return -EOPNOTSUPP;
+-
+- /* Insert range works only on fs cluster size aligned regions. */
+- if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
+- return -EINVAL;
++ int ret, depth, split_flag = 0;
++ loff_t start;
+
+ trace_ext4_insert_range(inode, offset, len);
+
+- offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+- len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
+-
+ inode_lock(inode);
++
+ /* Currently just for extent based files */
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ret = -EOPNOTSUPP;
+- goto out_mutex;
++ goto out;
+ }
+
+- /* Check whether the maximum file size would be exceeded */
+- if (len > inode->i_sb->s_maxbytes - inode->i_size) {
+- ret = -EFBIG;
+- goto out_mutex;
++ /* Insert range works only on fs cluster size aligned regions. */
++ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) {
++ ret = -EINVAL;
++ goto out;
+ }
+
+ /* Offset must be less than i_size */
+ if (offset >= inode->i_size) {
+ ret = -EINVAL;
+- goto out_mutex;
++ goto out;
++ }
++
++ /* Check whether the maximum file size would be exceeded */
++ if (len > inode->i_sb->s_maxbytes - inode->i_size) {
++ ret = -EFBIG;
++ goto out;
+ }
+
+ /* Wait for existing dio to complete */
+@@ -5467,7 +5459,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -5477,25 +5469,24 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+- goto out_mmap;
++ goto out_invalidate_lock;
+
+ /*
+- * Need to round down to align start offset to page size boundary
+- * for page size > block size.
++ * Write out all dirty pages. Need to round down to align start offset
++ * to page size boundary for page size > block size.
+ */
+- ioffset = round_down(offset, PAGE_SIZE);
+- /* Write out all dirty pages */
+- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+- LLONG_MAX);
++ start = round_down(offset, PAGE_SIZE);
++ ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
+ if (ret)
+- goto out_mmap;
+- truncate_pagecache(inode, ioffset);
++ goto out_invalidate_lock;
++
++ truncate_pagecache(inode, start);
+
+ credits = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- goto out_mmap;
++ goto out_invalidate_lock;
+ }
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
+
+@@ -5504,16 +5495,19 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ EXT4_I(inode)->i_disksize += len;
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+- goto out_stop;
++ goto out_handle;
++
++ start_lblk = offset >> inode->i_blkbits;
++ len_lblk = len >> inode->i_blkbits;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+
+- path = ext4_find_extent(inode, offset_lblk, NULL, 0);
++ path = ext4_find_extent(inode, start_lblk, NULL, 0);
+ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
+- goto out_stop;
++ goto out_handle;
+ }
+
+ depth = ext_depth(inode);
+@@ -5523,16 +5517,16 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ ee_len = ext4_ext_get_actual_len(extent);
+
+ /*
+- * If offset_lblk is not the starting block of extent, split
+- * the extent @offset_lblk
++ * If start_lblk is not the starting block of extent, split
++ * the extent @start_lblk
+ */
+- if ((offset_lblk > ee_start_lblk) &&
+- (offset_lblk < (ee_start_lblk + ee_len))) {
++ if ((start_lblk > ee_start_lblk) &&
++ (start_lblk < (ee_start_lblk + ee_len))) {
+ if (ext4_ext_is_unwritten(extent))
+ split_flag = EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ path = ext4_split_extent_at(handle, inode, path,
+- offset_lblk, split_flag,
++ start_lblk, split_flag,
+ EXT4_EX_NOCACHE |
+ EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_METADATA_NOFAIL);
+@@ -5541,31 +5535,32 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
+- goto out_stop;
++ goto out_handle;
+ }
+ }
+
+ ext4_free_ext_path(path);
+- ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
++ ext4_es_remove_extent(inode, start_lblk, EXT_MAX_BLOCKS - start_lblk);
+
+ /*
+- * if offset_lblk lies in a hole which is at start of file, use
++ * if start_lblk lies in a hole which is at start of file, use
+ * ee_start_lblk to shift extents
+ */
+ ret = ext4_ext_shift_extents(inode, handle,
+- max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
+-
++ max(ee_start_lblk, start_lblk), len_lblk, SHIFT_RIGHT);
+ up_write(&EXT4_I(inode)->i_data_sem);
++ if (ret)
++ goto out_handle;
++
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- if (ret >= 0)
+- ext4_update_inode_fsync_trans(handle, inode, 1);
+
+-out_stop:
++out_handle:
+ ext4_journal_stop(handle);
+-out_mmap:
++out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out_mutex:
++out:
+ inode_unlock(inode);
+ return ret;
+ }
+--
+2.39.5
+
--- /dev/null
+From 77bae9f3a064c00f2f2824f50cf77ab3fb5250fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:31 +0800
+Subject: ext4: refactor ext4_punch_hole()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 982bf37da09d078570650b691d9084f43805a5de ]
+
+The current implementation of ext4_punch_hole() contains complex
+position calculations and stale error tags. To improve the code's
+clarity and maintainability, it is essential to clean up the code and
+improve its readability, this can be achieved by: a) simplifying and
+renaming variables; b) eliminating unnecessary position calculations;
+c) writing back all data in data=journal mode, and drop page cache from
+the original offset to the end, rather than using aligned blocks,
+d) renaming the stale error tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-5-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 2 +
+ fs/ext4/inode.c | 119 +++++++++++++++++++++---------------------------
+ 2 files changed, 55 insertions(+), 66 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e94df69ee2e0d..a95525bfb99cf 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -368,6 +368,8 @@ struct ext4_io_submit {
+ #define EXT4_MAX_BLOCKS(size, offset, blkbits) \
+ ((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
+ blkbits))
++#define EXT4_B_TO_LBLK(inode, offset) \
++ (round_up((offset), i_blocksize(inode)) >> (inode)->i_blkbits)
+
+ /* Translate a block number to a cluster number */
+ #define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits)
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index e4b6ab28d7055..bb68c851b33ad 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3991,13 +3991,13 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ {
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+- ext4_lblk_t first_block, stop_block;
++ ext4_lblk_t start_lblk, end_lblk;
+ struct address_space *mapping = inode->i_mapping;
+- loff_t first_block_offset, last_block_offset, max_length;
+- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++ loff_t max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
++ loff_t end = offset + length;
+ handle_t *handle;
+ unsigned int credits;
+- int ret = 0, ret2 = 0;
++ int ret = 0;
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+
+@@ -4005,36 +4005,27 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+- goto out_mutex;
++ goto out;
+
+ /*
+- * If the hole extends beyond i_size, set the hole
+- * to end after the page that contains i_size
++ * If the hole extends beyond i_size, set the hole to end after
++ * the page that contains i_size, and also make sure that the hole
++ * within one block before last range.
+ */
+- if (offset + length > inode->i_size) {
+- length = inode->i_size +
+- PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
+- offset;
+- }
++ if (end > inode->i_size)
++ end = round_up(inode->i_size, PAGE_SIZE);
++ if (end > max_end)
++ end = max_end;
++ length = end - offset;
+
+ /*
+- * For punch hole the length + offset needs to be within one block
+- * before last range. Adjust the length if it goes beyond that limit.
++ * Attach jinode to inode for jbd2 if we do any zeroing of partial
++ * block.
+ */
+- max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+- if (offset + length > max_length)
+- length = max_length - offset;
+-
+- if (offset & (sb->s_blocksize - 1) ||
+- (offset + length) & (sb->s_blocksize - 1)) {
+- /*
+- * Attach jinode to inode for jbd2 if we do any zeroing of
+- * partial block
+- */
++ if (!IS_ALIGNED(offset | end, sb->s_blocksize)) {
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+- goto out_mutex;
+-
++ goto out;
+ }
+
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+@@ -4042,7 +4033,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
++ goto out;
+
+ /*
+ * Prevent page faults from reinstantiating pages we have released from
+@@ -4052,22 +4043,16 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+- goto out_dio;
++ goto out_invalidate_lock;
+
+- first_block_offset = round_up(offset, sb->s_blocksize);
+- last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
++ ret = ext4_update_disksize_before_punch(inode, offset, length);
++ if (ret)
++ goto out_invalidate_lock;
+
+ /* Now release the pages and zero block aligned part of pages*/
+- if (last_block_offset > first_block_offset) {
+- ret = ext4_update_disksize_before_punch(inode, offset, length);
+- if (ret)
+- goto out_dio;
+-
+- ret = ext4_truncate_page_cache_block_range(inode,
+- first_block_offset, last_block_offset + 1);
+- if (ret)
+- goto out_dio;
+- }
++ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
++ if (ret)
++ goto out_invalidate_lock;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ credits = ext4_writepage_trans_blocks(inode);
+@@ -4077,52 +4062,54 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(sb, ret);
+- goto out_dio;
++ goto out_invalidate_lock;
+ }
+
+- ret = ext4_zero_partial_blocks(handle, inode, offset,
+- length);
++ ret = ext4_zero_partial_blocks(handle, inode, offset, length);
+ if (ret)
+- goto out_stop;
+-
+- first_block = (offset + sb->s_blocksize - 1) >>
+- EXT4_BLOCK_SIZE_BITS(sb);
+- stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
++ goto out_handle;
+
+ /* If there are blocks to remove, do it */
+- if (stop_block > first_block) {
+- ext4_lblk_t hole_len = stop_block - first_block;
++ start_lblk = EXT4_B_TO_LBLK(inode, offset);
++ end_lblk = end >> inode->i_blkbits;
++
++ if (end_lblk > start_lblk) {
++ ext4_lblk_t hole_len = end_lblk - start_lblk;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
+
+- ext4_es_remove_extent(inode, first_block, hole_len);
++ ext4_es_remove_extent(inode, start_lblk, hole_len);
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- ret = ext4_ext_remove_space(inode, first_block,
+- stop_block - 1);
++ ret = ext4_ext_remove_space(inode, start_lblk,
++ end_lblk - 1);
+ else
+- ret = ext4_ind_remove_space(handle, inode, first_block,
+- stop_block);
++ ret = ext4_ind_remove_space(handle, inode, start_lblk,
++ end_lblk);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto out_handle;
++ }
+
+- ext4_es_insert_extent(inode, first_block, hole_len, ~0,
++ ext4_es_insert_extent(inode, start_lblk, hole_len, ~0,
+ EXTENT_STATUS_HOLE, 0);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
+- ext4_fc_track_range(handle, inode, first_block, stop_block);
++ ext4_fc_track_range(handle, inode, start_lblk, end_lblk);
++
++ ret = ext4_mark_inode_dirty(handle, inode);
++ if (unlikely(ret))
++ goto out_handle;
++
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+-
+- ret2 = ext4_mark_inode_dirty(handle, inode);
+- if (unlikely(ret2))
+- ret = ret2;
+- if (ret >= 0)
+- ext4_update_inode_fsync_trans(handle, inode, 1);
+-out_stop:
++out_handle:
+ ext4_journal_stop(handle);
+-out_dio:
++out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
+-out_mutex:
++out:
+ inode_unlock(inode);
+ return ret;
+ }
+--
+2.39.5
+
--- /dev/null
+From 88513a4e8db009de1fbed55ec13bb9b66995ce43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Dec 2024 09:16:32 +0800
+Subject: ext4: refactor ext4_zero_range()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 53471e0bedad5891b860d02233819dc0e28189e2 ]
+
+The current implementation of ext4_zero_range() contains complex
+position calculations and stale error tags. To improve the code's
+clarity and maintainability, it is essential to clean up the code and
+improve its readability, this can be achieved by: a) simplifying and
+renaming variables, making the style the same as ext4_punch_hole(); b)
+eliminating unnecessary position calculations, writing back all data in
+data=journal mode, and drop page cache from the original offset to the
+end, rather than using aligned blocks; c) renaming the stale out_mutex
+tags.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20241220011637.1157197-6-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 29ec9bed2395 ("ext4: fix incorrect punch max_end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 142 +++++++++++++++++++---------------------------
+ 1 file changed, 57 insertions(+), 85 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 43da9906b9240..00c7a03cc7c6e 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4571,40 +4571,15 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
+ handle_t *handle = NULL;
+- unsigned int max_blocks;
+ loff_t new_size = 0;
+- int ret = 0;
+- int flags;
+- int credits;
+- int partial_begin, partial_end;
+- loff_t start, end;
+- ext4_lblk_t lblk;
++ loff_t end = offset + len;
++ ext4_lblk_t start_lblk, end_lblk;
++ unsigned int blocksize = i_blocksize(inode);
+ unsigned int blkbits = inode->i_blkbits;
++ int ret, flags, credits;
+
+ trace_ext4_zero_range(inode, offset, len, mode);
+
+- /*
+- * Round up offset. This is not fallocate, we need to zero out
+- * blocks, so convert interior block aligned part of the range to
+- * unwritten and possibly manually zero out unaligned parts of the
+- * range. Here, start and partial_begin are inclusive, end and
+- * partial_end are exclusive.
+- */
+- start = round_up(offset, 1 << blkbits);
+- end = round_down((offset + len), 1 << blkbits);
+-
+- if (start < offset || end > offset + len)
+- return -EINVAL;
+- partial_begin = offset & ((1 << blkbits) - 1);
+- partial_end = (offset + len) & ((1 << blkbits) - 1);
+-
+- lblk = start >> blkbits;
+- max_blocks = (end >> blkbits);
+- if (max_blocks < lblk)
+- max_blocks = 0;
+- else
+- max_blocks -= lblk;
+-
+ inode_lock(inode);
+
+ /*
+@@ -4612,77 +4587,70 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+- goto out_mutex;
++ goto out;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+- (offset + len > inode->i_size ||
+- offset + len > EXT4_I(inode)->i_disksize)) {
+- new_size = offset + len;
++ (end > inode->i_size || end > EXT4_I(inode)->i_disksize)) {
++ new_size = end;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+- goto out_mutex;
++ goto out;
+ }
+
+- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+-
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+- goto out_mutex;
+-
+- /* Preallocate the range including the unaligned edges */
+- if (partial_begin || partial_end) {
+- ret = ext4_alloc_file_blocks(file,
+- round_down(offset, 1 << blkbits) >> blkbits,
+- (round_up((offset + len), 1 << blkbits) -
+- round_down(offset, 1 << blkbits)) >> blkbits,
+- new_size, flags);
+- if (ret)
+- goto out_mutex;
++ goto out;
+
+- }
++ /*
++ * Prevent page faults from reinstantiating pages we have released
++ * from page cache.
++ */
++ filemap_invalidate_lock(mapping);
+
+- /* Zero range excluding the unaligned edges */
+- if (max_blocks > 0) {
+- flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+- EXT4_EX_NOCACHE);
++ ret = ext4_break_layouts(inode);
++ if (ret)
++ goto out_invalidate_lock;
+
+- /*
+- * Prevent page faults from reinstantiating pages we have
+- * released from page cache.
+- */
+- filemap_invalidate_lock(mapping);
++ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
++ /* Preallocate the range including the unaligned edges */
++ if (!IS_ALIGNED(offset | end, blocksize)) {
++ ext4_lblk_t alloc_lblk = offset >> blkbits;
++ ext4_lblk_t len_lblk = EXT4_MAX_BLOCKS(len, offset, blkbits);
+
+- ret = ext4_break_layouts(inode);
+- if (ret) {
+- filemap_invalidate_unlock(mapping);
+- goto out_mutex;
+- }
++ ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
++ new_size, flags);
++ if (ret)
++ goto out_invalidate_lock;
++ }
+
+- ret = ext4_update_disksize_before_punch(inode, offset, len);
+- if (ret) {
+- filemap_invalidate_unlock(mapping);
+- goto out_mutex;
+- }
++ ret = ext4_update_disksize_before_punch(inode, offset, len);
++ if (ret)
++ goto out_invalidate_lock;
+
+- /* Now release the pages and zero block aligned part of pages */
+- ret = ext4_truncate_page_cache_block_range(inode, start, end);
+- if (ret) {
+- filemap_invalidate_unlock(mapping);
+- goto out_mutex;
+- }
++ /* Now release the pages and zero block aligned part of pages */
++ ret = ext4_truncate_page_cache_block_range(inode, offset, end);
++ if (ret)
++ goto out_invalidate_lock;
+
+- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+- flags);
+- filemap_invalidate_unlock(mapping);
++ /* Zero range excluding the unaligned edges */
++ start_lblk = EXT4_B_TO_LBLK(inode, offset);
++ end_lblk = end >> blkbits;
++ if (end_lblk > start_lblk) {
++ ext4_lblk_t zero_blks = end_lblk - start_lblk;
++
++ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE);
++ ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
++ new_size, flags);
+ if (ret)
+- goto out_mutex;
++ goto out_invalidate_lock;
+ }
+- if (!partial_begin && !partial_end)
+- goto out_mutex;
++ /* Finish zeroing out if it doesn't contain partial block */
++ if (IS_ALIGNED(offset | end, blocksize))
++ goto out_invalidate_lock;
+
+ /*
+ * In worst case we have to writeout two nonadjacent unwritten
+@@ -4695,25 +4663,29 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(inode->i_sb, ret);
+- goto out_mutex;
++ goto out_invalidate_lock;
+ }
+
++ /* Zero out partial block at the edges of the range */
++ ret = ext4_zero_partial_blocks(handle, inode, offset, len);
++ if (ret)
++ goto out_handle;
++
+ if (new_size)
+ ext4_update_inode_size(inode, new_size);
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret))
+ goto out_handle;
+- /* Zero out partial block at the edges of the range */
+- ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+- if (ret >= 0)
+- ext4_update_inode_fsync_trans(handle, inode, 1);
+
++ ext4_update_inode_fsync_trans(handle, inode, 1);
+ if (file->f_flags & O_SYNC)
+ ext4_handle_sync(handle);
+
+ out_handle:
+ ext4_journal_stop(handle);
+-out_mutex:
++out_invalidate_lock:
++ filemap_invalidate_unlock(mapping);
++out:
+ inode_unlock(inode);
+ return ret;
+ }
+--
+2.39.5
+
--- /dev/null
+From 4e9e5eaffb94f4c71da782330107ca6e8cf866e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 May 2025 19:25:38 +0800
+Subject: f2fs: don't over-report free space or inodes in statvfs
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit a9201960623287927bf5776de3f70fb2fbde7e02 ]
+
+This fixes an analogous bug that was fixed in modern filesystems:
+a) xfs in commit 4b8d867ca6e2 ("xfs: don't over-report free space or
+inodes in statvfs")
+b) ext4 in commit f87d3af74193 ("ext4: don't over-report free space
+or inodes in statvfs")
+where statfs can report misleading / incorrect information where
+project quota is enabled, and the free space is less than the
+remaining quota.
+
+This commit will resolve a test failure in generic/762 which tests
+for this bug.
+
+generic/762 - output mismatch (see /share/git/fstests/results//generic/762.out.bad)
+ --- tests/generic/762.out 2025-04-15 10:21:53.371067071 +0800
+ +++ /share/git/fstests/results//generic/762.out.bad 2025-05-13 16:13:37.000000000 +0800
+ @@ -6,8 +6,10 @@
+ root blocks2 is in range
+ dir blocks2 is in range
+ root bavail2 is in range
+ -dir bavail2 is in range
+ +dir bavail2 has value of 1539066
+ +dir bavail2 is NOT in range 304734.87 .. 310891.13
+ root blocks3 is in range
+ ...
+ (Run 'diff -u /share/git/fstests/tests/generic/762.out /share/git/fstests/results//generic/762.out.bad' to see the entire diff)
+
+HINT: You _MAY_ be missing kernel fix:
+ XXXXXXXXXXXXXX xfs: don't over-report free space or inodes in statvfs
+
+Cc: stable@kernel.org
+Fixes: ddc34e328d06 ("f2fs: introduce f2fs_statfs_project")
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/super.c | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 330f89ddb5c8f..f0e83ea56e38c 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1787,26 +1787,32 @@ static int f2fs_statfs_project(struct super_block *sb,
+
+ limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
+ dquot->dq_dqb.dqb_bhardlimit);
+- if (limit)
+- limit >>= sb->s_blocksize_bits;
++ limit >>= sb->s_blocksize_bits;
++
++ if (limit) {
++ uint64_t remaining = 0;
+
+- if (limit && buf->f_blocks > limit) {
+ curblock = (dquot->dq_dqb.dqb_curspace +
+ dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
+- buf->f_blocks = limit;
+- buf->f_bfree = buf->f_bavail =
+- (buf->f_blocks > curblock) ?
+- (buf->f_blocks - curblock) : 0;
++ if (limit > curblock)
++ remaining = limit - curblock;
++
++ buf->f_blocks = min(buf->f_blocks, limit);
++ buf->f_bfree = min(buf->f_bfree, remaining);
++ buf->f_bavail = min(buf->f_bavail, remaining);
+ }
+
+ limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+ dquot->dq_dqb.dqb_ihardlimit);
+
+- if (limit && buf->f_files > limit) {
+- buf->f_files = limit;
+- buf->f_ffree =
+- (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
+- (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
++ if (limit) {
++ uint64_t remaining = 0;
++
++ if (limit > dquot->dq_dqb.dqb_curinodes)
++ remaining = limit - dquot->dq_dqb.dqb_curinodes;
++
++ buf->f_files = min(buf->f_files, limit);
++ buf->f_ffree = min(buf->f_ffree, remaining);
+ }
+
+ spin_unlock(&dquot->dq_dqb_lock);
+--
+2.39.5
+
--- /dev/null
+From bf2cbbc3b494f9b999f554ae7068f36d4a7b9151 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2025 10:31:19 -0600
+Subject: fs/jfs: consolidate sanity checking in dbMount
+
+From: Dave Kleikamp <dave.kleikamp@oracle.com>
+
+[ Upstream commit 0d250b1c52484d489e31df2cf9118b7c4bd49d31 ]
+
+Sanity checks have been added to dbMount as individual if clauses with
+identical error handling. Move these all into one clause.
+
+Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Stable-dep-of: 37bfb464ddca ("jfs: validate AG parameters in dbMount() to prevent crashes")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jfs/jfs_dmap.c | 37 +++++++++----------------------------
+ 1 file changed, 9 insertions(+), 28 deletions(-)
+
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 0e1019382cf51..26e89d0c69b61 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -178,45 +178,26 @@ int dbMount(struct inode *ipbmap)
+ dbmp_le = (struct dbmap_disk *) mp->data;
+ bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
+ bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+-
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
+- bmp->db_l2nbperpage < 0) {
+- err = -EINVAL;
+- goto err_release_metapage;
+- }
+-
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+- if (!bmp->db_numag || bmp->db_numag > MAXAG) {
+- err = -EINVAL;
+- goto err_release_metapage;
+- }
+-
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
+- if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
+- bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
+- err = -EINVAL;
+- goto err_release_metapage;
+- }
+-
+ bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+- if (!bmp->db_agwidth) {
+- err = -EINVAL;
+- goto err_release_metapage;
+- }
+ bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+- if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
+- bmp->db_agl2size < 0) {
+- err = -EINVAL;
+- goto err_release_metapage;
+- }
+
+- if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
++ if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) ||
++ (bmp->db_l2nbperpage < 0) ||
++ !bmp->db_numag || (bmp->db_numag > MAXAG) ||
++ (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
++ (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
++ !bmp->db_agwidth ||
++ (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
++ (bmp->db_agl2size < 0) ||
++ ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+--
+2.39.5
+
--- /dev/null
+From 91d533f7aa683051edfe3188bbe9b1cfb339300a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 May 2025 04:04:21 +0000
+Subject: fuse: fix race between concurrent setattrs from multiple nodes
+
+From: Guang Yuan Wu <gwu@ddn.com>
+
+[ Upstream commit 69efbff69f89c9b2b72c4d82ad8b59706add768a ]
+
+When mounting a user-space filesystem on multiple clients, after
+concurrent ->setattr() calls from different nodes, stale inode
+attributes may be cached on some nodes.
+
+This is caused by fuse_setattr() racing with
+fuse_reverse_inval_inode().
+
+When filesystem server receives setattr request, the client node
+with valid iattr cached will be required to update the fuse_inode's
+attr_version and invalidate the cache by fuse_reverse_inval_inode(),
+and at the next call to ->getattr() they will be fetched from user
+space.
+
+The race scenario is:
+1. client-1 sends setattr (iattr-1) request to server
+2. client-1 receives the reply from server
+3. before client-1 updates iattr-1 to the cached attributes by
+ fuse_change_attributes_common(), server receives another setattr
+ (iattr-2) request from client-2
+4. server requests client-1 to update the inode attr_version and
+ invalidate the cached iattr, and iattr-1 becomes staled
+5. client-2 receives the reply from server, and caches iattr-2
+6. continue with step 2, client-1 invokes
+ fuse_change_attributes_common(), and caches iattr-1
+
+The issue has been observed from concurrent of chmod, chown, or
+truncate, which all invoke ->setattr() call.
+
+The solution is to use fuse_inode's attr_version to check whether
+the attributes have been modified during the setattr request's
+lifetime. If so, mark the attributes as invalid in the function
+fuse_change_attributes_common().
+
+Signed-off-by: Guang Yuan Wu <gwu@ddn.com>
+Reviewed-by: Bernd Schubert <bschubert@ddn.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fuse/dir.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index ff543dc09130e..ce7324d0d9ed1 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1921,6 +1921,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ int err;
+ bool trust_local_cmtime = is_wb;
+ bool fault_blocked = false;
++ u64 attr_version;
+
+ if (!fc->default_permissions)
+ attr->ia_valid |= ATTR_FORCE;
+@@ -2005,6 +2006,8 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
+ inarg.valid |= FATTR_KILL_SUIDGID;
+ }
++
++ attr_version = fuse_get_attr_version(fm->fc);
+ fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
+ err = fuse_simple_request(fm, &args);
+ if (err) {
+@@ -2030,6 +2033,14 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ /* FIXME: clear I_DIRTY_SYNC? */
+ }
+
++ if (fi->attr_version > attr_version) {
++ /*
++ * Apply attributes, for example for fsnotify_change(), but set
++ * attribute timeout to zero.
++ */
++ outarg.attr_valid = outarg.attr_valid_nsec = 0;
++ }
++
+ fuse_change_attributes_common(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
+ fuse_get_cache_mask(inode));
+--
+2.39.5
+
--- /dev/null
+From c12a2fe51c6cbe38e9583c418d0688f562e588c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Apr 2025 11:47:24 +0800
+Subject: hwmon: (pmbus/max34440) Fix support for max34451
+
+From: Alexis Czezar Torreno <alexisczezar.torreno@analog.com>
+
+[ Upstream commit 19932f844f3f51646f762f3eac4744ec3a405064 ]
+
+The max344** family has an issue with some PMBUS address being switched.
+This includes max34451 however version MAX34451-NA6 and later has this
+issue fixed and this commit supports that update.
+
+Signed-off-by: Alexis Czezar Torreno <alexisczezar.torreno@analog.com>
+Link: https://lore.kernel.org/r/20250407-dev_adpm12160-v3-1-9cd3095445c8@analog.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/pmbus/max34440.c | 48 +++++++++++++++++++++++++++++++---
+ 1 file changed, 44 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
+index fe7f6b1b09851..e14be8ebaad30 100644
+--- a/drivers/hwmon/pmbus/max34440.c
++++ b/drivers/hwmon/pmbus/max34440.c
+@@ -34,16 +34,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+ /*
+ * The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
+ * swapped from the standard pmbus spec addresses.
++ * For max34451, version MAX34451ETNA6+ and later has this issue fixed.
+ */
+ #define MAX34440_IOUT_OC_WARN_LIMIT 0x46
+ #define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
+
++#define MAX34451ETNA6_MFR_REV 0x0012
++
+ #define MAX34451_MFR_CHANNEL_CONFIG 0xe4
+ #define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
+
+ struct max34440_data {
+ int id;
+ struct pmbus_driver_info info;
++ u8 iout_oc_warn_limit;
++ u8 iout_oc_fault_limit;
+ };
+
+ #define to_max34440_data(x) container_of(x, struct max34440_data, info)
+@@ -60,11 +65,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
+ switch (reg) {
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase,
+- MAX34440_IOUT_OC_FAULT_LIMIT);
++ data->iout_oc_fault_limit);
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase,
+- MAX34440_IOUT_OC_WARN_LIMIT);
++ data->iout_oc_warn_limit);
+ break;
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+@@ -133,11 +138,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
+
+ switch (reg) {
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
++ ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
+ word);
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
++ ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
+ word);
+ break;
+ case PMBUS_VIRT_RESET_POUT_HISTORY:
+@@ -235,6 +240,25 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
+ */
+
+ int page, rv;
++ bool max34451_na6 = false;
++
++ rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
++ if (rv < 0)
++ return rv;
++
++ if (rv >= MAX34451ETNA6_MFR_REV) {
++ max34451_na6 = true;
++ data->info.format[PSC_VOLTAGE_IN] = direct;
++ data->info.format[PSC_CURRENT_IN] = direct;
++ data->info.m[PSC_VOLTAGE_IN] = 1;
++ data->info.b[PSC_VOLTAGE_IN] = 0;
++ data->info.R[PSC_VOLTAGE_IN] = 3;
++ data->info.m[PSC_CURRENT_IN] = 1;
++ data->info.b[PSC_CURRENT_IN] = 0;
++ data->info.R[PSC_CURRENT_IN] = 2;
++ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
++ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
++ }
+
+ for (page = 0; page < 16; page++) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+@@ -251,16 +275,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
+ case 0x20:
+ data->info.func[page] = PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_STATUS_VOUT;
++
++ if (max34451_na6)
++ data->info.func[page] |= PMBUS_HAVE_VIN |
++ PMBUS_HAVE_STATUS_INPUT;
+ break;
+ case 0x21:
+ data->info.func[page] = PMBUS_HAVE_VOUT;
++
++ if (max34451_na6)
++ data->info.func[page] |= PMBUS_HAVE_VIN;
+ break;
+ case 0x22:
+ data->info.func[page] = PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT;
++
++ if (max34451_na6)
++ data->info.func[page] |= PMBUS_HAVE_IIN |
++ PMBUS_HAVE_STATUS_INPUT;
+ break;
+ case 0x23:
+ data->info.func[page] = PMBUS_HAVE_IOUT;
++
++ if (max34451_na6)
++ data->info.func[page] |= PMBUS_HAVE_IIN;
+ break;
+ default:
+ break;
+@@ -494,6 +532,8 @@ static int max34440_probe(struct i2c_client *client)
+ return -ENOMEM;
+ data->id = i2c_match_id(max34440_id, client)->driver_data;
+ data->info = max34440_info[data->id];
++ data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
++ data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
+
+ if (data->id == max34451) {
+ rv = max34451_set_supported_funcs(client, data);
+--
+2.39.5
+
--- /dev/null
+From 1d8b1e7e9a828e73b7195e24df7db708e82efa9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Apr 2025 22:34:08 +0530
+Subject: iio: adc: ad_sigma_delta: Fix use of uninitialized status_pos
+
+From: Purva Yeshi <purvayeshi550@gmail.com>
+
+[ Upstream commit e5cdb098a3cb165d52282ffc3a6448642953ea13 ]
+
+Fix Smatch-detected issue:
+drivers/iio/adc/ad_sigma_delta.c:604 ad_sd_trigger_handler() error:
+uninitialized symbol 'status_pos'.
+
+The variable `status_pos` was only initialized in specific switch cases
+(1, 2, 3, 4), which could leave it uninitialized if `reg_size` had an
+unexpected value.
+
+Fix by adding a default case to the switch block to catch unexpected
+values of `reg_size`. Use `dev_err_ratelimited()` for error logging and
+`goto irq_handled` instead of returning early.
+
+Signed-off-by: Purva Yeshi <purvayeshi550@gmail.com>
+Link: https://patch.msgid.link/20250410170408.8585-1-purvayeshi550@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/ad_sigma_delta.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index ea4aabd3960a0..3df1d4f6bc959 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -477,6 +477,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
+ * byte set to zero. */
+ ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
+ break;
++
++ default:
++ dev_err_ratelimited(&indio_dev->dev, "Unsupported reg_size: %u\n", reg_size);
++ goto irq_handled;
+ }
+
+ /*
+--
+2.39.5
+
--- /dev/null
+From f7a2a926ea51a64cbe4c678ab180373e38339db5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Apr 2025 11:34:41 +0100
+Subject: iio: pressure: zpa2326: Use aligned_s64 for the timestamp
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+[ Upstream commit 886a446b76afddfad307488e95e87f23a08ffd51 ]
+
+On x86_32 s64 fields are only 32-bit aligned. Hence force the alignment of
+the field and padding in the structure by using aligned_s64 instead.
+
+Reviewed-by: David Lechner <dlechner@baylibre.com>
+Link: https://patch.msgid.link/20250413103443.2420727-19-jic23@kernel.org
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/pressure/zpa2326.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
+index b4c6c7c472569..8fae58db1d639 100644
+--- a/drivers/iio/pressure/zpa2326.c
++++ b/drivers/iio/pressure/zpa2326.c
+@@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
+ struct {
+ u32 pressure;
+ u16 temperature;
+- u64 timestamp;
++ aligned_s64 timestamp;
+ } sample;
+ int err;
+
+--
+2.39.5
+
--- /dev/null
+From 52c45465646cf30f3aedd9601621d625fdbd223c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Mar 2025 11:56:02 +0300
+Subject: jfs: validate AG parameters in dbMount() to prevent crashes
+
+From: Vasiliy Kovalev <kovalev@altlinux.org>
+
+[ Upstream commit 37bfb464ddca87f203071b5bd562cd91ddc0b40a ]
+
+Validate db_agheight, db_agwidth, and db_agstart in dbMount to catch
+corrupted metadata early and avoid undefined behavior in dbAllocAG.
+Limits are derived from L2LPERCTL, LPERCTL/MAXAG, and CTLTREESIZE:
+
+- agheight: 0 to L2LPERCTL/2 (0 to 5) ensures shift
+ (L2LPERCTL - 2*agheight) >= 0.
+- agwidth: 1 to min(LPERCTL/MAXAG, 2^(L2LPERCTL - 2*agheight))
+ ensures agperlev >= 1.
+ - Ranges: 1-8 (agheight 0-3), 1-4 (agheight 4), 1 (agheight 5).
+ - LPERCTL/MAXAG = 1024/128 = 8 limits leaves per AG;
+ 2^(10 - 2*agheight) prevents division to 0.
+- agstart: 0 to CTLTREESIZE-1 - agwidth*(MAXAG-1) keeps ti within
+ stree (size 1365).
+ - Ranges: 0-1237 (agwidth 1), 0-348 (agwidth 8).
+
+UBSAN: shift-out-of-bounds in fs/jfs/jfs_dmap.c:1400:9
+shift exponent -335544310 is negative
+CPU: 0 UID: 0 PID: 5822 Comm: syz-executor130 Not tainted 6.14.0-rc5-syzkaller #0
+Hardware name: Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2025
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
+ ubsan_epilogue lib/ubsan.c:231 [inline]
+ __ubsan_handle_shift_out_of_bounds+0x3c8/0x420 lib/ubsan.c:468
+ dbAllocAG+0x1087/0x10b0 fs/jfs/jfs_dmap.c:1400
+ dbDiscardAG+0x352/0xa20 fs/jfs/jfs_dmap.c:1613
+ jfs_ioc_trim+0x45a/0x6b0 fs/jfs/jfs_discard.c:105
+ jfs_ioctl+0x2cd/0x3e0 fs/jfs/ioctl.c:131
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:906 [inline]
+ __se_sys_ioctl+0xf5/0x170 fs/ioctl.c:892
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Cc: stable@vger.kernel.org
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+fe8264911355151c487f@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=fe8264911355151c487f
+Signed-off-by: Vasiliy Kovalev <kovalev@altlinux.org>
+Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jfs/jfs_dmap.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 26e89d0c69b61..35e063c9f3a42 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -194,7 +194,11 @@ int dbMount(struct inode *ipbmap)
+ !bmp->db_numag || (bmp->db_numag > MAXAG) ||
+ (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
+ (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
+- !bmp->db_agwidth ||
++ (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) ||
++ (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) ||
++ (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) ||
++ (bmp->db_agstart < 0) ||
++ (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) ||
+ (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
+ (bmp->db_agl2size < 0) ||
+ ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
+--
+2.39.5
+
--- /dev/null
+From 4b5479996a9b653f59e7999a69f79b38795b65d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 May 2025 11:23:01 +0900
+Subject: ksmbd: allow a filename to contain special characters on SMB3.1.1
+ posix extension
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit dc3e0f17f74558e8a2fce00608855f050de10230 ]
+
+If the client sends SMB2_CREATE_POSIX_CONTEXT to ksmbd, allow a filename
+to contain special characters.
+
+Reported-by: Philipp Kerling <pkerling@casix.org>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/smb2pdu.c | 53 +++++++++++++++++++++--------------------
+ 1 file changed, 27 insertions(+), 26 deletions(-)
+
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6537ffd2b9651..ef921a370cb98 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2871,7 +2871,7 @@ int smb2_open(struct ksmbd_work *work)
+ int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
+ int rc = 0;
+ int contxt_cnt = 0, query_disk_id = 0;
+- int maximal_access_ctxt = 0, posix_ctxt = 0;
++ bool maximal_access_ctxt = false, posix_ctxt = false;
+ int s_type = 0;
+ int next_off = 0;
+ char *name = NULL;
+@@ -2898,6 +2898,27 @@ int smb2_open(struct ksmbd_work *work)
+ return create_smb2_pipe(work);
+ }
+
++ if (req->CreateContextsOffset && tcon->posix_extensions) {
++ context = smb2_find_context_vals(req, SMB2_CREATE_TAG_POSIX, 16);
++ if (IS_ERR(context)) {
++ rc = PTR_ERR(context);
++ goto err_out2;
++ } else if (context) {
++ struct create_posix *posix = (struct create_posix *)context;
++
++ if (le16_to_cpu(context->DataOffset) +
++ le32_to_cpu(context->DataLength) <
++ sizeof(struct create_posix) - 4) {
++ rc = -EINVAL;
++ goto err_out2;
++ }
++ ksmbd_debug(SMB, "get posix context\n");
++
++ posix_mode = le32_to_cpu(posix->Mode);
++ posix_ctxt = true;
++ }
++ }
++
+ if (req->NameLength) {
+ name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
+ le16_to_cpu(req->NameLength),
+@@ -2920,9 +2941,11 @@ int smb2_open(struct ksmbd_work *work)
+ goto err_out2;
+ }
+
+- rc = ksmbd_validate_filename(name);
+- if (rc < 0)
+- goto err_out2;
++ if (posix_ctxt == false) {
++ rc = ksmbd_validate_filename(name);
++ if (rc < 0)
++ goto err_out2;
++ }
+
+ if (ksmbd_share_veto_filename(share, name)) {
+ rc = -ENOENT;
+@@ -3080,28 +3103,6 @@ int smb2_open(struct ksmbd_work *work)
+ rc = -EBADF;
+ goto err_out2;
+ }
+-
+- if (tcon->posix_extensions) {
+- context = smb2_find_context_vals(req,
+- SMB2_CREATE_TAG_POSIX, 16);
+- if (IS_ERR(context)) {
+- rc = PTR_ERR(context);
+- goto err_out2;
+- } else if (context) {
+- struct create_posix *posix =
+- (struct create_posix *)context;
+- if (le16_to_cpu(context->DataOffset) +
+- le32_to_cpu(context->DataLength) <
+- sizeof(struct create_posix) - 4) {
+- rc = -EINVAL;
+- goto err_out2;
+- }
+- ksmbd_debug(SMB, "get posix context\n");
+-
+- posix_mode = le32_to_cpu(posix->Mode);
+- posix_ctxt = 1;
+- }
+- }
+ }
+
+ if (ksmbd_override_fsids(work)) {
+--
+2.39.5
+
--- /dev/null
+From 3f52db6d80a01554733f95040f6974fb00518c37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 May 2025 09:02:29 +0900
+Subject: ksmbd: provide zero as a unique ID to the Mac client
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 571781eb7ffefa65b0e922c8031e42b4411a40d4 ]
+
+The Mac SMB client code seems to expect the on-disk file identifier
+to have the semantics of HFS+ Catalog Node Identifier (CNID).
+ksmbd provides the inode number as a unique ID to the client,
+but in the case of subvolumes of btrfs, there are cases where different
+files have the same inode number, so the Mac SMB client treats it
+as an error. There is a report that a similar problem occurs
+when the share is ZFS.
+Returning UniqueId of zero will make the Mac client to stop using and
+trusting the file id returned from the server.
+
+Reported-by: Justin Turner Arthur <justinarthur@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/connection.h | 1 +
+ fs/smb/server/smb2pdu.c | 19 +++++++++++++++++--
+ fs/smb/server/smb2pdu.h | 3 +++
+ 3 files changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 572102098c108..dd3e0e3f7bf04 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -108,6 +108,7 @@ struct ksmbd_conn {
+ __le16 signing_algorithm;
+ bool binding;
+ atomic_t refcnt;
++ bool is_aapl;
+ };
+
+ struct ksmbd_conn_ops {
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index ef921a370cb98..5d2324c09a070 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -3535,6 +3535,15 @@ int smb2_open(struct ksmbd_work *work)
+ ksmbd_debug(SMB, "get query on disk id context\n");
+ query_disk_id = 1;
+ }
++
++ if (conn->is_aapl == false) {
++ context = smb2_find_context_vals(req, SMB2_CREATE_AAPL, 4);
++ if (IS_ERR(context)) {
++ rc = PTR_ERR(context);
++ goto err_out1;
++ } else if (context)
++ conn->is_aapl = true;
++ }
+ }
+
+ rc = ksmbd_vfs_getattr(&path, &stat);
+@@ -3974,7 +3983,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+ if (dinfo->EaSize)
+ dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+ dinfo->Reserved = 0;
+- dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
++ if (conn->is_aapl)
++ dinfo->UniqueId = 0;
++ else
++ dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ if (d_info->hide_dot_file && d_info->name[0] == '.')
+ dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+ memcpy(dinfo->FileName, conv_name, conv_len);
+@@ -3991,7 +4003,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+ smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+ if (fibdinfo->EaSize)
+ fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+- fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
++ if (conn->is_aapl)
++ fibdinfo->UniqueId = 0;
++ else
++ fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ fibdinfo->ShortNameLength = 0;
+ fibdinfo->Reserved = 0;
+ fibdinfo->Reserved2 = cpu_to_le16(0);
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index 17a0b18a8406b..16ae8a10490be 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -63,6 +63,9 @@ struct preauth_integrity_info {
+
+ #define SMB2_SESSION_TIMEOUT (10 * HZ)
+
++/* Apple Defined Contexts */
++#define SMB2_CREATE_AAPL "AAPL"
++
+ struct create_durable_req_v2 {
+ struct create_context_hdr ccontext;
+ __u8 Name[8];
+--
+2.39.5
+
--- /dev/null
+From 77e3135a6ae18c60161dcc9ca4d1b4bb0c83ee66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Apr 2025 20:40:36 +0200
+Subject: leds: multicolor: Fix intensity setting while SW blinking
+
+From: Sven Schwermer <sven.schwermer@disruptive-technologies.com>
+
+[ Upstream commit e35ca991a777ef513040cbb36bc8245a031a2633 ]
+
+When writing to the multi_intensity file, don't unconditionally call
+led_set_brightness. By only doing this if blinking is inactive we
+prevent blinking from stopping if the blinking is in its off phase while
+the file is written.
+
+Instead, if blinking is active, the changed intensity values are applied
+upon the next blink. This is consistent with changing the brightness on
+monochrome LEDs with active blinking.
+
+Suggested-by: Jacek Anaszewski <jacek.anaszewski@gmail.com>
+Acked-by: Jacek Anaszewski <jacek.anaszewski@gmail.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Reviewed-by: Tobias Deiminger <tobias.deiminger@linutronix.de>
+Tested-by: Sven Schuchmann <schuchmann@schleissheimer.de>
+Signed-off-by: Sven Schwermer <sven.schwermer@disruptive-technologies.com>
+Link: https://lore.kernel.org/r/20250404184043.227116-1-sven@svenschwermer.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/leds/led-class-multicolor.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
+index 30c1ecb5f361e..c707be97049b7 100644
+--- a/drivers/leds/led-class-multicolor.c
++++ b/drivers/leds/led-class-multicolor.c
+@@ -61,7 +61,8 @@ static ssize_t multi_intensity_store(struct device *dev,
+ for (i = 0; i < mcled_cdev->num_colors; i++)
+ mcled_cdev->subled_info[i].intensity = intensity_value[i];
+
+- led_set_brightness(led_cdev, led_cdev->brightness);
++ if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
++ led_set_brightness(led_cdev, led_cdev->brightness);
+ ret = size;
+ err_out:
+ mutex_unlock(&led_cdev->led_access);
+--
+2.39.5
+
--- /dev/null
+From df9fc61118f7c9c4f9344f5138040d5f6537db77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 21:14:10 +0800
+Subject: mailbox: Not protect module_put with spin_lock_irqsave
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit dddbd233e67e792bb0a3f9694a4707e6be29b2c6 ]
+
+&chan->lock is not supposed to protect 'chan->mbox'.
+And in __mbox_bind_client, try_module_get is also not protected
+by &chan->lock. So move module_put out of the lock protected
+region.
+
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index cb174e788a96c..92c2fb618c8e1 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -490,8 +490,8 @@ void mbox_free_channel(struct mbox_chan *chan)
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+
+- module_put(chan->mbox->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flags);
++ module_put(chan->mbox->dev->driver->owner);
+ }
+ EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+--
+2.39.5
+
--- /dev/null
+From 578d46c81bd25d2d814419af3d1f86bff8aec2ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 May 2025 14:13:10 +0800
+Subject: md/md-bitmap: fix dm-raid max_write_behind setting
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit 2afe17794cfed5f80295b1b9facd66e6f65e5002 ]
+
+It's supposed to be COUNTER_MAX / 2, not COUNTER_MAX.
+
+Link: https://lore.kernel.org/linux-raid/20250524061320.370630-14-yukuai1@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md-bitmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index fbb4f57010da6..c12359fd3a420 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -787,7 +787,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
+ * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
+ */
+ write_behind = bitmap->mddev->bitmap_info.max_write_behind;
+- if (write_behind > COUNTER_MAX)
++ if (write_behind > COUNTER_MAX / 2)
+ write_behind = COUNTER_MAX / 2;
+ sb->write_behind = cpu_to_le32(write_behind);
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+--
+2.39.5
+
--- /dev/null
+From 3d6951fcf826d70ef842047c22cc23eea1d7365c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Apr 2025 21:50:11 +0200
+Subject: mfd: max14577: Fix wakeup source leaks on device unbind
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit d905d06e64b0eb3da43af6186c132f5282197998 ]
+
+Device can be unbound, so driver must also release memory for the wakeup
+source.
+
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20250406-mfd-device-wakekup-leak-v1-3-318e14bdba0a@linaro.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/max14577.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
+index 6fce79ec2dc64..7e7e8af9af224 100644
+--- a/drivers/mfd/max14577.c
++++ b/drivers/mfd/max14577.c
+@@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c)
+ {
+ struct max14577 *max14577 = i2c_get_clientdata(i2c);
+
++ device_init_wakeup(max14577->dev, false);
+ mfd_remove_devices(max14577->dev);
+ regmap_del_irq_chip(max14577->irq, max14577->irq_data);
+ if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
+--
+2.39.5
+
--- /dev/null
+From 9702743a0d90fdc8b5b64bd4f72bb3a92984e28f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Mar 2025 20:05:11 -0500
+Subject: misc: tps6594-pfsm: Add NULL pointer check in tps6594_pfsm_probe()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit a99b598d836c9c6411110c70a2da134c78d96e67 ]
+
+The returned value, pfsm->miscdev.name, from devm_kasprintf()
+could be NULL.
+A pointer check is added to prevent potential NULL pointer dereference.
+This is similar to the fix in commit 3027e7b15b02
+("ice: Fix some null pointer dereference issues in ice_ptp.c").
+
+This issue is found by our static analysis tool.
+
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Link: https://lore.kernel.org/r/20250311010511.1028269-1-chenyuan0y@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/misc/tps6594-pfsm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c
+index 9bcca1856bfee..db3d6a21a2122 100644
+--- a/drivers/misc/tps6594-pfsm.c
++++ b/drivers/misc/tps6594-pfsm.c
+@@ -281,6 +281,9 @@ static int tps6594_pfsm_probe(struct platform_device *pdev)
+ pfsm->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x",
+ tps->chip_id, tps->reg);
++ if (!pfsm->miscdev.name)
++ return -ENOMEM;
++
+ pfsm->miscdev.fops = &tps6594_pfsm_fops;
+ pfsm->miscdev.parent = dev->parent;
+ pfsm->chip_id = tps->chip_id;
+--
+2.39.5
+
--- /dev/null
+From 2ff08d40b2afb399ec5eda169466921881f4f16e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 4 May 2025 20:57:04 +0800
+Subject: NFSv4: Always set NLINK even if the server doesn't support it
+
+From: Han Young <hanyang.tony@bytedance.com>
+
+[ Upstream commit 3a3065352f73381d3a1aa0ccab44aec3a5a9b365 ]
+
+fattr4_numlinks is a recommended attribute, so the client should emulate
+it even if the server doesn't support it. In decode_attr_nlink function
+in nfs4xdr.c, nlink is initialized to 1. However, this default value
+isn't set to the inode due to the check in nfs_fhget.
+
+So if the server doesn't support numlinks, inode's nlink will be zero,
+the mount will fail with error "Stale file handle". Set the nlink to 1
+if the server doesn't support it.
+
+Signed-off-by: Han Young <hanyang.tony@bytedance.com>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/inode.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 330273cf94531..9f10771331007 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -557,6 +557,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
+ set_nlink(inode, fattr->nlink);
+ else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
++ else
++ set_nlink(inode, 1);
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER)
+ inode->i_uid = fattr->uid;
+ else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
+--
+2.39.5
+
--- /dev/null
+From 8e9ada835ff1fa3e8e2c1a11609934ca5a742074 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Apr 2025 11:23:38 -0400
+Subject: NFSv4: xattr handlers should check for absent nfs filehandles
+
+From: Scott Mayhew <smayhew@redhat.com>
+
+[ Upstream commit 6e9a2f8dbe93c8004c2af2c0158888628b7ca034 ]
+
+The nfs inodes for referral anchors that have not yet been followed have
+their filehandles zeroed out.
+
+Attempting to call getxattr() on one of these will cause the nfs client
+to send a GETATTR to the nfs server with the preceding PUTFH sans
+filehandle. The server will reply NFS4ERR_NOFILEHANDLE, leading to -EIO
+being returned to the application.
+
+For example:
+
+$ strace -e trace=getxattr getfattr -n system.nfs4_acl /mnt/t/ref
+getxattr("/mnt/t/ref", "system.nfs4_acl", NULL, 0) = -1 EIO (Input/output error)
+/mnt/t/ref: system.nfs4_acl: Input/output error
++++ exited with 1 +++
+
+Have the xattr handlers return -ENODATA instead.
+
+Signed-off-by: Scott Mayhew <smayhew@redhat.com>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 413f8be7106cc..77b239b10d418 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6174,6 +6174,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
+ struct nfs_server *server = NFS_SERVER(inode);
+ int ret;
+
++ if (unlikely(NFS_FH(inode)->size == 0))
++ return -ENODATA;
+ if (!nfs4_server_supports_acls(server, type))
+ return -EOPNOTSUPP;
+ ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
+@@ -6248,6 +6250,9 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
+ {
+ struct nfs4_exception exception = { };
+ int err;
++
++ if (unlikely(NFS_FH(inode)->size == 0))
++ return -ENODATA;
+ do {
+ err = __nfs4_proc_set_acl(inode, buf, buflen, type);
+ trace_nfs4_set_acl(inode, err);
+--
+2.39.5
+
--- /dev/null
+From e34e7e8c37dabafeb7118196742bb5e603ca8261 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 14:09:21 -0400
+Subject: NFSv4.2: fix listxattr to return selinux security label
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+[ Upstream commit 243fea134633ba3d64aceb4c16129c59541ea2c6 ]
+
+Currently, when NFS is queried for all the labels present on the
+file via a command example "getfattr -d -m . /mnt/testfile", it
+does not return the security label. Yet when asked specifically for
+the label (getfattr -n security.selinux) it will be returned.
+Include the security label when all attributes are queried.
+
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 57d49e874f51f..9832e27b5d29b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10814,7 +10814,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+- ssize_t error, error2, error3;
++ ssize_t error, error2, error3, error4;
+ size_t left = size;
+
+ error = generic_listxattr(dentry, list, left);
+@@ -10837,8 +10837,16 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
+ if (error3 < 0)
+ return error3;
++ if (list) {
++ list += error3;
++ left -= error3;
++ }
++
++ error4 = security_inode_listsecurity(d_inode(dentry), list, left);
++ if (error4 < 0)
++ return error4;
+
+- error += error2 + error3;
++ error += error2 + error3 + error4;
+ if (size && error > size)
+ return -ERANGE;
+ return error;
+--
+2.39.5
+
--- /dev/null
+From 02b9fbaf0a9e195e8feaf24aad3738b999c19f88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 15:49:19 +0300
+Subject: NFSv4.2: fix setattr caching of TIME_[MODIFY|ACCESS]_SET when
+ timestamps are delegated
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit aba41e90aadeca8d4656f90639aa5f91ce564f1c ]
+
+nfs_setattr will flush all pending writes before updating a file time
+attributes. However when the client holds delegated timestamps, it can
+update its timestamps locally as it is the authority for the file
+times attributes. The client will later set the file attributes by
+adding a setattr to the delegreturn compound updating the server time
+attributes.
+
+Fix nfs_setattr to avoid flushing pending writes when the file time
+attributes are delegated and the mtime/atime are set to a fixed
+timestamp (ATTR_[MODIFY|ACCESS]_SET. Also, when sending the setattr
+procedure over the wire, we need to clear the correct attribute bits
+from the bitmask.
+
+I was able to measure a noticeable speedup when measuring untar performance.
+Test: $ time tar xzf ~/dir.tgz
+Baseline: 1m13.072s
+Patched: 0m49.038s
+
+Which is more than 30% latency improvement.
+
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/inode.c | 49 +++++++++++++++++++++++++++++++++++++++++++----
+ fs/nfs/nfs4proc.c | 8 ++++----
+ 2 files changed, 49 insertions(+), 8 deletions(-)
+
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 9f10771331007..16607b24ab9c1 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -635,6 +635,34 @@ nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr)
+ }
+ }
+
++static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr)
++{
++ unsigned int cache_flags = 0;
++
++ if (attr->ia_valid & ATTR_MTIME_SET) {
++ struct timespec64 ctime = inode_get_ctime(inode);
++ struct timespec64 mtime = inode_get_mtime(inode);
++ struct timespec64 now;
++ int updated = 0;
++
++ now = inode_set_ctime_current(inode);
++ if (!timespec64_equal(&now, &ctime))
++ updated |= S_CTIME;
++
++ inode_set_mtime_to_ts(inode, attr->ia_mtime);
++ if (!timespec64_equal(&now, &mtime))
++ updated |= S_MTIME;
++
++ inode_maybe_inc_iversion(inode, updated);
++ cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
++ }
++ if (attr->ia_valid & ATTR_ATIME_SET) {
++ inode_set_atime_to_ts(inode, attr->ia_atime);
++ cache_flags |= NFS_INO_INVALID_ATIME;
++ }
++ NFS_I(inode)->cache_validity &= ~cache_flags;
++}
++
+ static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid)
+ {
+ enum file_time_flags time_flags = 0;
+@@ -703,14 +731,27 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+
+ if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
+ spin_lock(&inode->i_lock);
+- nfs_update_timestamps(inode, attr->ia_valid);
++ if (attr->ia_valid & ATTR_MTIME_SET) {
++ nfs_set_timestamps_to_ts(inode, attr);
++ attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
++ ATTR_ATIME|ATTR_ATIME_SET);
++ } else {
++ nfs_update_timestamps(inode, attr->ia_valid);
++ attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
++ }
+ spin_unlock(&inode->i_lock);
+- attr->ia_valid &= ~(ATTR_MTIME | ATTR_ATIME);
+ } else if (nfs_have_delegated_atime(inode) &&
+ attr->ia_valid & ATTR_ATIME &&
+ !(attr->ia_valid & ATTR_MTIME)) {
+- nfs_update_delegated_atime(inode);
+- attr->ia_valid &= ~ATTR_ATIME;
++ if (attr->ia_valid & ATTR_ATIME_SET) {
++ spin_lock(&inode->i_lock);
++ nfs_set_timestamps_to_ts(inode, attr);
++ spin_unlock(&inode->i_lock);
++ attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
++ } else {
++ nfs_update_delegated_atime(inode);
++ attr->ia_valid &= ~ATTR_ATIME;
++ }
+ }
+
+ /* Optimization: if the end result is no change, don't RPC */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 9832e27b5d29b..413f8be7106cc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -313,14 +313,14 @@ static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
+
+ if (nfs_have_delegated_mtime(inode)) {
+ if (!(cache_validity & NFS_INO_INVALID_ATIME))
+- dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
++ dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
+ if (!(cache_validity & NFS_INO_INVALID_MTIME))
+- dst[1] &= ~FATTR4_WORD1_TIME_MODIFY;
++ dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
+ if (!(cache_validity & NFS_INO_INVALID_CTIME))
+- dst[1] &= ~FATTR4_WORD1_TIME_METADATA;
++ dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
+ } else if (nfs_have_delegated_atime(inode)) {
+ if (!(cache_validity & NFS_INO_INVALID_ATIME))
+- dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
++ dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
+ }
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 1e4a83cde8d2ad254634003271bd759a498a3776 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 08:45:34 +0200
+Subject: nvme-tcp: fix I/O stalls on congested sockets
+
+From: Hannes Reinecke <hare@kernel.org>
+
+[ Upstream commit f42d4796ee100fade86086d1cf98537fb4d326c8 ]
+
+When the socket is busy processing nvme_tcp_try_recv() might return
+-EAGAIN, but this doesn't automatically imply that the sending side is
+blocked, too. So check if there are pending requests once
+nvme_tcp_try_recv() returns -EAGAIN and continue with the sending loop
+to avoid I/O stalls.
+
+Signed-off-by: Hannes Reinecke <hare@kernel.org>
+Acked-by: Chris Leech <cleech@redhat.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 4cc72be28c731..13ede6e309092 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1349,7 +1349,7 @@ static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
+ queue->nr_cqe = 0;
+ consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
+ release_sock(sk);
+- return consumed;
++ return consumed == -EAGAIN ? 0 : consumed;
+ }
+
+ static void nvme_tcp_io_work(struct work_struct *w)
+@@ -1377,6 +1377,11 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ else if (unlikely(result < 0))
+ return;
+
++ /* did we get some space after spending time in recv? */
++ if (nvme_tcp_queue_has_pending(queue) &&
++ sk_stream_is_writeable(queue->sock->sk))
++ pending = true;
++
+ if (!pending || !queue->rd_enabled)
+ return;
+
+--
+2.39.5
+
--- /dev/null
+From dc5258bd85e39935ae481a4c4abb3d949dccd03c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 08:45:33 +0200
+Subject: nvme-tcp: sanitize request list handling
+
+From: Hannes Reinecke <hare@kernel.org>
+
+[ Upstream commit 0bf04c874fcb1ae46a863034296e4b33d8fbd66c ]
+
+Validate the request in nvme_tcp_handle_r2t() to ensure it's not part of
+any list, otherwise a malicious R2T PDU might inject a loop in request
+list processing.
+
+Signed-off-by: Hannes Reinecke <hare@kernel.org>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/tcp.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 13ede6e309092..25e486e6e8054 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -453,7 +453,8 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
+ return NULL;
+ }
+
+- list_del(&req->entry);
++ list_del_init(&req->entry);
++ init_llist_node(&req->lentry);
+ return req;
+ }
+
+@@ -561,6 +562,8 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
+ req->queue = queue;
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
+ nvme_req(rq)->cmd = &pdu->cmd;
++ init_llist_node(&req->lentry);
++ INIT_LIST_HEAD(&req->entry);
+
+ return 0;
+ }
+@@ -765,6 +768,14 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
+ return -EPROTO;
+ }
+
++ if (llist_on_list(&req->lentry) ||
++ !list_empty(&req->entry)) {
++ dev_err(queue->ctrl->ctrl.device,
++ "req %d unexpected r2t while processing request\n",
++ rq->tag);
++ return -EPROTO;
++ }
++
+ req->pdu_len = 0;
+ req->h2cdata_left = r2t_length;
+ req->h2cdata_offset = r2t_offset;
+@@ -2599,6 +2610,8 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
+ ctrl->async_req.offset = 0;
+ ctrl->async_req.curr_bio = NULL;
+ ctrl->async_req.data_len = 0;
++ init_llist_node(&ctrl->async_req.lentry);
++ INIT_LIST_HEAD(&ctrl->async_req.entry);
+
+ nvme_tcp_queue_request(&ctrl->async_req, true, true);
+ }
+--
+2.39.5
+
--- /dev/null
+From 442b8f5e2fda2e9cdffc676c4c34047e89c2a45e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 16:15:19 -0700
+Subject: ovl: Check for NULL d_inode() in ovl_dentry_upper()
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8a39f1c870e9d6fbac5638f3a42a6a6363829c49 ]
+
+In ovl_path_type() and ovl_is_metacopy_dentry() GCC notices that it is
+possible for OVL_E() to return NULL (which implies that d_inode(dentry)
+may be NULL). This would result in out of bounds reads via container_of(),
+seen with GCC 15's -Warray-bounds -fdiagnostics-details. For example:
+
+In file included from arch/x86/include/generated/asm/rwonce.h:1,
+ from include/linux/compiler.h:339,
+ from include/linux/export.h:5,
+ from include/linux/linkage.h:7,
+ from include/linux/fs.h:5,
+ from fs/overlayfs/util.c:7:
+In function 'ovl_upperdentry_dereference',
+ inlined from 'ovl_dentry_upper' at ../fs/overlayfs/util.c:305:9,
+ inlined from 'ovl_path_type' at ../fs/overlayfs/util.c:216:6:
+include/asm-generic/rwonce.h:44:26: error: array subscript 0 is outside array bounds of 'struct inode[7486503276667837]' [-Werror=array-bounds=]
+ 44 | #define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+ | ~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+include/asm-generic/rwonce.h:50:9: note: in expansion of macro '__READ_ONCE'
+ 50 | __READ_ONCE(x); \
+ | ^~~~~~~~~~~
+fs/overlayfs/ovl_entry.h:195:16: note: in expansion of macro 'READ_ONCE'
+ 195 | return READ_ONCE(oi->__upperdentry);
+ | ^~~~~~~~~
+ 'ovl_path_type': event 1
+ 185 | return inode ? OVL_I(inode)->oe : NULL;
+ 'ovl_path_type': event 2
+
+Avoid this by allowing ovl_dentry_upper() to return NULL if d_inode() is
+NULL, as that means the problematic dereferencing can never be reached.
+Note that this fixes the over-eager compiler warning in an effort to
+being able to enable -Warray-bounds globally. There is no known
+behavioral bug here.
+
+Suggested-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/overlayfs/util.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 8f080046c59d9..99571de665dde 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -300,7 +300,9 @@ enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path)
+
+ struct dentry *ovl_dentry_upper(struct dentry *dentry)
+ {
+- return ovl_upperdentry_dereference(OVL_I(d_inode(dentry)));
++ struct inode *inode = d_inode(dentry);
++
++ return inode ? ovl_upperdentry_dereference(OVL_I(inode)) : NULL;
+ }
+
+ struct dentry *ovl_dentry_lower(struct dentry *dentry)
+--
+2.39.5
+
--- /dev/null
+From 3badba8be815c6d68b6306268011566a0fc486f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Apr 2025 10:17:08 +0100
+Subject: PCI: apple: Fix missing OF node reference in apple_pcie_setup_port
+
+From: Hector Martin <marcan@marcan.st>
+
+[ Upstream commit 7fa9fbf39116b061f8a41cd84f1884c545f322c4 ]
+
+In the success path, we hang onto a reference to the node, so make sure
+to grab one. The caller iterator puts our borrowed reference when we
+return.
+
+Signed-off-by: Hector Martin <marcan@marcan.st>
+Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Tested-by: Janne Grunau <j@jannau.net>
+Reviewed-by: Rob Herring (Arm) <robh@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+Link: https://patch.msgid.link/20250401091713.2765724-9-maz@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pcie-apple.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index ddc65368e77d1..7f02fe198d41c 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -585,6 +585,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
+ list_add_tail(&port->entry, &pcie->ports);
+ init_completion(&pcie->event);
+
++ /* In the success path, we keep a reference to np around */
++ of_node_get(np);
++
+ ret = apple_pcie_port_register_irqs(port);
+ WARN_ON(ret);
+
+--
+2.39.5
+
--- /dev/null
+From f914e8da2c4c5fa64119a668410c760678252ee7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Apr 2025 10:17:01 +0100
+Subject: PCI: apple: Set only available ports up
+
+From: Janne Grunau <j@jannau.net>
+
+[ Upstream commit 751bec089c4eed486578994abd2c5395f08d0302 ]
+
+Iterating over disabled ports results in of_irq_parse_raw() parsing
+the wrong "interrupt-map" entries, as it takes the status of the node
+into account.
+
+This became apparent after disabling unused PCIe ports in the Apple
+Silicon device trees instead of deleting them.
+
+Switching from for_each_child_of_node_scoped() to
+for_each_available_child_of_node_scoped() solves this issue.
+
+Fixes: 1e33888fbe44 ("PCI: apple: Add initial hardware bring-up")
+Fixes: a0189fdfb73d ("arm64: dts: apple: t8103: Disable unused PCIe ports")
+Signed-off-by: Janne Grunau <j@jannau.net>
+Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Tested-by: Janne Grunau <j@jannau.net>
+Reviewed-by: Rob Herring (Arm) <robh@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/asahi/20230214-apple_dts_pcie_disable_unused-v1-0-5ea0d3ddcde3@jannau.net/
+Link: https://lore.kernel.org/asahi/1ea2107a-bb86-8c22-0bbc-82c453ab08ce@linaro.org/
+Link: https://patch.msgid.link/20250401091713.2765724-2-maz@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pcie-apple.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index 43280a366266b..16725f9536f65 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -789,7 +789,7 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ if (ret)
+ return ret;
+
+- for_each_child_of_node_scoped(dev->of_node, of_port) {
++ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ ret = apple_pcie_setup_port(pcie, of_port);
+ if (ret) {
+ dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+--
+2.39.5
+
--- /dev/null
+From 3d327b96905ac516594d60913bbf1b6e8a5ef545 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Aug 2024 12:04:12 +0800
+Subject: PCI: apple: Use helper function for_each_child_of_node_scoped()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhang Zekun <zhangzekun11@huawei.com>
+
+[ Upstream commit f60b4e06a945f25d463ae065c6e41c6e24faee0a ]
+
+The for_each_available_child_of_node_scoped() helper provides
+a scope-based clean-up functionality to put the device_node
+automatically, and as such, there is no need to call of_node_put()
+directly.
+
+Thus, use this helper to simplify the code.
+
+Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20240831040413.126417-6-zhangzekun11@huawei.com
+[kwilczynski: commit log]
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Stable-dep-of: 751bec089c4e ("PCI: apple: Set only available ports up")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pcie-apple.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index 7f02fe198d41c..43280a366266b 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -767,7 +767,6 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ {
+ struct device *dev = cfg->parent;
+ struct platform_device *platform = to_platform_device(dev);
+- struct device_node *of_port;
+ struct apple_pcie *pcie;
+ int ret;
+
+@@ -790,11 +789,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ if (ret)
+ return ret;
+
+- for_each_child_of_node(dev->of_node, of_port) {
++ for_each_child_of_node_scoped(dev->of_node, of_port) {
+ ret = apple_pcie_setup_port(pcie, of_port);
+ if (ret) {
+ dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+- of_node_put(of_port);
+ return ret;
+ }
+ }
+--
+2.39.5
+
--- /dev/null
+From c21c5fd8c527ab9d26a10883b27fb052722cc438 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Apr 2025 18:36:23 +0800
+Subject: PCI: dwc: Make link training more robust by setting
+ PORT_LOGIC_LINK_WIDTH to one lane
+
+From: Wenbin Yao <quic_wenbyao@quicinc.com>
+
+[ Upstream commit af3c6eacce0c464f28fe0e3d365b3860aba07931 ]
+
+As per DWC PCIe registers description 4.30a, section 1.13.43, NUM_OF_LANES
+named as PORT_LOGIC_LINK_WIDTH in PCIe DWC driver, is referred to as the
+"Predetermined Number of Lanes" in PCIe r6.0, sec 4.2.7.2.1, which explains
+the conditions required to enter Polling.Configuration:
+
+ Next state is Polling.Configuration after at least 1024 TS1 Ordered Sets
+ were transmitted, and all Lanes that detected a Receiver during Detect
+ receive eight consecutive training sequences ...
+
+ Otherwise, after a 24 ms timeout the next state is:
+
+ Polling.Configuration if,
+
+ (i) Any Lane, which detected a Receiver during Detect, received eight
+ consecutive training sequences ... and a minimum of 1024 TS1 Ordered
+ Sets are transmitted after receiving one TS1 or TS2 Ordered Set.
+
+ And
+
+ (ii) At least a predetermined set of Lanes that detected a Receiver
+ during Detect have detected an exit from Electrical Idle at least
+ once since entering Polling.Active.
+
+ Note: This may prevent one or more bad Receivers or Transmitters
+ from holding up a valid Link from being configured, and allow for
+ additional training in Polling.Configuration. The exact set of
+ predetermined Lanes is implementation specific.
+
+ Note: Any Lane that receives eight consecutive TS1 or TS2 Ordered
+ Sets should have detected an exit from Electrical Idle at least
+ once since entering Polling.Active.
+
+In a PCIe link supporting multiple lanes, if PORT_LOGIC_LINK_WIDTH is set
+to lane width the hardware supports, all lanes that detect a receiver
+during the Detect phase must receive eight consecutive training sequences.
+Otherwise, LTSSM will not enter Polling.Configuration and link training
+will fail.
+
+Therefore, always set PORT_LOGIC_LINK_WIDTH to 1, regardless of the number
+of lanes the port actually supports, to make link up more robust. This
+setting will not affect the intended link width if all lanes are
+functional. Additionally, the link can still be established with at least
+one lane if other lanes are faulty.
+
+Co-developed-by: Qiang Yu <quic_qianyu@quicinc.com>
+Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
+Signed-off-by: Wenbin Yao <quic_wenbyao@quicinc.com>
+[mani: subject change]
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+[bhelgaas: update PCIe spec citation, format quote]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Niklas Cassel <cassel@kernel.org>
+Link: https://patch.msgid.link/20250422103623.462277-1-quic_wenbyao@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 6d6cbc8b5b2c6..d40afe74ddd1a 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -752,22 +752,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+ /* Set link width speed control register */
+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ switch (num_lanes) {
+ case 1:
+ plc |= PORT_LINK_MODE_1_LANES;
+- lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ plc |= PORT_LINK_MODE_2_LANES;
+- lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ plc |= PORT_LINK_MODE_4_LANES;
+- lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ case 8:
+ plc |= PORT_LINK_MODE_8_LANES;
+- lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+--
+2.39.5
+
--- /dev/null
+From 438490a88a92b92bba4b19187d5d7b50f6e8bf5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Apr 2025 16:13:11 +0800
+Subject: PCI: imx6: Add workaround for errata ERR051624
+
+From: Richard Zhu <hongxing.zhu@nxp.com>
+
+[ Upstream commit ce0c43e855c7f652b6351110aaaabf9b521debd7 ]
+
+ERR051624: The Controller Without Vaux Cannot Exit L23 Ready Through Beacon
+or PERST# De-assertion
+
+When the auxiliary power is not available, the controller cannot exit from
+L23 Ready with beacon or PERST# de-assertion when main power is not
+removed. So the workaround is to set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+
+This workaround is required irrespective of whether Vaux is supplied to the
+link partner or not.
+
+Signed-off-by: Richard Zhu <hongxing.zhu@nxp.com>
+[mani: subject and description rewording]
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://patch.msgid.link/20250416081314.3929794-5-hongxing.zhu@nxp.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pci-imx6.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index ad3028b755d16..3b24fed3177de 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -48,6 +48,8 @@
+ #define IMX95_PCIE_SS_RW_REG_0 0xf0
+ #define IMX95_PCIE_REF_CLKEN BIT(23)
+ #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
++#define IMX95_PCIE_SS_RW_REG_1 0xf4
++#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
+
+ #define IMX95_PE0_GEN_CTRL_1 0x1050
+ #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+@@ -206,6 +208,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
+
+ static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
+ {
++ /*
++ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
++ * Through Beacon or PERST# De-assertion
++ *
++ * When the auxiliary power is not available, the controller
++ * cannot exit from L23 Ready with beacon or PERST# de-assertion
++ * when main power is not removed.
++ *
++ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
++ */
++ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
++ IMX95_PCIE_SYS_AUX_PWR_DET);
++
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+--
+2.39.5
+
--- /dev/null
+From 237dff4781fcf3217ce29c5031f1276c26ff061d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 May 2025 09:41:27 +0300
+Subject: Revert "drm/i915/gem: Allow EXEC_CAPTURE on recoverable contexts on
+ DG1"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+
+[ Upstream commit ed5915cfce2abb9a553c3737badebd4a11d6c9c7 ]
+
+This reverts commit d6e020819612a4a06207af858e0978be4d3e3140.
+
+The IS_DGFX check was put in place because error capture of buffer
+objects is expected to be broken on devices with VRAM.
+
+Userspace fix[1] to the impacted media driver has been submitted, merged
+and a new driver release is out as 25.2.3 where the capture flag is
+dropped on DG1 thus unblocking the usage of media driver on DG1.
+
+[1] https://github.com/intel/media-driver/commit/93c07d9b4b96a78bab21f6acd4eb863f4313ea4a
+
+Cc: stable@vger.kernel.org # v6.0+
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Andi Shyti <andi.shyti@linux.intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Tvrtko Ursulin <tursulin@ursulin.net>
+Acked-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://lore.kernel.org/r/20250522064127.24293-1-joonas.lahtinen@linux.intel.com
+[Joonas: Update message to point out the merged userspace fix]
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+(cherry picked from commit d2dc30e0aa252830f908c8e793d3139d51321370)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 841438301d802..a3b83cfe17267 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2014,7 +2014,7 @@ static int eb_capture_stage(struct i915_execbuffer *eb)
+ continue;
+
+ if (i915_gem_context_is_recoverable(eb->gem_context) &&
+- GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 10))
++ (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
+ return -EINVAL;
+
+ for_each_batch_create_order(eb, j) {
+--
+2.39.5
+
--- /dev/null
+From d23d8cf41c53264e01db904e59b8170aa2795969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 11:24:21 +0200
+Subject: Revert "iommu/amd: Prevent binding other PCI drivers to IOMMU PCI
+ devices"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lukas Wunner <lukas@wunner.de>
+
+[ Upstream commit 3be5fa236649da6404f1bca1491bf02d4b0d5cce ]
+
+Commit 991de2e59090 ("PCI, x86: Implement pcibios_alloc_irq() and
+pcibios_free_irq()") changed IRQ handling on PCI driver probing.
+It inadvertently broke resume from system sleep on AMD platforms:
+
+ https://lore.kernel.org/r/20150926164651.GA3640@pd.tnic/
+
+This was fixed by two independent commits:
+
+* 8affb487d4a4 ("x86/PCI: Don't alloc pcibios-irq when MSI is enabled")
+* cbbc00be2ce3 ("iommu/amd: Prevent binding other PCI drivers to IOMMU PCI devices")
+
+The breaking change and one of these two fixes were subsequently reverted:
+
+* fe25d078874f ("Revert "x86/PCI: Don't alloc pcibios-irq when MSI is enabled"")
+* 6c777e8799a9 ("Revert "PCI, x86: Implement pcibios_alloc_irq() and pcibios_free_irq()"")
+
+This rendered the second fix unnecessary, so revert it as well. It used
+the match_driver flag in struct pci_dev, which is internal to the PCI core
+and not supposed to be touched by arbitrary drivers.
+
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+Link: https://patch.msgid.link/9a3ddff5cc49512044f963ba0904347bd404094d.1745572340.git.lukas@wunner.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index ff11cd7e5c068..b8b10140d41b2 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2026,9 +2026,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
+ if (!iommu->dev)
+ return -ENODEV;
+
+- /* Prevent binding other PCI device drivers to IOMMU devices */
+- iommu->dev->match_driver = false;
+-
+ /* ACPI _PRT won't have an IRQ for IOMMU */
+ iommu->dev->irq_managed = 1;
+
+--
+2.39.5
+
--- /dev/null
+From e7f277c3f5b324637df923507bb2af7aeff49c0e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Apr 2025 02:08:32 +0800
+Subject: riscv: add a data fence for CMODX in the kernel mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andy Chiu <andybnac@gmail.com>
+
+[ Upstream commit ca358692de41b273468e625f96926fa53e13bd8c ]
+
+RISC-V spec explicitly calls out that a local fence.i is not enough for
+the code modification to be visible from a remote hart. In fact, it
+states:
+
+To make a store to instruction memory visible to all RISC-V harts, the
+writing hart also has to execute a data FENCE before requesting that all
+remote RISC-V harts execute a FENCE.I.
+
+Although current riscv drivers for IPI use ordered MMIO when sending IPIs
+in order to synchronize the action between previous csd writes, riscv
+does not restrict itself to any particular flavor of IPI. Any driver or
+firmware implementation that does not order data writes before the IPI
+may pose a risk for code-modifying race.
+
+Thus, add a fence here to order data writes before making the IPI.
+
+Signed-off-by: Andy Chiu <andybnac@gmail.com>
+Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
+Link: https://lore.kernel.org/r/20250407180838.42877-8-andybnac@gmail.com
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/cacheflush.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
+index b816727298872..b2e4b81763f88 100644
+--- a/arch/riscv/mm/cacheflush.c
++++ b/arch/riscv/mm/cacheflush.c
+@@ -24,7 +24,20 @@ void flush_icache_all(void)
+
+ if (num_online_cpus() < 2)
+ return;
+- else if (riscv_use_sbi_for_rfence())
++
++ /*
++ * Make sure all previous writes to the D$ are ordered before making
++ * the IPI. The RISC-V spec states that a hart must execute a data fence
++ * before triggering a remote fence.i in order to make the modification
++ * visable for remote harts.
++ *
++ * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
++ * S-IMSIC, so the fence ensures previous data writes "happen before"
++ * the MMIO.
++ */
++ RISCV_FENCE(w, o);
++
++ if (riscv_use_sbi_for_rfence())
+ sbi_remote_fence_i(NULL);
+ else
+ on_each_cpu(ipi_remote_fence_i, NULL, 1);
+--
+2.39.5
+
--- /dev/null
+From 7cf706efe80dde390bc372e1cba0ee1f645ba7a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 May 2025 05:02:31 -0700
+Subject: rust: arm: fix unknown (to Clang) argument '-mno-fdpic'
+
+From: Rudraksha Gupta <guptarud@gmail.com>
+
+[ Upstream commit 977c4308ee4270cf46e2c66b37de8e04670daa0c ]
+
+Currently rust on arm fails to compile due to '-mno-fdpic'. This flag
+disables a GCC feature that we don't want for kernel builds, so let's
+skip it as it doesn't apply to Clang.
+
+ UPD include/generated/asm-offsets.h
+ CALL scripts/checksyscalls.sh
+ RUSTC L rust/core.o
+ BINDGEN rust/bindings/bindings_generated.rs
+ BINDGEN rust/bindings/bindings_helpers_generated.rs
+ CC rust/helpers/helpers.o
+ Unable to generate bindings: clang diagnosed error: error: unknown argument: '-mno-fdpic'
+ make[2]: *** [rust/Makefile:369: rust/bindings/bindings_helpers_generated.rs] Error 1
+ make[2]: *** Deleting file 'rust/bindings/bindings_helpers_generated.rs'
+ make[2]: *** Waiting for unfinished jobs....
+ Unable to generate bindings: clang diagnosed error: error: unknown argument: '-mno-fdpic'
+ make[2]: *** [rust/Makefile:349: rust/bindings/bindings_generated.rs] Error 1
+ make[2]: *** Deleting file 'rust/bindings/bindings_generated.rs'
+ make[1]: *** [/home/pmos/build/src/linux-next-next-20250521/Makefile:1285: prepare] Error 2
+ make: *** [Makefile:248: __sub-make] Error 2
+
+[ Naresh provided the draft diff [1].
+
+ Ben explained [2]:
+
+ FDPIC is only relevant with no-MMU targets, and then only for userspace.
+ When configured for the arm-*-uclinuxfdpiceabi target, GCC enables FDPIC
+ by default to facilitate compiling userspace programs. FDPIC is never
+ used for the kernel, and we pass -mno-fdpic when building the kernel to
+ override the default and make sure FDPIC is disabled.
+
+ and [3]:
+
+ -mno-fdpic disables a GCC feature that we don't want for kernel builds.
+ clang does not support this feature, so it always behaves as though
+ -mno-fdpic is passed. Therefore, it should be fine to mix the two, at
+ least as far as FDPIC is concerned.
+
+ [1] https://lore.kernel.org/rust-for-linux/CA+G9fYt4otQK4pHv8pJBW9e28yHSGCDncKquwuJiJ_1ou0pq0w@mail.gmail.com/
+ [2] https://lore.kernel.org/rust-for-linux/aAKrq2InExQk7f_k@dell-precision-5540/
+ [3] https://lore.kernel.org/rust-for-linux/aAo_F_UP1Gd4jHlZ@dell-precision-5540/
+
+ - Miguel ]
+
+Reported-by: Linux Kernel Functional Testing <lkft@linaro.org>
+Closes: https://lore.kernel.org/all/CA+G9fYvOanQBYXKSg7C6EU30k8sTRC0JRPJXYu7wWK51w38QUQ@mail.gmail.com/
+Suggested-by: Miguel Ojeda <ojeda@kernel.org>
+Acked-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Signed-off-by: Rudraksha Gupta <guptarud@gmail.com>
+Link: https://lore.kernel.org/r/20250522-rust-mno-fdpic-arm-fix-v2-1-a6f691d9c198@gmail.com
+[ Reworded title. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rust/Makefile b/rust/Makefile
+index 93650b2ee7d57..b8b7f817c48e4 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -238,7 +238,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ -fzero-call-used-regs=% -fno-stack-clash-protection \
+ -fno-inline-functions-called-once -fsanitize=bounds-strict \
+ -fstrict-flex-arrays=% -fmin-function-alignment=% \
+- -fzero-init-padding-bits=% \
++ -fzero-init-padding-bits=% -mno-fdpic \
+ --param=% --param asan-%
+
+ # Derived from `scripts/Makefile.clang`.
+--
+2.39.5
+
--- /dev/null
+From a42597630ccab4148ae16e0d85858f6db0d1bd63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Mar 2025 13:45:06 +0900
+Subject: rust: module: place cleanup_module() in .exit.text section
+
+From: FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+[ Upstream commit 249c3a0e53acefc2b06d3b3e1fc28fb2081f878d ]
+
+Place cleanup_module() in .exit.text section. Currently,
+cleanup_module() is likely placed in the .text section. It's
+inconsistent with the layout of C modules, where cleanup_module() is
+placed in .exit.text.
+
+[ Boqun asked for an example of how the section changed to be
+ put in the log. Tomonori provided the following examples:
+
+ C module:
+
+ $ objdump -t ~/build/x86/drivers/block/loop.o|grep clean
+ 0000000000000000 l O .exit.data 0000000000000008 __UNIQUE_ID___addressable_cleanup_module412
+ 0000000000000000 g F .exit.text 000000000000009c cleanup_module
+
+ Rust module without this patch:
+
+ $ objdump -t ~/build/x86/samples/rust/rust_minimal.o|grep clean
+ 00000000000002b0 g F .text 00000000000000c6 cleanup_module
+ 0000000000000000 g O .exit.data 0000000000000008 _R...___UNIQUE_ID___addressable_cleanup_module
+
+ Rust module with this patch:
+
+ $ objdump -t ~/build/x86/samples/rust/rust_minimal.o|grep clean
+ 0000000000000000 g F .exit.text 00000000000000c6 cleanup_module
+ 0000000000000000 g O .exit.data 0000000000000008 _R...___UNIQUE_ID___addressable_cleanup_module
+
+ - Miguel ]
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@gmail.com>
+Acked-by: Jarkko Sakkinen <jarkko@kernel.org>
+Link: https://lore.kernel.org/r/20250308044506.14458-1-fujita.tomonori@gmail.com
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/macros/module.rs | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index da2a18b276e0b..a5ea5850e307a 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -260,6 +260,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
++ #[link_section = \".exit.text\"]
+ pub extern \"C\" fn cleanup_module() {{
+ // SAFETY:
+ // - This function is inaccessible to the outside due to the double
+--
+2.39.5
+
--- /dev/null
+From d54444a05d020172e0386e45f792183c483faa6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 May 2025 16:12:28 +0800
+Subject: scsi: ufs: core: Don't perform UFS clkscaling during host async scan
+
+From: Ziqi Chen <quic_ziqichen@quicinc.com>
+
+[ Upstream commit e97633492f5a3eca7b3ff03b4ef6f993017f7955 ]
+
+When preparing for UFS clock scaling, the UFS driver will quiesce all
+sdevs queues in the UFS SCSI host tagset list and then unquiesce them in
+ufshcd_clock_scaling_unprepare(). If the UFS SCSI host async scan is in
+progress at this time, some LUs may be added to the tagset list between
+UFS clkscale prepare and unprepare. This can cause two issues:
+
+1. During clock scaling, there may be I/O requests issued through new
+added queues that have not been quiesced, leading to a task abort issue.
+
+2. These new added queues that have not been quiesced will be unquiesced
+as well when UFS clkscale is unprepared, resulting in warning prints.
+
+Therefore, use the mutex lock scan_mutex in
+ufshcd_clock_scaling_prepare() and ufshcd_clock_scaling_unprepare() to
+protect it.
+
+Co-developed-by: Can Guo <quic_cang@quicinc.com>
+Signed-off-by: Can Guo <quic_cang@quicinc.com>
+Signed-off-by: Ziqi Chen <quic_ziqichen@quicinc.com>
+Link: https://lore.kernel.org/r/20250522081233.2358565-1-quic_ziqichen@quicinc.com
+Suggested-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 374f505fec3d1..c5cef57e64ce3 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1392,6 +1392,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
++ mutex_lock(&hba->host->scan_mutex);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
+ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+@@ -1402,6 +1403,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
++ mutex_unlock(&hba->host->scan_mutex);
+ goto out;
+ }
+
+@@ -1423,6 +1425,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
+ mutex_unlock(&hba->wb_mutex);
+
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
++ mutex_unlock(&hba->host->scan_mutex);
+ ufshcd_release(hba);
+ }
+
+--
+2.39.5
+
--- /dev/null
+cifs-correctly-set-smb1-sessionkey-field-in-session-.patch
+cifs-fix-cifs_query_path_info-for-windows-nt-servers.patch
+cifs-fix-encoding-of-smb1-session-setup-ntlmssp-requ.patch
+nfsv4-always-set-nlink-even-if-the-server-doesn-t-su.patch
+nfsv4.2-fix-listxattr-to-return-selinux-security-lab.patch
+nfsv4.2-fix-setattr-caching-of-time_-modify-access-_.patch
+mailbox-not-protect-module_put-with-spin_lock_irqsav.patch
+mfd-max14577-fix-wakeup-source-leaks-on-device-unbin.patch
+sunrpc-don-t-immediately-retransmit-on-seqno-miss.patch
+dm-vdo-indexer-don-t-read-request-structure-after-en.patch
+leds-multicolor-fix-intensity-setting-while-sw-blink.patch
+fuse-fix-race-between-concurrent-setattrs-from-multi.patch
+cxl-region-add-a-dev_err-on-missing-target-list-entr.patch
+nfsv4-xattr-handlers-should-check-for-absent-nfs-fil.patch
+hwmon-pmbus-max34440-fix-support-for-max34451.patch
+ksmbd-allow-a-filename-to-contain-special-characters.patch
+ksmbd-provide-zero-as-a-unique-id-to-the-mac-client.patch
+rust-module-place-cleanup_module-in-.exit.text-secti.patch
+rust-arm-fix-unknown-to-clang-argument-mno-fdpic.patch
+revert-iommu-amd-prevent-binding-other-pci-drivers-t.patch
+dmaengine-idxd-check-availability-of-workqueue-alloc.patch
+dmaengine-xilinx_dma-set-dma_device-directions.patch
+pci-dwc-make-link-training-more-robust-by-setting-po.patch
+pci-apple-fix-missing-of-node-reference-in-apple_pci.patch
+pci-imx6-add-workaround-for-errata-err051624.patch
+nvme-tcp-fix-i-o-stalls-on-congested-sockets.patch
+nvme-tcp-sanitize-request-list-handling.patch
+md-md-bitmap-fix-dm-raid-max_write_behind-setting.patch
+amd-amdkfd-fix-a-kfd_process-ref-leak.patch
+bcache-fix-null-pointer-in-cache_set_flush.patch
+drm-amdgpu-seq64-memory-unmap-uses-uninterruptible-l.patch
+drm-scheduler-signal-scheduled-fence-when-kill-job.patch
+iio-pressure-zpa2326-use-aligned_s64-for-the-timesta.patch
+um-add-cmpxchg8b_emu-and-checksum-functions-to-asm-p.patch
+um-use-proper-care-when-taking-mmap-lock-during-segf.patch
+8250-microchip-pci1xxxx-add-pcie-hot-reset-disable-s.patch
+coresight-only-check-bottom-two-claim-bits.patch
+usb-dwc2-also-exit-clock_gating-when-stopping-udc-wh.patch
+iio-adc-ad_sigma_delta-fix-use-of-uninitialized-stat.patch
+misc-tps6594-pfsm-add-null-pointer-check-in-tps6594_.patch
+usb-potential-integer-overflow-in-usbg_make_tpg.patch
+tty-serial-uartlite-register-uart-driver-in-init.patch
+usb-common-usb-conn-gpio-use-a-unique-name-for-usb-c.patch
+usb-add-checks-for-snprintf-calls-in-usb_alloc_dev.patch
+usb-cdc-wdm-avoid-setting-wdm_read-for-zlp-s.patch
+usb-gadget-f_hid-wake-up-readers-on-disable-unbind.patch
+usb-typec-displayport-receive-dp-status-update-nak-r.patch
+usb-typec-mux-do-not-return-on-eopnotsupp-in-mux-swi.patch
+riscv-add-a-data-fence-for-cmodx-in-the-kernel-mode.patch
+alsa-hda-ignore-unsol-events-for-cards-being-shut-do.patch
+alsa-hda-add-new-pci-id-for-amd-gpu-display-hd-audio.patch
+alsa-usb-audio-add-a-quirk-for-lenovo-thinkpad-thund.patch
+asoc-rt1320-fix-speaker-noise-when-volume-bar-is-100.patch
+ceph-fix-possible-integer-overflow-in-ceph_zero_obje.patch
+scsi-ufs-core-don-t-perform-ufs-clkscaling-during-ho.patch
+ovl-check-for-null-d_inode-in-ovl_dentry_upper.patch
+btrfs-handle-csum-tree-error-with-rescue-ibadroots-c.patch
+drm-i915-gem-allow-exec_capture-on-recoverable-conte.patch
+revert-drm-i915-gem-allow-exec_capture-on-recoverabl.patch
+btrfs-factor-out-nocow-ordered-extent-and-extent-map.patch
+btrfs-use-unsigned-types-for-constants-defined-as-bi.patch
+btrfs-fix-qgroup-reservation-leak-on-failure-to-allo.patch
+fs-jfs-consolidate-sanity-checking-in-dbmount.patch
+jfs-validate-ag-parameters-in-dbmount-to-prevent-cra.patch
+asoc-codec-wcd9335-convert-to-gpio-descriptors.patch
+asoc-codecs-wcd9335-fix-missing-free-of-regulator-su.patch
+ext4-don-t-explicit-update-times-in-ext4_fallocate.patch
+ext4-refactor-ext4_punch_hole.patch
+ext4-refactor-ext4_zero_range.patch
+ext4-refactor-ext4_collapse_range.patch
+ext4-refactor-ext4_insert_range.patch
+ext4-factor-out-ext4_do_fallocate.patch
+ext4-move-out-inode_lock-into-ext4_fallocate.patch
+ext4-move-out-common-parts-into-ext4_fallocate.patch
+ext4-fix-incorrect-punch-max_end.patch
+f2fs-don-t-over-report-free-space-or-inodes-in-statv.patch
+pci-apple-use-helper-function-for_each_child_of_node.patch
+pci-apple-set-only-available-ports-up.patch
+accel-ivpu-do-not-fail-on-cmdq-if-failed-to-allocate.patch
+accel-ivpu-remove-copy-engine-support.patch
+accel-ivpu-make-command-queue-id-allocated-on-xarray.patch
+accel-ivpu-separate-db-id-and-cmdq-id-allocations-fr.patch
+accel-ivpu-add-debugfs-interface-for-setting-hws-pri.patch
+accel-ivpu-trigger-device-recovery-on-engine-reset-r.patch
+af_unix-don-t-leave-consecutive-consumed-oob-skbs.patch
--- /dev/null
+From c0b9efe70c13d3265cb9a2a4dd1112135e140354 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Mar 2025 13:02:40 -0400
+Subject: sunrpc: don't immediately retransmit on seqno miss
+
+From: Nikhil Jha <njha@janestreet.com>
+
+[ Upstream commit fadc0f3bb2de8c570ced6d9c1f97222213d93140 ]
+
+RFC2203 requires that retransmitted messages use a new gss sequence
+number, but the same XID. This means that if the server is just slow
+(e.x. overloaded), the client might receive a response using an older
+seqno than the one it has recorded.
+
+Currently, Linux's client immediately retransmits in this case. However,
+this leads to a lot of wasted retransmits until the server eventually
+responds faster than the client can resend.
+
+Client -> SEQ 1 -> Server
+Client -> SEQ 2 -> Server
+Client <- SEQ 1 <- Server (misses, expecting seqno = 2)
+Client -> SEQ 3 -> Server (immediate retransmission on miss)
+Client <- SEQ 2 <- Server (misses, expecting seqno = 3)
+Client -> SEQ 4 -> Server (immediate retransmission on miss)
+... and so on ...
+
+This commit makes it so that we ignore messages with bad checksums
+due to seqnum mismatch, and rely on the usual timeout behavior for
+retransmission instead of doing so immediately.
+
+Signed-off-by: Nikhil Jha <njha@janestreet.com>
+Acked-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/clnt.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 17a4de75bfaf6..e492655cb2212 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2749,8 +2749,13 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
+ case -EPROTONOSUPPORT:
+ goto out_err;
+ case -EACCES:
+- /* Re-encode with a fresh cred */
+- fallthrough;
++ /* possible RPCSEC_GSS out-of-sequence event (RFC2203),
++ * reset recv state and keep waiting, don't retransmit
++ */
++ task->tk_rqstp->rq_reply_bytes_recvd = 0;
++ task->tk_status = xprt_request_enqueue_receive(task);
++ task->tk_action = call_transmit_status;
++ return -EBADMSG;
+ default:
+ goto out_garbage;
+ }
+--
+2.39.5
+
--- /dev/null
+From f6b33620dd0a3d16a491bdd07a3acd8a0b0f325d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 31 Mar 2025 18:06:19 +0200
+Subject: tty: serial: uartlite: register uart driver in init
+
+From: Jakub Lewalski <jakub.lewalski@nokia.com>
+
+[ Upstream commit 6bd697b5fc39fd24e2aa418c7b7d14469f550a93 ]
+
+When two instances of uart devices are probing, a concurrency race can
+occur. If one thread calls uart_register_driver function, which first
+allocates and assigns memory to 'uart_state' member of uart_driver
+structure, the other instance can bypass uart driver registration and
+call ulite_assign. This calls uart_add_one_port, which expects the uart
+driver to be fully initialized. This leads to a kernel panic due to a
+null pointer dereference:
+
+[ 8.143581] BUG: kernel NULL pointer dereference, address: 00000000000002b8
+[ 8.156982] #PF: supervisor write access in kernel mode
+[ 8.156984] #PF: error_code(0x0002) - not-present page
+[ 8.156986] PGD 0 P4D 0
+...
+[ 8.180668] RIP: 0010:mutex_lock+0x19/0x30
+[ 8.188624] Call Trace:
+[ 8.188629] ? __die_body.cold+0x1a/0x1f
+[ 8.195260] ? page_fault_oops+0x15c/0x290
+[ 8.209183] ? __irq_resolve_mapping+0x47/0x80
+[ 8.209187] ? exc_page_fault+0x64/0x140
+[ 8.209190] ? asm_exc_page_fault+0x22/0x30
+[ 8.209196] ? mutex_lock+0x19/0x30
+[ 8.223116] uart_add_one_port+0x60/0x440
+[ 8.223122] ? proc_tty_register_driver+0x43/0x50
+[ 8.223126] ? tty_register_driver+0x1ca/0x1e0
+[ 8.246250] ulite_probe+0x357/0x4b0 [uartlite]
+
+To prevent it, move uart driver registration in to init function. This
+will ensure that uart_driver is always registered when probe function
+is called.
+
+Signed-off-by: Jakub Lewalski <jakub.lewalski@nokia.com>
+Signed-off-by: Elodie Decerle <elodie.decerle@nokia.com>
+Link: https://lore.kernel.org/r/20250331160732.2042-1-elodie.decerle@nokia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/uartlite.c | 25 ++++++++++++-------------
+ 1 file changed, 12 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
+index 68357ac8ffe3c..71890f3244a0f 100644
+--- a/drivers/tty/serial/uartlite.c
++++ b/drivers/tty/serial/uartlite.c
+@@ -880,16 +880,6 @@ static int ulite_probe(struct platform_device *pdev)
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+- if (!ulite_uart_driver.state) {
+- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+- ret = uart_register_driver(&ulite_uart_driver);
+- if (ret < 0) {
+- dev_err(&pdev->dev, "Failed to register driver\n");
+- clk_disable_unprepare(pdata->clk);
+- return ret;
+- }
+- }
+-
+ ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+@@ -929,16 +919,25 @@ static struct platform_driver ulite_platform_driver = {
+
+ static int __init ulite_init(void)
+ {
++ int ret;
++
++ pr_debug("uartlite: calling uart_register_driver()\n");
++ ret = uart_register_driver(&ulite_uart_driver);
++ if (ret)
++ return ret;
+
+ pr_debug("uartlite: calling platform_driver_register()\n");
+- return platform_driver_register(&ulite_platform_driver);
++ ret = platform_driver_register(&ulite_platform_driver);
++ if (ret)
++ uart_unregister_driver(&ulite_uart_driver);
++
++ return ret;
+ }
+
+ static void __exit ulite_exit(void)
+ {
+ platform_driver_unregister(&ulite_platform_driver);
+- if (ulite_uart_driver.state)
+- uart_unregister_driver(&ulite_uart_driver);
++ uart_unregister_driver(&ulite_uart_driver);
+ }
+
+ module_init(ulite_init);
+--
+2.39.5
+
--- /dev/null
+From 53ae42abf862bcb433f7615dc2d45330ede77ee5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Mar 2025 19:05:00 +0000
+Subject: um: Add cmpxchg8b_emu and checksum functions to asm-prototypes.h
+
+From: Sami Tolvanen <samitolvanen@google.com>
+
+[ Upstream commit 674d03f6bd6b0f8327f1a4920ff5893557facfbd ]
+
+With CONFIG_GENDWARFKSYMS, um builds fail due to missing prototypes
+in asm/asm-prototypes.h. Add declarations for cmpxchg8b_emu and the
+exported checksum functions, including csum_partial_copy_generic as
+it's also exported.
+
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: linux-kbuild@vger.kernel.org
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202503251216.lE4t9Ikj-lkp@intel.com/
+Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
+Link: https://patch.msgid.link/20250326190500.847236-2-samitolvanen@google.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/include/asm/asm-prototypes.h | 5 +++++
+ arch/x86/um/asm/checksum.h | 3 +++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h
+index 5898a26daa0dd..408b31d591279 100644
+--- a/arch/um/include/asm/asm-prototypes.h
++++ b/arch/um/include/asm/asm-prototypes.h
+@@ -1 +1,6 @@
+ #include <asm-generic/asm-prototypes.h>
++#include <asm/checksum.h>
++
++#ifdef CONFIG_UML_X86
++extern void cmpxchg8b_emu(void);
++#endif
+diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
+index b07824500363f..ddc144657efad 100644
+--- a/arch/x86/um/asm/checksum.h
++++ b/arch/x86/um/asm/checksum.h
+@@ -20,6 +20,9 @@
+ */
+ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
++/* Do not call this directly. Declared for export type visibility. */
++extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
++
+ /**
+ * csum_fold - Fold and invert a 32bit checksum.
+ * sum: 32bit unfolded sum
+--
+2.39.5
+
--- /dev/null
+From 79bcd8c53fa52fd00faa9e50b00f5e2291bfd64f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Apr 2025 09:45:24 +0200
+Subject: um: use proper care when taking mmap lock during segfault
+
+From: Benjamin Berg <benjamin.berg@intel.com>
+
+[ Upstream commit 6767e8784cd2e8b386a62330ea6864949d983a3e ]
+
+Segfaults can occur at times where the mmap lock cannot be taken. If
+that happens the segfault handler may not be able to take the mmap lock.
+
+Fix the code to use the same approach as most other architectures.
+Unfortunately, this requires copying code from mm/memory.c and modifying
+it slightly as UML does not have exception tables.
+
+Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
+Link: https://patch.msgid.link/20250408074524.300153-2-benjamin@sipsolutions.net
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/kernel/trap.c | 129 ++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 117 insertions(+), 12 deletions(-)
+
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index 97c8df9c44017..9077bdb26cc35 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -17,6 +17,122 @@
+ #include <os.h>
+ #include <skas.h>
+
++/*
++ * NOTE: UML does not have exception tables. As such, this is almost a copy
++ * of the code in mm/memory.c, only adjusting the logic to simply check whether
++ * we are coming from the kernel instead of doing an additional lookup in the
++ * exception table.
++ * We can do this simplification because we never get here if the exception was
++ * fixable.
++ */
++static inline bool get_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
++{
++ if (likely(mmap_read_trylock(mm)))
++ return true;
++
++ if (!is_user)
++ return false;
++
++ return !mmap_read_lock_killable(mm);
++}
++
++static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
++{
++ /*
++ * We don't have this operation yet.
++ *
++ * It should be easy enough to do: it's basically a
++ * atomic_long_try_cmpxchg_acquire()
++ * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
++ * it also needs the proper lockdep magic etc.
++ */
++ return false;
++}
++
++static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
++{
++ mmap_read_unlock(mm);
++ if (!is_user)
++ return false;
++
++ return !mmap_write_lock_killable(mm);
++}
++
++/*
++ * Helper for page fault handling.
++ *
++ * This is kind of equivalend to "mmap_read_lock()" followed
++ * by "find_extend_vma()", except it's a lot more careful about
++ * the locking (and will drop the lock on failure).
++ *
++ * For example, if we have a kernel bug that causes a page
++ * fault, we don't want to just use mmap_read_lock() to get
++ * the mm lock, because that would deadlock if the bug were
++ * to happen while we're holding the mm lock for writing.
++ *
++ * So this checks the exception tables on kernel faults in
++ * order to only do this all for instructions that are actually
++ * expected to fault.
++ *
++ * We can also actually take the mm lock for writing if we
++ * need to extend the vma, which helps the VM layer a lot.
++ */
++static struct vm_area_struct *
++um_lock_mm_and_find_vma(struct mm_struct *mm,
++ unsigned long addr, bool is_user)
++{
++ struct vm_area_struct *vma;
++
++ if (!get_mmap_lock_carefully(mm, is_user))
++ return NULL;
++
++ vma = find_vma(mm, addr);
++ if (likely(vma && (vma->vm_start <= addr)))
++ return vma;
++
++ /*
++ * Well, dang. We might still be successful, but only
++ * if we can extend a vma to do so.
++ */
++ if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
++ mmap_read_unlock(mm);
++ return NULL;
++ }
++
++ /*
++ * We can try to upgrade the mmap lock atomically,
++ * in which case we can continue to use the vma
++ * we already looked up.
++ *
++ * Otherwise we'll have to drop the mmap lock and
++ * re-take it, and also look up the vma again,
++ * re-checking it.
++ */
++ if (!mmap_upgrade_trylock(mm)) {
++ if (!upgrade_mmap_lock_carefully(mm, is_user))
++ return NULL;
++
++ vma = find_vma(mm, addr);
++ if (!vma)
++ goto fail;
++ if (vma->vm_start <= addr)
++ goto success;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto fail;
++ }
++
++ if (expand_stack_locked(vma, addr))
++ goto fail;
++
++success:
++ mmap_write_downgrade(mm);
++ return vma;
++
++fail:
++ mmap_write_unlock(mm);
++ return NULL;
++}
++
+ /*
+ * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
+ * segv().
+@@ -43,21 +159,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
+ if (is_user)
+ flags |= FAULT_FLAG_USER;
+ retry:
+- mmap_read_lock(mm);
+- vma = find_vma(mm, address);
+- if (!vma)
+- goto out;
+- if (vma->vm_start <= address)
+- goto good_area;
+- if (!(vma->vm_flags & VM_GROWSDOWN))
+- goto out;
+- if (is_user && !ARCH_IS_STACKGROW(address))
+- goto out;
+- vma = expand_stack(mm, address);
++ vma = um_lock_mm_and_find_vma(mm, address, is_user);
+ if (!vma)
+ goto out_nosemaphore;
+
+-good_area:
+ *code_out = SEGV_ACCERR;
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+--
+2.39.5
+
--- /dev/null
+From 8374ed1224bee23d4406661a76eb81a309bb1246 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Mar 2025 18:49:49 +0200
+Subject: usb: Add checks for snprintf() calls in usb_alloc_dev()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 82fe5107fa3d21d6c3fba091c9dbc50495588630 ]
+
+When creating a device path in the driver the snprintf() takes
+up to 16 characters long argument along with the additional up to
+12 characters for the signed integer (as it can't see the actual limits)
+and tries to pack this into 16 bytes array. GCC complains about that
+when build with `make W=1`:
+
+ drivers/usb/core/usb.c:705:25: note: ‘snprintf’ output between 3 and 28 bytes into a destination of size 16
+
+Since everything works until now, let's just check for the potential
+buffer overflow and bail out. It is most likely a never happen situation,
+but at least it makes GCC happy.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20250321164949.423957-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/core/usb.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 0b4685aad2d50..118fa4c93a795 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -695,15 +695,16 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ device_set_of_node_from_dev(&dev->dev, bus->sysdev);
+ dev_set_name(&dev->dev, "usb%d", bus->busnum);
+ } else {
++ int n;
++
+ /* match any labeling on the hubs; it's one-based */
+ if (parent->devpath[0] == '0') {
+- snprintf(dev->devpath, sizeof dev->devpath,
+- "%d", port1);
++ n = snprintf(dev->devpath, sizeof(dev->devpath), "%d", port1);
+ /* Root ports are not counted in route string */
+ dev->route = 0;
+ } else {
+- snprintf(dev->devpath, sizeof dev->devpath,
+- "%s.%d", parent->devpath, port1);
++ n = snprintf(dev->devpath, sizeof(dev->devpath), "%s.%d",
++ parent->devpath, port1);
+ /* Route string assumes hubs have less than 16 ports */
+ if (port1 < 15)
+ dev->route = parent->route +
+@@ -712,6 +713,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ dev->route = parent->route +
+ (15 << ((parent->level - 1)*4));
+ }
++ if (n >= sizeof(dev->devpath)) {
++ usb_put_hcd(bus_to_hcd(bus));
++ usb_put_dev(dev);
++ return NULL;
++ }
+
+ dev->dev.parent = &parent->dev;
+ dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
+--
+2.39.5
+
--- /dev/null
+From 8c9872a79231c2dc76b28c4d57478d7d03f0f503 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Apr 2025 16:40:04 +0200
+Subject: usb: cdc-wdm: avoid setting WDM_READ for ZLP-s
+
+From: Robert Hodaszi <robert.hodaszi@digi.com>
+
+[ Upstream commit 387602d8a75574fafb451b7a8215e78dfd67ee63 ]
+
+Don't set WDM_READ flag in wdm_in_callback() for ZLP-s, otherwise when
+userspace tries to poll for available data, it might - incorrectly -
+believe there is something available, and when it tries to non-blocking
+read it, it might get stuck in the read loop.
+
+For example this is what glib does for non-blocking read (briefly):
+
+ 1. poll()
+ 2. if poll returns with non-zero, starts a read data loop:
+ a. loop on poll() (EINTR disabled)
+ b. if revents was set, reads data
+ I. if read returns with EINTR or EAGAIN, goto 2.a.
+ II. otherwise return with data
+
+So if ZLP sets WDM_READ (#1), we expect data, and try to read it (#2).
+But as that was a ZLP, and we are doing non-blocking read, wdm_read()
+returns with EAGAIN (#2.b.I), so loop again, and try to read again
+(#2.a.).
+
+With glib, we might get stuck in this loop forever, as EINTR is disabled
+(#2.a).
+
+Signed-off-by: Robert Hodaszi <robert.hodaszi@digi.com>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Link: https://lore.kernel.org/r/20250403144004.3889125-1-robert.hodaszi@digi.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/class/cdc-wdm.c | 23 +++++++++--------------
+ 1 file changed, 9 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 16e7fa4d488d3..ecd6d1f39e498 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -92,7 +92,6 @@ struct wdm_device {
+ u16 wMaxCommand;
+ u16 wMaxPacketSize;
+ __le16 inum;
+- int reslength;
+ int length;
+ int read;
+ int count;
+@@ -214,6 +213,11 @@ static void wdm_in_callback(struct urb *urb)
+ if (desc->rerr == 0 && status != -EPIPE)
+ desc->rerr = status;
+
++ if (length == 0) {
++ dev_dbg(&desc->intf->dev, "received ZLP\n");
++ goto skip_zlp;
++ }
++
+ if (length + desc->length > desc->wMaxCommand) {
+ /* The buffer would overflow */
+ set_bit(WDM_OVERFLOW, &desc->flags);
+@@ -222,18 +226,18 @@ static void wdm_in_callback(struct urb *urb)
+ if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
+ memmove(desc->ubuf + desc->length, desc->inbuf, length);
+ desc->length += length;
+- desc->reslength = length;
+ }
+ }
+ skip_error:
+
+ if (desc->rerr) {
+ /*
+- * Since there was an error, userspace may decide to not read
+- * any data after poll'ing.
++ * If there was a ZLP or an error, userspace may decide to not
++ * read any data after poll'ing.
+ * We should respond to further attempts from the device to send
+ * data, so that we can get unstuck.
+ */
++skip_zlp:
+ schedule_work(&desc->service_outs_intr);
+ } else {
+ set_bit(WDM_READ, &desc->flags);
+@@ -585,15 +589,6 @@ static ssize_t wdm_read
+ goto retry;
+ }
+
+- if (!desc->reslength) { /* zero length read */
+- dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n");
+- clear_bit(WDM_READ, &desc->flags);
+- rv = service_outstanding_interrupt(desc);
+- spin_unlock_irq(&desc->iuspin);
+- if (rv < 0)
+- goto err;
+- goto retry;
+- }
+ cntr = desc->length;
+ spin_unlock_irq(&desc->iuspin);
+ }
+@@ -1016,7 +1011,7 @@ static void service_interrupt_work(struct work_struct *work)
+
+ spin_lock_irq(&desc->iuspin);
+ service_outstanding_interrupt(desc);
+- if (!desc->resp_count) {
++ if (!desc->resp_count && (desc->length || desc->rerr)) {
+ set_bit(WDM_READ, &desc->flags);
+ wake_up(&desc->wait);
+ }
+--
+2.39.5
+
--- /dev/null
+From fe64d45ece31a7a507b60a87bb8e38ad3dd3c11c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 16:33:26 +0800
+Subject: usb: common: usb-conn-gpio: use a unique name for usb connector
+ device
+
+From: Chance Yang <chance.yang@kneron.us>
+
+[ Upstream commit d4e5b10c55627e2f3fc9e5b337a28b4e2f02a55e ]
+
+The current implementation of the usb-conn-gpio driver uses a fixed
+"usb-charger" name for all USB connector devices. This causes conflicts
+in the power supply subsystem when multiple USB connectors are present,
+as duplicate names are not allowed.
+
+Use IDA to manage unique IDs for naming usb connectors (e.g.,
+usb-charger-0, usb-charger-1).
+
+Signed-off-by: Chance Yang <chance.yang@kneron.us>
+Link: https://lore.kernel.org/r/20250411-work-next-v3-1-7cd9aa80190c@kneron.us
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/common/usb-conn-gpio.c | 25 ++++++++++++++++++++++---
+ 1 file changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
+index 501e8bc9738eb..1096a884c8d70 100644
+--- a/drivers/usb/common/usb-conn-gpio.c
++++ b/drivers/usb/common/usb-conn-gpio.c
+@@ -20,6 +20,9 @@
+ #include <linux/power_supply.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/usb/role.h>
++#include <linux/idr.h>
++
++static DEFINE_IDA(usb_conn_ida);
+
+ #define USB_GPIO_DEB_MS 20 /* ms */
+ #define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */
+@@ -29,6 +32,7 @@
+
+ struct usb_conn_info {
+ struct device *dev;
++ int conn_id; /* store the IDA-allocated ID */
+ struct usb_role_switch *role_sw;
+ enum usb_role last_role;
+ struct regulator *vbus;
+@@ -160,7 +164,17 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
+ .of_node = dev->of_node,
+ };
+
+- desc->name = "usb-charger";
++ info->conn_id = ida_alloc(&usb_conn_ida, GFP_KERNEL);
++ if (info->conn_id < 0)
++ return info->conn_id;
++
++ desc->name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d",
++ info->conn_id);
++ if (!desc->name) {
++ ida_free(&usb_conn_ida, info->conn_id);
++ return -ENOMEM;
++ }
++
+ desc->properties = usb_charger_properties;
+ desc->num_properties = ARRAY_SIZE(usb_charger_properties);
+ desc->get_property = usb_charger_get_property;
+@@ -168,8 +182,10 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
+ cfg.drv_data = info;
+
+ info->charger = devm_power_supply_register(dev, desc, &cfg);
+- if (IS_ERR(info->charger))
+- dev_err(dev, "Unable to register charger\n");
++ if (IS_ERR(info->charger)) {
++ dev_err(dev, "Unable to register charger %d\n", info->conn_id);
++ ida_free(&usb_conn_ida, info->conn_id);
++ }
+
+ return PTR_ERR_OR_ZERO(info->charger);
+ }
+@@ -277,6 +293,9 @@ static void usb_conn_remove(struct platform_device *pdev)
+
+ cancel_delayed_work_sync(&info->dw_det);
+
++ if (info->charger)
++ ida_free(&usb_conn_ida, info->conn_id);
++
+ if (info->last_role == USB_ROLE_HOST && info->vbus)
+ regulator_disable(info->vbus);
+
+--
+2.39.5
+
--- /dev/null
+From e3309e966ef5afc47c8956d707afec4fa5123f7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Apr 2025 19:40:17 +0200
+Subject: usb: dwc2: also exit clock_gating when stopping udc while suspended
+
+From: Michael Grzeschik <m.grzeschik@pengutronix.de>
+
+[ Upstream commit af076a41f8a28faf9ceb9dd2d88aef2c202ef39a ]
+
+It is possible that the gadget will be disabled while the udc is
+suspended. When enabling the udc in that case, the clock gating
+will not be enabled again, leaving the phy unclocked. Even when the
+udc is not enabled, connecting this powered but unclocked phy leads
+to enumeration errors on the host side.
+
+To ensure that the clock gating is in a valid state, enable the
+clock gating before stopping the udc.
+
+Signed-off-by: Michael Grzeschik <m.grzeschik@pengutronix.de>
+Acked-by: Minas Harutyunyan <hminas@synopsys.com>
+Link: https://lore.kernel.org/r/20250417-dwc2_clock_gating-v1-1-8ea7c4d53d73@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/dwc2/gadget.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index bd4c788f03bc1..d3d0d75ab1f59 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -4604,6 +4604,12 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
+ if (!hsotg)
+ return -ENODEV;
+
++ /* Exit clock gating when driver is stopped. */
++ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
++ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
++ dwc2_gadget_exit_clock_gating(hsotg, 0);
++ }
++
+ /* all endpoints should be shutdown */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+--
+2.39.5
+
--- /dev/null
+From 6fda8ed12d00a330f7ed0e5672fefc99c0283f75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Mar 2025 16:22:07 +0100
+Subject: usb: gadget: f_hid: wake up readers on disable/unbind
+
+From: Peter Korsgaard <peter@korsgaard.com>
+
+[ Upstream commit 937a8a3a8d46a3377b4195cd8f2aa656666ebc8b ]
+
+Similar to how it is done in the write path.
+
+Add a disabled flag to track the function state and use it to exit the read
+loops to ensure no readers get stuck when the function is disabled/unbound,
+protecting against corruption when the waitq and spinlocks are reinitialized
+in hidg_bind().
+
+Signed-off-by: Peter Korsgaard <peter@korsgaard.com>
+Link: https://lore.kernel.org/r/20250318152207.330997-1-peter@korsgaard.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/f_hid.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index c7a05f842745b..d8bd2d82e9ec6 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -75,6 +75,7 @@ struct f_hidg {
+ /* recv report */
+ spinlock_t read_spinlock;
+ wait_queue_head_t read_queue;
++ bool disabled;
+ /* recv report - interrupt out only (use_out_ep == 1) */
+ struct list_head completed_out_req;
+ unsigned int qlen;
+@@ -329,7 +330,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+
+-#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req))
++#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req) || hidg->disabled)
+
+ /* wait for at least one buffer to complete */
+ while (!READ_COND_INTOUT) {
+@@ -343,6 +344,11 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ }
+
++ if (hidg->disabled) {
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++ return -ESHUTDOWN;
++ }
++
+ /* pick the first one */
+ list = list_first_entry(&hidg->completed_out_req,
+ struct f_hidg_req_list, list);
+@@ -387,7 +393,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
+ return count;
+ }
+
+-#define READ_COND_SSREPORT (hidg->set_report_buf != NULL)
++#define READ_COND_SSREPORT (hidg->set_report_buf != NULL || hidg->disabled)
+
+ static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ptr)
+@@ -1012,6 +1018,11 @@ static void hidg_disable(struct usb_function *f)
+ }
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++ hidg->disabled = true;
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++ wake_up(&hidg->read_queue);
++
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->write_pending) {
+ free_ep_req(hidg->in_ep, hidg->req);
+@@ -1097,6 +1108,10 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ }
+ }
+
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++ hidg->disabled = false;
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++
+ if (hidg->in_ep != NULL) {
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ hidg->req = req_in;
+--
+2.39.5
+
--- /dev/null
+From 8135f85bf840f4832d1a2e80722dae4988b2bbc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Apr 2025 14:58:57 +0800
+Subject: usb: potential integer overflow in usbg_make_tpg()
+
+From: Chen Yufeng <chenyufeng@iie.ac.cn>
+
+[ Upstream commit 153874010354d050f62f8ae25cbb960c17633dc5 ]
+
+The variable tpgt in usbg_make_tpg() is defined as unsigned long and is
+assigned to tpgt->tport_tpgt, which is defined as u16. This may cause an
+integer overflow when tpgt is greater than USHRT_MAX (65535). I
+haven't tried to trigger it myself, but it is possible to trigger it
+by calling usbg_make_tpg() with a large value for tpgt.
+
+I modified the type of tpgt to match tpgt->tport_tpgt and adjusted the
+relevant code accordingly.
+
+This patch is similar to commit 59c816c1f24d ("vhost/scsi: potential
+memory corruption").
+
+Signed-off-by: Chen Yufeng <chenyufeng@iie.ac.cn>
+Link: https://lore.kernel.org/r/20250415065857.1619-1-chenyufeng@iie.ac.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/f_tcm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 7b23631f47449..6ad205046032c 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -1297,14 +1297,14 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
+ struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
+ tport_wwn);
+ struct usbg_tpg *tpg;
+- unsigned long tpgt;
++ u16 tpgt;
+ int ret;
+ struct f_tcm_opts *opts;
+ unsigned i;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+- if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
++ if (kstrtou16(name + 5, 0, &tpgt))
+ return ERR_PTR(-EINVAL);
+ ret = -ENODEV;
+ mutex_lock(&tpg_instances_lock);
+--
+2.39.5
+
--- /dev/null
+From 2ff1277133dc9fc7edaee41ed24fad6afcc2484b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Feb 2025 15:19:26 +0800
+Subject: usb: typec: displayport: Receive DP Status Update NAK request exit dp
+ altmode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jos Wang <joswang@lenovo.com>
+
+[ Upstream commit b4b38ffb38c91afd4dc387608db26f6fc34ed40b ]
+
+Some Type-C DRD devices that do not support the DP Sink function
+(such as the Huawei Mate 40 Pro) still respond to the Source port's
+Enter Mode CMD with Enter Mode ACK; when the Source port then
+initiates the DP Status Update CMD, the device responds with a
+DP Status Update NAK.
+
+Per the PD 2.0 spec ("6.4.4.3.4 Enter Mode Command"), a DR_Swap
+Message Shall Not be sent during Modal Operation between the Port
+Partners. At this time, the source port initiates a DR_Swap message
+via the "echo device > /sys/class/typec/port0/data_role" command to
+switch the data role from host to device. The device will initiate a
+Hard Reset for recovery, resulting in the failure of the data role swap.
+
+Therefore, when DP Status Update NAK is received, Exit Mode CMD is
+initiated to exit the currently entered DP altmode.
+
+Signed-off-by: Jos Wang <joswang@lenovo.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20250209071926.69625-1-joswang1221@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/typec/altmodes/displayport.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 92cc1b1361208..4976a7238b287 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -393,6 +393,10 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
++ case DP_CMD_STATUS_UPDATE:
++ if (typec_altmode_exit(alt))
++ dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
++ break;
+ case DP_CMD_CONFIGURE:
+ dp->data.conf = 0;
+ ret = dp_altmode_configured(dp);
+--
+2.39.5
+
--- /dev/null
+From 440a9bbccddef062aac90466db416f20a3ab8f6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Apr 2025 00:21:01 +0200
+Subject: usb: typec: mux: do not return on EOPNOTSUPP in {mux, switch}_set
+
+From: Michael Grzeschik <m.grzeschik@pengutronix.de>
+
+[ Upstream commit 0f7bbef1794dc87141897f804e5871a293aa174b ]
+
+Since the typec connectors can have many muxes or switches for different
+lanes (sbu, usb2, usb3) going into different modal states (usb2, usb3,
+audio, debug) all of them will be called on typec_switch_set and
+typec_mux_set. But not all of them will be handling the expected mode.
+
+If one of the muxes or switches comes back with -EOPNOTSUPP, this is
+no reason to stop running through the next ones. Therefore we skip
+this particular error value and continue calling the next.
+
+Signed-off-by: Michael Grzeschik <m.grzeschik@pengutronix.de>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20250404-ml-topic-typec-mux-v1-1-22c0526381ba@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/typec/mux.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
+index 49926d6e72c71..182c902c42f61 100644
+--- a/drivers/usb/typec/mux.c
++++ b/drivers/usb/typec/mux.c
+@@ -214,7 +214,7 @@ int typec_switch_set(struct typec_switch *sw,
+ sw_dev = sw->sw_devs[i];
+
+ ret = sw_dev->set(sw_dev, orientation);
+- if (ret)
++ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+@@ -378,7 +378,7 @@ int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
+ mux_dev = mux->mux_devs[i];
+
+ ret = mux_dev->set(mux_dev, state);
+- if (ret)
++ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+--
+2.39.5
+