--- /dev/null
+From f506ef34786cfa942c58cba206b210b0457944b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 21:08:10 +0100
+Subject: afs: Fix post-setattr file edit to do truncation correctly
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit a74ee0e878e262c0276966528d72d4e887174410 ]
+
+At the end of a kAFS RPC operation, there is an "edit" phase (originally
+intended for post-directory modification ops to edit the local image) that
+the setattr VFS op uses to fix up the pagecache if the RPC that requested
+truncation of a file was successful.
+
+afs_setattr_edit_file() calls truncate_setsize() which sets i_size, expands
+the pagecache if needed and truncates the pagecache. The first two of
+those, however, are redundant as they've already been done by
+afs_setattr_success() under the io_lock and the first is also done under
+the callback lock (cb_lock).
+
+Fix afs_setattr_edit_file() to call truncate_pagecache() instead (which is
+called by truncate_setsize()), thereby skipping the redundant parts.
+
+Fixes: 100ccd18bb41 ("netfs: Optimise away reads above the point at which there can be no data")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/20240823200819.532106-3-dhowells@redhat.com
+cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+cc: Pankaj Raghav <p.raghav@samsung.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+cc: netfs@lists.linux.dev
+cc: linux-mm@kvack.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/inode.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 3acf5e0500728..a95e77670b494 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -695,13 +695,18 @@ static void afs_setattr_edit_file(struct afs_operation *op)
+ {
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_vnode *vnode = vp->vnode;
++ struct inode *inode = &vnode->netfs.inode;
+
+ if (op->setattr.attr->ia_valid & ATTR_SIZE) {
+ loff_t size = op->setattr.attr->ia_size;
+- loff_t i_size = op->setattr.old_i_size;
++ loff_t old = op->setattr.old_i_size;
++
++ /* Note: inode->i_size was updated by afs_apply_status() inside
++ * the I/O and callback locks.
++ */
+
+- if (size != i_size) {
+- truncate_setsize(&vnode->netfs.inode, size);
++ if (size != old) {
++ truncate_pagecache(inode, size);
+ netfs_resize_file(&vnode->netfs, size, true);
+ fscache_resize_cookie(afs_vnode_cache(vnode), size);
+ }
+--
+2.43.0
+
--- /dev/null
+From 86ec3fbd42faec02e86d1ec972d598c2f8bea4bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 12:47:11 +0000
+Subject: ALSA: hda: cs35l56: Don't use the device index as a calibration index
+
+From: Simon Trimmer <simont@opensource.cirrus.com>
+
+[ Upstream commit 91191a6e50a2ff752da244493171037663536768 ]
+
+The HDA driver cannot assume that the order in which the devices are
+specified in cirrus,dev-index matches the order of the calibration
+entries.
+
+Only a calibration entry with a matching silicon id will be used.
+
+Fixes: cfa43aaa7948 ("ALSA: hda: cs35l56: Apply amp calibration from EFI data")
+Signed-off-by: Simon Trimmer <simont@opensource.cirrus.com>
+Link: https://patch.msgid.link/20240821124711.44325-1-simont@opensource.cirrus.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/cs35l56_hda.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index e134ede6c5aa5..357fd59aa49e4 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -980,7 +980,7 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+ goto err;
+ }
+
+- cs35l56->base.cal_index = cs35l56->index;
++ cs35l56->base.cal_index = -1;
+
+ cs35l56_init_cs_dsp(&cs35l56->base, &cs35l56->cs_dsp);
+ cs35l56->cs_dsp.client_ops = &cs35l56_hda_client_ops;
+--
+2.43.0
+
--- /dev/null
+From 5460506ad535cd2a956424b45aad780e00016388 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Aug 2024 08:49:23 +0000
+Subject: ASoC: amd: acp: fix module autoloading
+
+From: Yuntao Liu <liuyuntao12@huawei.com>
+
+[ Upstream commit 164199615ae230ace4519141285f06766d6d8036 ]
+
+Add MODULE_DEVICE_TABLE() so that the module can be properly autoloaded
+based on the alias from the platform_device_id table.
+
+Fixes: 9d8a7be88b336 ("ASoC: amd: acp: Add legacy sound card support for Chrome audio")
+Signed-off-by: Yuntao Liu <liuyuntao12@huawei.com>
+Link: https://patch.msgid.link/20240815084923.756476-1-liuyuntao12@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/amd/acp/acp-legacy-mach.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/amd/acp/acp-legacy-mach.c b/sound/soc/amd/acp/acp-legacy-mach.c
+index 47c3b5f167f59..0d529e32e552b 100644
+--- a/sound/soc/amd/acp/acp-legacy-mach.c
++++ b/sound/soc/amd/acp/acp-legacy-mach.c
+@@ -227,6 +227,8 @@ static const struct platform_device_id board_ids[] = {
+ },
+ { }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
++
+ static struct platform_driver acp_asoc_audio = {
+ .driver = {
+ .pm = &snd_soc_pm_ops,
+--
+2.43.0
+
--- /dev/null
+From ffb393bdbd6237963a07e4fe9bcf62bf976c0f3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 14:35:44 +0100
+Subject: ASoC: cs-amp-lib: Ignore empty UEFI calibration entries
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit bb4485562f5907708f1c218b5d70dce04165d1e1 ]
+
+If the timestamp of a calibration entry is 0, it is an unused entry and
+must be ignored.
+
+Some end-products reserve EFI space for calibration entries by shipping
+with a zero-filled EFI file. When searching the file for calibration
+data the driver must skip the empty entries. The timestamp of a valid
+entry is always non-zero.
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Fixes: 1cad8725f2b9 ("ASoC: cs-amp-lib: Add helpers for factory calibration data")
+Link: https://patch.msgid.link/20240822133544.304421-1-rf@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs-amp-lib.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/codecs/cs-amp-lib.c b/sound/soc/codecs/cs-amp-lib.c
+index 605964af8afad..51b128c806718 100644
+--- a/sound/soc/codecs/cs-amp-lib.c
++++ b/sound/soc/codecs/cs-amp-lib.c
+@@ -182,6 +182,10 @@ static int _cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid,
+ for (i = 0; i < efi_data->count; ++i) {
+ u64 cal_target = cs_amp_cal_target_u64(&efi_data->data[i]);
+
++ /* Skip empty entries */
++ if (!efi_data->data[i].calTime[0] && !efi_data->data[i].calTime[1])
++ continue;
++
+ /* Skip entries with unpopulated silicon ID */
+ if (cal_target == 0)
+ continue;
+@@ -193,7 +197,8 @@ static int _cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid,
+ }
+ }
+
+- if (!cal && (amp_index >= 0) && (amp_index < efi_data->count)) {
++ if (!cal && (amp_index >= 0) && (amp_index < efi_data->count) &&
++ (efi_data->data[amp_index].calTime[0] || efi_data->data[amp_index].calTime[1])) {
+ u64 cal_target = cs_amp_cal_target_u64(&efi_data->data[amp_index]);
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From dd34c240aef84c6f859bf281ea913e20359d249a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 12:57:25 +0100
+Subject: ASoC: cs-amp-lib-test: Force test calibration blob entries to be
+ valid
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit bff980d8d9ca537fd5f3c0e9a99876c1e3713e81 ]
+
+For a normal calibration blob the calTarget values must be non-zero and
+unique, and the calTime values must be non-zero. Don't rely on
+get_random_bytes() to be random enough to guarantee this. Force the
+calTarget and calTime values to be valid while retaining randomness
+in the values.
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Fixes: 177862317a98 ("ASoC: cs-amp-lib: Add KUnit test for calibration helpers")
+Link: https://patch.msgid.link/20240822115725.259568-1-rf@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs-amp-lib-test.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/sound/soc/codecs/cs-amp-lib-test.c b/sound/soc/codecs/cs-amp-lib-test.c
+index 15f991b2e16e2..8169ec88a8ba8 100644
+--- a/sound/soc/codecs/cs-amp-lib-test.c
++++ b/sound/soc/codecs/cs-amp-lib-test.c
+@@ -38,6 +38,7 @@ static void cs_amp_lib_test_init_dummy_cal_blob(struct kunit *test, int num_amps
+ {
+ struct cs_amp_lib_test_priv *priv = test->priv;
+ unsigned int blob_size;
++ int i;
+
+ blob_size = offsetof(struct cirrus_amp_efi_data, data) +
+ sizeof(struct cirrus_amp_cal_data) * num_amps;
+@@ -49,6 +50,14 @@ static void cs_amp_lib_test_init_dummy_cal_blob(struct kunit *test, int num_amps
+ priv->cal_blob->count = num_amps;
+
+ get_random_bytes(priv->cal_blob->data, sizeof(struct cirrus_amp_cal_data) * num_amps);
++
++ /* Ensure all timestamps are non-zero to mark the entry valid. */
++ for (i = 0; i < num_amps; i++)
++ priv->cal_blob->data[i].calTime[0] |= 1;
++
++ /* Ensure that all UIDs are non-zero and unique. */
++ for (i = 0; i < num_amps; i++)
++ *(u8 *)&priv->cal_blob->data[i].calTarget[0] = i + 1;
+ }
+
+ static u64 cs_amp_lib_test_get_target_uid(struct kunit *test)
+--
+2.43.0
+
--- /dev/null
+From f968f79bd996991b3c76d57812751ede8be6d3bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Aug 2024 12:33:28 +0530
+Subject: ASoC: SOF: amd: Fix for acp init sequence
+
+From: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+
+[ Upstream commit a42db293e5983aa1508d12644f23d73f0553b32c ]
+
+When the ACP is not powered on by default, the ACP power-on sequence is
+explicitly invoked by programming the PGFSM control mask. The existing
+implementation checks the same PGFSM status mask and programs the same
+PGFSM control mask for all ACP variants, which breaks the ACP power-on
+sequence for the ACP6.0 and ACP6.3 variants. To fix this, update the ACP
+PGFSM control mask and status mask based on the ACP descriptor rev field,
+which varies with the ACP variant.
+
+Fixes: 846aef1d7cc0 ("ASoC: SOF: amd: Add Renoir ACP HW support")
+Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+Link: https://patch.msgid.link/20240816070328.610360-1-Vijendar.Mukunda@amd.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/amd/acp.c | 19 +++++++++++++++++--
+ sound/soc/sof/amd/acp.h | 7 +++++--
+ 2 files changed, 22 insertions(+), 4 deletions(-)
+
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index d95f865669a69..85b58c8ccd0da 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -433,6 +433,7 @@ static int acp_power_on(struct snd_sof_dev *sdev)
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int base = desc->pgfsm_base;
+ unsigned int val;
++ unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
+ int ret;
+
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
+@@ -440,9 +441,23 @@ static int acp_power_on(struct snd_sof_dev *sdev)
+ if (val == ACP_POWERED_ON)
+ return 0;
+
+- if (val & ACP_PGFSM_STATUS_MASK)
++ switch (desc->rev) {
++ case 3:
++ case 5:
++ acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
++ acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
++ break;
++ case 6:
++ acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
++ acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (val & acp_pgfsm_status_mask)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
+- ACP_PGFSM_CNTL_POWER_ON_MASK);
++ acp_pgfsm_cntl_mask);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
+ !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index 1af86b5b28db8..61b28df8c9081 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -25,8 +25,11 @@
+ #define ACP_REG_POLL_TIMEOUT_US 2000
+ #define ACP_DMA_COMPLETE_TIMEOUT_US 5000
+
+-#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
+-#define ACP_PGFSM_STATUS_MASK 0x03
++#define ACP3X_PGFSM_CNTL_POWER_ON_MASK 0x01
++#define ACP3X_PGFSM_STATUS_MASK 0x03
++#define ACP6X_PGFSM_CNTL_POWER_ON_MASK 0x07
++#define ACP6X_PGFSM_STATUS_MASK 0x0F
++
+ #define ACP_POWERED_ON 0x00
+ #define ACP_ASSERT_RESET 0x01
+ #define ACP_RELEASE_RESET 0x00
+--
+2.43.0
+
--- /dev/null
+From 4860ad49ee6f5bc47e59c4a54c18e6006c7055e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Aug 2024 16:29:44 +0530
+Subject: ASoC: SOF: amd: Fix for incorrect acp error register offsets
+
+From: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+
+[ Upstream commit 897e91e995b338002b00454fd0018af26a098148 ]
+
+Adding 'dsp_intr_base' to the ACP error register offsets makes the IRQ
+handler read the wrong registers. Correct the ACP error register
+offsets. The ACP error status register offset and the ACP error reason
+register offset changed from ACP6.0 onwards, so add 'acp_error_stat' and
+'acp_sw0_i2s_err_reason' as descriptor fields in the sof_amd_acp_desc
+structure and update their values based on the ACP variant.
+>From the Rembrandt platform onwards, errors related to the SW1 SoundWire
+manager instance/I2S controller connected on the P1 power tile are
+reported in the ACP_SW1_I2S_ERROR_REASON register, so add a conditional
+check for this.
+
+Fixes: 96eb81851012 ("ASoC: SOF: amd: add interrupt handling for SoundWire manager devices")
+Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+Link: https://patch.msgid.link/20240813105944.3126903-2-Vijendar.Mukunda@amd.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/amd/acp-dsp-offset.h | 6 ++++--
+ sound/soc/sof/amd/acp.c | 11 +++++++----
+ sound/soc/sof/amd/acp.h | 2 ++
+ sound/soc/sof/amd/pci-acp63.c | 2 ++
+ sound/soc/sof/amd/pci-rmb.c | 2 ++
+ sound/soc/sof/amd/pci-rn.c | 2 ++
+ 6 files changed, 19 insertions(+), 6 deletions(-)
+
+diff --git a/sound/soc/sof/amd/acp-dsp-offset.h b/sound/soc/sof/amd/acp-dsp-offset.h
+index 59afbe2e0f420..072b703f9b3f3 100644
+--- a/sound/soc/sof/amd/acp-dsp-offset.h
++++ b/sound/soc/sof/amd/acp-dsp-offset.h
+@@ -76,13 +76,15 @@
+ #define DSP_SW_INTR_CNTL_OFFSET 0x0
+ #define DSP_SW_INTR_STAT_OFFSET 0x4
+ #define DSP_SW_INTR_TRIG_OFFSET 0x8
+-#define ACP_ERROR_STATUS 0x18C4
++#define ACP3X_ERROR_STATUS 0x18C4
++#define ACP6X_ERROR_STATUS 0x1A4C
+ #define ACP3X_AXI2DAGB_SEM_0 0x1880
+ #define ACP5X_AXI2DAGB_SEM_0 0x1884
+ #define ACP6X_AXI2DAGB_SEM_0 0x1874
+
+ /* ACP common registers to report errors related to I2S & SoundWire interfaces */
+-#define ACP_SW0_I2S_ERROR_REASON 0x18B4
++#define ACP3X_SW_I2S_ERROR_REASON 0x18C8
++#define ACP6X_SW0_I2S_ERROR_REASON 0x18B4
+ #define ACP_SW1_I2S_ERROR_REASON 0x1A50
+
+ /* Registers from ACP_SHA block */
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index 9123427fab4e3..d95f865669a69 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -92,6 +92,7 @@ static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
+ unsigned int idx, unsigned int dscr_count)
+ {
+ struct snd_sof_dev *sdev = adata->dev;
++ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int val, status;
+ int ret;
+
+@@ -102,7 +103,7 @@ static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
+ val & (1 << ch), ACP_REG_POLL_INTERVAL,
+ ACP_REG_POLL_TIMEOUT_US);
+ if (ret < 0) {
+- status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
++ status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));
+
+ dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
+@@ -402,9 +403,11 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
+
+ if (val & ACP_ERROR_IRQ_MASK) {
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
+- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_SW0_I2S_ERROR_REASON, 0);
+- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_SW1_I2S_ERROR_REASON, 0);
+- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_ERROR_STATUS, 0);
++ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
++ /* ACP_SW1_I2S_ERROR_REASON is newly added register from rmb platform onwards */
++ if (desc->rev >= 6)
++ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
++ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
+ irq_flag = 1;
+ }
+
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index 87e79d500865a..1af86b5b28db8 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -203,6 +203,8 @@ struct sof_amd_acp_desc {
+ u32 probe_reg_offset;
+ u32 reg_start_addr;
+ u32 reg_end_addr;
++ u32 acp_error_stat;
++ u32 acp_sw0_i2s_err_reason;
+ u32 sdw_max_link_count;
+ u64 sdw_acpi_dev_addr;
+ };
+diff --git a/sound/soc/sof/amd/pci-acp63.c b/sound/soc/sof/amd/pci-acp63.c
+index fc89844473657..986f5928caedd 100644
+--- a/sound/soc/sof/amd/pci-acp63.c
++++ b/sound/soc/sof/amd/pci-acp63.c
+@@ -35,6 +35,8 @@ static const struct sof_amd_acp_desc acp63_chip_info = {
+ .ext_intr_cntl = ACP6X_EXTERNAL_INTR_CNTL,
+ .ext_intr_stat = ACP6X_EXT_INTR_STAT,
+ .ext_intr_stat1 = ACP6X_EXT_INTR_STAT1,
++ .acp_error_stat = ACP6X_ERROR_STATUS,
++ .acp_sw0_i2s_err_reason = ACP6X_SW0_I2S_ERROR_REASON,
+ .dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
+ .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
+diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
+index 4bc30951f8b0d..a366f904e6f31 100644
+--- a/sound/soc/sof/amd/pci-rmb.c
++++ b/sound/soc/sof/amd/pci-rmb.c
+@@ -33,6 +33,8 @@ static const struct sof_amd_acp_desc rembrandt_chip_info = {
+ .pgfsm_base = ACP6X_PGFSM_BASE,
+ .ext_intr_stat = ACP6X_EXT_INTR_STAT,
+ .dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
++ .acp_error_stat = ACP6X_ERROR_STATUS,
++ .acp_sw0_i2s_err_reason = ACP6X_SW0_I2S_ERROR_REASON,
+ .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
+ .fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
+diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
+index e08875bdfa8b1..2b7c53470ce82 100644
+--- a/sound/soc/sof/amd/pci-rn.c
++++ b/sound/soc/sof/amd/pci-rn.c
+@@ -33,6 +33,8 @@ static const struct sof_amd_acp_desc renoir_chip_info = {
+ .pgfsm_base = ACP3X_PGFSM_BASE,
+ .ext_intr_stat = ACP3X_EXT_INTR_STAT,
+ .dsp_intr_base = ACP3X_DSP_SW_INTR_BASE,
++ .acp_error_stat = ACP3X_ERROR_STATUS,
++ .acp_sw0_i2s_err_reason = ACP3X_SW_I2S_ERROR_REASON,
+ .sram_pte_offset = ACP3X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP3X_AXI2DAGB_SEM_0,
+ .acp_clkmux_sel = ACP3X_CLKMUX_SEL,
+--
+2.43.0
+
--- /dev/null
+From 694def8cc8d8bd52aa8cdd1b1665ded7fba60559 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Aug 2024 16:29:43 +0530
+Subject: ASoC: SOF: amd: move iram-dram fence register programming sequence
+
+From: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+
+[ Upstream commit c56ba3e44784527fd6efe5eb7a4fa6c9f6969a58 ]
+
+The existing code modifies the IRAM and DRAM size after SHA DMA start on
+the Vangogh platform. The problem with this sequence is that it might
+cause a SHA DMA failure when the firmware code binary size is greater
+than the default IRAM size. To fix this issue, move the IRAM-DRAM fence
+register programming sequence prior to SHA DMA start.
+
+Fixes: 094d11768f74 ("ASoC: SOF: amd: Skip IRAM/DRAM size modification for Steam Deck OLED")
+Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+Link: https://patch.msgid.link/20240813105944.3126903-1-Vijendar.Mukunda@amd.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/amd/acp.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index 74fd5f2b148b8..9123427fab4e3 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -263,6 +263,17 @@ int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
++
++ /* psp_send_cmd only required for vangogh platform (rev - 5) */
++ if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
++ /* Modify IRAM and DRAM size */
++ ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
++ if (ret)
++ return ret;
++ ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
++ if (ret)
++ return ret;
++ }
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
+@@ -280,17 +291,6 @@ int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
+ return ret;
+ }
+
+- /* psp_send_cmd only required for vangogh platform (rev - 5) */
+- if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
+- /* Modify IRAM and DRAM size */
+- ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
+- if (ret)
+- return ret;
+- ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
+- if (ret)
+- return ret;
+- }
+-
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
+ fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
+ ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
+--
+2.43.0
+
--- /dev/null
+From 511cf885ff52fe6eee75c1825222f042335fce06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Jul 2024 15:22:06 +0800
+Subject: backing-file: convert to using fops->splice_write
+
+From: Ed Tsai <ed.tsai@mediatek.com>
+
+[ Upstream commit 996b37da1e0f51314d4186b326742c2a95a9f0dd ]
+
+Filesystems may define their own splice write. Therefore, use the file
+fops instead of invoking iter_file_splice_write() directly.
+
+Signed-off-by: Ed Tsai <ed.tsai@mediatek.com>
+Link: https://lore.kernel.org/r/20240708072208.25244-1-ed.tsai@mediatek.com
+Fixes: 5ca73468612d ("fuse: implement splice read/write passthrough")
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/backing-file.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/backing-file.c b/fs/backing-file.c
+index afb557446c27c..8860dac58c37e 100644
+--- a/fs/backing-file.c
++++ b/fs/backing-file.c
+@@ -303,13 +303,16 @@ ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
+ return -EIO;
+
++ if (!out->f_op->splice_write)
++ return -EINVAL;
++
+ ret = file_remove_privs(ctx->user_file);
+ if (ret)
+ return ret;
+
+ old_cred = override_creds(ctx->cred);
+ file_start_write(out);
+- ret = iter_file_splice_write(pipe, out, ppos, len, flags);
++ ret = out->f_op->splice_write(pipe, out, ppos, len, flags);
+ file_end_write(out);
+ revert_creds(old_cred);
+
+--
+2.43.0
+
--- /dev/null
+From 39e90a452b70063ee3136d8e86a78985e081b9b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 14:22:42 +0100
+Subject: cifs: Fix FALLOC_FL_PUNCH_HOLE support
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 416871f4fb84bc96822562e654941d5625a25bf8 ]
+
+The cifs filesystem doesn't quite emulate FALLOC_FL_PUNCH_HOLE correctly
+(note that due to lack of protocol support, it can't actually implement it
+directly). Whilst it will (partially) invalidate dirty folios in the
+pagecache, it doesn't write them back first, and so the EOF marker on the
+server may be lower than inode->i_size.
+
+This presents a problem, however, as if the punched hole invalidates the
+tail of the locally cached dirty data, writeback won't know it needs to
+move the EOF over to account for the hole punch (which isn't supposed to
+move the EOF). We could just write zeroes over the punched out region of
+the pagecache and write that back - but this is supposed to be a
+deallocatory operation.
+
+Fix this by manually moving the EOF over on the server after the operation
+if the hole punched would corrupt it.
+
+Note that the FSCTL_SET_ZERO_DATA RPC and the setting of the EOF should
+probably be compounded to stop a third party interfering (or, at least,
+massively reduce the chance).
+
+This was reproducible occasionally by using fsx with the following script:
+
+ truncate 0x0 0x375e2 0x0
+ punch_hole 0x2f6d3 0x6ab5 0x375e2
+ truncate 0x0 0x3a71f 0x375e2
+ mapread 0xee05 0xcf12 0x3a71f
+ write 0x2078e 0x5604 0x3a71f
+ write 0x3ebdf 0x1421 0x3a71f *
+ punch_hole 0x379d0 0x8630 0x40000 *
+ mapread 0x2aaa2 0x85b 0x40000
+ fallocate 0x1b401 0x9ada 0x40000
+ read 0x15f2 0x7d32 0x40000
+ read 0x32f37 0x7a3b 0x40000 *
+
+The second "write" should extend the EOF to 0x40000, and the "punch_hole"
+should operate inside of that - but that depends on whether the VM gets in
+and writes back the data first. If it doesn't, the file ends up 0x3a71f in
+size, not 0x40000.
+
+Fixes: 31742c5a3317 ("enable fallocate punch hole ("fallocate -p") for SMB3")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Shyam Prasad N <nspmangalore@gmail.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2ops.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index cfbca3489ece1..f44f5f2494006 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3287,6 +3287,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ struct inode *inode = file_inode(file);
+ struct cifsFileInfo *cfile = file->private_data;
+ struct file_zero_data_information fsctl_buf;
++ unsigned long long end = offset + len, i_size, remote_i_size;
+ long rc;
+ unsigned int xid;
+ __u8 set_sparse = 1;
+@@ -3318,6 +3319,27 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ (char *)&fsctl_buf,
+ sizeof(struct file_zero_data_information),
+ CIFSMaxBufSize, NULL, NULL);
++
++ if (rc)
++ goto unlock;
++
++ /* If there's dirty data in the buffer that would extend the EOF if it
++ * were written, then we need to move the EOF marker over to the lower
++ * of the high end of the hole and the proposed EOF. The problem is
++ * that we locally hole-punch the tail of the dirty data, the proposed
++ * EOF update will end up in the wrong place.
++ */
++ i_size = i_size_read(inode);
++ remote_i_size = netfs_inode(inode)->remote_i_size;
++ if (end > remote_i_size && i_size > remote_i_size) {
++ unsigned long long extend_to = umin(end, i_size);
++ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid, cfile->pid, extend_to);
++ if (rc >= 0)
++ netfs_inode(inode)->remote_i_size = extend_to;
++ }
++
++unlock:
+ filemap_invalidate_unlock(inode->i_mapping);
+ out:
+ inode_unlock(inode);
+--
+2.43.0
+
--- /dev/null
+From d63b924d950659ec84e21fbc25c8ab3f985a3a7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 31 May 2024 14:59:22 -0400
+Subject: drm/amdgpu: Do not wait for MP0_C2PMSG_33 IFWI init in SRIOV
+
+From: Victor Lu <victorchengchi.lu@amd.com>
+
+[ Upstream commit b32563859d6f61265222ec0f27d394964a8f7669 ]
+
+SRIOV does not need to wait for IFWI init, and MP0_C2PMSG_33 is blocked
+for VF access.
+
+Signed-off-by: Victor Lu <victorchengchi.lu@amd.com>
+Reviewed-by: Vignesh Chander <Vignesh.Chander@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 9cead81eff63 ("drm/amdgpu: fix eGPU hotplug regression")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 26 ++++++++++---------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 0e31bdb4b7cb6..ea5223388cff2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -256,19 +256,21 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ u32 msg;
+ int i, ret = 0;
+
+- /* It can take up to a second for IFWI init to complete on some dGPUs,
+- * but generally it should be in the 60-100ms range. Normally this starts
+- * as soon as the device gets power so by the time the OS loads this has long
+- * completed. However, when a card is hotplugged via e.g., USB4, we need to
+- * wait for this to complete. Once the C2PMSG is updated, we can
+- * continue.
+- */
++ if (!amdgpu_sriov_vf(adev)) {
++ /* It can take up to a second for IFWI init to complete on some dGPUs,
++ * but generally it should be in the 60-100ms range. Normally this starts
++ * as soon as the device gets power so by the time the OS loads this has long
++ * completed. However, when a card is hotplugged via e.g., USB4, we need to
++ * wait for this to complete. Once the C2PMSG is updated, we can
++ * continue.
++ */
+
+- for (i = 0; i < 1000; i++) {
+- msg = RREG32(mmMP0_SMN_C2PMSG_33);
+- if (msg & 0x80000000)
+- break;
+- usleep_range(1000, 1100);
++ for (i = 0; i < 1000; i++) {
++ msg = RREG32(mmMP0_SMN_C2PMSG_33);
++ if (msg & 0x80000000)
++ break;
++ usleep_range(1000, 1100);
++ }
+ }
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+--
+2.43.0
+
--- /dev/null
+From a0c88d216551d83f589ae844233745d85b0f506e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Aug 2024 11:14:29 -0400
+Subject: drm/amdgpu: fix eGPU hotplug regression
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 9cead81eff635e3b3cbce51b40228f3bdc6f2b8c ]
+
+The driver needs to wait for the on-board firmware
+to finish its initialization before probing the card.
+Commit 959056982a9b ("drm/amdgpu: Fix discovery initialization failure during pci rescan")
+switched from using msleep() to using usleep_range(), which
+seems to have caused init failures on some navi1x boards. Switch
+back to msleep().
+
+Fixes: 959056982a9b ("drm/amdgpu: Fix discovery initialization failure during pci rescan")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3559
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3500
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Ma Jun <Jun.Ma2@amd.com>
+(cherry picked from commit c69b07f7bbc905022491c45097923d3487479529)
+Cc: stable@vger.kernel.org # 6.10.x
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index ea5223388cff2..f1b08893765cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -269,7 +269,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+- usleep_range(1000, 1100);
++ msleep(1);
+ }
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 22fcfc713e2d8dda5ed50360e33049487f3b5451 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Aug 2024 12:50:44 +0200
+Subject: drm/xe/display: Make display suspend/resume work on discrete
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+[ Upstream commit ddf6492e0e508b7c2b42c8d5a4ac82bd38ef0dd5 ]
+
+We should unpin before evicting all memory, and repin after GT resume.
+This way, we preserve the contents of the framebuffers, and won't hang
+on resume due to the migration engine not being restored yet.
+
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Uma Shankar <uma.shankar@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240806105044.596842-3-maarten.lankhorst@linux.intel.com
+Signed-off-by: Maarten Lankhorst,,, <maarten.lankhorst@linux.intel.com>
+(cherry picked from commit cb8f81c1753187995b7a43e79c12959f14eb32d3)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/display/xe_display.c | 23 +++++++++++++++++++++++
+ drivers/gpu/drm/xe/xe_pm.c | 11 ++++++-----
+ 2 files changed, 29 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
+index 79d33d592093c..96835ffa5734e 100644
+--- a/drivers/gpu/drm/xe/display/xe_display.c
++++ b/drivers/gpu/drm/xe/display/xe_display.c
+@@ -302,6 +302,27 @@ static bool suspend_to_idle(void)
+ return false;
+ }
+
++static void xe_display_flush_cleanup_work(struct xe_device *xe)
++{
++ struct intel_crtc *crtc;
++
++ for_each_intel_crtc(&xe->drm, crtc) {
++ struct drm_crtc_commit *commit;
++
++ spin_lock(&crtc->base.commit_lock);
++ commit = list_first_entry_or_null(&crtc->base.commit_list,
++ struct drm_crtc_commit, commit_entry);
++ if (commit)
++ drm_crtc_commit_get(commit);
++ spin_unlock(&crtc->base.commit_lock);
++
++ if (commit) {
++ wait_for_completion(&commit->cleanup_done);
++ drm_crtc_commit_put(commit);
++ }
++ }
++}
++
+ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+ {
+ bool s2idle = suspend_to_idle();
+@@ -319,6 +340,8 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+ if (!runtime)
+ intel_display_driver_suspend(xe);
+
++ xe_display_flush_cleanup_work(xe);
++
+ intel_dp_mst_suspend(xe);
+
+ intel_hpd_cancel_work(xe);
+diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
+index 07615acd2c299..cf80679ceb701 100644
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -91,13 +91,13 @@ int xe_pm_suspend(struct xe_device *xe)
+ for_each_gt(gt, xe, id)
+ xe_gt_suspend_prepare(gt);
+
++ xe_display_pm_suspend(xe, false);
++
+ /* FIXME: Super racey... */
+ err = xe_bo_evict_all(xe);
+ if (err)
+ goto err;
+
+- xe_display_pm_suspend(xe, false);
+-
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_suspend(gt);
+ if (err) {
+@@ -151,11 +151,11 @@ int xe_pm_resume(struct xe_device *xe)
+
+ xe_irq_resume(xe);
+
+- xe_display_pm_resume(xe, false);
+-
+ for_each_gt(gt, xe, id)
+ xe_gt_resume(gt);
+
++ xe_display_pm_resume(xe, false);
++
+ err = xe_bo_restore_user(xe);
+ if (err)
+ goto err;
+@@ -363,10 +363,11 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
+ if (xe->d3cold.allowed) {
++ xe_display_pm_suspend(xe, true);
++
+ err = xe_bo_evict_all(xe);
+ if (err)
+ goto out;
+- xe_display_pm_suspend(xe, true);
+ }
+
+ for_each_gt(gt, xe, id) {
+--
+2.43.0
+
--- /dev/null
+From d223aefbe0dbbd9758523a44ed575d187a338dcf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Jun 2024 19:03:48 +0200
+Subject: drm/xe/exec_queue: Rename xe_exec_queue::compute to xe_exec_queue::lr
+
+From: Francois Dugast <francois.dugast@intel.com>
+
+[ Upstream commit 731e46c032281601756f08cfa7d8505fe41166a9 ]
+
+The properties of this struct are used in long running context so
+make that clear by renaming it to lr, in alignment with the rest
+of the code.
+
+Cc: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Francois Dugast <francois.dugast@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240613170348.723245-1-francois.dugast@intel.com
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Stable-dep-of: 730b72480e29 ("drm/xe: prevent UAF around preempt fence")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_exec_queue.c | 6 +--
+ drivers/gpu/drm/xe/xe_exec_queue_types.h | 14 +++---
+ drivers/gpu/drm/xe/xe_preempt_fence.c | 2 +-
+ drivers/gpu/drm/xe/xe_vm.c | 58 ++++++++++++------------
+ 4 files changed, 40 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
+index 2ae4420e29353..316731c5cce6d 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue.c
++++ b/drivers/gpu/drm/xe/xe_exec_queue.c
+@@ -67,7 +67,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
+ q->fence_irq = >->fence_irq[hwe->class];
+ q->ring_ops = gt->ring_ops[hwe->class];
+ q->ops = gt->exec_queue_ops;
+- INIT_LIST_HEAD(&q->compute.link);
++ INIT_LIST_HEAD(&q->lr.link);
+ INIT_LIST_HEAD(&q->multi_gt_link);
+
+ q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
+@@ -631,8 +631,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+ return PTR_ERR(q);
+
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+- q->compute.context = dma_fence_context_alloc(1);
+- spin_lock_init(&q->compute.lock);
++ q->lr.context = dma_fence_context_alloc(1);
++ spin_lock_init(&q->lr.lock);
+
+ err = xe_vm_add_compute_exec_queue(vm, q);
+ if (XE_IOCTL_DBG(xe, err))
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
+index f0c40e8ad80a1..52a1965d91375 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
++++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
+@@ -115,19 +115,19 @@ struct xe_exec_queue {
+ enum xe_exec_queue_priority priority;
+ } sched_props;
+
+- /** @compute: compute exec queue state */
++ /** @lr: long-running exec queue state */
+ struct {
+- /** @compute.pfence: preemption fence */
++ /** @lr.pfence: preemption fence */
+ struct dma_fence *pfence;
+- /** @compute.context: preemption fence context */
++ /** @lr.context: preemption fence context */
+ u64 context;
+- /** @compute.seqno: preemption fence seqno */
++ /** @lr.seqno: preemption fence seqno */
+ u32 seqno;
+- /** @compute.link: link into VM's list of exec queues */
++ /** @lr.link: link into VM's list of exec queues */
+ struct list_head link;
+- /** @compute.lock: preemption fences lock */
++ /** @lr.lock: preemption fences lock */
+ spinlock_t lock;
+- } compute;
++ } lr;
+
+ /** @ops: submission backend exec queue operations */
+ const struct xe_exec_queue_ops *ops;
+diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
+index 5b243b7feb59d..e8b8ae5c6485e 100644
+--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
++++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
+@@ -129,7 +129,7 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
+ list_del_init(&pfence->link);
+ pfence->q = xe_exec_queue_get(q);
+ dma_fence_init(&pfence->base, &preempt_fence_ops,
+- &q->compute.lock, context, seqno);
++ &q->lr.lock, context, seqno);
+
+ return &pfence->base;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 3137cbbaabde0..fd5612cc6f19b 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -83,10 +83,10 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+- if (!q->compute.pfence ||
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
++ if (!q->lr.pfence ||
+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &q->compute.pfence->flags)) {
++ &q->lr.pfence->flags)) {
+ return true;
+ }
+ }
+@@ -129,14 +129,14 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
+
+ xe_vm_assert_held(vm);
+
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+- if (q->compute.pfence) {
+- long timeout = dma_fence_wait(q->compute.pfence, false);
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
++ if (q->lr.pfence) {
++ long timeout = dma_fence_wait(q->lr.pfence, false);
+
+ if (timeout < 0)
+ return -ETIME;
+- dma_fence_put(q->compute.pfence);
+- q->compute.pfence = NULL;
++ dma_fence_put(q->lr.pfence);
++ q->lr.pfence = NULL;
+ }
+ }
+
+@@ -148,7 +148,7 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
+ struct xe_exec_queue *q;
+
+ xe_vm_assert_held(vm);
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+ if (!xe_exec_queue_is_idle(q))
+ return false;
+ }
+@@ -161,17 +161,17 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
+ struct list_head *link;
+ struct xe_exec_queue *q;
+
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+ struct dma_fence *fence;
+
+ link = list->next;
+ xe_assert(vm->xe, link != list);
+
+ fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
+- q, q->compute.context,
+- ++q->compute.seqno);
+- dma_fence_put(q->compute.pfence);
+- q->compute.pfence = fence;
++ q, q->lr.context,
++ ++q->lr.seqno);
++ dma_fence_put(q->lr.pfence);
++ q->lr.pfence = fence;
+ }
+ }
+
+@@ -191,10 +191,10 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
+ if (err)
+ goto out_unlock;
+
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+- if (q->compute.pfence) {
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
++ if (q->lr.pfence) {
+ dma_resv_add_fence(bo->ttm.base.resv,
+- q->compute.pfence,
++ q->lr.pfence,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+
+@@ -211,10 +211,10 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+ q->ops->resume(q);
+
+- drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
++ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+ }
+ }
+@@ -238,16 +238,16 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+ if (err)
+ goto out_up_write;
+
+- pfence = xe_preempt_fence_create(q, q->compute.context,
+- ++q->compute.seqno);
++ pfence = xe_preempt_fence_create(q, q->lr.context,
++ ++q->lr.seqno);
+ if (!pfence) {
+ err = -ENOMEM;
+ goto out_fini;
+ }
+
+- list_add(&q->compute.link, &vm->preempt.exec_queues);
++ list_add(&q->lr.link, &vm->preempt.exec_queues);
+ ++vm->preempt.num_exec_queues;
+- q->compute.pfence = pfence;
++ q->lr.pfence = pfence;
+
+ down_read(&vm->userptr.notifier_lock);
+
+@@ -284,12 +284,12 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+ return;
+
+ down_write(&vm->lock);
+- list_del(&q->compute.link);
++ list_del(&q->lr.link);
+ --vm->preempt.num_exec_queues;
+- if (q->compute.pfence) {
+- dma_fence_enable_sw_signaling(q->compute.pfence);
+- dma_fence_put(q->compute.pfence);
+- q->compute.pfence = NULL;
++ if (q->lr.pfence) {
++ dma_fence_enable_sw_signaling(q->lr.pfence);
++ dma_fence_put(q->lr.pfence);
++ q->lr.pfence = NULL;
+ }
+ up_write(&vm->lock);
+ }
+@@ -325,7 +325,7 @@ static void xe_vm_kill(struct xe_vm *vm)
+ vm->flags |= XE_VM_FLAG_BANNED;
+ trace_xe_vm_kill(vm);
+
+- list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
++ list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
+ q->ops->kill(q);
+ xe_vm_unlock(vm);
+
+--
+2.43.0
+
--- /dev/null
+From d928c88dd2cfa5a9a92d01340d996d62e6f0b81e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 13:01:03 -0400
+Subject: drm/xe: Prepare display for D3Cold
+
+From: Rodrigo Vivi <rodrigo.vivi@intel.com>
+
+[ Upstream commit e7b180b22022f52e3f5fca695cc75d63bddc5a1c ]
+
+Prepare power-well and DC handling for a full power
+loss during D3Cold, then sanitize it upon D3->D0.
+Otherwise we get a bunch of state mismatches.
+
+Ideally we could leave DC9 enabled and wouldn't need
+to move DC9->DC0 on every runtime resume, however,
+the disable_DC is part of the power-well checks and
+intrinsic to the dc_off power well. In the future that
+can be detangled so we can have even bigger power savings.
+But for now, let's focus on getting a D3Cold, which saves
+much more power by itself.
+
+v2: create new functions to avoid full-suspend-resume path,
+which would result in a deadlock between xe_gem_fault and the
+modeset-ioctl.
+
+v3: Only avoid the full modeset to avoid the race, for a more
+robust suspend-resume.
+
+Cc: Anshuman Gupta <anshuman.gupta@intel.com>
+Cc: Uma Shankar <uma.shankar@intel.com>
+Tested-by: Francois Dugast <francois.dugast@intel.com>
+Reviewed-by: Anshuman Gupta <anshuman.gupta@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240522170105.327472-5-rodrigo.vivi@intel.com
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Stable-dep-of: ddf6492e0e50 ("drm/xe/display: Make display suspend/resume work on discrete")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/display/xe_display.c | 10 ++++++----
+ drivers/gpu/drm/xe/display/xe_display.h | 8 ++++----
+ drivers/gpu/drm/xe/xe_pm.c | 15 ++++++++++++---
+ 3 files changed, 22 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
+index 7cdc03dc40ed9..79d33d592093c 100644
+--- a/drivers/gpu/drm/xe/display/xe_display.c
++++ b/drivers/gpu/drm/xe/display/xe_display.c
+@@ -302,7 +302,7 @@ static bool suspend_to_idle(void)
+ return false;
+ }
+
+-void xe_display_pm_suspend(struct xe_device *xe)
++void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+ {
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.enable_display)
+@@ -316,7 +316,8 @@ void xe_display_pm_suspend(struct xe_device *xe)
+ if (has_display(xe))
+ drm_kms_helper_poll_disable(&xe->drm);
+
+- intel_display_driver_suspend(xe);
++ if (!runtime)
++ intel_display_driver_suspend(xe);
+
+ intel_dp_mst_suspend(xe);
+
+@@ -352,7 +353,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
+ intel_power_domains_resume(xe);
+ }
+
+-void xe_display_pm_resume(struct xe_device *xe)
++void xe_display_pm_resume(struct xe_device *xe, bool runtime)
+ {
+ if (!xe->info.enable_display)
+ return;
+@@ -367,7 +368,8 @@ void xe_display_pm_resume(struct xe_device *xe)
+
+ /* MST sideband requires HPD interrupts enabled */
+ intel_dp_mst_resume(xe);
+- intel_display_driver_resume(xe);
++ if (!runtime)
++ intel_display_driver_resume(xe);
+
+ intel_hpd_poll_disable(xe);
+ if (has_display(xe))
+diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
+index 710e56180b52d..93d1f779b9788 100644
+--- a/drivers/gpu/drm/xe/display/xe_display.h
++++ b/drivers/gpu/drm/xe/display/xe_display.h
+@@ -34,10 +34,10 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
+ void xe_display_irq_reset(struct xe_device *xe);
+ void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
+
+-void xe_display_pm_suspend(struct xe_device *xe);
++void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
+ void xe_display_pm_suspend_late(struct xe_device *xe);
+ void xe_display_pm_resume_early(struct xe_device *xe);
+-void xe_display_pm_resume(struct xe_device *xe);
++void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+
+ #else
+
+@@ -63,10 +63,10 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
+ static inline void xe_display_irq_reset(struct xe_device *xe) {}
+ static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
+
+-static inline void xe_display_pm_suspend(struct xe_device *xe) {}
++static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
+ static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
+ static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
+-static inline void xe_display_pm_resume(struct xe_device *xe) {}
++static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+
+ #endif /* CONFIG_DRM_XE_DISPLAY */
+ #endif /* _XE_DISPLAY_H_ */
+diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
+index 37fbeda12d3bd..07615acd2c299 100644
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -96,12 +96,12 @@ int xe_pm_suspend(struct xe_device *xe)
+ if (err)
+ goto err;
+
+- xe_display_pm_suspend(xe);
++ xe_display_pm_suspend(xe, false);
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_suspend(gt);
+ if (err) {
+- xe_display_pm_resume(xe);
++ xe_display_pm_resume(xe, false);
+ goto err;
+ }
+ }
+@@ -151,7 +151,7 @@ int xe_pm_resume(struct xe_device *xe)
+
+ xe_irq_resume(xe);
+
+- xe_display_pm_resume(xe);
++ xe_display_pm_resume(xe, false);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_resume(gt);
+@@ -366,6 +366,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
+ err = xe_bo_evict_all(xe);
+ if (err)
+ goto out;
++ xe_display_pm_suspend(xe, true);
+ }
+
+ for_each_gt(gt, xe, id) {
+@@ -375,7 +376,12 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
+ }
+
+ xe_irq_suspend(xe);
++
++ if (xe->d3cold.allowed)
++ xe_display_pm_suspend_late(xe);
+ out:
++ if (err)
++ xe_display_pm_resume(xe, true);
+ lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_pm_write_callback_task(xe, NULL);
+ return err;
+@@ -411,6 +417,8 @@ int xe_pm_runtime_resume(struct xe_device *xe)
+ if (err)
+ goto out;
+
++ xe_display_pm_resume_early(xe);
++
+ /*
+ * This only restores pinned memory which is the memory
+ * required for the GT(s) to resume.
+@@ -426,6 +434,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
+ xe_gt_resume(gt);
+
+ if (xe->d3cold.allowed && xe->d3cold.power_lost) {
++ xe_display_pm_resume(xe, true);
+ err = xe_bo_restore_user(xe);
+ if (err)
+ goto out;
+--
+2.43.0
+
--- /dev/null
+From e59cf8c139edce087792a08b37ec50bc5da56e47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Aug 2024 12:01:30 +0100
+Subject: drm/xe: prevent UAF around preempt fence
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit 730b72480e29f63fd644f5fa57c9d46109428953 ]
+
+The fence lock is part of the queue, therefore in the current design
+anything locking the fence should then also hold a ref to the queue to
+prevent the queue from being freed.
+
+However, currently it looks like we signal the fence and then drop the
+queue ref, but if something is waiting on the fence, the waiter is
+kicked to wake up at some later point, where upon waking up it first
+grabs the lock before checking the fence state. But if we have already
+dropped the queue ref, then the lock might already be freed as part of
+the queue, leading to a use-after-free.
+
+To prevent this, move the fence lock into the fence itself so we don't
+run into lifetime issues. Alternative might be to have device level
+lock, or only release the queue in the fence release callback, however
+that might require pushing to another worker to avoid locking issues.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+References: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2454
+References: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2342
+References: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2020
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240814110129.825847-2-matthew.auld@intel.com
+(cherry picked from commit 7116c35aacedc38be6d15bd21b2fc936eed0008b)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_exec_queue.c | 1 -
+ drivers/gpu/drm/xe/xe_exec_queue_types.h | 2 --
+ drivers/gpu/drm/xe/xe_preempt_fence.c | 3 ++-
+ drivers/gpu/drm/xe/xe_preempt_fence_types.h | 2 ++
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
+index 316731c5cce6d..ba7013f82c8b6 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue.c
++++ b/drivers/gpu/drm/xe/xe_exec_queue.c
+@@ -632,7 +632,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ q->lr.context = dma_fence_context_alloc(1);
+- spin_lock_init(&q->lr.lock);
+
+ err = xe_vm_add_compute_exec_queue(vm, q);
+ if (XE_IOCTL_DBG(xe, err))
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
+index 52a1965d91375..a5aa43942d8cf 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
++++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
+@@ -125,8 +125,6 @@ struct xe_exec_queue {
+ u32 seqno;
+ /** @lr.link: link into VM's list of exec queues */
+ struct list_head link;
+- /** @lr.lock: preemption fences lock */
+- spinlock_t lock;
+ } lr;
+
+ /** @ops: submission backend exec queue operations */
+diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
+index e8b8ae5c6485e..c453f45328b1c 100644
+--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
++++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
+@@ -128,8 +128,9 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
+ {
+ list_del_init(&pfence->link);
+ pfence->q = xe_exec_queue_get(q);
++ spin_lock_init(&pfence->lock);
+ dma_fence_init(&pfence->base, &preempt_fence_ops,
+- &q->lr.lock, context, seqno);
++ &pfence->lock, context, seqno);
+
+ return &pfence->base;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
+index b54b5c29b5331..312c3372a49f9 100644
+--- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h
++++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
+@@ -25,6 +25,8 @@ struct xe_preempt_fence {
+ struct xe_exec_queue *q;
+ /** @preempt_work: work struct which issues preemption */
+ struct work_struct preempt_work;
++ /** @lock: dma-fence fence lock */
++ spinlock_t lock;
+ /** @error: preempt fence is in error state */
+ int error;
+ };
+--
+2.43.0
+
--- /dev/null
+From 4aaa541cdedc4167bac05041f861548abb8ed253 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 20:00:07 +0200
+Subject: drm/xe/vm: Simplify if condition
+
+From: Thorsten Blum <thorsten.blum@toblux.com>
+
+[ Upstream commit b3181f433206a1432bc7093d1896fe36026f7fff ]
+
+The if condition !A || A && B can be simplified to !A || B.
+
+Fixes the following Coccinelle/coccicheck warning reported by
+excluded_middle.cocci:
+
+ WARNING !A || A && B is equivalent to !A || B
+
+Compile-tested only.
+
+Signed-off-by: Thorsten Blum <thorsten.blum@toblux.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240603180005.191578-1-thorsten.blum@toblux.com
+Stable-dep-of: 730b72480e29 ("drm/xe: prevent UAF around preempt fence")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 4aa3943e6f292..3137cbbaabde0 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -85,8 +85,8 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (!q->compute.pfence ||
+- (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &q->compute.pfence->flags))) {
++ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
++ &q->compute.pfence->flags)) {
+ return true;
+ }
+ }
+--
+2.43.0
+
--- /dev/null
+From 32d05e614a783ba3a2bb199e619e4ef728b45b83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 10:32:34 -0400
+Subject: fs/nfsd: fix update of inode attrs in CB_GETATTR
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit 7e8ae8486e4471513e2111aba6ac29f2357bed2a ]
+
+Currently, we copy the mtime and ctime to the in-core inode and then
+mark the inode dirty. This is fine for certain types of filesystems, but
+not all. Some require a real setattr to properly change these values
+(e.g. ceph or reexported NFS).
+
+Fix this code to call notify_change() instead, which is the proper way
+to effect a setattr. There is one problem though:
+
+In this case, the client is holding a write delegation and has sent us
+attributes to update our cache. We don't want to break the delegation
+for this since that would defeat the purpose. Add a new ATTR_DELEG flag
+that makes notify_change bypass the try_break_deleg call.
+
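+As a rough, self-contained sketch (plain userspace C with hypothetical
+stand-in helpers, not the VFS code), the gate added to notify_change()
+boils down to skipping the delegation break when the new flag is set:
+
+  #include <stdio.h>
+
+  #define ATTR_MTIME (1 << 5)
+  #define ATTR_CTIME (1 << 6)
+  #define ATTR_DELEG (1 << 18)  /* setattr on behalf of the deleg holder */
+
+  /* Hypothetical stand-ins for try_break_deleg() and ->setattr(). */
+  static int try_break_deleg_stub(void)
+  {
+          printf("breaking delegation\n");
+          return 0;
+  }
+
+  static int do_setattr_stub(void)
+  {
+          printf("applying attributes\n");
+          return 0;
+  }
+
+  static int notify_change_sketch(unsigned int ia_valid)
+  {
+          /* Skip the recall when the holder of the write delegation is
+           * the one updating mtime/ctime, so the delegation survives.
+           */
+          if (!(ia_valid & ATTR_DELEG)) {
+                  int err = try_break_deleg_stub();
+
+                  if (err)
+                          return err;
+          }
+          return do_setattr_stub();
+  }
+
+  int main(void)
+  {
+          /* Ordinary setattr: the delegation gets broken first. */
+          notify_change_sketch(ATTR_MTIME | ATTR_CTIME);
+          /* Update from the delegation holder: left intact. */
+          notify_change_sketch(ATTR_MTIME | ATTR_CTIME | ATTR_DELEG);
+          return 0;
+  }
+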
+Fixes: c5967721e106 ("NFSD: handle GETATTR conflict with write delegation")
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/attr.c | 14 +++++++++++---
+ fs/nfsd/nfs4state.c | 18 +++++++++++++-----
+ fs/nfsd/nfs4xdr.c | 2 +-
+ fs/nfsd/state.h | 2 +-
+ include/linux/fs.h | 1 +
+ 5 files changed, 27 insertions(+), 10 deletions(-)
+
+diff --git a/fs/attr.c b/fs/attr.c
+index 960a310581ebb..0dbf43b6555c8 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -489,9 +489,17 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
+ error = security_inode_setattr(idmap, dentry, attr);
+ if (error)
+ return error;
+- error = try_break_deleg(inode, delegated_inode);
+- if (error)
+- return error;
++
++ /*
++ * If ATTR_DELEG is set, then these attributes are being set on
++ * behalf of the holder of a write delegation. We want to avoid
++ * breaking the delegation in this case.
++ */
++ if (!(ia_valid & ATTR_DELEG)) {
++ error = try_break_deleg(inode, delegated_inode);
++ if (error)
++ return error;
++ }
+
+ if (inode->i_op->setattr)
+ error = inode->i_op->setattr(idmap, dentry, attr);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 02d43f95146ee..07f2496850c4c 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -8815,7 +8815,7 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
+ /**
+ * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
+ * @rqstp: RPC transaction context
+- * @inode: file to be checked for a conflict
++ * @dentry: dentry of inode to be checked for a conflict
+ * @modified: return true if file was modified
+ * @size: new size of file if modified is true
+ *
+@@ -8830,7 +8830,7 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
+ * code is returned.
+ */
+ __be32
+-nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
++nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
+ bool *modified, u64 *size)
+ {
+ __be32 status;
+@@ -8839,6 +8839,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ struct file_lease *fl;
+ struct iattr attrs;
+ struct nfs4_cb_fattr *ncf;
++ struct inode *inode = d_inode(dentry);
+
+ *modified = false;
+ ctx = locks_inode_context(inode);
+@@ -8890,15 +8891,22 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
+ ncf->ncf_file_modified = true;
+ if (ncf->ncf_file_modified) {
++ int err;
++
+ /*
+ * Per section 10.4.3 of RFC 8881, the server would
+ * not update the file's metadata with the client's
+ * modified size
+ */
+ attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
+- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
+- setattr_copy(&nop_mnt_idmap, inode, &attrs);
+- mark_inode_dirty(inode);
++ attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
++ inode_lock(inode);
++ err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
++ inode_unlock(inode);
++ if (err) {
++ nfs4_put_stid(&dp->dl_stid);
++ return nfserrno(err);
++ }
+ ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
+ *size = ncf->ncf_cur_fsize;
+ *modified = true;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 8a7bc2b58e721..0869062280ccc 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3565,7 +3565,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ }
+ args.size = 0;
+ if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+- status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
++ status = nfsd4_deleg_getattr_conflict(rqstp, dentry,
+ &file_modified, &size);
+ if (status)
+ goto out;
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index ffc217099d191..ec4559ecd193b 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -781,5 +781,5 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
+ }
+
+ extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
+- struct inode *inode, bool *file_modified, u64 *size);
++ struct dentry *dentry, bool *file_modified, u64 *size);
+ #endif /* NFSD4_STATE_H */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 36b9e87439221..5f07c1c377df6 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -208,6 +208,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+ #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
+ #define ATTR_TIMES_SET (1 << 16)
+ #define ATTR_TOUCH (1 << 17)
++#define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */
+
+ /*
+ * Whiteout is represented by a char device. The following constants define the
+--
+2.43.0
+
--- /dev/null
+From a7d676f7da1fe58b50e470c67a984caf36b7416b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 21:08:09 +0100
+Subject: mm: Fix missing folio invalidation calls during truncation
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 0aa2e1b2fb7a75aa4b5b4347055ccfea6f091769 ]
+
+When AS_RELEASE_ALWAYS is set on a mapping, the ->release_folio() and
+->invalidate_folio() calls should be invoked even if PG_private and
+PG_private_2 aren't set. This is used by netfslib to keep track of the
+point above which reads can be skipped in favour of just zeroing pagecache
+locally.
+
+There are a couple of places in truncation in which invalidation is only
+called when folio_has_private() is true. Fix these to check
+folio_needs_release() instead.
+
+Without this, the generic/075 and generic/112 xfstests (both fsx-based
+tests) fail with minimum folio size patches applied[1].
+
+Fixes: b4fa966f03b7 ("mm, netfs, fscache: stop read optimisation when folio removed from pagecache")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/20240815090849.972355-1-kernel@pankajraghav.com/ [1]
+Link: https://lore.kernel.org/r/20240823200819.532106-2-dhowells@redhat.com
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+cc: Pankaj Raghav <p.raghav@samsung.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+cc: netfs@lists.linux.dev
+cc: linux-mm@kvack.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/truncate.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/truncate.c b/mm/truncate.c
+index e99085bf3d34d..a2af7f088407f 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -174,7 +174,7 @@ static void truncate_cleanup_folio(struct folio *folio)
+ if (folio_mapped(folio))
+ unmap_mapping_folio(folio);
+
+- if (folio_has_private(folio))
++ if (folio_needs_release(folio))
+ folio_invalidate(folio, 0, folio_size(folio));
+
+ /*
+@@ -235,7 +235,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
+ */
+ folio_zero_range(folio, offset, length);
+
+- if (folio_has_private(folio))
++ if (folio_needs_release(folio))
+ folio_invalidate(folio, offset, length);
+ if (!folio_test_large(folio))
+ return true;
+--
+2.43.0
+
--- /dev/null
+From 4fa4adae73f68de2b6d02daf30b7c9a39aefa77e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 Aug 2024 12:56:53 +0100
+Subject: netfs: Fix interaction of streaming writes with zero-point tracker
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit e00e99ba6c6b8e5239e75cd6684a6827d93c39a2 ]
+
+When a folio that is marked for streaming write (dirty, but not uptodate,
+with partial content specified in the private data) is written back, the
+folio is effectively switched to the blank state upon completion of the
+write. This means that if we want to read it in future, we need to reread
+the whole folio.
+
+However, if the folio is above the zero_point position, when it is read
+back, it will just be cleared and the read skipped, leading to apparent
+local corruption.
+
+Fix this by increasing the zero_point to the end of the dirty data in the
+folio when clearing the folio state after writeback. This is analogous to
+the folio having ->release_folio() called upon it.
+
+This was causing the config.log generated by configuring a cpython tree on
+a cifs share to get corrupted because the scripts involved were appending
+text to the file in small pieces.
+
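+As a minimal, self-contained illustration of the bookkeeping (hypothetical
+types, not the netfs structures): on writeback completion the zero_point
+is advanced to the end of the folio's dirty span and is never moved back
+by this path:
+
+  #include <stddef.h>
+  #include <stdio.h>
+
+  struct zp_tracker {
+          unsigned long long zero_point;  /* no server data above this */
+  };
+
+  /* Called when a streaming-write folio finishes writeback: the folio
+   * is no longer dirty, so data up to the end of its dirty span must
+   * now be treated as present on the server and can no longer be
+   * short-circuited to zeroes on read.
+   */
+  static void note_written_back(struct zp_tracker *t,
+                                unsigned long long folio_pos,
+                                size_t dirty_offset, size_t dirty_len)
+  {
+          unsigned long long fend = folio_pos + dirty_offset + dirty_len;
+
+          if (fend > t->zero_point)
+                  t->zero_point = fend;
+  }
+
+  int main(void)
+  {
+          struct zp_tracker t = { .zero_point = 4096 };
+
+          note_written_back(&t, 8192, 100, 300); /* dirty span ends 8592 */
+          printf("zero_point = %llu\n", t.zero_point);
+          return 0;
+  }
+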
+Fixes: 288ace2f57c9 ("netfs: New writeback implementation")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/563286.1724500613@warthog.procyon.org.uk
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/write_collect.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 488147439fe0f..a2b697b4aa401 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -33,6 +33,7 @@
+ int netfs_folio_written_back(struct folio *folio)
+ {
+ enum netfs_folio_trace why = netfs_folio_trace_clear;
++ struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
+ struct netfs_folio *finfo;
+ struct netfs_group *group = NULL;
+ int gcount = 0;
+@@ -41,6 +42,12 @@ int netfs_folio_written_back(struct folio *folio)
+ /* Streaming writes cannot be redirtied whilst under writeback,
+ * so discard the streaming record.
+ */
++ unsigned long long fend;
++
++ fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len;
++ if (fend > ictx->zero_point)
++ ictx->zero_point = fend;
++
+ folio_detach_private(folio);
+ group = finfo->netfs_group;
+ gcount++;
+--
+2.43.0
+
--- /dev/null
+From 77681897a29d831ff65d669b665518b0f3a888da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 21:08:13 +0100
+Subject: netfs: Fix missing iterator reset on retry of short read
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 950b03d0f664a54389a555d79215348ed413161f ]
+
+Fix netfs_rreq_perform_resubmissions() to reset the subrequest iterator
+before retrying a short read, otherwise the wrong part of the output
+buffer will be used.
+
+Fixes: 92b6cc5d1e7c ("netfs: Add iov_iters to (sub)requests to describe various buffers")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/20240823200819.532106-6-dhowells@redhat.com
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/io.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/netfs/io.c b/fs/netfs/io.c
+index f3abc5dfdbc0c..c96431d3da6d8 100644
+--- a/fs/netfs/io.c
++++ b/fs/netfs/io.c
+@@ -313,6 +313,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+ netfs_reset_subreq_iter(rreq, subreq);
+ netfs_read_from_server(rreq, subreq);
+ } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
++ netfs_reset_subreq_iter(rreq, subreq);
+ netfs_rreq_short_read(rreq, subreq);
+ }
+ }
+--
+2.43.0
+
--- /dev/null
+From 2733849992b8c3aa2eba07eee1ab3a43d794ad4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 21:08:11 +0100
+Subject: netfs: Fix netfs_release_folio() to say no if folio dirty
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 7dfc8f0c6144c290dbeb01835a67e81b34dda8cd ]
+
+Fix netfs_release_folio() to say no (ie. return false) if the folio is
+dirty (analogous with iomap's behaviour). Without this, it will say yes to
+the release of a dirty page by split_huge_page_to_list_to_order(), which
+will result in the loss of untruncated data in the folio.
+
+Without this, the generic/075 and generic/112 xfstests (both fsx-based
+tests) fail with minimum folio size patches applied[1].
+
+Fixes: c1ec4d7c2e13 ("netfs: Provide invalidate_folio and release_folio calls")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/20240815090849.972355-1-kernel@pankajraghav.com/ [1]
+Link: https://lore.kernel.org/r/20240823200819.532106-4-dhowells@redhat.com
+cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+cc: Pankaj Raghav <p.raghav@samsung.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+cc: netfs@lists.linux.dev
+cc: linux-mm@kvack.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/misc.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
+index 607a1972f4563..21acf4b092a46 100644
+--- a/fs/netfs/misc.c
++++ b/fs/netfs/misc.c
+@@ -161,6 +161,9 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+ struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+ unsigned long long end;
+
++ if (folio_test_dirty(folio))
++ return false;
++
+ end = folio_pos(folio) + folio_size(folio);
+ if (end > ctx->zero_point)
+ ctx->zero_point = end;
+--
+2.43.0
+
--- /dev/null
+From 74e5aa03f1e98a0daeff4dece1f2b6a8a4d3bd45 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 21:08:12 +0100
+Subject: netfs: Fix trimming of streaming-write folios in netfs_inval_folio()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit cce6bfa6ca0e30af9927b0074c97fe6a92f28092 ]
+
+When netfslib writes to a folio that it doesn't have data for, but that
+data exists on the server, it will make a 'streaming write' whereby it
+stores data in a folio that is marked dirty, but not uptodate. When it
+does this, it attaches a record to folio->private to track the dirty
+region.
+
+When truncate() or fallocate() wants to invalidate part of such a folio, it
+will call into ->invalidate_folio(), specifying the part of the folio that
+is to be invalidated. netfs_invalidate_folio(), on behalf of the
+filesystem, must then determine how to trim the streaming write record. In
+a couple of cases, however, it does this incorrectly (the reduce-length and
+move-start cases are switched over and don't, in any case, calculate the
+value correctly).
+
+Fix this by making the logic tree more obvious and fixing the cases.
+
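+The overlap cases can be illustrated with a simplified, self-contained
+trim helper (hypothetical types and plain interval arithmetic, not the
+netfs structures):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct dirty_span {
+          unsigned int start;     /* offset of dirty data in the folio */
+          unsigned int len;       /* length of the dirty data */
+  };
+
+  /* Trim a streaming-write dirty span against an invalidated region
+   * [offset, offset + length).  Returns false if the span is wholly
+   * covered and the record should be erased completely.
+   */
+  static bool trim_dirty_span(struct dirty_span *d, unsigned int offset,
+                              unsigned int length)
+  {
+          unsigned int fstart = d->start, fend = d->start + d->len;
+          unsigned int iend = offset + length;
+
+          if (offset >= fend || iend <= fstart)
+                  return true;    /* no overlap, nothing to trim */
+          if (offset <= fstart && iend >= fend)
+                  return false;   /* fully covered: erase */
+          if (offset <= fstart) { /* head overlap: move the start */
+                  d->start = iend;
+                  d->len = fend - iend;
+                  return true;
+          }
+          if (iend >= fend) {     /* tail overlap: reduce the length */
+                  d->len = offset - fstart;
+                  return true;
+          }
+          /* Hole punched in the middle: the caller has already zeroed
+           * it, so the span simply absorbs the hole and stays as is.
+           */
+          return true;
+  }
+
+  int main(void)
+  {
+          struct dirty_span d = { .start = 100, .len = 200 }; /* [100,300) */
+
+          trim_dirty_span(&d, 0, 150);   /* head overlap -> [150,300) */
+          printf("start=%u len=%u\n", d.start, d.len);
+          trim_dirty_span(&d, 250, 100); /* tail overlap -> [150,250) */
+          printf("start=%u len=%u\n", d.start, d.len);
+          return 0;
+  }
+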
+Fixes: 9ebff83e6481 ("netfs: Prep to use folio->private for write grouping and streaming write")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/20240823200819.532106-5-dhowells@redhat.com
+cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+cc: Pankaj Raghav <p.raghav@samsung.com>
+cc: Jeff Layton <jlayton@kernel.org>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+cc: netfs@lists.linux.dev
+cc: linux-mm@kvack.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/misc.c | 50 ++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 35 insertions(+), 15 deletions(-)
+
+diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
+index 21acf4b092a46..a46bf569303fc 100644
+--- a/fs/netfs/misc.c
++++ b/fs/netfs/misc.c
+@@ -97,10 +97,20 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
+ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+ {
+ struct netfs_folio *finfo;
++ struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+ size_t flen = folio_size(folio);
+
+ kenter("{%lx},%zx,%zx", folio->index, offset, length);
+
++ if (offset == 0 && length == flen) {
++ unsigned long long i_size = i_size_read(&ctx->inode);
++ unsigned long long fpos = folio_pos(folio), end;
++
++ end = umin(fpos + flen, i_size);
++ if (fpos < i_size && end > ctx->zero_point)
++ ctx->zero_point = end;
++ }
++
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+
+ if (!folio_test_private(folio))
+@@ -115,18 +125,34 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+ /* We have a partially uptodate page from a streaming write. */
+ unsigned int fstart = finfo->dirty_offset;
+ unsigned int fend = fstart + finfo->dirty_len;
+- unsigned int end = offset + length;
++ unsigned int iend = offset + length;
+
+ if (offset >= fend)
+ return;
+- if (end <= fstart)
++ if (iend <= fstart)
++ return;
++
++ /* The invalidation region overlaps the data. If the region
++ * covers the start of the data, we either move along the start
++ * or just erase the data entirely.
++ */
++ if (offset <= fstart) {
++ if (iend >= fend)
++ goto erase_completely;
++ /* Move the start of the data. */
++ finfo->dirty_len = fend - iend;
++ finfo->dirty_offset = offset;
++ return;
++ }
++
++ /* Reduce the length of the data if the invalidation region
++ * covers the tail part.
++ */
++ if (iend >= fend) {
++ finfo->dirty_len = offset - fstart;
+ return;
+- if (offset <= fstart && end >= fend)
+- goto erase_completely;
+- if (offset <= fstart && end > fstart)
+- goto reduce_len;
+- if (offset > fstart && end >= fend)
+- goto move_start;
++ }
++
+ /* A partial write was split. The caller has already zeroed
+ * it, so just absorb the hole.
+ */
+@@ -139,12 +165,6 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+ folio_clear_uptodate(folio);
+ kfree(finfo);
+ return;
+-reduce_len:
+- finfo->dirty_len = offset + length - finfo->dirty_offset;
+- return;
+-move_start:
+- finfo->dirty_len -= offset - finfo->dirty_offset;
+- finfo->dirty_offset = offset;
+ }
+ EXPORT_SYMBOL(netfs_invalidate_folio);
+
+@@ -164,7 +184,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+ if (folio_test_dirty(folio))
+ return false;
+
+- end = folio_pos(folio) + folio_size(folio);
++ end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
+ if (end > ctx->zero_point)
+ ctx->zero_point = end;
+
+--
+2.43.0
+
--- /dev/null
+From b71f66c0a6fbe9bf9d65131fda5e5c26282e476a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 14:47:01 -0400
+Subject: nfsd: ensure that nfsd4_fattr_args.context is zeroed out
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit f58bab6fd4063913bd8321e99874b8239e9ba726 ]
+
+If nfsd4_encode_fattr4 ends up doing a "goto out" before we get to
+checking for the security label, then args.context will be set to
+uninitialized junk on the stack, which we'll then try to free.
+Initialize it early.
+
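+The pattern being fixed is the usual goto-style cleanup where a pointer
+is released unconditionally at the out label; initialising it at the top
+keeps the early-exit paths from freeing stack junk. A minimal,
+self-contained sketch (hypothetical names, not the nfsd code):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  static int encode_attrs_sketch(int fail_early)
+  {
+          char *context = NULL;   /* initialised early: "out" may run
+                                   * before anything assigns it */
+          int err = 0;
+
+          if (fail_early) {
+                  err = -1;
+                  goto out;       /* without the early init, context
+                                   * would be uninitialised here */
+          }
+
+          context = strdup("security label");
+          if (!context) {
+                  err = -1;
+                  goto out;
+          }
+          printf("encoded: %s\n", context);
+  out:
+          free(context);          /* free(NULL) is a no-op, so safe */
+          return err;
+  }
+
+  int main(void)
+  {
+          encode_attrs_sketch(1);
+          encode_attrs_sketch(0);
+          return 0;
+  }
+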
+Fixes: f59388a579c6 ("NFSD: Add nfsd4_encode_fattr4_sec_label()")
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4xdr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index c7bfd2180e3f2..8a7bc2b58e721 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3545,6 +3545,9 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ args.dentry = dentry;
+ args.ignore_crossmnt = (ignore_crossmnt != 0);
+ args.acl = NULL;
++#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
++ args.context = NULL;
++#endif
+
+ /*
+ * Make a local copy of the attribute bitmap that can be modified.
+@@ -3617,7 +3620,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ args.contextsupport = false;
+
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+- args.context = NULL;
+ if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+ attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
+--
+2.43.0
+
--- /dev/null
+From 8b0fddc6c3b24f311712855fcbe263f3e3f526de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 18:27:39 -0400
+Subject: nfsd: fix potential UAF in nfsd4_cb_getattr_release
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit 1116e0e372eb16dd907ec571ce5d4af325c55c10 ]
+
+Once we drop the delegation reference, the fields embedded in it are no
+longer safe to access. Do that last.
+
+Fixes: c5967721e106 ("NFSD: handle GETATTR conflict with write delegation")
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 19d39872be325..02d43f95146ee 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3078,9 +3078,9 @@ nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+- nfs4_put_stid(&dp->dl_stid);
+ clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
+ wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
++ nfs4_put_stid(&dp->dl_stid);
+ }
+
+ static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
+--
+2.43.0
+
--- /dev/null
+From d5cef521ff861c969186dc5752519d49576691f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 18:27:38 -0400
+Subject: nfsd: hold reference to delegation when updating it for cb_getattr
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit da05ba23d4c8d3e8a45846b952e53dd76c4b5e36 ]
+
+Once we've dropped the flc_lock, there is nothing that ensures that the
+delegation that was found will still be around later. Take a reference
+to it while holding the lock and then drop it when we've finished with
+the delegation.
+
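+The rule being applied is a general one: take a reference while the lock
+that made the lookup valid is still held, and drop it only when the
+object is no longer needed. A rough, self-contained sketch with a plain
+mutex and a C11 atomic refcount (hypothetical types, not the nfsd
+structures):
+
+  #include <pthread.h>
+  #include <stdatomic.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct deleg {
+          atomic_int refcount;
+          long long cb_fsize;             /* field we read after unlock */
+  };
+
+  /* A one-entry "list" of delegations, protected by list_lock. */
+  static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
+  static struct deleg *the_deleg;
+
+  static void deleg_put(struct deleg *dp)
+  {
+          if (atomic_fetch_sub(&dp->refcount, 1) == 1)
+                  free(dp);
+  }
+
+  static void use_delegation(void)
+  {
+          struct deleg *dp;
+
+          pthread_mutex_lock(&list_lock);
+          dp = the_deleg;
+          if (dp)                         /* pin it before unlocking */
+                  atomic_fetch_add(&dp->refcount, 1);
+          pthread_mutex_unlock(&list_lock);
+          if (!dp)
+                  return;
+
+          /* Safe even if another thread has since removed the entry
+           * from the list and dropped the list's reference.
+           */
+          printf("cb_fsize = %lld\n", dp->cb_fsize);
+          deleg_put(dp);
+  }
+
+  int main(void)
+  {
+          the_deleg = calloc(1, sizeof(*the_deleg));
+          if (!the_deleg)
+                  return 1;
+          atomic_init(&the_deleg->refcount, 1);   /* list's reference */
+          the_deleg->cb_fsize = 4096;
+
+          use_delegation();
+          deleg_put(the_deleg);           /* list drops its reference */
+          return 0;
+  }
+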
+Fixes: c5967721e106 ("NFSD: handle GETATTR conflict with write delegation")
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index dafff707e23a4..19d39872be325 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -8837,7 +8837,6 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct file_lock_context *ctx;
+ struct file_lease *fl;
+- struct nfs4_delegation *dp;
+ struct iattr attrs;
+ struct nfs4_cb_fattr *ncf;
+
+@@ -8862,7 +8861,8 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ goto break_lease;
+ }
+ if (type == F_WRLCK) {
+- dp = fl->c.flc_owner;
++ struct nfs4_delegation *dp = fl->c.flc_owner;
++
+ if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
+ spin_unlock(&ctx->flc_lock);
+ return 0;
+@@ -8870,6 +8870,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ break_lease:
+ nfsd_stats_wdeleg_getattr_inc(nn);
+ dp = fl->c.flc_owner;
++ refcount_inc(&dp->dl_stid.sc_count);
+ ncf = &dp->dl_cb_fattr;
+ nfs4_cb_getattr(&dp->dl_cb_fattr);
+ spin_unlock(&ctx->flc_lock);
+@@ -8879,8 +8880,10 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ /* Recall delegation only if client didn't respond */
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+- !nfsd_wait_for_delegreturn(rqstp, inode))
++ !nfsd_wait_for_delegreturn(rqstp, inode)) {
++ nfs4_put_stid(&dp->dl_stid);
+ return status;
++ }
+ }
+ if (!ncf->ncf_file_modified &&
+ (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
+@@ -8900,6 +8903,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ *size = ncf->ncf_cur_fsize;
+ *modified = true;
+ }
++ nfs4_put_stid(&dp->dl_stid);
+ return 0;
+ }
+ break;
+--
+2.43.0
+
--- /dev/null
+From 1890e33a315f1f3ed0a3e4591822dfc6f05d7816 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Aug 2024 11:51:08 -0400
+Subject: nfsd: prevent panic for nfsv4.0 closed files in nfs4_show_open
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+[ Upstream commit a204501e1743d695ca2930ed25a2be9f8ced96d3 ]
+
+Prior to commit 3f29cc82a84c ("nfsd: split sc_status out of
+sc_type"), states_show() relied on the sc_type field being of a
+valid type before calling into a subfunction to show the content
+of a particular stateid. Since that commit, the validity of the
+stateid is split out into sc_status, and sc_type is no longer
+changed to 0 while unhashing the stateid. This resulted in the
+kernel oopsing for nfsv4.0 opens that stay around, because
+nfs4_show_open() would dereference sc_file, which was NULL.
+
+Instead, for closed open stateids, forgo displaying information
+that relies on having a valid sc_file.
+
+To reproduce: mount the server with 4.0, read and close
+a file and then on the server cat /proc/fs/nfsd/clients/2/states
+
+[ 513.590804] Call trace:
+[ 513.590925] _raw_spin_lock+0xcc/0x160
+[ 513.591119] nfs4_show_open+0x78/0x2c0 [nfsd]
+[ 513.591412] states_show+0x44c/0x488 [nfsd]
+[ 513.591681] seq_read_iter+0x5d8/0x760
+[ 513.591896] seq_read+0x188/0x208
+[ 513.592075] vfs_read+0x148/0x470
+[ 513.592241] ksys_read+0xcc/0x178
+
+Fixes: 3f29cc82a84c ("nfsd: split sc_status out of sc_type")
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index a20c2c9d7d457..dafff707e23a4 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2789,15 +2789,18 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
+ deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
+
+- spin_lock(&nf->fi_lock);
+- file = find_any_file_locked(nf);
+- if (file) {
+- nfs4_show_superblock(s, file);
+- seq_puts(s, ", ");
+- nfs4_show_fname(s, file);
+- seq_puts(s, ", ");
+- }
+- spin_unlock(&nf->fi_lock);
++ if (nf) {
++ spin_lock(&nf->fi_lock);
++ file = find_any_file_locked(nf);
++ if (file) {
++ nfs4_show_superblock(s, file);
++ seq_puts(s, ", ");
++ nfs4_show_fname(s, file);
++ seq_puts(s, ", ");
++ }
++ spin_unlock(&nf->fi_lock);
++ } else
++ seq_puts(s, "closed, ");
+ nfs4_show_owner(s, oo);
+ if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
+ seq_puts(s, ", admin-revoked");
+--
+2.43.0
+
--- /dev/null
+From cbd75ab8ccb7795edbaa4c09613fec975f4bbf5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Aug 2024 19:27:09 -0400
+Subject: pinctrl: mediatek: common-v2: Fix broken bias-disable for
+ PULL_PU_PD_RSEL_TYPE
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: NĂcolas F. R. A. Prado <nfraprado@collabora.com>
+
+[ Upstream commit 166bf8af91225576f85208a31eaedbadd182d1ea ]
+
+Despite its name, commit fed74d75277d ("pinctrl: mediatek: common-v2:
+Fix bias-disable for PULL_PU_PD_RSEL_TYPE") actually broke bias-disable
+for PULL_PU_PD_RSEL_TYPE.
+
+mtk_pinconf_bias_set_combo() tries every bias method supported by the
+pin until one succeeds. For PULL_PU_PD_RSEL_TYPE pins, before the
+breaking commit, mtk_pinconf_bias_set_rsel() would be called first to
+try and set the RSEL value (as well as PU and PD), and if that failed,
+the only other valid option was that bias-disable was specified, which
+would then be handled by calling mtk_pinconf_bias_set_pu_pd() and
+disabling both PU and PD.
+
+The breaking commit misunderstood this logic and added an early "return
+0" in mtk_pinconf_bias_set_rsel(). The result was that in the
+bias-disable case, the bias was left unchanged, since by returning
+success, mtk_pinconf_bias_set_combo() no longer tried calling
+mtk_pinconf_bias_set_pu_pd() to disable the bias.
+
+Since the logic for configuring bias-disable on PULL_PU_PD_RSEL_TYPE
+pins required mtk_pinconf_bias_set_rsel() to fail first, in that case,
+an error was printed to the log, eg:
+
+ mt8195-pinctrl 10005000.pinctrl: Not support rsel value 0 Ohm for pin = 29 (GPIO29)
+
+This is what the breaking commit actually got rid of, and likely part of
+the reason why that commit was thought to be fixing functionality, while
+in reality it was breaking it.
+
+Instead of simply reverting that commit, restore the functionality but
+in a way that avoids the error from being printed and makes the code
+less confusing:
+* Return 0 explicitly if a bias method was successful
+* Introduce an extra function mtk_pinconf_bias_set_pu_pd_rsel() that
+ calls both mtk_pinconf_bias_set_rsel() (only if needed) and
+ mtk_pinconf_bias_set_pu_pd()
+ * And analogously for the corresponding getters
+
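+In plain terms, the restored flow is "try each supported bias scheme in
+turn and stop at the first one that works", with the RSEL step folded
+into a helper that still finishes by programming PU/PD. A minimal,
+self-contained sketch (stub setters and hypothetical names, not the
+MediaTek driver):
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  #define MTK_DISABLE 0           /* local stand-ins for the real values */
+  #define MTK_ENABLE  1
+
+  /* Stub low-level setters: 0 on success, negative errno on failure. */
+  static int set_rsel(unsigned int arg)
+  {
+          return arg ? 0 : -EINVAL;       /* lookup fails for 0 Ohm */
+  }
+
+  static int set_pu_pd(unsigned int enable)
+  {
+          printf("PU/PD <- %u\n", enable);
+          return 0;
+  }
+
+  static int set_pullsel_pullen(unsigned int arg)
+  {
+          (void)arg;
+          return -ENOTSUP;                /* pin type not supported */
+  }
+
+  /* RSEL-capable pins: only program RSEL when a real bias is requested,
+   * then always program PU/PD (enabled or disabled).
+   */
+  static int set_pu_pd_rsel(unsigned int arg)
+  {
+          unsigned int enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE;
+
+          if (arg != MTK_DISABLE) {
+                  int err = set_rsel(arg);
+
+                  if (err)
+                          return err;
+          }
+          return set_pu_pd(enable);
+  }
+
+  static int bias_set_combo(unsigned int arg)
+  {
+          /* Try each scheme in turn; the first one that works wins. */
+          if (!set_pu_pd_rsel(arg))
+                  return 0;
+          if (!set_pu_pd(arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE))
+                  return 0;
+          if (!set_pullsel_pullen(arg))
+                  return 0;
+          return -ENOTSUP;
+  }
+
+  int main(void)
+  {
+          bias_set_combo(MTK_DISABLE);    /* now reaches set_pu_pd(0) */
+          bias_set_combo(75);             /* hypothetical RSEL value */
+          return 0;
+  }
+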
+Fixes: fed74d75277d ("pinctrl: mediatek: common-v2: Fix bias-disable for PULL_PU_PD_RSEL_TYPE")
+Signed-off-by: NĂcolas F. R. A. Prado <nfraprado@collabora.com>
+Link: https://lore.kernel.org/20240808-mtk-rsel-bias-disable-fix-v1-1-1b4e85bf596c@collabora.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../pinctrl/mediatek/pinctrl-mtk-common-v2.c | 55 ++++++++++---------
+ 1 file changed, 29 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+index b7921b59eb7b1..54301fbba524a 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+@@ -709,32 +709,35 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw,
+ {
+ int err, rsel_val;
+
+- if (!pullup && arg == MTK_DISABLE)
+- return 0;
+-
+ if (hw->rsel_si_unit) {
+ /* find pin rsel_index from pin_rsel array*/
+ err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val);
+ if (err)
+- goto out;
++ return err;
+ } else {
+- if (arg < MTK_PULL_SET_RSEL_000 ||
+- arg > MTK_PULL_SET_RSEL_111) {
+- err = -EINVAL;
+- goto out;
+- }
++ if (arg < MTK_PULL_SET_RSEL_000 || arg > MTK_PULL_SET_RSEL_111)
++ return -EINVAL;
+
+ rsel_val = arg - MTK_PULL_SET_RSEL_000;
+ }
+
+- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
+- if (err)
+- goto out;
++ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
++}
+
+- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, MTK_ENABLE);
++static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
++ const struct mtk_pin_desc *desc,
++ u32 pullup, u32 arg)
++{
++ u32 enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE;
++ int err;
+
+-out:
+- return err;
++ if (arg != MTK_DISABLE) {
++ err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
++ if (err)
++ return err;
++ }
++
++ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
+ }
+
+ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+@@ -750,22 +753,22 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ try_all_type = MTK_PULL_TYPE_MASK;
+
+ if (try_all_type & MTK_PULL_RSEL_TYPE) {
+- err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
++ err = mtk_pinconf_bias_set_pu_pd_rsel(hw, desc, pullup, arg);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PU_PD_TYPE) {
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
+ err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc,
+ pullup, arg);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+@@ -803,9 +806,9 @@ static int mtk_rsel_get_si_unit(struct mtk_pinctrl *hw,
+ return 0;
+ }
+
+-static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw,
+- const struct mtk_pin_desc *desc,
+- u32 *pullup, u32 *enable)
++static int mtk_pinconf_bias_get_pu_pd_rsel(struct mtk_pinctrl *hw,
++ const struct mtk_pin_desc *desc,
++ u32 *pullup, u32 *enable)
+ {
+ int pu, pd, rsel, err;
+
+@@ -939,22 +942,22 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ try_all_type = MTK_PULL_TYPE_MASK;
+
+ if (try_all_type & MTK_PULL_RSEL_TYPE) {
+- err = mtk_pinconf_bias_get_rsel(hw, desc, pullup, enable);
++ err = mtk_pinconf_bias_get_pu_pd_rsel(hw, desc, pullup, enable);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PU_PD_TYPE) {
+ err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
+ err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc,
+ pullup, enable);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+--
+2.43.0
+
--- /dev/null
+From a667d1c907b920b7ba592d6561446f96ae282b07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Aug 2024 02:22:04 +0200
+Subject: pinctrl: qcom: x1e80100: Fix special pin offsets
+
+From: Konrad Dybcio <quic_kdybcio@quicinc.com>
+
+[ Upstream commit d3692d95cc4d88114b070ee63cffc976f00f207f ]
+
+Remove the erroneous 0x100000 offset to prevent the boards from crashing
+when setting pin state, and to let the intended state changes take
+effect.
+
+Fixes: 05e4941d97ef ("pinctrl: qcom: Add X1E80100 pinctrl driver")
+Signed-off-by: Konrad Dybcio <quic_kdybcio@quicinc.com>
+Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Bjorn Andersson <andersson@kernel.org>
+Link: https://lore.kernel.org/20240809-topic-h_sdc-v1-1-bb421532c531@quicinc.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-x1e80100.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+index 6cd4d10e6fd6f..65ed933f05ce1 100644
+--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
++++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+@@ -1805,10 +1805,10 @@ static const struct msm_pingroup x1e80100_groups[] = {
+ [235] = PINGROUP(235, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
+ [236] = PINGROUP(236, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
+ [237] = PINGROUP(237, _, _, _, _, _, _, _, _, _),
+- [238] = UFS_RESET(ufs_reset, 0x1f9000),
+- [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1f2000, 14, 6),
+- [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1f2000, 11, 3),
+- [241] = SDC_QDSD_PINGROUP(sdc2_data, 0x1f2000, 9, 0),
++ [238] = UFS_RESET(ufs_reset, 0xf9000),
++ [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0xf2000, 14, 6),
++ [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xf2000, 11, 3),
++ [241] = SDC_QDSD_PINGROUP(sdc2_data, 0xf2000, 9, 0),
+ };
+
+ static const struct msm_gpio_wakeirq_map x1e80100_pdc_map[] = {
+--
+2.43.0
+
--- /dev/null
+From 07eeeb75a8d0bbcb0bebbf702d7178f051cbe63e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jul 2024 11:37:57 +0200
+Subject: pinctrl: qcom: x1e80100: Update PDC hwirq map
+
+From: Konrad Dybcio <konrad.dybcio@linaro.org>
+
+[ Upstream commit b7fd10333713e9984cc9b9c04f3681f80efdc809 ]
+
+The current map seems to be out of sync (and includes a duplicate entry
+for GPIO193..).
+
+Replace it with the map present in shipping devices' ACPI tables.
+
+This new one seems more complete, as it e.g. contains GPIO145 (PCIE6a
+WAKE#).
+
+Fixes: 05e4941d97ef ("pinctrl: qcom: Add X1E80100 pinctrl driver")
+Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Rajendra Nayak <quic_rjendra@quicinc.com>
+Link: https://lore.kernel.org/20240711-topic-x1e_pdc_tlmm-v1-1-e278b249d793@linaro.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-x1e80100.c | 27 ++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+index e30e938403574..6cd4d10e6fd6f 100644
+--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
++++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+@@ -1813,18 +1813,21 @@ static const struct msm_pingroup x1e80100_groups[] = {
+
+ static const struct msm_gpio_wakeirq_map x1e80100_pdc_map[] = {
+ { 0, 72 }, { 2, 70 }, { 3, 71 }, { 6, 123 }, { 7, 67 }, { 11, 85 },
+- { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 }, { 26, 129 },
+- { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 }, { 32, 145 }, { 33, 115 },
+- { 34, 130 }, { 35, 146 }, { 36, 147 }, { 39, 80 }, { 43, 148 }, { 47, 149 },
+- { 51, 79 }, { 53, 89 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 },
+- { 67, 88 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 },
+- { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 }, { 94, 140 },
+- { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 }, { 129, 76 }, { 131, 82 },
+- { 134, 83 }, { 141, 93 }, { 144, 94 }, { 147, 96 }, { 148, 97 }, { 150, 102 },
+- { 151, 103 }, { 153, 104 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 },
+- { 172, 99 }, { 181, 101 }, { 184, 116 }, { 193, 40 }, { 193, 117 }, { 196, 108 },
+- { 203, 133 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 },
+- { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 },
++ { 13, 86 }, { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 },
++ { 24, 126 }, { 26, 129 }, { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 },
++ { 31, 159 }, { 32, 145 }, { 33, 115 }, { 34, 130 }, { 35, 146 }, { 36, 147 },
++ { 38, 113 }, { 39, 80 }, { 43, 148 }, { 47, 149 }, { 51, 79 }, { 53, 89 },
++ { 55, 81 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 }, { 67, 88 },
++ { 68, 151 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 },
++ { 83, 154 }, { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 },
++ { 93, 139 }, { 94, 140 }, { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 },
++ { 125, 75 }, { 129, 76 }, { 131, 82 }, { 134, 83 }, { 141, 93 }, { 144, 94 },
++ { 145, 95 }, { 147, 96 }, { 148, 97 }, { 150, 102 }, { 151, 103 }, { 153, 104 },
++ { 154, 100 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 }, { 172, 99 },
++ { 175, 114 }, { 181, 101 }, { 184, 116 }, { 193, 117 }, { 196, 108 }, { 203, 133 },
++ { 208, 134 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 },
++ { 219, 119 }, { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 },
++ { 228, 135 }, { 230, 136 }, { 232, 162 },
+ };
+
+ static const struct msm_pinctrl_soc_data x1e80100_pinctrl = {
+--
+2.43.0
+
--- /dev/null
+From 9b5e5bfbd3d5f8c29c293e535237b9e996d6c785 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Aug 2024 15:01:08 +0800
+Subject: pinctrl: starfive: jh7110: Correct the level trigger configuration of
+ iev register
+
+From: Hal Feng <hal.feng@starfivetech.com>
+
+[ Upstream commit 639766ca10d1e218e257ae7eabe76814bae6ab89 ]
+
+A mistake was made in level trigger register configuration. Correct it.
+
+Fixes: 447976ab62c5 ("pinctrl: starfive: Add StarFive JH7110 sys controller driver")
+Signed-off-by: Hal Feng <hal.feng@starfivetech.com>
+Link: https://lore.kernel.org/20240812070108.100923-1-hal.feng@starfivetech.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+index 9609eb1ecc3d8..7637de7452b91 100644
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+@@ -795,12 +795,12 @@ static int jh7110_irq_set_type(struct irq_data *d, unsigned int trigger)
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+- polarity = mask; /* 1: high level */
++ polarity = 0; /* 0: high level */
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+- polarity = 0; /* 0: low level */
++ polarity = mask; /* 1: low level */
+ break;
+ default:
+ return -EINVAL;
+--
+2.43.0
+
drm-vmwgfx-fix-prime-with-external-buffers.patch
drm-vmwgfx-disable-coherent-dumb-buffers-without-3d.patch
video-aperture-optionally-match-the-device-in-sysfb_disable.patch
+drm-xe-prepare-display-for-d3cold.patch
+drm-xe-display-make-display-suspend-resume-work-on-d.patch
+drm-xe-vm-simplify-if-condition.patch
+drm-xe-exec_queue-rename-xe_exec_queue-compute-to-xe.patch
+drm-xe-prevent-uaf-around-preempt-fence.patch
+drm-amdgpu-do-not-wait-for-mp0_c2pmsg_33-ifwi-init-i.patch
+drm-amdgpu-fix-egpu-hotplug-regression.patch
+pinctrl-qcom-x1e80100-update-pdc-hwirq-map.patch
+asoc-sof-amd-move-iram-dram-fence-register-programmi.patch
+asoc-sof-amd-fix-for-incorrect-acp-error-register-of.patch
+asoc-amd-acp-fix-module-autoloading.patch
+asoc-sof-amd-fix-for-acp-init-sequence.patch
+alsa-hda-cs35l56-don-t-use-the-device-index-as-a-cal.patch
+asoc-cs-amp-lib-test-force-test-calibration-blob-ent.patch
+asoc-cs-amp-lib-ignore-empty-uefi-calibration-entrie.patch
+nfsd-ensure-that-nfsd4_fattr_args.context-is-zeroed-.patch
+backing-file-convert-to-using-fops-splice_write.patch
+pinctrl-mediatek-common-v2-fix-broken-bias-disable-f.patch
+pinctrl-qcom-x1e80100-fix-special-pin-offsets.patch
+pinctrl-starfive-jh7110-correct-the-level-trigger-co.patch
+nfsd-prevent-panic-for-nfsv4.0-closed-files-in-nfs4_.patch
+mm-fix-missing-folio-invalidation-calls-during-trunc.patch
+afs-fix-post-setattr-file-edit-to-do-truncation-corr.patch
+netfs-fix-netfs_release_folio-to-say-no-if-folio-dir.patch
+netfs-fix-trimming-of-streaming-write-folios-in-netf.patch
+netfs-fix-missing-iterator-reset-on-retry-of-short-r.patch
+netfs-fix-interaction-of-streaming-writes-with-zero-.patch
+smb-client-remove-unused-rq_iter_size-from-struct-sm.patch
+cifs-fix-falloc_fl_punch_hole-support.patch
+nfsd-hold-reference-to-delegation-when-updating-it-f.patch
+nfsd-fix-potential-uaf-in-nfsd4_cb_getattr_release.patch
+fs-nfsd-fix-update-of-inode-attrs-in-cb_getattr.patch
--- /dev/null
+From 2ee937ce04a0b708d6ed88892adb0c4873511b52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 15:59:12 +0200
+Subject: smb/client: remove unused rq_iter_size from struct smb_rqst
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit b608e2c318789aeba49055747166e13bee57df4a ]
+
+Reviewed-by: David Howells <dhowells@redhat.com>
+Fixes: d08089f649a0 ("cifs: Change the I/O paths to use an iterator rather than a page list")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsglob.h | 1 -
+ fs/smb/client/cifssmb.c | 1 -
+ fs/smb/client/smb2ops.c | 2 --
+ fs/smb/client/smb2pdu.c | 2 --
+ 4 files changed, 6 deletions(-)
+
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 0a271b9fbc622..1e4da268de3b4 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -254,7 +254,6 @@ struct cifs_open_info_data {
+ struct smb_rqst {
+ struct kvec *rq_iov; /* array of kvecs */
+ unsigned int rq_nvec; /* number of kvecs in array */
+- size_t rq_iter_size; /* Amount of data in ->rq_iter */
+ struct iov_iter rq_iter; /* Data iterator */
+ struct xarray rq_buffer; /* Page buffer for encryption */
+ };
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index 595c4b673707e..6dce70f172082 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -1713,7 +1713,6 @@ cifs_async_writev(struct cifs_io_subrequest *wdata)
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+ rqst.rq_iter = wdata->subreq.io_iter;
+- rqst.rq_iter_size = iov_iter_count(&wdata->subreq.io_iter);
+
+ cifs_dbg(FYI, "async write at %llu %zu bytes\n",
+ wdata->subreq.start, wdata->subreq.len);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 7fe59235f0901..cfbca3489ece1 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4428,7 +4428,6 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ }
+ iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
+ buffer, 0, size);
+- new->rq_iter_size = size;
+ }
+ }
+
+@@ -4474,7 +4473,6 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ rqst.rq_nvec = 2;
+ if (iter) {
+ rqst.rq_iter = *iter;
+- rqst.rq_iter_size = iov_iter_count(iter);
+ iter_size = iov_iter_count(iter);
+ }
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 52b95f33db57d..d262e70100c9c 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -4517,7 +4517,6 @@ smb2_readv_callback(struct mid_q_entry *mid)
+
+ if (rdata->got_bytes) {
+ rqst.rq_iter = rdata->subreq.io_iter;
+- rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter);
+ }
+
+ WARN_ONCE(rdata->server != mid->server,
+@@ -4969,7 +4968,6 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+ rqst.rq_iter = wdata->subreq.io_iter;
+- rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
+ if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
+ smb2_set_replay(server, &rqst);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+--
+2.43.0
+