--- /dev/null
+From 1d9ce4440414c92acb17eece3218fe5c92b141e3 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 25 Jul 2024 08:54:28 +0200
+Subject: ASoC: amd: yc: Support mic on Lenovo Thinkpad E16 Gen 2
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 1d9ce4440414c92acb17eece3218fe5c92b141e3 upstream.
+
+The Lenovo Thinkpad E16 Gen 2 AMD model (model 21M5) needs a
+corresponding quirk entry to make the internal mic work.
+
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1228269
+Cc: stable@vger.kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20240725065442.9293-1-tiwai@suse.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/amd/yc/acp6x-mach.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -224,6 +224,13 @@ static const struct dmi_system_id yc_acp
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
+ }
+ },
--- /dev/null
+From e6fc5fcaeffa04a3fa1db8dfccdfd4b6001c0446 Mon Sep 17 00:00:00 2001
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Date: Wed, 24 Jul 2024 11:19:32 +0300
+Subject: ASoC: SOF: ipc4-topology: Preserve the DMA Link ID for ChainDMA on unprepare
+
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+
+commit e6fc5fcaeffa04a3fa1db8dfccdfd4b6001c0446 upstream.
+
+The DMA Link ID is set in the IPC message's primary word during
+dai_config, which only happens during hw_params.
+During xrun handling hw_params is not called, so the DMA Link ID
+information would be lost.
+
+All other fields in the message are expected to be 0 for
+re-configuration; only the DMA Link ID needs to be preserved. In the
+case of a repeated dai_config it is correctly updated (masked and then
+set).
+
+Cc: stable@vger.kernel.org
+Fixes: ca5ce0caa67f ("ASoC: SOF: ipc4/intel: Add support for chained DMA")
+Link: https://github.com/thesofproject/linux/issues/5116
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Link: https://patch.msgid.link/20240724081932.24542-3-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-topology.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -1254,7 +1254,13 @@ static void sof_ipc4_unprepare_copier_mo
+ ipc4_copier = dai->private;
+
+ if (pipeline->use_chain_dma) {
+- pipeline->msg.primary = 0;
++ /*
++ * Preserve the DMA Link ID and clear other bits since
++ * the DMA Link ID is only configured once during
++ * dai_config, other fields are expected to be 0 for
++ * re-configuration
++ */
++ pipeline->msg.primary &= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
+ pipeline->msg.extension = 0;
+ }
+
--- /dev/null
+From 7e04da2dc7013af50ed3a2beb698d5168d1e594b Mon Sep 17 00:00:00 2001
+From: Yang Yang <yang.yang@vivo.com>
+Date: Wed, 24 Jul 2024 15:04:12 +0800
+Subject: block: fix deadlock between sd_remove & sd_release
+
+From: Yang Yang <yang.yang@vivo.com>
+
+commit 7e04da2dc7013af50ed3a2beb698d5168d1e594b upstream.
+
+Our test reported the following hung task:
+
+[ 2538.459400] INFO: task "kworker/0:0":7 blocked for more than 188 seconds.
+[ 2538.459427] Call trace:
+[ 2538.459430] __switch_to+0x174/0x338
+[ 2538.459436] __schedule+0x628/0x9c4
+[ 2538.459442] schedule+0x7c/0xe8
+[ 2538.459447] schedule_preempt_disabled+0x24/0x40
+[ 2538.459453] __mutex_lock+0x3ec/0xf04
+[ 2538.459456] __mutex_lock_slowpath+0x14/0x24
+[ 2538.459459] mutex_lock+0x30/0xd8
+[ 2538.459462] del_gendisk+0xdc/0x350
+[ 2538.459466] sd_remove+0x30/0x60
+[ 2538.459470] device_release_driver_internal+0x1c4/0x2c4
+[ 2538.459474] device_release_driver+0x18/0x28
+[ 2538.459478] bus_remove_device+0x15c/0x174
+[ 2538.459483] device_del+0x1d0/0x358
+[ 2538.459488] __scsi_remove_device+0xa8/0x198
+[ 2538.459493] scsi_forget_host+0x50/0x70
+[ 2538.459497] scsi_remove_host+0x80/0x180
+[ 2538.459502] usb_stor_disconnect+0x68/0xf4
+[ 2538.459506] usb_unbind_interface+0xd4/0x280
+[ 2538.459510] device_release_driver_internal+0x1c4/0x2c4
+[ 2538.459514] device_release_driver+0x18/0x28
+[ 2538.459518] bus_remove_device+0x15c/0x174
+[ 2538.459523] device_del+0x1d0/0x358
+[ 2538.459528] usb_disable_device+0x84/0x194
+[ 2538.459532] usb_disconnect+0xec/0x300
+[ 2538.459537] hub_event+0xb80/0x1870
+[ 2538.459541] process_scheduled_works+0x248/0x4dc
+[ 2538.459545] worker_thread+0x244/0x334
+[ 2538.459549] kthread+0x114/0x1bc
+
+[ 2538.461001] INFO: task "fsck.":15415 blocked for more than 188 seconds.
+[ 2538.461014] Call trace:
+[ 2538.461016] __switch_to+0x174/0x338
+[ 2538.461021] __schedule+0x628/0x9c4
+[ 2538.461025] schedule+0x7c/0xe8
+[ 2538.461030] blk_queue_enter+0xc4/0x160
+[ 2538.461034] blk_mq_alloc_request+0x120/0x1d4
+[ 2538.461037] scsi_execute_cmd+0x7c/0x23c
+[ 2538.461040] ioctl_internal_command+0x5c/0x164
+[ 2538.461046] scsi_set_medium_removal+0x5c/0xb0
+[ 2538.461051] sd_release+0x50/0x94
+[ 2538.461054] blkdev_put+0x190/0x28c
+[ 2538.461058] blkdev_release+0x28/0x40
+[ 2538.461063] __fput+0xf8/0x2a8
+[ 2538.461066] __fput_sync+0x28/0x5c
+[ 2538.461070] __arm64_sys_close+0x84/0xe8
+[ 2538.461073] invoke_syscall+0x58/0x114
+[ 2538.461078] el0_svc_common+0xac/0xe0
+[ 2538.461082] do_el0_svc+0x1c/0x28
+[ 2538.461087] el0_svc+0x38/0x68
+[ 2538.461090] el0t_64_sync_handler+0x68/0xbc
+[ 2538.461093] el0t_64_sync+0x1a8/0x1ac
+
+ T1: T2:
+ sd_remove
+ del_gendisk
+ __blk_mark_disk_dead
+ blk_freeze_queue_start
+ ++q->mq_freeze_depth
+ bdev_release
+ mutex_lock(&disk->open_mutex)
+ sd_release
+ scsi_execute_cmd
+ blk_queue_enter
+ wait_event(!q->mq_freeze_depth)
+ mutex_lock(&disk->open_mutex)
+
+SCSI does not set GD_OWNS_QUEUE, so QUEUE_FLAG_DYING is not set in
+this scenario. This is a classic ABBA deadlock. To fix the deadlock,
+make sure we don't try to acquire disk->open_mutex after freezing
+the queue.
+
+Cc: stable@vger.kernel.org
+Fixes: eec1be4c30df ("block: delete partitions later in del_gendisk")
+Signed-off-by: Yang Yang <yang.yang@vivo.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20240724070412.22521-1-yang.yang@vivo.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/genhd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -655,12 +655,12 @@ void del_gendisk(struct gendisk *disk)
+ */
+ if (!test_bit(GD_DEAD, &disk->state))
+ blk_report_disk_dead(disk, false);
+- __blk_mark_disk_dead(disk);
+
+ /*
+ * Drop all partitions now that the disk is marked dead.
+ */
+ mutex_lock(&disk->open_mutex);
++ __blk_mark_disk_dead(disk);
+ xa_for_each_start(&disk->part_tbl, idx, part, 1)
+ drop_partition(part);
+ mutex_unlock(&disk->open_mutex);
--- /dev/null
+From a83b22754e351f13fb46596c85f667dc33da71ec Mon Sep 17 00:00:00 2001
+From: Bastien Curutchet <bastien.curutchet@bootlin.com>
+Date: Thu, 18 Jul 2024 13:55:34 +0200
+Subject: clk: davinci: da8xx-cfgchip: Initialize clk_init_data before use
+
+From: Bastien Curutchet <bastien.curutchet@bootlin.com>
+
+commit a83b22754e351f13fb46596c85f667dc33da71ec upstream.
+
+The flag attribute of the struct clk_init_data isn't initialized before
+the devm_clk_hw_register() call. This can lead to unexpected behavior
+during registration.
+
+Initialize the entire clk_init_data to zero at declaration.
+
+Cc: stable@vger.kernel.org
+Fixes: 58e1e2d2cd89 ("clk: davinci: cfgchip: Add TI DA8XX USB PHY clocks")
+Signed-off-by: Bastien Curutchet <bastien.curutchet@bootlin.com>
+Reviewed-by: David Lechner <david@lechnology.com>
+Link: https://lore.kernel.org/r/20240718115534.41513-1-bastien.curutchet@bootlin.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/davinci/da8xx-cfgchip.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/clk/davinci/da8xx-cfgchip.c
++++ b/drivers/clk/davinci/da8xx-cfgchip.c
+@@ -508,7 +508,7 @@ da8xx_cfgchip_register_usb0_clk48(struct
+ const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
+ struct clk *fck_clk;
+ struct da8xx_usb0_clk48 *usb0;
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ int ret;
+
+ fck_clk = devm_clk_get(dev, "fck");
+@@ -583,7 +583,7 @@ da8xx_cfgchip_register_usb1_clk48(struct
+ {
+ const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
+ struct da8xx_usb1_clk48 *usb1;
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ int ret;
+
+ usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
--- /dev/null
+From bf6acd5d16057d7accbbb1bf7dc6d8c56eeb4ecc Mon Sep 17 00:00:00 2001
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Wed, 17 Jul 2024 17:20:16 +0100
+Subject: decompress_bunzip2: fix rare decompression failure
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+commit bf6acd5d16057d7accbbb1bf7dc6d8c56eeb4ecc upstream.
+
+The decompression code parses a huffman tree and counts the number of
+symbols for a given bit length. In rare cases, there may be >= 256
+symbols with a given bit length, causing the unsigned char to overflow.
+This causes a decompression failure later when the code tries and fails to
+find the bit length for a given symbol.
+
+Since the maximum number of symbols is 258, use unsigned short instead.
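+
+As a standalone illustration of the wrap-around (plain userspace C, not
+kernel code; 258 matches the maximum symbol count mentioned above):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned char narrow = 0;	/* old counter type */
+		unsigned short wide = 0;	/* new counter type */
+		int i;
+
+		for (i = 0; i < 258; i++) {
+			narrow++;	/* wraps past 255, ends up at 2 */
+			wide++;		/* counts correctly, ends up at 258 */
+		}
+		printf("unsigned char: %u, unsigned short: %u\n",
+		       (unsigned int)narrow, (unsigned int)wide);
+		return 0;
+	}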
+
+Link: https://lkml.kernel.org/r/20240717162016.1514077-1-ross.lagerwall@citrix.com
+Fixes: bc22c17e12c1 ("bzip2/lzma: library support for gzip, bzip2 and lzma decompression")
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Cc: Alain Knaff <alain@knaff.lu>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/decompress_bunzip2.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bu
+ RUNB) */
+ symCount = symTotal+2;
+ for (j = 0; j < groupCount; j++) {
+- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
++ unsigned char length[MAX_SYMBOLS];
++ unsigned short temp[MAX_HUFCODE_BITS+1];
+ int minLen, maxLen, pp;
+ /* Read Huffman code lengths for each symbol. They're
+ stored in a way similar to mtf; record a starting
--- /dev/null
+From c884e3249f753dcef7a2b2023541ac1dc46b318e Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Tue, 2 Jul 2024 22:51:50 +0800
+Subject: devres: Fix devm_krealloc() wasting memory
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit c884e3249f753dcef7a2b2023541ac1dc46b318e upstream.
+
+Driver API devm_krealloc() calls alloc_dr() with the wrong argument
+@total_new_size, which causes more memory to be allocated than required.
+Fix this memory waste by using @new_size as the argument for alloc_dr().
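+
+A rough sketch of the arithmetic, with H standing in for the struct
+devres header that alloc_dr() already adds on its own via
+check_dr_size():
+
+	/* before: @total_new_size already includes H, so the header is
+	 * counted twice and H bytes are wasted on every reallocation
+	 */
+	new_dr = alloc_dr(devm_kmalloc_release,
+			  total_new_size, gfp, dev_to_node(dev));
+
+	/* after: pass only the user-requested size, as devm_kmalloc()
+	 * does; alloc_dr() reserves the header by itself
+	 */
+	new_dr = alloc_dr(devm_kmalloc_release,
+			  new_size, gfp, dev_to_node(dev));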
+
+Fixes: f82485722e5d ("devres: provide devm_krealloc()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/r/1719931914-19035-2-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/devres.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -896,9 +896,12 @@ void *devm_krealloc(struct device *dev,
+ /*
+ * Otherwise: allocate new, larger chunk. We need to allocate before
+ * taking the lock as most probably the caller uses GFP_KERNEL.
++ * alloc_dr() will call check_dr_size() to reserve extra memory
++ * for struct devres automatically, so size @new_size user request
++ * is delivered to it directly as devm_kmalloc() does.
+ */
+ new_dr = alloc_dr(devm_kmalloc_release,
+- total_new_size, gfp, dev_to_node(dev));
++ new_size, gfp, dev_to_node(dev));
+ if (!new_dr)
+ return NULL;
+
--- /dev/null
+From bd50a974097bb82d52a458bd3ee39fb723129a0c Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Tue, 2 Jul 2024 22:51:51 +0800
+Subject: devres: Fix memory leakage caused by driver API devm_free_percpu()
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit bd50a974097bb82d52a458bd3ee39fb723129a0c upstream.
+
+Using driver API devm_free_percpu() to free memory allocated by
+devm_alloc_percpu() causes a memory leak. Fix this by using
+devres_release() instead of devres_destroy() within devm_free_percpu().
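+
+The relevant driver-core semantics, as a minimal sketch with the same
+release/match arguments devm_free_percpu() uses:
+
+	/* devres_destroy(): unlinks and frees only the devres node;
+	 * devm_percpu_release() is never called, so the per-cpu area
+	 * from devm_alloc_percpu() is leaked
+	 */
+	devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+		       (__force void *)pdata);
+
+	/* devres_release(): unlinks the devres node and also invokes
+	 * devm_percpu_release(), which calls free_percpu()
+	 */
+	devres_release(dev, devm_percpu_release, devm_percpu_match,
+		       (__force void *)pdata);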
+
+Fixes: ff86aae3b411 ("devres: add devm_alloc_percpu()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/r/1719931914-19035-3-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/devres.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -1225,7 +1225,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+ */
+ void devm_free_percpu(struct device *dev, void __percpu *pdata)
+ {
+- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
++ /*
++ * Use devres_release() to prevent memory leakage as
++ * devm_free_pages() does.
++ */
++ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
+ (__force void *)pdata));
+ }
+ EXPORT_SYMBOL_GPL(devm_free_percpu);
--- /dev/null
+From 36e3b949e35964e22b9a57f960660fc599038dd4 Mon Sep 17 00:00:00 2001
+From: Bailey Forrest <bcf@google.com>
+Date: Wed, 24 Jul 2024 07:34:31 -0700
+Subject: gve: Fix an edge case for TSO skb validity check
+
+From: Bailey Forrest <bcf@google.com>
+
+commit 36e3b949e35964e22b9a57f960660fc599038dd4 upstream.
+
+The NIC requires each TSO segment to not span more than 10
+descriptors. The NIC further requires each descriptor not to exceed
+16KB - 1 (GVE_TX_MAX_BUF_SIZE_DQO).
+
+The descriptors for an skb are generated by
+gve_tx_add_skb_no_copy_dqo() for DQO RDA queue format.
+gve_tx_add_skb_no_copy_dqo() loops through each skb frag and
+generates a descriptor for the entire frag if the frag size is
+not greater than GVE_TX_MAX_BUF_SIZE_DQO. If the frag size is
+greater than GVE_TX_MAX_BUF_SIZE_DQO, it is split into descriptor(s)
+of size GVE_TX_MAX_BUF_SIZE_DQO and a descriptor is generated for
+the remainder (frag size % GVE_TX_MAX_BUF_SIZE_DQO).
+
+gve_can_send_tso() checks if the descriptors thus generated for an
+skb would meet the requirement that each TSO-segment not span more
+than 10 descriptors. However, the current code misses an edge case
+when a TSO segment spans multiple descriptors within a large frag.
+This change fixes the edge case.
+
+gve_can_send_tso() relies on the assumption that max gso size (9728)
+is less than GVE_TX_MAX_BUF_SIZE_DQO and therefore within an skb
+fragment a TSO segment can never span more than 2 descriptors.
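+
+Worked out with the constants above: a descriptor holds at most
+GVE_TX_MAX_BUF_SIZE_DQO = 16KB - 1 = 16383 bytes while a TSO segment is
+at most 9728 bytes, so a segment that starts inside one descriptor of a
+frag can cross at most one descriptor boundary within that frag, i.e.
+it can span at most two of its descriptors.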
+
+Fixes: a57e5de476be ("gve: DQO: Add TX path")
+Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Signed-off-by: Bailey Forrest <bcf@google.com>
+Reviewed-by: Jeroen de Borst <jeroendb@google.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20240724143431.3343722-1-pkaligineedi@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/google/gve/gve_tx_dqo.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -812,22 +812,42 @@ static bool gve_can_send_tso(const struc
+ const int header_len = skb_tcp_all_headers(skb);
+ const int gso_size = shinfo->gso_size;
+ int cur_seg_num_bufs;
++ int prev_frag_size;
+ int cur_seg_size;
+ int i;
+
+ cur_seg_size = skb_headlen(skb) - header_len;
++ prev_frag_size = skb_headlen(skb);
+ cur_seg_num_bufs = cur_seg_size > 0;
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ if (cur_seg_size >= gso_size) {
+ cur_seg_size %= gso_size;
+ cur_seg_num_bufs = cur_seg_size > 0;
++
++ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
++ int prev_frag_remain = prev_frag_size %
++ GVE_TX_MAX_BUF_SIZE_DQO;
++
++ /* If the last descriptor of the previous frag
++ * is less than cur_seg_size, the segment will
++ * span two descriptors in the previous frag.
++ * Since max gso size (9728) is less than
++ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
++ * for the segment to span more than two
++ * descriptors.
++ */
++ if (prev_frag_remain &&
++ cur_seg_size > prev_frag_remain)
++ cur_seg_num_bufs++;
++ }
+ }
+
+ if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+ return false;
+
+- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
++ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
++ cur_seg_size += prev_frag_size;
+ }
+
+ return true;
--- /dev/null
+From 6ebbe97a488179f5dc85f2f1e0c89b486e99ee97 Mon Sep 17 00:00:00 2001
+From: Ahmed Zaki <ahmed.zaki@intel.com>
+Date: Fri, 14 Jun 2024 07:18:42 -0600
+Subject: ice: Add a per-VF limit on number of FDIR filters
+
+From: Ahmed Zaki <ahmed.zaki@intel.com>
+
+commit 6ebbe97a488179f5dc85f2f1e0c89b486e99ee97 upstream.
+
+While the iavf driver adds a s/w limit (128) on the number of FDIR
+filters that the VF can request, a malicious VF driver can request more
+than that and exhaust the resources for other VFs.
+
+Add a similar limit in ice.
+
+CC: stable@vger.kernel.org
+Fixes: 1f7ea1cd6a37 ("ice: Enable FDIR Configure for AVF")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Suggested-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 2 +-
+ drivers/net/ethernet/intel/ice/ice_fdir.h | 3 +++
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 16 ++++++++++++++++
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 1 +
+ 4 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+@@ -531,7 +531,7 @@ ice_parse_rx_flow_user_data(struct ethto
+ *
+ * Returns the number of available flow director filters to this VSI
+ */
+-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+ {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ u16 num_guar;
+--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
+@@ -202,6 +202,8 @@ struct ice_fdir_base_pkt {
+ const u8 *tun_pkt;
+ };
+
++struct ice_vsi;
++
+ int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+ int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+ int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+@@ -213,6 +215,7 @@ int
+ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun);
+ int ice_get_fdir_cnt_all(struct ice_hw *hw);
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
+ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+ bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+ struct ice_fdir_fltr *
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -551,6 +551,8 @@ static void ice_vc_fdir_reset_cnt_all(st
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
++
++ fdir->fdir_fltr_cnt_total = 0;
+ }
+
+ /**
+@@ -1567,6 +1569,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf
+ resp->status = status;
+ resp->flow_id = conf->flow_id;
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
++ vf->fdir.fdir_fltr_cnt_total++;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+@@ -1631,6 +1634,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
++ vf->fdir.fdir_fltr_cnt_total--;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+@@ -1797,6 +1801,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *
+ struct virtchnl_fdir_add *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
++ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+@@ -1805,6 +1810,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
++ vf_vsi = ice_get_vf_vsi(vf);
++
++#define ICE_VF_MAX_FDIR_FILTERS 128
++ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
++ vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++ dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
++ vf->vf_id);
++ goto err_exit;
++ }
++
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
+ struct ice_vf_fdir {
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
++ u16 fdir_fltr_cnt_total;
+ struct ice_fd_hw_prof **fdir_prof;
+
+ struct idr fdir_rule_idr;
--- /dev/null
+From 33b1c47d1fc0b5f06a393bb915db85baacba18ea Mon Sep 17 00:00:00 2001
+From: Shenwei Wang <shenwei.wang@nxp.com>
+Date: Wed, 3 Jul 2024 11:32:50 -0500
+Subject: irqchip/imx-irqsteer: Handle runtime power management correctly
+
+From: Shenwei Wang <shenwei.wang@nxp.com>
+
+commit 33b1c47d1fc0b5f06a393bb915db85baacba18ea upstream.
+
+The power domain is automatically activated from clk_prepare(). However, on
+certain platforms like i.MX8QM and i.MX8QXP, the power-on handling invokes
+sleeping functions, which triggers the 'scheduling while atomic' bug in the
+context switch path during device probing:
+
+ BUG: scheduling while atomic: kworker/u13:1/48/0x00000002
+ Call trace:
+ __schedule_bug+0x54/0x6c
+ __schedule+0x7f0/0xa94
+ schedule+0x5c/0xc4
+ schedule_preempt_disabled+0x24/0x40
+ __mutex_lock.constprop.0+0x2c0/0x540
+ __mutex_lock_slowpath+0x14/0x20
+ mutex_lock+0x48/0x54
+ clk_prepare_lock+0x44/0xa0
+ clk_prepare+0x20/0x44
+ imx_irqsteer_resume+0x28/0xe0
+ pm_generic_runtime_resume+0x2c/0x44
+ __genpd_runtime_resume+0x30/0x80
+ genpd_runtime_resume+0xc8/0x2c0
+ __rpm_callback+0x48/0x1d8
+ rpm_callback+0x6c/0x78
+ rpm_resume+0x490/0x6b4
+ __pm_runtime_resume+0x50/0x94
+ irq_chip_pm_get+0x2c/0xa0
+ __irq_do_set_handler+0x178/0x24c
+ irq_set_chained_handler_and_data+0x60/0xa4
+ mxc_gpio_probe+0x160/0x4b0
+
+Cure this by implementing the irq_bus_lock/sync_unlock() interrupt chip
+callbacks and handling power management in them, as they are invoked from
+non-atomic context.
+
+[ tglx: Rewrote change log, added Fixes tag ]
+
+Fixes: 0136afa08967 ("irqchip: Add driver for imx-irqsteer controller")
+Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240703163250.47887-1-shenwei.wang@nxp.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-imx-irqsteer.c | 24 +++++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/drivers/irqchip/irq-imx-irqsteer.c
++++ b/drivers/irqchip/irq-imx-irqsteer.c
+@@ -36,6 +36,7 @@ struct irqsteer_data {
+ int channel;
+ struct irq_domain *domain;
+ u32 *saved_reg;
++ struct device *dev;
+ };
+
+ static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
+@@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+
++static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
++{
++ struct irqsteer_data *data = d->chip_data;
++
++ pm_runtime_get_sync(data->dev);
++}
++
++static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
++{
++ struct irqsteer_data *data = d->chip_data;
++
++ pm_runtime_put_autosuspend(data->dev);
++}
++
+ static const struct irq_chip imx_irqsteer_irq_chip = {
+- .name = "irqsteer",
+- .irq_mask = imx_irqsteer_irq_mask,
+- .irq_unmask = imx_irqsteer_irq_unmask,
++ .name = "irqsteer",
++ .irq_mask = imx_irqsteer_irq_mask,
++ .irq_unmask = imx_irqsteer_irq_unmask,
++ .irq_bus_lock = imx_irqsteer_irq_bus_lock,
++ .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock,
+ };
+
+ static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
+@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct pla
+ if (!data)
+ return -ENOMEM;
+
++ data->dev = &pdev->dev;
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
--- /dev/null
+From 6ce3e98184b625d2870991880bf9586ded7ea7f9 Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Fri, 14 Jun 2024 19:32:04 +0200
+Subject: irqdomain: Fixed unbalanced fwnode get and put
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit 6ce3e98184b625d2870991880bf9586ded7ea7f9 upstream.
+
+fwnode_handle_get(fwnode) is called when a domain is created with fwnode
+passed as a function parameter. fwnode_handle_put(domain->fwnode) is called
+when the domain is destroyed, but during creation a path exists that
+does not set domain->fwnode.
+
+If this path is taken, the fwnode get will never be put.
+
+To avoid the unbalanced get and put, set domain->fwnode unconditionally.
+
+Fixes: d59f6617eef0 ("genirq: Allow fwnode to carry name information only")
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240614173232.1184015-4-herve.codina@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/irqdomain.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -154,7 +154,6 @@ static struct irq_domain *__irq_domain_c
+ switch (fwid->type) {
+ case IRQCHIP_FWNODE_NAMED:
+ case IRQCHIP_FWNODE_NAMED_ID:
+- domain->fwnode = fwnode;
+ domain->name = kstrdup(fwid->name, GFP_KERNEL);
+ if (!domain->name) {
+ kfree(domain);
+@@ -163,7 +162,6 @@ static struct irq_domain *__irq_domain_c
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ break;
+ default:
+- domain->fwnode = fwnode;
+ domain->name = fwid->name;
+ break;
+ }
+@@ -183,7 +181,6 @@ static struct irq_domain *__irq_domain_c
+ }
+
+ domain->name = strreplace(name, '/', ':');
+- domain->fwnode = fwnode;
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ }
+
+@@ -199,8 +196,8 @@ static struct irq_domain *__irq_domain_c
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ }
+
+- fwnode_handle_get(fwnode);
+- fwnode_dev_initialized(fwnode, true);
++ domain->fwnode = fwnode_handle_get(fwnode);
++ fwnode_dev_initialized(domain->fwnode, true);
+
+ /* Fill structure */
+ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
--- /dev/null
+From 3415b10a03945b0da4a635e146750dfe5ce0f448 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Fri, 26 Jul 2024 11:05:00 -0700
+Subject: kbuild: Fix '-S -c' in x86 stack protector scripts
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 3415b10a03945b0da4a635e146750dfe5ce0f448 upstream.
+
+After a recent change in clang to stop consuming all instances of '-S'
+and '-c' [1], the stack protector scripts break due to the kernel's use
+of -Werror=unused-command-line-argument to catch cases where flags are
+not being properly consumed by the compiler driver:
+
+ $ echo | clang -o - -x c - -S -c -Werror=unused-command-line-argument
+ clang: error: argument unused during compilation: '-c' [-Werror,-Wunused-command-line-argument]
+
+This results in CONFIG_STACKPROTECTOR getting disabled because
+CONFIG_CC_HAS_SANE_STACKPROTECTOR is no longer set.
+
+'-c' and '-S' both instruct the compiler to stop at different stages of
+the pipeline ('-S' after compiling, '-c' after assembling), so having
+them present together in the same command makes little sense. In this
+case, the test wants to stop before assembling because it is looking at
+the textual assembly output of the compiler for either '%fs' or '%gs',
+so remove '-c' from the list of arguments to resolve the error.
+
+All versions of GCC continue to work after this change, along with
+versions of clang that do or do not contain the change mentioned above.
+
+Cc: stable@vger.kernel.org
+Fixes: 4f7fd4d7a791 ("[PATCH] Add the -fstack-protector option to the CFLAGS")
+Fixes: 60a5317ff0f4 ("x86: implement x86_32 stack protector")
+Link: https://github.com/llvm/llvm-project/commit/6461e537815f7fa68cef06842505353cf5600e9c [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/gcc-x86_32-has-stack-protector.sh | 2 +-
+ scripts/gcc-x86_64-has-stack-protector.sh | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -5,4 +5,4 @@
+ # -mstack-protector-guard-reg, added by
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,4 +1,4 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
--- /dev/null
+From dd6e9894b451e7c85cceb8e9dc5432679a70e7dc Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Thu, 30 May 2024 21:14:37 +0800
+Subject: kobject_uevent: Fix OOB access within zap_modalias_env()
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit dd6e9894b451e7c85cceb8e9dc5432679a70e7dc upstream.
+
+zap_modalias_env() wrongly calculates the size of the memory block to
+move, which causes an out-of-bounds memory access if the variable
+MODALIAS is not the last one within its @env parameter. Fix this by
+correcting the size passed to memmove().
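+
+A sketch of the corrected size, using the buffer layout described in
+the comment added below: the block to move starts at @env->envp[i + 1]
+and runs to the end of the used buffer, so its length is
+
+	env->buflen - (env->envp[i + 1] - env->envp[0])
+
+The old size, @env->buflen - len, also counted the bytes that precede
+the removed MODALIAS entry, so memmove() read past the end of the used
+buffer whenever MODALIAS was not the first variable.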
+
+Fixes: 9b3fa47d4a76 ("kobject: fix suppressing modalias in uevents delivered over netlink")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Reviewed-by: Lk Sii <lk_sii@163.com>
+Link: https://lore.kernel.org/r/1717074877-11352-1-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/kobject_uevent.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -432,8 +432,23 @@ static void zap_modalias_env(struct kobj
+ len = strlen(env->envp[i]) + 1;
+
+ if (i != env->envp_idx - 1) {
++ /* @env->envp[] contains pointers to @env->buf[]
++ * with @env->buflen chars, and we are removing
++ * variable MODALIAS here pointed by @env->envp[i]
++ * with length @len as shown below:
++ *
++ * 0 @env->buf[] @env->buflen
++ * ---------------------------------------------
++ * ^ ^ ^ ^
++ * | |-> @len <-| target block |
++ * @env->envp[0] @env->envp[i] @env->envp[i + 1]
++ *
++ * so the "target block" indicated above is moved
++ * backward by @len, and its right size is
++ * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
++ */
+ memmove(env->envp[i], env->envp[i + 1],
+- env->buflen - len);
++ env->buflen - (env->envp[i + 1] - env->envp[0]));
+
+ for (j = i; j < env->envp_idx - 1; j++)
+ env->envp[j] = env->envp[j + 1] - len;
--- /dev/null
+From 4cd7ba16a0afb36550eed7690e73d3e7a743fa96 Mon Sep 17 00:00:00 2001
+From: Ram Tummala <rtummala@nvidia.com>
+Date: Tue, 9 Jul 2024 18:45:39 -0700
+Subject: mm: fix old/young bit handling in the faulting path
+
+From: Ram Tummala <rtummala@nvidia.com>
+
+commit 4cd7ba16a0afb36550eed7690e73d3e7a743fa96 upstream.
+
+Commit 3bd786f76de2 ("mm: convert do_set_pte() to set_pte_range()")
+replaced do_set_pte() with set_pte_range() and that introduced a
+regression in the following faulting path of non-anonymous vmas which
+caused the PTE for the faulting address to be marked as old instead of
+young.
+
+handle_pte_fault()
+ do_pte_missing()
+ do_fault()
+ do_read_fault() || do_cow_fault() || do_shared_fault()
+ finish_fault()
+ set_pte_range()
+
+The polarity of prefault calculation is incorrect. This leads to prefault
+being incorrectly set for the faulting address. The following check will
+incorrectly mark the PTE old rather than young. On some architectures
+this will cause a double fault to mark it young when the access is
+retried.
+
+ if (prefault && arch_wants_old_prefaulted_pte())
+ entry = pte_mkold(entry);
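+
+For example (assuming 4 KiB pages), with addr = 0x1000, nr = 4 and
+vmf->address = 0x2000 the faulting page lies inside the range being
+set, so prefault must be false and the entry stays young; the unnegated
+in_range() evaluated to true here and aged the entry instead.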
+
+On a subsequent fault on the same address, the faulting path will see a
+non-NULL vmf->pte and instead of reaching the do_pte_missing() path, the PTE
+will then be correctly marked young in handle_pte_fault() itself.
+
+Due to this bug, performance degradation in the fault handling path will
+be observed due to unnecessary double faulting.
+
+Link: https://lkml.kernel.org/r/20240710014539.746200-1-rtummala@nvidia.com
+Fixes: 3bd786f76de2 ("mm: convert do_set_pte() to set_pte_range()")
+Signed-off-by: Ram Tummala <rtummala@nvidia.com>
+Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4353,7 +4353,7 @@ void set_pte_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma = vmf->vma;
+ bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+- bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
++ bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
+ pte_t entry;
+
+ flush_icache_pages(vma, page, nr);
--- /dev/null
+From af649773fb25250cd22625af021fb6275c56a3ee Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Mon, 8 Jul 2024 08:56:32 +0100
+Subject: mm/numa_balancing: teach mpol_to_str about the balancing mode
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit af649773fb25250cd22625af021fb6275c56a3ee upstream.
+
+Since balancing mode was added in bda420b98505 ("numa balancing: migrate
+on fault among multiple bound nodes"), it was possible to set this mode
+but it wouldn't be shown in /proc/<pid>/numa_maps since there was no
+support for it in the mpol_to_str() helper.
+
+Furthermore, because the balancing mode sets the MPOL_F_MORON flag, it
+would be displayed as 'default' due to a workaround introduced a few years
+earlier in 8790c71a18e5 ("mm/mempolicy.c: fix mempolicy printing in
+numa_maps").
+
+To tidy this up we implement two changes:
+
+Replace the MPOL_F_MORON check by pointer comparison against the
+preferred_node_policy array. By doing this we generalise the current
+special casing and replace the incorrect 'default' with the correct 'bind'
+for the mode.
+
+Secondly, we add a string representation and corresponding handling for
+the MPOL_F_NUMA_BALANCING flag.
+
+With the two changes together we start showing the balancing flag when it
+is set and therefore complete the fix.
+
+The representation format chosen is to separate multiple flags with
+vertical bars, following what existed long ago in kernel 2.6.25. But
+since between then and now there was no way to display multiple flags,
+this patch does not change the format in practice.
+
+Some /proc/<pid>/numa_maps output examples:
+
+ 555559580000 bind=balancing:0-1,3 file=...
+ 555585800000 bind=balancing|static:0,2 file=...
+ 555635240000 prefer=relative:0 file=
+
+Link: https://lkml.kernel.org/r/20240708075632.95857-1-tursulin@igalia.com
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: bda420b98505 ("numa balancing: migrate on fault among multiple bound nodes")
+References: 8790c71a18e5 ("mm/mempolicy.c: fix mempolicy printing in numa_maps")
+Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: <stable@vger.kernel.org> [5.12+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -3134,8 +3134,9 @@ out:
+ * @pol: pointer to mempolicy to be formatted
+ *
+ * Convert @pol into a string. If @buffer is too short, truncate the string.
+- * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
+- * longest flag, "relative", and to display at least a few node ids.
++ * Recommend a @maxlen of at least 51 for the longest mode, "weighted
++ * interleave", plus the longest flag flags, "relative|balancing", and to
++ * display at least a few node ids.
+ */
+ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ {
+@@ -3144,7 +3145,10 @@ void mpol_to_str(char *buffer, int maxle
+ unsigned short mode = MPOL_DEFAULT;
+ unsigned short flags = 0;
+
+- if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
++ if (pol &&
++ pol != &default_policy &&
++ !(pol >= &preferred_node_policy[0] &&
++ pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
+ mode = pol->mode;
+ flags = pol->flags;
+ }
+@@ -3171,12 +3175,18 @@ void mpol_to_str(char *buffer, int maxle
+ p += snprintf(p, buffer + maxlen - p, "=");
+
+ /*
+- * Currently, the only defined flags are mutually exclusive
++ * Static and relative are mutually exclusive.
+ */
+ if (flags & MPOL_F_STATIC_NODES)
+ p += snprintf(p, buffer + maxlen - p, "static");
+ else if (flags & MPOL_F_RELATIVE_NODES)
+ p += snprintf(p, buffer + maxlen - p, "relative");
++
++ if (flags & MPOL_F_NUMA_BALANCING) {
++ if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
++ p += snprintf(p, buffer + maxlen - p, "|");
++ p += snprintf(p, buffer + maxlen - p, "balancing");
++ }
+ }
+
+ if (!nodes_empty(nodes))
--- /dev/null
+From 1c184baccf0d5e2ef4cc1562261d0e48508a1c2b Mon Sep 17 00:00:00 2001
+From: Joy Chakraborty <joychakr@google.com>
+Date: Wed, 12 Jun 2024 08:36:35 +0000
+Subject: rtc: cmos: Fix return value of nvmem callbacks
+
+From: Joy Chakraborty <joychakr@google.com>
+
+commit 1c184baccf0d5e2ef4cc1562261d0e48508a1c2b upstream.
+
+Read/write callbacks registered with nvmem core expect 0 to be returned
+on success and a negative value to be returned on failure.
+
+cmos_nvram_read()/cmos_nvram_write() currently return the number of
+bytes read or written. Fix them to return 0 on success and -EIO in case
+the number of bytes requested was not read or written.
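+
+The convention the nvmem core expects, as a hedged sketch with a
+hypothetical provider (foo_nvram is illustrative; assumes linux/types.h
+and linux/string.h):
+
+	struct foo_nvram { u8 *buf; size_t size; };
+
+	static int foo_nvram_read(void *priv, unsigned int off, void *val,
+				  size_t count)
+	{
+		struct foo_nvram *chip = priv;
+
+		if (off + count > chip->size)
+			return -EIO;	/* negative errno on failure */
+
+		memcpy(val, chip->buf + off, count);
+		return 0;		/* 0 on success, never a byte count */
+	}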
+
+Fixes: 8b5b7958fd1c ("rtc: cmos: use generic nvmem")
+Cc: stable@vger.kernel.org
+Signed-off-by: Joy Chakraborty <joychakr@google.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/20240612083635.1253039-1-joychakr@google.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rtc/rtc-cmos.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -643,11 +643,10 @@ static int cmos_nvram_read(void *priv, u
+ size_t count)
+ {
+ unsigned char *buf = val;
+- int retval;
+
+ off += NVRAM_OFFSET;
+ spin_lock_irq(&rtc_lock);
+- for (retval = 0; count; count--, off++, retval++) {
++ for (; count; count--, off++) {
+ if (off < 128)
+ *buf++ = CMOS_READ(off);
+ else if (can_bank2)
+@@ -657,7 +656,7 @@ static int cmos_nvram_read(void *priv, u
+ }
+ spin_unlock_irq(&rtc_lock);
+
+- return retval;
++ return count ? -EIO : 0;
+ }
+
+ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+@@ -665,7 +664,6 @@ static int cmos_nvram_write(void *priv,
+ {
+ struct cmos_rtc *cmos = priv;
+ unsigned char *buf = val;
+- int retval;
+
+ /* NOTE: on at least PCs and Ataris, the boot firmware uses a
+ * checksum on part of the NVRAM data. That's currently ignored
+@@ -674,7 +672,7 @@ static int cmos_nvram_write(void *priv,
+ */
+ off += NVRAM_OFFSET;
+ spin_lock_irq(&rtc_lock);
+- for (retval = 0; count; count--, off++, retval++) {
++ for (; count; count--, off++) {
+ /* don't trash RTC registers */
+ if (off == cmos->day_alrm
+ || off == cmos->mon_alrm
+@@ -689,7 +687,7 @@ static int cmos_nvram_write(void *priv,
+ }
+ spin_unlock_irq(&rtc_lock);
+
+- return retval;
++ return count ? -EIO : 0;
+ }
+
+ /*----------------------------------------------------------------*/
f2fs-fix-return-value-of-f2fs_convert_inline_inode.patch
f2fs-use-meta-inode-for-gc-of-atomic-file.patch
f2fs-use-meta-inode-for-gc-of-cow-file.patch
+clk-davinci-da8xx-cfgchip-initialize-clk_init_data-before-use.patch
+ubi-eba-properly-rollback-inside-self_check_eba.patch
+block-fix-deadlock-between-sd_remove-sd_release.patch
+mm-fix-old-young-bit-handling-in-the-faulting-path.patch
+decompress_bunzip2-fix-rare-decompression-failure.patch
+kbuild-fix-s-c-in-x86-stack-protector-scripts.patch
+asoc-sof-ipc4-topology-preserve-the-dma-link-id-for-chaindma-on-unprepare.patch
+asoc-amd-yc-support-mic-on-lenovo-thinkpad-e16-gen-2.patch
+kobject_uevent-fix-oob-access-within-zap_modalias_env.patch
+gve-fix-an-edge-case-for-tso-skb-validity-check.patch
+ice-add-a-per-vf-limit-on-number-of-fdir-filters.patch
+devres-fix-devm_krealloc-wasting-memory.patch
+devres-fix-memory-leakage-caused-by-driver-api-devm_free_percpu.patch
+irqdomain-fixed-unbalanced-fwnode-get-and-put.patch
+irqchip-imx-irqsteer-handle-runtime-power-management-correctly.patch
+mm-numa_balancing-teach-mpol_to_str-about-the-balancing-mode.patch
+rtc-cmos-fix-return-value-of-nvmem-callbacks.patch
--- /dev/null
+From 745d9f4a31defec731119ee8aad8ba9f2536dd9a Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Thu, 29 Feb 2024 23:42:36 +0300
+Subject: ubi: eba: properly rollback inside self_check_eba
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit 745d9f4a31defec731119ee8aad8ba9f2536dd9a upstream.
+
+In case of a memory allocation failure in the volumes loop we can only
+process the already allocated scan_eba and fm_eba array elements on the
+error path - others are still uninitialized.
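+
+The general shape of the fix, as a minimal sketch (tbl, num and sz are
+illustrative names, not the driver's):
+
+	int i, ret = 0;
+
+	for (i = 0; i < num; i++) {
+		tbl[i] = kmalloc(sz, GFP_KERNEL);
+		if (!tbl[i]) {
+			ret = -ENOMEM;
+			break;
+		}
+	}
+	if (ret) {
+		/* roll back only the entries that were allocated */
+		while (--i >= 0)
+			kfree(tbl[i]);
+	}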
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 00abf3041590 ("UBI: Add self_check_eba()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/ubi/eba.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1560,6 +1560,7 @@ int self_check_eba(struct ubi_device *ub
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
++ kfree(scan_eba[i]);
+ goto out_free;
+ }
+
+@@ -1595,7 +1596,7 @@ int self_check_eba(struct ubi_device *ub
+ }
+
+ out_free:
+- for (i = 0; i < num_volumes; i++) {
++ while (--i >= 0) {
+ if (!ubi->volumes[i])
+ continue;
+