--- /dev/null
+From b3bebe44306e23827397d0d774d206e3fa374041 Mon Sep 17 00:00:00 2001
+From: Suren Baghdasaryan <surenb@google.com>
+Date: Wed, 17 Jul 2024 14:28:44 -0700
+Subject: alloc_tag: outline and export free_reserved_page()
+
+From: Suren Baghdasaryan <surenb@google.com>
+
+commit b3bebe44306e23827397d0d774d206e3fa374041 upstream.
+
+Outline and export free_reserved_page() because modules use it and it in
+turn uses page_ext_{get|put} which should not be exported. The same
+result could be obtained by outlining {get|put}_page_tag_ref() but that
+would have higher performance impact as these functions are used in more
+performance critical paths.
+
+Link: https://lkml.kernel.org/r/20240717212844.2749975-1-surenb@google.com
+Fixes: dcfe378c81f7 ("lib: introduce support for page allocation tagging")
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202407080044.DWMC9N9I-lkp@intel.com/
+Suggested-by: Christoph Hellwig <hch@infradead.org>
+Suggested-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: Sourav Panda <souravpanda@google.com>
+Cc: <stable@vger.kernel.org> [6.10]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 16 +---------------
+ mm/page_alloc.c | 17 +++++++++++++++++
+ 2 files changed, 18 insertions(+), 15 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -3177,21 +3177,7 @@ extern void reserve_bootmem_region(phys_
+ phys_addr_t end, int nid);
+
+ /* Free the reserved page into the buddy system, so it gets managed. */
+-static inline void free_reserved_page(struct page *page)
+-{
+- if (mem_alloc_profiling_enabled()) {
+- union codetag_ref *ref = get_page_tag_ref(page);
+-
+- if (ref) {
+- set_codetag_empty(ref);
+- put_page_tag_ref(ref);
+- }
+- }
+- ClearPageReserved(page);
+- init_page_count(page);
+- __free_page(page);
+- adjust_managed_page_count(page, 1);
+-}
++void free_reserved_page(struct page *page);
+ #define free_highmem_page(page) free_reserved_page(page)
+
+ static inline void mark_page_reserved(struct page *page)
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5809,6 +5809,23 @@ unsigned long free_reserved_area(void *s
+ return pages;
+ }
+
++void free_reserved_page(struct page *page)
++{
++ if (mem_alloc_profiling_enabled()) {
++ union codetag_ref *ref = get_page_tag_ref(page);
++
++ if (ref) {
++ set_codetag_empty(ref);
++ put_page_tag_ref(ref);
++ }
++ }
++ ClearPageReserved(page);
++ init_page_count(page);
++ __free_page(page);
++ adjust_managed_page_count(page, 1);
++}
++EXPORT_SYMBOL(free_reserved_page);
++
+ static int page_alloc_cpu_dead(unsigned int cpu)
+ {
+ struct zone *zone;
--- /dev/null
+From 36639013b3462c06ff8e3400a427f775b4fc97f5 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Thu, 25 Jul 2024 10:03:45 +0100
+Subject: arm64: mm: Fix lockless walks with static and dynamic page-table folding
+
+From: Will Deacon <will@kernel.org>
+
+commit 36639013b3462c06ff8e3400a427f775b4fc97f5 upstream.
+
+Lina reports random oopsen originating from the fast GUP code when
+16K pages are used with 4-level page-tables, the fourth level being
+folded at runtime due to lack of LPA2.
+
+In this configuration, the generic implementation of
+p4d_offset_lockless() will return a 'p4d_t *' corresponding to the
+'pgd_t' allocated on the stack of the caller, gup_fast_pgd_range().
+This is normally fine, but when the fourth level of page-table is folded
+at runtime, pud_offset_lockless() will offset from the address of the
+'p4d_t' to calculate the address of the PUD in the same page-table page.
+This results in a stray stack read when the 'p4d_t' has been allocated
+on the stack and can send the walker into the weeds.
+
+Fix the problem by providing our own definition of p4d_offset_lockless()
+when CONFIG_PGTABLE_LEVELS <= 4 which returns the real page-table
+pointer rather than the address of the local stack variable.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/50360968-13fb-4e6f-8f52-1725b3177215@asahilina.net
+Fixes: 0dd4f60a2c76 ("arm64: mm: Add support for folding PUDs at runtime")
+Reported-by: Asahi Lina <lina@asahilina.net>
+Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20240725090345.28461-1-will@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/pgtable.h | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -1065,6 +1065,28 @@ static inline bool pgtable_l5_enabled(vo
+
+ #define p4d_offset_kimg(dir,addr) ((p4d_t *)dir)
+
++static inline
++p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
++{
++ /*
++ * With runtime folding of the pud, pud_offset_lockless() passes
++ * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
++ * will offset the pointer assuming that it points into
++ * a page-table page. However, the fast GUP path passes us a
++ * pgd_t allocated on the stack and so we must use the original
++ * pointer in 'pgdp' to construct the p4d pointer instead of
++ * using the generic p4d_offset_lockless() implementation.
++ *
++ * Note: reusing the original pointer means that we may
++ * dereference the same (live) page-table entry multiple times.
++ * This is safe because it is still only loaded once in the
++ * context of each level and the CPU guarantees same-address
++ * read-after-read ordering.
++ */
++ return p4d_offset(pgdp, addr);
++}
++#define p4d_offset_lockless p4d_offset_lockless_folded
++
+ #endif /* CONFIG_PGTABLE_LEVELS > 4 */
+
+ #define pgd_ERROR(e) \
--- /dev/null
+From 1d9ce4440414c92acb17eece3218fe5c92b141e3 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 25 Jul 2024 08:54:28 +0200
+Subject: ASoC: amd: yc: Support mic on Lenovo Thinkpad E16 Gen 2
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 1d9ce4440414c92acb17eece3218fe5c92b141e3 upstream.
+
+Lenovo Thinkpad E16 Gen 2 AMD model (model 21M5) needs a corresponding
+quirk entry for making the internal mic working.
+
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1228269
+Cc: stable@vger.kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20240725065442.9293-1-tiwai@suse.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/amd/yc/acp6x-mach.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -224,6 +224,13 @@ static const struct dmi_system_id yc_acp
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
+ }
+ },
--- /dev/null
+From ae67ed9010a7b52933ad1038d13df8a3aae34b83 Mon Sep 17 00:00:00 2001
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Date: Wed, 24 Jul 2024 11:19:31 +0300
+Subject: ASoC: SOF: ipc4-topology: Only handle dai_config with HW_PARAMS for ChainDMA
+
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+
+commit ae67ed9010a7b52933ad1038d13df8a3aae34b83 upstream.
+
+The DMA Link ID is only valid in snd_sof_dai_config_data when the
+dai_config is called with HW_PARAMS.
+
+The commit that this patch fixes actually moved a code section without
+changing it; the same bug exists in the original code, so kernels prior
+to 6.9 need a different patch.
+
+Cc: stable@vger.kernel.org
+Fixes: 3858464de57b ("ASoC: SOF: ipc4-topology: change chain_dma handling in dai_config")
+Link: https://github.com/thesofproject/linux/issues/5116
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Link: https://patch.msgid.link/20240724081932.24542-2-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-topology.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -3093,8 +3093,14 @@ static int sof_ipc4_dai_config(struct sn
+ return 0;
+
+ if (pipeline->use_chain_dma) {
+- pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
+- pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data);
++ /*
++ * Only configure the DMA Link ID for ChainDMA when this op is
++ * invoked with SOF_DAI_CONFIG_FLAGS_HW_PARAMS
++ */
++ if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
++ pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
++ pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data);
++ }
+ return 0;
+ }
+
--- /dev/null
+From e6fc5fcaeffa04a3fa1db8dfccdfd4b6001c0446 Mon Sep 17 00:00:00 2001
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Date: Wed, 24 Jul 2024 11:19:32 +0300
+Subject: ASoC: SOF: ipc4-topology: Preserve the DMA Link ID for ChainDMA on unprepare
+
+From: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+
+commit e6fc5fcaeffa04a3fa1db8dfccdfd4b6001c0446 upstream.
+
+The DMA Link ID is set to the IPC message's primary during dai_config,
+which is only during hw_params.
+During xrun handling the hw_params is not called and the DMA Link ID
+information will be lost.
+
+All other fields in the message are expected to be 0 for re-configuration;
+only the DMA Link ID needs to be preserved, and in case of repeated
+dai_config it is correctly updated (masked and then set).
+
+Cc: stable@vger.kernel.org
+Fixes: ca5ce0caa67f ("ASoC: SOF: ipc4/intel: Add support for chained DMA")
+Link: https://github.com/thesofproject/linux/issues/5116
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Link: https://patch.msgid.link/20240724081932.24542-3-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-topology.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -1358,7 +1358,13 @@ static void sof_ipc4_unprepare_copier_mo
+ ipc4_copier = dai->private;
+
+ if (pipeline->use_chain_dma) {
+- pipeline->msg.primary = 0;
++ /*
++ * Preserve the DMA Link ID and clear other bits since
++ * the DMA Link ID is only configured once during
++ * dai_config, other fields are expected to be 0 for
++ * re-configuration
++ */
++ pipeline->msg.primary &= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
+ pipeline->msg.extension = 0;
+ }
+
--- /dev/null
+From 7e04da2dc7013af50ed3a2beb698d5168d1e594b Mon Sep 17 00:00:00 2001
+From: Yang Yang <yang.yang@vivo.com>
+Date: Wed, 24 Jul 2024 15:04:12 +0800
+Subject: block: fix deadlock between sd_remove & sd_release
+
+From: Yang Yang <yang.yang@vivo.com>
+
+commit 7e04da2dc7013af50ed3a2beb698d5168d1e594b upstream.
+
+Our test report the following hung task:
+
+[ 2538.459400] INFO: task "kworker/0:0":7 blocked for more than 188 seconds.
+[ 2538.459427] Call trace:
+[ 2538.459430] __switch_to+0x174/0x338
+[ 2538.459436] __schedule+0x628/0x9c4
+[ 2538.459442] schedule+0x7c/0xe8
+[ 2538.459447] schedule_preempt_disabled+0x24/0x40
+[ 2538.459453] __mutex_lock+0x3ec/0xf04
+[ 2538.459456] __mutex_lock_slowpath+0x14/0x24
+[ 2538.459459] mutex_lock+0x30/0xd8
+[ 2538.459462] del_gendisk+0xdc/0x350
+[ 2538.459466] sd_remove+0x30/0x60
+[ 2538.459470] device_release_driver_internal+0x1c4/0x2c4
+[ 2538.459474] device_release_driver+0x18/0x28
+[ 2538.459478] bus_remove_device+0x15c/0x174
+[ 2538.459483] device_del+0x1d0/0x358
+[ 2538.459488] __scsi_remove_device+0xa8/0x198
+[ 2538.459493] scsi_forget_host+0x50/0x70
+[ 2538.459497] scsi_remove_host+0x80/0x180
+[ 2538.459502] usb_stor_disconnect+0x68/0xf4
+[ 2538.459506] usb_unbind_interface+0xd4/0x280
+[ 2538.459510] device_release_driver_internal+0x1c4/0x2c4
+[ 2538.459514] device_release_driver+0x18/0x28
+[ 2538.459518] bus_remove_device+0x15c/0x174
+[ 2538.459523] device_del+0x1d0/0x358
+[ 2538.459528] usb_disable_device+0x84/0x194
+[ 2538.459532] usb_disconnect+0xec/0x300
+[ 2538.459537] hub_event+0xb80/0x1870
+[ 2538.459541] process_scheduled_works+0x248/0x4dc
+[ 2538.459545] worker_thread+0x244/0x334
+[ 2538.459549] kthread+0x114/0x1bc
+
+[ 2538.461001] INFO: task "fsck.":15415 blocked for more than 188 seconds.
+[ 2538.461014] Call trace:
+[ 2538.461016] __switch_to+0x174/0x338
+[ 2538.461021] __schedule+0x628/0x9c4
+[ 2538.461025] schedule+0x7c/0xe8
+[ 2538.461030] blk_queue_enter+0xc4/0x160
+[ 2538.461034] blk_mq_alloc_request+0x120/0x1d4
+[ 2538.461037] scsi_execute_cmd+0x7c/0x23c
+[ 2538.461040] ioctl_internal_command+0x5c/0x164
+[ 2538.461046] scsi_set_medium_removal+0x5c/0xb0
+[ 2538.461051] sd_release+0x50/0x94
+[ 2538.461054] blkdev_put+0x190/0x28c
+[ 2538.461058] blkdev_release+0x28/0x40
+[ 2538.461063] __fput+0xf8/0x2a8
+[ 2538.461066] __fput_sync+0x28/0x5c
+[ 2538.461070] __arm64_sys_close+0x84/0xe8
+[ 2538.461073] invoke_syscall+0x58/0x114
+[ 2538.461078] el0_svc_common+0xac/0xe0
+[ 2538.461082] do_el0_svc+0x1c/0x28
+[ 2538.461087] el0_svc+0x38/0x68
+[ 2538.461090] el0t_64_sync_handler+0x68/0xbc
+[ 2538.461093] el0t_64_sync+0x1a8/0x1ac
+
+ T1: T2:
+ sd_remove
+ del_gendisk
+ __blk_mark_disk_dead
+ blk_freeze_queue_start
+ ++q->mq_freeze_depth
+ bdev_release
+ mutex_lock(&disk->open_mutex)
+ sd_release
+ scsi_execute_cmd
+ blk_queue_enter
+ wait_event(!q->mq_freeze_depth)
+ mutex_lock(&disk->open_mutex)
+
+SCSI does not set GD_OWNS_QUEUE, so QUEUE_FLAG_DYING is not set in
+this scenario. This is a classic ABBA deadlock. To fix the deadlock,
+make sure we don't try to acquire disk->open_mutex after freezing
+the queue.
+
+Cc: stable@vger.kernel.org
+Fixes: eec1be4c30df ("block: delete partitions later in del_gendisk")
+Signed-off-by: Yang Yang <yang.yang@vivo.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20240724070412.22521-1-yang.yang@vivo.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/genhd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -663,12 +663,12 @@ void del_gendisk(struct gendisk *disk)
+ */
+ if (!test_bit(GD_DEAD, &disk->state))
+ blk_report_disk_dead(disk, false);
+- __blk_mark_disk_dead(disk);
+
+ /*
+ * Drop all partitions now that the disk is marked dead.
+ */
+ mutex_lock(&disk->open_mutex);
++ __blk_mark_disk_dead(disk);
+ xa_for_each_start(&disk->part_tbl, idx, part, 1)
+ drop_partition(part);
+ mutex_unlock(&disk->open_mutex);
--- /dev/null
+From a83b22754e351f13fb46596c85f667dc33da71ec Mon Sep 17 00:00:00 2001
+From: Bastien Curutchet <bastien.curutchet@bootlin.com>
+Date: Thu, 18 Jul 2024 13:55:34 +0200
+Subject: clk: davinci: da8xx-cfgchip: Initialize clk_init_data before use
+
+From: Bastien Curutchet <bastien.curutchet@bootlin.com>
+
+commit a83b22754e351f13fb46596c85f667dc33da71ec upstream.
+
+The flag attribute of the struct clk_init_data isn't initialized before
+the devm_clk_hw_register() call. This can lead to unexpected behavior
+during registration.
+
+Initialize the entire clk_init_data to zero at declaration.
+
+Cc: stable@vger.kernel.org
+Fixes: 58e1e2d2cd89 ("clk: davinci: cfgchip: Add TI DA8XX USB PHY clocks")
+Signed-off-by: Bastien Curutchet <bastien.curutchet@bootlin.com>
+Reviewed-by: David Lechner <david@lechnology.com>
+Link: https://lore.kernel.org/r/20240718115534.41513-1-bastien.curutchet@bootlin.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/davinci/da8xx-cfgchip.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/clk/davinci/da8xx-cfgchip.c
++++ b/drivers/clk/davinci/da8xx-cfgchip.c
+@@ -508,7 +508,7 @@ da8xx_cfgchip_register_usb0_clk48(struct
+ const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
+ struct clk *fck_clk;
+ struct da8xx_usb0_clk48 *usb0;
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ int ret;
+
+ fck_clk = devm_clk_get(dev, "fck");
+@@ -583,7 +583,7 @@ da8xx_cfgchip_register_usb1_clk48(struct
+ {
+ const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
+ struct da8xx_usb1_clk48 *usb1;
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ int ret;
+
+ usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
--- /dev/null
+From f99b3feb3b0e9fca2257c90fc8317be8ee44c19a Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Mon, 22 Jul 2024 08:33:09 +0200
+Subject: clk: samsung: fix getting Exynos4 fin_pll rate from external clocks
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit f99b3feb3b0e9fca2257c90fc8317be8ee44c19a upstream.
+
+Commit 0dc83ad8bfc9 ("clk: samsung: Don't register clkdev lookup for the
+fixed rate clocks") claimed registering clkdev lookup is not necessary
+anymore, but that was not entirely true: Exynos4210/4212/4412 clock code
+still relied on it to get the clock rate of xxti or xusbxti external
+clocks.
+
+Drop that requirement by accessing already registered clk_hw when
+looking up the xxti/xusbxti rate.
+
+Reported-by: Artur Weber <aweber.kernel@gmail.com>
+Closes: https://lore.kernel.org/all/6227c1fb-d769-462a-b79b-abcc15d3db8e@gmail.com/
+Fixes: 0dc83ad8bfc9 ("clk: samsung: Don't register clkdev lookup for the fixed rate clocks")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20240722063309.60054-1-krzysztof.kozlowski@linaro.org
+Tested-by: Artur Weber <aweber.kernel@gmail.com> # Exynos4212
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/samsung/clk-exynos4.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index a026ccca7315..28945b6b0ee1 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -1040,19 +1040,20 @@ static unsigned long __init exynos4_get_xom(void)
+ static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx)
+ {
+ struct samsung_fixed_rate_clock fclk;
+- struct clk *clk;
+- unsigned long finpll_f = 24000000;
++ unsigned long finpll_f;
++ unsigned int parent;
+ char *parent_name;
+ unsigned int xom = exynos4_get_xom();
+
+ parent_name = xom & 1 ? "xusbxti" : "xxti";
+- clk = clk_get(NULL, parent_name);
+- if (IS_ERR(clk)) {
++ parent = xom & 1 ? CLK_XUSBXTI : CLK_XXTI;
++
++ finpll_f = clk_hw_get_rate(ctx->clk_data.hws[parent]);
++ if (!finpll_f) {
+ pr_err("%s: failed to lookup parent clock %s, assuming "
+ "fin_pll clock frequency is 24MHz\n", __func__,
+ parent_name);
+- } else {
+- finpll_f = clk_get_rate(clk);
++ finpll_f = 24000000;
+ }
+
+ fclk.id = CLK_FIN_PLL;
+--
+2.45.2
+
--- /dev/null
+From bf6acd5d16057d7accbbb1bf7dc6d8c56eeb4ecc Mon Sep 17 00:00:00 2001
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Wed, 17 Jul 2024 17:20:16 +0100
+Subject: decompress_bunzip2: fix rare decompression failure
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+commit bf6acd5d16057d7accbbb1bf7dc6d8c56eeb4ecc upstream.
+
+The decompression code parses a huffman tree and counts the number of
+symbols for a given bit length. In rare cases, there may be >= 256
+symbols with a given bit length, causing the unsigned char to overflow.
+This causes a decompression failure later when the code tries and fails to
+find the bit length for a given symbol.
+
+Since the maximum number of symbols is 258, use unsigned short instead.
+
+Link: https://lkml.kernel.org/r/20240717162016.1514077-1-ross.lagerwall@citrix.com
+Fixes: bc22c17e12c1 ("bzip2/lzma: library support for gzip, bzip2 and lzma decompression")
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Cc: Alain Knaff <alain@knaff.lu>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/decompress_bunzip2.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bu
+ RUNB) */
+ symCount = symTotal+2;
+ for (j = 0; j < groupCount; j++) {
+- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
++ unsigned char length[MAX_SYMBOLS];
++ unsigned short temp[MAX_HUFCODE_BITS+1];
+ int minLen, maxLen, pp;
+ /* Read Huffman code lengths for each symbol. They're
+ stored in a way similar to mtf; record a starting
--- /dev/null
+From c884e3249f753dcef7a2b2023541ac1dc46b318e Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Tue, 2 Jul 2024 22:51:50 +0800
+Subject: devres: Fix devm_krealloc() wasting memory
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit c884e3249f753dcef7a2b2023541ac1dc46b318e upstream.
+
+Driver API devm_krealloc() calls alloc_dr() with the wrong argument
+@total_new_size, causing more memory to be allocated than required.
+Fix this memory waste by using @new_size as the argument for alloc_dr().
+
+Fixes: f82485722e5d ("devres: provide devm_krealloc()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/r/1719931914-19035-2-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/devres.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -896,9 +896,12 @@ void *devm_krealloc(struct device *dev,
+ /*
+ * Otherwise: allocate new, larger chunk. We need to allocate before
+ * taking the lock as most probably the caller uses GFP_KERNEL.
++ * alloc_dr() will call check_dr_size() to reserve extra memory
++ * for struct devres automatically, so size @new_size user request
++ * is delivered to it directly as devm_kmalloc() does.
+ */
+ new_dr = alloc_dr(devm_kmalloc_release,
+- total_new_size, gfp, dev_to_node(dev));
++ new_size, gfp, dev_to_node(dev));
+ if (!new_dr)
+ return NULL;
+
--- /dev/null
+From bd50a974097bb82d52a458bd3ee39fb723129a0c Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Tue, 2 Jul 2024 22:51:51 +0800
+Subject: devres: Fix memory leakage caused by driver API devm_free_percpu()
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit bd50a974097bb82d52a458bd3ee39fb723129a0c upstream.
+
+Memory leakage occurs when using the driver API devm_free_percpu()
+to free memory allocated by devm_alloc_percpu(); fix this by using
+devres_release() instead of devres_destroy() within devm_free_percpu().
+
+Fixes: ff86aae3b411 ("devres: add devm_alloc_percpu()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/r/1719931914-19035-3-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/devres.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -1225,7 +1225,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+ */
+ void devm_free_percpu(struct device *dev, void __percpu *pdata)
+ {
+- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
++ /*
++ * Use devres_release() to prevent memory leakage as
++ * devm_free_pages() does.
++ */
++ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
+ (__force void *)pdata));
+ }
+ EXPORT_SYMBOL_GPL(devm_free_percpu);
--- /dev/null
+From 3d83abcae6e8fa6698f6b0a026ca650302bdbfd8 Mon Sep 17 00:00:00 2001
+From: Yijie Yang <quic_yijiyang@quicinc.com>
+Date: Mon, 24 Jun 2024 10:19:16 +0800
+Subject: dt-bindings: phy: qcom,qmp-usb: fix spelling error
+
+From: Yijie Yang <quic_yijiyang@quicinc.com>
+
+commit 3d83abcae6e8fa6698f6b0a026ca650302bdbfd8 upstream.
+
+Correct the spelling error, changing 'com' to 'qcom'.
+
+Cc: stable@vger.kernel.org
+Fixes: f75a4b3a6efc ("dt-bindings: phy: qcom,qmp-usb: Add QDU1000 USB3 PHY")
+Signed-off-by: Yijie Yang <quic_yijiyang@quicinc.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20240624021916.2033062-1-quic_yijiyang@quicinc.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+@@ -20,7 +20,7 @@ properties:
+ - qcom,ipq8074-qmp-usb3-phy
+ - qcom,ipq9574-qmp-usb3-phy
+ - qcom,msm8996-qmp-usb3-phy
+- - com,qdu1000-qmp-usb3-uni-phy
++ - qcom,qdu1000-qmp-usb3-uni-phy
+ - qcom,sa8775p-qmp-usb3-uni-phy
+ - qcom,sc8280xp-qmp-usb3-uni-phy
+ - qcom,sdm845-qmp-usb3-uni-phy
--- /dev/null
+From 36e3b949e35964e22b9a57f960660fc599038dd4 Mon Sep 17 00:00:00 2001
+From: Bailey Forrest <bcf@google.com>
+Date: Wed, 24 Jul 2024 07:34:31 -0700
+Subject: gve: Fix an edge case for TSO skb validity check
+
+From: Bailey Forrest <bcf@google.com>
+
+commit 36e3b949e35964e22b9a57f960660fc599038dd4 upstream.
+
+The NIC requires each TSO segment to not span more than 10
+descriptors. NIC further requires each descriptor to not exceed
+16KB - 1 (GVE_TX_MAX_BUF_SIZE_DQO).
+
+The descriptors for an skb are generated by
+gve_tx_add_skb_no_copy_dqo() for DQO RDA queue format.
+gve_tx_add_skb_no_copy_dqo() loops through each skb frag and
+generates a descriptor for the entire frag if the frag size is
+not greater than GVE_TX_MAX_BUF_SIZE_DQO. If the frag size is
+greater than GVE_TX_MAX_BUF_SIZE_DQO, it is split into descriptor(s)
+of size GVE_TX_MAX_BUF_SIZE_DQO and a descriptor is generated for
+the remainder (frag size % GVE_TX_MAX_BUF_SIZE_DQO).
+
+gve_can_send_tso() checks if the descriptors thus generated for an
+skb would meet the requirement that each TSO-segment not span more
+than 10 descriptors. However, the current code misses an edge case
+when a TSO segment spans multiple descriptors within a large frag.
+This change fixes the edge case.
+
+gve_can_send_tso() relies on the assumption that max gso size (9728)
+is less than GVE_TX_MAX_BUF_SIZE_DQO and therefore within an skb
+fragment a TSO segment can never span more than 2 descriptors.
+
+Fixes: a57e5de476be ("gve: DQO: Add TX path")
+Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Signed-off-by: Bailey Forrest <bcf@google.com>
+Reviewed-by: Jeroen de Borst <jeroendb@google.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20240724143431.3343722-1-pkaligineedi@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/google/gve/gve_tx_dqo.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -866,22 +866,42 @@ static bool gve_can_send_tso(const struc
+ const int header_len = skb_tcp_all_headers(skb);
+ const int gso_size = shinfo->gso_size;
+ int cur_seg_num_bufs;
++ int prev_frag_size;
+ int cur_seg_size;
+ int i;
+
+ cur_seg_size = skb_headlen(skb) - header_len;
++ prev_frag_size = skb_headlen(skb);
+ cur_seg_num_bufs = cur_seg_size > 0;
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ if (cur_seg_size >= gso_size) {
+ cur_seg_size %= gso_size;
+ cur_seg_num_bufs = cur_seg_size > 0;
++
++ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
++ int prev_frag_remain = prev_frag_size %
++ GVE_TX_MAX_BUF_SIZE_DQO;
++
++ /* If the last descriptor of the previous frag
++ * is less than cur_seg_size, the segment will
++ * span two descriptors in the previous frag.
++ * Since max gso size (9728) is less than
++ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
++ * for the segment to span more than two
++ * descriptors.
++ */
++ if (prev_frag_remain &&
++ cur_seg_size > prev_frag_remain)
++ cur_seg_num_bufs++;
++ }
+ }
+
+ if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+ return false;
+
+- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
++ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
++ cur_seg_size += prev_frag_size;
+ }
+
+ return true;
--- /dev/null
+From 6ebbe97a488179f5dc85f2f1e0c89b486e99ee97 Mon Sep 17 00:00:00 2001
+From: Ahmed Zaki <ahmed.zaki@intel.com>
+Date: Fri, 14 Jun 2024 07:18:42 -0600
+Subject: ice: Add a per-VF limit on number of FDIR filters
+
+From: Ahmed Zaki <ahmed.zaki@intel.com>
+
+commit 6ebbe97a488179f5dc85f2f1e0c89b486e99ee97 upstream.
+
+While the iavf driver adds a s/w limit (128) on the number of FDIR
+filters that the VF can request, a malicious VF driver can request more
+than that and exhaust the resources for other VFs.
+
+Add a similar limit in ice.
+
+CC: stable@vger.kernel.org
+Fixes: 1f7ea1cd6a37 ("ice: Enable FDIR Configure for AVF")
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Suggested-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 2 +-
+ drivers/net/ethernet/intel/ice/ice_fdir.h | 3 +++
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 16 ++++++++++++++++
+ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 1 +
+ 4 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+@@ -534,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethto
+ *
+ * Returns the number of available flow director filters to this VSI
+ */
+-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+ {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ u16 num_guar;
+--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
+@@ -207,6 +207,8 @@ struct ice_fdir_base_pkt {
+ const u8 *tun_pkt;
+ };
+
++struct ice_vsi;
++
+ int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+ int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+ int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+@@ -218,6 +220,7 @@ int
+ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun);
+ int ice_get_fdir_cnt_all(struct ice_hw *hw);
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
+ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+ bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+ struct ice_fdir_fltr *
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -536,6 +536,8 @@ static void ice_vc_fdir_reset_cnt_all(st
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
++
++ fdir->fdir_fltr_cnt_total = 0;
+ }
+
+ /**
+@@ -1560,6 +1562,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf
+ resp->status = status;
+ resp->flow_id = conf->flow_id;
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
++ vf->fdir.fdir_fltr_cnt_total++;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+@@ -1624,6 +1627,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
++ vf->fdir.fdir_fltr_cnt_total--;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+@@ -1790,6 +1794,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *
+ struct virtchnl_fdir_add *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
++ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+@@ -1798,6 +1803,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
++ vf_vsi = ice_get_vf_vsi(vf);
++
++#define ICE_VF_MAX_FDIR_FILTERS 128
++ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
++ vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++ dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
++ vf->vf_id);
++ goto err_exit;
++ }
++
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
+ struct ice_vf_fdir {
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
++ u16 fdir_fltr_cnt_total;
+ struct ice_fd_hw_prof **fdir_prof;
+
+ struct idr fdir_rule_idr;
--- /dev/null
+From 33b1c47d1fc0b5f06a393bb915db85baacba18ea Mon Sep 17 00:00:00 2001
+From: Shenwei Wang <shenwei.wang@nxp.com>
+Date: Wed, 3 Jul 2024 11:32:50 -0500
+Subject: irqchip/imx-irqsteer: Handle runtime power management correctly
+
+From: Shenwei Wang <shenwei.wang@nxp.com>
+
+commit 33b1c47d1fc0b5f06a393bb915db85baacba18ea upstream.
+
+The power domain is automatically activated from clk_prepare(). However, on
+certain platforms like i.MX8QM and i.MX8QXP, the power-on handling invokes
+sleeping functions, which triggers the 'scheduling while atomic' bug in the
+context switch path during device probing:
+
+ BUG: scheduling while atomic: kworker/u13:1/48/0x00000002
+ Call trace:
+ __schedule_bug+0x54/0x6c
+ __schedule+0x7f0/0xa94
+ schedule+0x5c/0xc4
+ schedule_preempt_disabled+0x24/0x40
+ __mutex_lock.constprop.0+0x2c0/0x540
+ __mutex_lock_slowpath+0x14/0x20
+ mutex_lock+0x48/0x54
+ clk_prepare_lock+0x44/0xa0
+ clk_prepare+0x20/0x44
+ imx_irqsteer_resume+0x28/0xe0
+ pm_generic_runtime_resume+0x2c/0x44
+ __genpd_runtime_resume+0x30/0x80
+ genpd_runtime_resume+0xc8/0x2c0
+ __rpm_callback+0x48/0x1d8
+ rpm_callback+0x6c/0x78
+ rpm_resume+0x490/0x6b4
+ __pm_runtime_resume+0x50/0x94
+ irq_chip_pm_get+0x2c/0xa0
+ __irq_do_set_handler+0x178/0x24c
+ irq_set_chained_handler_and_data+0x60/0xa4
+ mxc_gpio_probe+0x160/0x4b0
+
+Cure this by implementing the irq_bus_lock/sync_unlock() interrupt chip
+callbacks and handle power management in them as they are invoked from
+non-atomic context.
+
+[ tglx: Rewrote change log, added Fixes tag ]
+
+Fixes: 0136afa08967 ("irqchip: Add driver for imx-irqsteer controller")
+Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240703163250.47887-1-shenwei.wang@nxp.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-imx-irqsteer.c | 24 +++++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/drivers/irqchip/irq-imx-irqsteer.c
++++ b/drivers/irqchip/irq-imx-irqsteer.c
+@@ -36,6 +36,7 @@ struct irqsteer_data {
+ int channel;
+ struct irq_domain *domain;
+ u32 *saved_reg;
++ struct device *dev;
+ };
+
+ static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
+@@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+
++static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
++{
++ struct irqsteer_data *data = d->chip_data;
++
++ pm_runtime_get_sync(data->dev);
++}
++
++static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
++{
++ struct irqsteer_data *data = d->chip_data;
++
++ pm_runtime_put_autosuspend(data->dev);
++}
++
+ static const struct irq_chip imx_irqsteer_irq_chip = {
+- .name = "irqsteer",
+- .irq_mask = imx_irqsteer_irq_mask,
+- .irq_unmask = imx_irqsteer_irq_unmask,
++ .name = "irqsteer",
++ .irq_mask = imx_irqsteer_irq_mask,
++ .irq_unmask = imx_irqsteer_irq_unmask,
++ .irq_bus_lock = imx_irqsteer_irq_bus_lock,
++ .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock,
+ };
+
+ static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
+@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct pla
+ if (!data)
+ return -ENOMEM;
+
++ data->dev = &pdev->dev;
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
--- /dev/null
+From 6ce3e98184b625d2870991880bf9586ded7ea7f9 Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Fri, 14 Jun 2024 19:32:04 +0200
+Subject: irqdomain: Fixed unbalanced fwnode get and put
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit 6ce3e98184b625d2870991880bf9586ded7ea7f9 upstream.
+
+fwnode_handle_get(fwnode) is called when a domain is created with fwnode
+passed as a function parameter. fwnode_handle_put(domain->fwnode) is called
+when the domain is destroyed but during the creation a path exists that
+does not set domain->fwnode.
+
+If this path is taken, the fwnode get will never be put.
+
+To avoid the unbalanced get and put, set domain->fwnode unconditionally.
+
+Fixes: d59f6617eef0 ("genirq: Allow fwnode to carry name information only")
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240614173232.1184015-4-herve.codina@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/irqdomain.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -155,7 +155,6 @@ static struct irq_domain *__irq_domain_c
+ switch (fwid->type) {
+ case IRQCHIP_FWNODE_NAMED:
+ case IRQCHIP_FWNODE_NAMED_ID:
+- domain->fwnode = fwnode;
+ domain->name = kstrdup(fwid->name, GFP_KERNEL);
+ if (!domain->name) {
+ kfree(domain);
+@@ -164,7 +163,6 @@ static struct irq_domain *__irq_domain_c
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ break;
+ default:
+- domain->fwnode = fwnode;
+ domain->name = fwid->name;
+ break;
+ }
+@@ -184,7 +182,6 @@ static struct irq_domain *__irq_domain_c
+ }
+
+ domain->name = strreplace(name, '/', ':');
+- domain->fwnode = fwnode;
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ }
+
+@@ -200,8 +197,8 @@ static struct irq_domain *__irq_domain_c
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ }
+
+- fwnode_handle_get(fwnode);
+- fwnode_dev_initialized(fwnode, true);
++ domain->fwnode = fwnode_handle_get(fwnode);
++ fwnode_dev_initialized(domain->fwnode, true);
+
+ /* Fill structure */
+ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
--- /dev/null
+From 3415b10a03945b0da4a635e146750dfe5ce0f448 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Fri, 26 Jul 2024 11:05:00 -0700
+Subject: kbuild: Fix '-S -c' in x86 stack protector scripts
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 3415b10a03945b0da4a635e146750dfe5ce0f448 upstream.
+
+After a recent change in clang to stop consuming all instances of '-S'
+and '-c' [1], the stack protector scripts break due to the kernel's use
+of -Werror=unused-command-line-argument to catch cases where flags are
+not being properly consumed by the compiler driver:
+
+ $ echo | clang -o - -x c - -S -c -Werror=unused-command-line-argument
+ clang: error: argument unused during compilation: '-c' [-Werror,-Wunused-command-line-argument]
+
+This results in CONFIG_STACKPROTECTOR getting disabled because
+CONFIG_CC_HAS_SANE_STACKPROTECTOR is no longer set.
+
+'-c' and '-S' both instruct the compiler to stop at different stages of
+the pipeline ('-S' after compiling, '-c' after assembling), so having
+them present together in the same command makes little sense. In this
+case, the test wants to stop before assembling because it is looking at
+the textual assembly output of the compiler for either '%fs' or '%gs',
+so remove '-c' from the list of arguments to resolve the error.
+
+All versions of GCC continue to work after this change, along with
+versions of clang that do or do not contain the change mentioned above.
+
+Cc: stable@vger.kernel.org
+Fixes: 4f7fd4d7a791 ("[PATCH] Add the -fstack-protector option to the CFLAGS")
+Fixes: 60a5317ff0f4 ("x86: implement x86_32 stack protector")
+Link: https://github.com/llvm/llvm-project/commit/6461e537815f7fa68cef06842505353cf5600e9c [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/gcc-x86_32-has-stack-protector.sh | 2 +-
+ scripts/gcc-x86_64-has-stack-protector.sh | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -5,4 +5,4 @@
+ # -mstack-protector-guard-reg, added by
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,4 +1,4 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
--- /dev/null
+From dd6e9894b451e7c85cceb8e9dc5432679a70e7dc Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Thu, 30 May 2024 21:14:37 +0800
+Subject: kobject_uevent: Fix OOB access within zap_modalias_env()
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit dd6e9894b451e7c85cceb8e9dc5432679a70e7dc upstream.
+
+zap_modalias_env() wrongly calculates the size of the memory block to
+move, which causes an OOB memory access if the variable MODALIAS is not
+the last one in its @env parameter. Fix by passing the correct size to memmove.
+
+Fixes: 9b3fa47d4a76 ("kobject: fix suppressing modalias in uevents delivered over netlink")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Reviewed-by: Lk Sii <lk_sii@163.com>
+Link: https://lore.kernel.org/r/1717074877-11352-1-git-send-email-quic_zijuhu@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/kobject_uevent.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -433,8 +433,23 @@ static void zap_modalias_env(struct kobj
+ len = strlen(env->envp[i]) + 1;
+
+ if (i != env->envp_idx - 1) {
++ /* @env->envp[] contains pointers to @env->buf[]
++ * with @env->buflen chars, and we are removing
++ * variable MODALIAS here pointed by @env->envp[i]
++ * with length @len as shown below:
++ *
++ * 0 @env->buf[] @env->buflen
++ * ---------------------------------------------
++ * ^ ^ ^ ^
++ * | |-> @len <-| target block |
++ * @env->envp[0] @env->envp[i] @env->envp[i + 1]
++ *
++ * so the "target block" indicated above is moved
++ * backward by @len, and its right size is
++ * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
++ */
+ memmove(env->envp[i], env->envp[i + 1],
+- env->buflen - len);
++ env->buflen - (env->envp[i + 1] - env->envp[0]));
+
+ for (j = i; j < env->envp_idx - 1; j++)
+ env->envp[j] = env->envp[j + 1] - len;
--- /dev/null
+From 36a5c03f232719eb4e2d925f4d584e09cfaf372c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Mateusz=20Jo=C5=84czyk?= <mat.jonczyk@o2.pl>
+Date: Thu, 11 Jul 2024 22:23:16 +0200
+Subject: md/raid1: set max_sectors during early return from choose_slow_rdev()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mateusz Jończyk <mat.jonczyk@o2.pl>
+
+commit 36a5c03f232719eb4e2d925f4d584e09cfaf372c upstream.
+
+Linux 6.9+ is unable to start a degraded RAID1 array with one drive,
+when that drive has a write-mostly flag set. During such an attempt,
+the following assertion in bio_split() is hit:
+
+ BUG_ON(sectors <= 0);
+
+Call Trace:
+ ? bio_split+0x96/0xb0
+ ? exc_invalid_op+0x53/0x70
+ ? bio_split+0x96/0xb0
+ ? asm_exc_invalid_op+0x1b/0x20
+ ? bio_split+0x96/0xb0
+ ? raid1_read_request+0x890/0xd20
+ ? __call_rcu_common.constprop.0+0x97/0x260
+ raid1_make_request+0x81/0xce0
+ ? __get_random_u32_below+0x17/0x70
+ ? new_slab+0x2b3/0x580
+ md_handle_request+0x77/0x210
+ md_submit_bio+0x62/0xa0
+ __submit_bio+0x17b/0x230
+ submit_bio_noacct_nocheck+0x18e/0x3c0
+ submit_bio_noacct+0x244/0x670
+
+After investigation, it turned out that choose_slow_rdev() does not set
+the value of max_sectors in some cases and because of it,
+raid1_read_request calls bio_split with sectors == 0.
+
+Fix it by filling in this variable.
+
+This bug was introduced in
+commit dfa8ecd167c1 ("md/raid1: factor out choose_slow_rdev() from read_balance()")
+but apparently hidden until
+commit 0091c5a269ec ("md/raid1: factor out helpers to choose the best rdev from read_balance()")
+shortly thereafter.
+
+Cc: stable@vger.kernel.org # 6.9.x+
+Signed-off-by: Mateusz Jończyk <mat.jonczyk@o2.pl>
+Fixes: dfa8ecd167c1 ("md/raid1: factor out choose_slow_rdev() from read_balance()")
+Cc: Song Liu <song@kernel.org>
+Cc: Yu Kuai <yukuai3@huawei.com>
+Cc: Paul Luse <paul.e.luse@linux.intel.com>
+Cc: Xiao Ni <xni@redhat.com>
+Cc: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Link: https://lore.kernel.org/linux-raid/20240706143038.7253-1-mat.jonczyk@o2.pl/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+--
+
+Tested on both Linux 6.10 and 6.9.8.
+
+Inside a VM, mdadm testsuite for RAID1 on 6.10 did not find any problems:
+ ./test --dev=loop --no-error --raidtype=raid1
+(on 6.9.8 there was one failure, caused by external bitmap support not
+compiled in).
+
+Notes:
+- I was reliably getting deadlocks when adding / removing devices
+ on such an array - while the array was loaded with fsstress with 20
+ concurrent processes. When the array was idle or loaded with fsstress
+ with 8 processes, no such deadlocks happened in my tests.
+ This occurred also on unpatched Linux 6.8.0 though, but not on
+ 6.1.97-rc1, so this is likely an independent regression (to be
+ investigated).
+- I was also getting deadlocks when adding / removing the bitmap on the
+ array in similar conditions - this happened on Linux 6.1.97-rc1
+ also though. fsstress with 8 concurrent processes did cause it only
+ once during many tests.
+- in my testing, there was once a problem with hot adding an
+ internal bitmap to the array:
+ mdadm: Cannot add bitmap while array is resyncing or reshaping etc.
+ mdadm: failed to set internal bitmap.
+ even though no such reshaping was happening according to /proc/mdstat.
+ This seems unrelated, though.
+
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20240711202316.10775-1-mat.jonczyk@o2.pl
+---
+ drivers/md/raid1.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -680,6 +680,7 @@ static int choose_slow_rdev(struct r1con
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len == r1_bio->sectors) {
++ *max_sectors = read_len;
+ update_read_sectors(conf, disk, this_sector, read_len);
+ return disk;
+ }
--- /dev/null
+From 4cd7ba16a0afb36550eed7690e73d3e7a743fa96 Mon Sep 17 00:00:00 2001
+From: Ram Tummala <rtummala@nvidia.com>
+Date: Tue, 9 Jul 2024 18:45:39 -0700
+Subject: mm: fix old/young bit handling in the faulting path
+
+From: Ram Tummala <rtummala@nvidia.com>
+
+commit 4cd7ba16a0afb36550eed7690e73d3e7a743fa96 upstream.
+
+Commit 3bd786f76de2 ("mm: convert do_set_pte() to set_pte_range()")
+replaced do_set_pte() with set_pte_range() and that introduced a
+regression in the following faulting path of non-anonymous vmas which
+caused the PTE for the faulting address to be marked as old instead of
+young.
+
+handle_pte_fault()
+ do_pte_missing()
+ do_fault()
+ do_read_fault() || do_cow_fault() || do_shared_fault()
+ finish_fault()
+ set_pte_range()
+
+The polarity of prefault calculation is incorrect. This leads to prefault
+being incorrectly set for the faulting address. The following check will
+incorrectly mark the PTE old rather than young. On some architectures
+this will cause a double fault to mark it young when the access is
+retried.
+
+ if (prefault && arch_wants_old_prefaulted_pte())
+ entry = pte_mkold(entry);
+
+On a subsequent fault on the same address, the faulting path will see a
+non NULL vmf->pte and instead of reaching the do_pte_missing() path, PTE
+will then be correctly marked young in handle_pte_fault() itself.
+
+Due to this bug, performance degradation in the fault handling path will
+be observed due to unnecessary double faulting.
+
+Link: https://lkml.kernel.org/r/20240710014539.746200-1-rtummala@nvidia.com
+Fixes: 3bd786f76de2 ("mm: convert do_set_pte() to set_pte_range()")
+Signed-off-by: Ram Tummala <rtummala@nvidia.com>
+Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4681,7 +4681,7 @@ void set_pte_range(struct vm_fault *vmf,
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+- bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
++ bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
+ pte_t entry;
+
+ flush_icache_pages(vma, page, nr);
--- /dev/null
+From af649773fb25250cd22625af021fb6275c56a3ee Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Mon, 8 Jul 2024 08:56:32 +0100
+Subject: mm/numa_balancing: teach mpol_to_str about the balancing mode
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit af649773fb25250cd22625af021fb6275c56a3ee upstream.
+
+Since balancing mode was added in bda420b98505 ("numa balancing: migrate
+on fault among multiple bound nodes"), it was possible to set this mode
+but it wouldn't be shown in /proc/<pid>/numa_maps since there was no
+support for it in the mpol_to_str() helper.
+
+Furthermore, because the balancing mode sets the MPOL_F_MORON flag, it
+would be displayed as 'default' due to a workaround introduced a few years
+earlier in 8790c71a18e5 ("mm/mempolicy.c: fix mempolicy printing in
+numa_maps").
+
+To tidy this up we implement two changes:
+
+Replace the MPOL_F_MORON check by pointer comparison against the
+preferred_node_policy array. By doing this we generalise the current
+special casing and replace the incorrect 'default' with the correct 'bind'
+for the mode.
+
+Secondly, we add a string representation and corresponding handling for
+the MPOL_F_NUMA_BALANCING flag.
+
+With the two changes together we start showing the balancing flag when it
+is set and therefore complete the fix.
+
+Representation format chosen is to separate multiple flags with vertical
+bars, following what existed long time ago in kernel 2.6.25. But as
+between then and now there wasn't a way to display multiple flags, this
+patch does not change the format in practice.
+
+Some /proc/<pid>/numa_maps output examples:
+
+ 555559580000 bind=balancing:0-1,3 file=...
+ 555585800000 bind=balancing|static:0,2 file=...
+ 555635240000 prefer=relative:0 file=
+
+Link: https://lkml.kernel.org/r/20240708075632.95857-1-tursulin@igalia.com
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: bda420b98505 ("numa balancing: migrate on fault among multiple bound nodes")
+References: 8790c71a18e5 ("mm/mempolicy.c: fix mempolicy printing in numa_maps")
+Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: <stable@vger.kernel.org> [5.12+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mempolicy.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -3293,8 +3293,9 @@ out:
+ * @pol: pointer to mempolicy to be formatted
+ *
+ * Convert @pol into a string. If @buffer is too short, truncate the string.
+- * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
+- * longest flag, "relative", and to display at least a few node ids.
++ * Recommend a @maxlen of at least 51 for the longest mode, "weighted
++ * interleave", plus the longest flag flags, "relative|balancing", and to
++ * display at least a few node ids.
+ */
+ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ {
+@@ -3303,7 +3304,10 @@ void mpol_to_str(char *buffer, int maxle
+ unsigned short mode = MPOL_DEFAULT;
+ unsigned short flags = 0;
+
+- if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
++ if (pol &&
++ pol != &default_policy &&
++ !(pol >= &preferred_node_policy[0] &&
++ pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
+ mode = pol->mode;
+ flags = pol->flags;
+ }
+@@ -3331,12 +3335,18 @@ void mpol_to_str(char *buffer, int maxle
+ p += snprintf(p, buffer + maxlen - p, "=");
+
+ /*
+- * Currently, the only defined flags are mutually exclusive
++ * Static and relative are mutually exclusive.
+ */
+ if (flags & MPOL_F_STATIC_NODES)
+ p += snprintf(p, buffer + maxlen - p, "static");
+ else if (flags & MPOL_F_RELATIVE_NODES)
+ p += snprintf(p, buffer + maxlen - p, "relative");
++
++ if (flags & MPOL_F_NUMA_BALANCING) {
++ if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
++ p += snprintf(p, buffer + maxlen - p, "|");
++ p += snprintf(p, buffer + maxlen - p, "balancing");
++ }
+ }
+
+ if (!nodes_empty(nodes))
--- /dev/null
+From 1c184baccf0d5e2ef4cc1562261d0e48508a1c2b Mon Sep 17 00:00:00 2001
+From: Joy Chakraborty <joychakr@google.com>
+Date: Wed, 12 Jun 2024 08:36:35 +0000
+Subject: rtc: cmos: Fix return value of nvmem callbacks
+
+From: Joy Chakraborty <joychakr@google.com>
+
+commit 1c184baccf0d5e2ef4cc1562261d0e48508a1c2b upstream.
+
+Read/write callbacks registered with nvmem core expect 0 to be returned
+on success and a negative value to be returned on failure.
+
+cmos_nvram_read()/cmos_nvram_write() currently return the number of
+bytes read or written, fix to return 0 on success and -EIO incase number
+of bytes requested was not read or written.
+
+Fixes: 8b5b7958fd1c ("rtc: cmos: use generic nvmem")
+Cc: stable@vger.kernel.org
+Signed-off-by: Joy Chakraborty <joychakr@google.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/20240612083635.1253039-1-joychakr@google.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rtc/rtc-cmos.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -643,11 +643,10 @@ static int cmos_nvram_read(void *priv, u
+ size_t count)
+ {
+ unsigned char *buf = val;
+- int retval;
+
+ off += NVRAM_OFFSET;
+ spin_lock_irq(&rtc_lock);
+- for (retval = 0; count; count--, off++, retval++) {
++ for (; count; count--, off++) {
+ if (off < 128)
+ *buf++ = CMOS_READ(off);
+ else if (can_bank2)
+@@ -657,7 +656,7 @@ static int cmos_nvram_read(void *priv, u
+ }
+ spin_unlock_irq(&rtc_lock);
+
+- return retval;
++ return count ? -EIO : 0;
+ }
+
+ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+@@ -665,7 +664,6 @@ static int cmos_nvram_write(void *priv,
+ {
+ struct cmos_rtc *cmos = priv;
+ unsigned char *buf = val;
+- int retval;
+
+ /* NOTE: on at least PCs and Ataris, the boot firmware uses a
+ * checksum on part of the NVRAM data. That's currently ignored
+@@ -674,7 +672,7 @@ static int cmos_nvram_write(void *priv,
+ */
+ off += NVRAM_OFFSET;
+ spin_lock_irq(&rtc_lock);
+- for (retval = 0; count; count--, off++, retval++) {
++ for (; count; count--, off++) {
+ /* don't trash RTC registers */
+ if (off == cmos->day_alrm
+ || off == cmos->mon_alrm
+@@ -689,7 +687,7 @@ static int cmos_nvram_write(void *priv,
+ }
+ spin_unlock_irq(&rtc_lock);
+
+- return retval;
++ return count ? -EIO : 0;
+ }
+
+ /*----------------------------------------------------------------*/
f2fs-use-meta-inode-for-gc-of-cow-file.patch
dmaengine-fsl-edma-change-the-memory-access-from-local-into-remote-mode-in-i.mx-8qm.patch
nilfs2-handle-inconsistent-state-in-nilfs_btnode_create_block.patch
+clk-davinci-da8xx-cfgchip-initialize-clk_init_data-before-use.patch
+ubi-eba-properly-rollback-inside-self_check_eba.patch
+clk-samsung-fix-getting-exynos4-fin_pll-rate-from-external-clocks.patch
+block-fix-deadlock-between-sd_remove-sd_release.patch
+mm-fix-old-young-bit-handling-in-the-faulting-path.patch
+decompress_bunzip2-fix-rare-decompression-failure.patch
+kbuild-fix-s-c-in-x86-stack-protector-scripts.patch
+alloc_tag-outline-and-export-free_reserved_page.patch
+asoc-sof-ipc4-topology-only-handle-dai_config-with-hw_params-for-chaindma.patch
+asoc-sof-ipc4-topology-preserve-the-dma-link-id-for-chaindma-on-unprepare.patch
+asoc-amd-yc-support-mic-on-lenovo-thinkpad-e16-gen-2.patch
+arm64-mm-fix-lockless-walks-with-static-and-dynamic-page-table-folding.patch
+kobject_uevent-fix-oob-access-within-zap_modalias_env.patch
+gve-fix-an-edge-case-for-tso-skb-validity-check.patch
+ice-add-a-per-vf-limit-on-number-of-fdir-filters.patch
+dt-bindings-phy-qcom-qmp-usb-fix-spelling-error.patch
+devres-fix-devm_krealloc-wasting-memory.patch
+devres-fix-memory-leakage-caused-by-driver-api-devm_free_percpu.patch
+irqdomain-fixed-unbalanced-fwnode-get-and-put.patch
+md-raid1-set-max_sectors-during-early-return-from-choose_slow_rdev.patch
+irqchip-imx-irqsteer-handle-runtime-power-management-correctly.patch
+mm-numa_balancing-teach-mpol_to_str-about-the-balancing-mode.patch
+rtc-cmos-fix-return-value-of-nvmem-callbacks.patch
--- /dev/null
+From 745d9f4a31defec731119ee8aad8ba9f2536dd9a Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Thu, 29 Feb 2024 23:42:36 +0300
+Subject: ubi: eba: properly rollback inside self_check_eba
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit 745d9f4a31defec731119ee8aad8ba9f2536dd9a upstream.
+
+In case of a memory allocation failure in the volumes loop we can only
+process the already allocated scan_eba and fm_eba array elements on the
+error path - others are still uninitialized.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 00abf3041590 ("UBI: Add self_check_eba()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/ubi/eba.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1564,6 +1564,7 @@ int self_check_eba(struct ubi_device *ub
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
++ kfree(scan_eba[i]);
+ goto out_free;
+ }
+
+@@ -1599,7 +1600,7 @@ int self_check_eba(struct ubi_device *ub
+ }
+
+ out_free:
+- for (i = 0; i < num_volumes; i++) {
++ while (--i >= 0) {
+ if (!ubi->volumes[i])
+ continue;
+