--- /dev/null
+From 6675e76a5c441b52b1b983ebb714122087020ebe Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 7 Aug 2024 19:02:27 +0200
+Subject: ASoC: amd: yc: Add quirk entry for OMEN by HP Gaming Laptop 16-n0xxx
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 6675e76a5c441b52b1b983ebb714122087020ebe upstream.
+
+Fix the missing mic on OMEN by HP Gaming Laptop 16-n0xxx by adding the
+quirk entry with the board ID 8A44.
+
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1227182
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20240807170249.16490-1-tiwai@suse.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/amd/yc/acp6x-mach.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -413,6 +413,13 @@ static const struct dmi_system_id yc_acp
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8A44"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8A22"),
+ }
+ },
--- /dev/null
+From 12653ec36112ab55fa06c01db7c4432653d30a8d Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Fri, 19 Jul 2024 18:56:46 +0930
+Subject: btrfs: avoid using fixed char array size for tree names
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit 12653ec36112ab55fa06c01db7c4432653d30a8d upstream.
+
+[BUG]
+There is a bug report that using the latest trunk GCC 15, btrfs would cause an
+unterminated-string-initialization warning:
+
+ linux-6.6/fs/btrfs/print-tree.c:29:49: error: initializer-string for array of ‘char’ is too long [-Werror=unterminated-string-initialization]
+ 29 | { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
+ |
+ ^~~~~~~~~~~~~~~~~~
+
+[CAUSE]
+To print tree names we have an array of root_name_map structure, which
+uses "char name[16];" to store the name string of a tree.
+
+But the following trees have names exactly at 16 chars length:
+- "BLOCK_GROUP_TREE"
+- "RAID_STRIPE_TREE"
+
+This means we will have no space for the terminating '\0', and can lead
+to unexpected access when printing the name.
+
+[FIX]
+Instead of "char name[16];" use "const char *" instead.
+
+Since the name strings are all read-only data, and are all NULL
+terminated by default, there is not much need to bother the length at
+all.
+
+Reported-by: Sam James <sam@gentoo.org>
+Reported-by: Alejandro Colomar <alx@kernel.org>
+Fixes: edde81f1abf29 ("btrfs: add raid stripe tree pretty printer")
+Fixes: 9c54e80ddc6bd ("btrfs: add code to support the block group root")
+CC: stable@vger.kernel.org # 6.1+
+Suggested-by: Alejandro Colomar <alx@kernel.org>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Alejandro Colomar <alx@kernel.org>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/print-tree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -14,7 +14,7 @@
+
+ struct root_name_map {
+ u64 id;
+- char name[16];
++ const char *name;
+ };
+
+ static const struct root_name_map root_map[] = {
--- /dev/null
+From e82290a2e0e8ec5e836ecad1ca025021b3855c2d Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Mon, 18 Mar 2024 21:39:23 +0100
+Subject: drm/bridge: analogix_dp: properly handle zero sized AUX transactions
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit e82290a2e0e8ec5e836ecad1ca025021b3855c2d upstream.
+
+Address only transactions without any data are valid and should not
+be flagged as short transactions. Simply return the message size when
+no transaction errors occurred.
+
+CC: stable@vger.kernel.org
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Reviewed-by: Robert Foss <rfoss@kernel.org>
+Signed-off-by: Robert Foss <rfoss@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240318203925.2837689-1-l.stach@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+@@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct anal
+ u32 status_reg;
+ u8 *buffer = msg->buffer;
+ unsigned int i;
+- int num_transferred = 0;
+ int ret;
+
+ /* Buffer size of AUX CH is 16 bytes */
+@@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct anal
+ reg = buffer[i];
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 4 * i);
+- num_transferred++;
+ }
+ }
+
+@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct anal
+ reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 4 * i);
+ buffer[i] = (unsigned char)reg;
+- num_transferred++;
+ }
+ }
+
+@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct anal
+ (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+
+- return num_transferred > 0 ? num_transferred : -EBUSY;
++ return msg->size;
+
+ aux_error:
+ /* if aux err happen, reset aux */
--- /dev/null
+From ddf983488c3e8d30d5c2e2b315ae7d9cd87096ed Mon Sep 17 00:00:00 2001
+From: Wayne Lin <Wayne.Lin@amd.com>
+Date: Wed, 26 Jun 2024 16:48:24 +0800
+Subject: drm/dp_mst: Skip CSN if topology probing is not done yet
+
+From: Wayne Lin <Wayne.Lin@amd.com>
+
+commit ddf983488c3e8d30d5c2e2b315ae7d9cd87096ed upstream.
+
+[Why]
+During resume, observe that we receive a CSN event before we start topology
+probing. Handling CSN at this moment based on uncertain topology is
+unnecessary.
+
+[How]
+Add checking condition in drm_dp_mst_handle_up_req() to skip handling CSN
+if the topology is yet to be probed.
+
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Harry Wentland <hwentlan@amd.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: stable@vger.kernel.org
+Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240626084825.878565-3-Wayne.Lin@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_mst_topology.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -4069,6 +4069,7 @@ static int drm_dp_mst_handle_up_req(stru
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
++ bool handle_csn;
+
+ drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+@@ -4077,6 +4078,16 @@ static int drm_dp_mst_handle_up_req(stru
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
++
++ mutex_lock(&mgr->probe_lock);
++ handle_csn = mgr->mst_primary->link_address_sent;
++ mutex_unlock(&mgr->probe_lock);
++
++ if (!handle_csn) {
++ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
++ kfree(up_req);
++ goto out;
++ }
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
--- /dev/null
+From 0c94f58cef319ad054fd909b3bf4b7d09c03e11c Mon Sep 17 00:00:00 2001
+From: Dragan Simic <dsimic@manjaro.org>
+Date: Mon, 17 Jun 2024 22:22:02 +0200
+Subject: drm/lima: Mark simple_ondemand governor as softdep
+
+From: Dragan Simic <dsimic@manjaro.org>
+
+commit 0c94f58cef319ad054fd909b3bf4b7d09c03e11c upstream.
+
+Lima DRM driver uses devfreq to perform DVFS, while using simple_ondemand
+devfreq governor by default. This causes driver initialization to fail on
+boot when simple_ondemand governor isn't built into the kernel statically,
+as a result of the missing module dependency and, consequently, the
+required governor module not being included in the initial ramdisk. Thus,
+let's mark simple_ondemand governor as a softdep for Lima, to have its
+kernel module included in the initial ramdisk.
+
+This is a rather longstanding issue that has forced distributions to build
+devfreq governors statically into their kernels, [1][2] or may have forced
+some users to introduce unnecessary workarounds.
+
+Having simple_ondemand marked as a softdep for Lima may not resolve this
+issue for all Linux distributions. In particular, it will remain
+unresolved for the distributions whose utilities for the initial ramdisk
+generation do not handle the available softdep information [3] properly
+yet. However, some Linux distributions already handle softdeps properly
+while generating their initial ramdisks, [4] and this is a prerequisite
+step in the right direction for the distributions that don't handle them
+properly yet.
+
+[1] https://gitlab.manjaro.org/manjaro-arm/packages/core/linux-pinephone/-/blob/6.7-megi/config?ref_type=heads#L5749
+[2] https://gitlab.com/postmarketOS/pmaports/-/blob/7f64e287e7732c9eaa029653e73ca3d4ba1c8598/main/linux-postmarketos-allwinner/config-postmarketos-allwinner.aarch64#L4654
+[3] https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git/commit/?id=49d8e0b59052999de577ab732b719cfbeb89504d
+[4] https://github.com/archlinux/mkinitcpio/commit/97ac4d37aae084a050be512f6d8f4489054668ad
+
+Cc: Philip Muller <philm@manjaro.org>
+Cc: Oliver Smith <ollieparanoid@postmarketos.org>
+Cc: Daniel Smith <danct12@disroot.org>
+Cc: stable@vger.kernel.org
+Fixes: 1996970773a3 ("drm/lima: Add optional devfreq and cooling device support")
+Signed-off-by: Dragan Simic <dsimic@manjaro.org>
+Signed-off-by: Qiang Yu <yuq825@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/fdaf2e41bb6a0c5118ff9cc21f4f62583208d885.1718655070.git.dsimic@manjaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/lima/lima_drv.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/lima/lima_drv.c
++++ b/drivers/gpu/drm/lima/lima_drv.c
+@@ -501,3 +501,4 @@ module_platform_driver(lima_platform_dri
+ MODULE_AUTHOR("Lima Project Developers");
+ MODULE_DESCRIPTION("Lima DRM Driver");
+ MODULE_LICENSE("GPL v2");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
--- /dev/null
+From eb1ae34e48a09b7a1179c579aed042b032e408f4 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Mon, 13 May 2024 14:51:07 +0200
+Subject: drm/mgag200: Bind I2C lifetime to DRM device
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit eb1ae34e48a09b7a1179c579aed042b032e408f4 upstream.
+
+Managed cleanup with devm_add_action_or_reset() will release the I2C
+adapter when the underlying Linux device goes away. But the connector
+still refers to it, so this cleanup leaves behind a stale pointer
+in struct drm_connector.ddc.
+
+Bind the lifetime of the I2C adapter to the connector's lifetime by
+using DRM's managed release. When the DRM device goes away (after
+the Linux device) DRM will first clean up the connector and then
+clean up the I2C adapter.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
+Fixes: b279df242972 ("drm/mgag200: Switch I2C code to managed cleanup")
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Jocelyn Falempe <jfalempe@redhat.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.0+
+Link: https://patchwork.freedesktop.org/patch/msgid/20240513125620.6337-3-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mgag200/mgag200_i2c.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
++++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
+@@ -31,6 +31,8 @@
+ #include <linux/i2c.h>
+ #include <linux/pci.h>
+
++#include <drm/drm_managed.h>
++
+ #include "mgag200_drv.h"
+
+ static int mga_i2c_read_gpio(struct mga_device *mdev)
+@@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data)
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+ }
+
+-static void mgag200_i2c_release(void *res)
++static void mgag200_i2c_release(struct drm_device *dev, void *res)
+ {
+ struct mga_i2c_chan *i2c = res;
+
+@@ -125,5 +127,5 @@ int mgag200_i2c_init(struct mga_device *
+ if (ret)
+ return ret;
+
+- return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
++ return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c);
+ }
--- /dev/null
+From ecde5db1598aecab54cc392282c15114f526f05f Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Mon, 13 May 2024 14:51:06 +0200
+Subject: drm/mgag200: Set DDC timeout in milliseconds
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit ecde5db1598aecab54cc392282c15114f526f05f upstream.
+
+Compute the i2c timeout in jiffies from a value in milliseconds. The
+original values of 2 jiffies equals 2 milliseconds if HZ has been
+configured to a value of 1000. This corresponds to 2.2 milliseconds
+used by most other DRM drivers. Update mgag200 accordingly.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
+Fixes: 414c45310625 ("mgag200: initial g200se driver (v2)")
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Jocelyn Falempe <jfalempe@redhat.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v3.5+
+Link: https://patchwork.freedesktop.org/patch/msgid/20240513125620.6337-2-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mgag200/mgag200_i2c.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
++++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
+@@ -114,7 +114,7 @@ int mgag200_i2c_init(struct mga_device *
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+- i2c->bit.timeout = 2;
++ i2c->bit.timeout = usecs_to_jiffies(2200);
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
--- /dev/null
+From 2bac084468847cfe5bbc7166082b2a208514bb1c Mon Sep 17 00:00:00 2001
+From: Bill Wendling <morbo@google.com>
+Date: Wed, 29 May 2024 14:54:44 -0700
+Subject: drm/radeon: Remove __counted_by from StateArray.states[]
+
+From: Bill Wendling <morbo@google.com>
+
+commit 2bac084468847cfe5bbc7166082b2a208514bb1c upstream.
+
+Work for __counted_by on generic pointers in structures (not just
+flexible array members) has started landing in Clang 19 (current tip of
+tree). During the development of this feature, a restriction was added
+to __counted_by to prevent the flexible array member's element type from
+including a flexible array member itself such as:
+
+ struct foo {
+ int count;
+ char buf[];
+ };
+
+ struct bar {
+ int count;
+ struct foo data[] __counted_by(count);
+ };
+
+because the size of data cannot be calculated with the standard array
+size formula:
+
+ sizeof(struct foo) * count
+
+This restriction was downgraded to a warning but due to CONFIG_WERROR,
+it can still break the build. The application of __counted_by on the
+states member of 'struct _StateArray' triggers this restriction,
+resulting in:
+
+ drivers/gpu/drm/radeon/pptable.h:442:5: error: 'counted_by' should not be applied to an array with element of unknown size because 'ATOM_PPLIB_STATE_V2' (aka 'struct _ATOM_PPLIB_STATE_V2') is a struct type with a flexible array member. This will be an error in a future compiler version [-Werror,-Wbounds-safety-counted-by-elt-type-unknown-size]
+ 442 | ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 1 error generated.
+
+Remove this use of __counted_by to fix the warning/error. However,
+rather than remove it altogether, leave it commented, as it may be
+possible to support this in future compiler releases.
+
+Cc: stable@vger.kernel.org
+Closes: https://github.com/ClangBuiltLinux/linux/issues/2028
+Fixes: efade6fe50e7 ("drm/radeon: silence UBSAN warning (v3)")
+Signed-off-by: Bill Wendling <morbo@google.com>
+Co-developed-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/pptable.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/pptable.h
++++ b/drivers/gpu/drm/radeon/pptable.h
+@@ -439,7 +439,7 @@ typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+- ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
++ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
+ }StateArray;
+
+
--- /dev/null
+From 12c20c65d0460cf34f9a665d8f0c0d77d45a3829 Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@grsecurity.net>
+Date: Tue, 23 Jul 2024 14:25:21 +0200
+Subject: eventfs: Don't return NULL in eventfs_create_dir()
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+commit 12c20c65d0460cf34f9a665d8f0c0d77d45a3829 upstream.
+
+Commit 77a06c33a22d ("eventfs: Test for ei->is_freed when accessing
+ei->dentry") added another check, testing if the parent was freed after
+we released the mutex. If so, the function returns NULL. However, all
+callers expect it to either return a valid pointer or an error pointer,
+at least since commit 5264a2f4bb3b ("tracing: Fix a NULL vs IS_ERR() bug
+in event_subsystem_dir()"). Returning NULL will therefore fail the error
+condition check in the caller.
+
+Fix this by substituting the NULL return value with a fitting error
+pointer.
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: stable@vger.kernel.org
+Fixes: 77a06c33a22d ("eventfs: Test for ei->is_freed when accessing ei->dentry")
+Link: https://lore.kernel.org/20240723122522.2724-1-minipli@grsecurity.net
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Ajay Kaher <ajay.kaher@broadcom.com>
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/tracefs/event_inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -736,7 +736,7 @@ struct eventfs_inode *eventfs_create_dir
+ /* Was the parent freed? */
+ if (list_empty(&ei->list)) {
+ cleanup_ei(ei);
+- ei = NULL;
++ ei = ERR_PTR(-EBUSY);
+ }
+ return ei;
+ }
--- /dev/null
+From 8e556432477e97ad6179c61b61a32bf5f1af2355 Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@grsecurity.net>
+Date: Tue, 23 Jul 2024 23:07:53 +0200
+Subject: eventfs: Use SRCU for freeing eventfs_inodes
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+commit 8e556432477e97ad6179c61b61a32bf5f1af2355 upstream.
+
+To mirror the SRCU lock held in eventfs_iterate() when iterating over
+eventfs inodes, use call_srcu() to free them too.
+
+This was accidentally(?) degraded to RCU in commit 43aa6f97c2d0
+("eventfs: Get rid of dentry pointers without refcounts").
+
+Cc: Ajay Kaher <ajay.kaher@broadcom.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/20240723210755.8970-1-minipli@grsecurity.net
+Fixes: 43aa6f97c2d0 ("eventfs: Get rid of dentry pointers without refcounts")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/tracefs/event_inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -112,7 +112,7 @@ static void release_ei(struct kref *ref)
+ entry->release(entry->name, ei->data);
+ }
+
+- call_rcu(&ei->rcu, free_ei_rcu);
++ call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu);
+ }
+
+ static inline void put_ei(struct eventfs_inode *ei)
--- /dev/null
+From edbbaae42a56f9a2b39c52ef2504dfb3fb0a7858 Mon Sep 17 00:00:00 2001
+From: Shay Drory <shayd@nvidia.com>
+Date: Tue, 6 Aug 2024 10:20:44 +0300
+Subject: genirq/irqdesc: Honor caller provided affinity in alloc_desc()
+
+From: Shay Drory <shayd@nvidia.com>
+
+commit edbbaae42a56f9a2b39c52ef2504dfb3fb0a7858 upstream.
+
+Currently, whenever a caller is providing an affinity hint for an
+interrupt, the allocation code uses it to calculate the node and copies the
+cpumask into irq_desc::affinity.
+
+If the affinity for the interrupt is not marked 'managed' then the startup
+of the interrupt ignores irq_desc::affinity and uses the system default
+affinity mask.
+
+Prevent this by setting the IRQD_AFFINITY_SET flag for the interrupt in the
+allocator, which causes irq_setup_affinity() to use irq_desc::affinity on
+interrupt startup if the mask contains an online CPU.
+
+[ tglx: Massaged changelog ]
+
+Fixes: 45ddcecbfa94 ("genirq: Use affinity hint in irqdesc allocation")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/all/20240806072044.837827-1-shayd@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/irqdesc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -530,6 +530,7 @@ static int alloc_descs(unsigned int star
+ flags = IRQD_AFFINITY_MANAGED |
+ IRQD_MANAGED_SHUTDOWN;
+ }
++ flags |= IRQD_AFFINITY_SET;
+ mask = &affinity->mask;
+ node = cpu_to_node(cpumask_first(mask));
+ affinity++;
--- /dev/null
+From 03f9885c60adf73488fe32aab628ee3d4a39598e Mon Sep 17 00:00:00 2001
+From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+Date: Fri, 9 Aug 2024 15:10:47 +0800
+Subject: irqchip/riscv-aplic: Retrigger MSI interrupt on source configuration
+
+From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+
+commit 03f9885c60adf73488fe32aab628ee3d4a39598e upstream.
+
+The section 4.5.2 of the RISC-V AIA specification says that "any write
+to a sourcecfg register of an APLIC might (or might not) cause the
+corresponding interrupt-pending bit to be set to one if the rectified
+input value is high (= 1) under the new source mode."
+
+When the interrupt type is changed in the sourcecfg register, the APLIC
+device might not set the corresponding pending bit, so the interrupt might
+never become pending.
+
+To handle sourcecfg register changes for level-triggered interrupts in MSI
+mode, manually set the pending bit for retriggering interrupt so it gets
+retriggered if it was already asserted.
+
+Fixes: ca8df97fe679 ("irqchip/riscv-aplic: Add support for MSI-mode")
+Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Vincent Chen <vincent.chen@sifive.com>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20240809071049.2454-1-yongxuan.wang@sifive.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-riscv-aplic-msi.c | 32 +++++++++++++++++++++------
+ 1 file changed, 25 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
+index 028444af48bd..d7773f76e5d0 100644
+--- a/drivers/irqchip/irq-riscv-aplic-msi.c
++++ b/drivers/irqchip/irq-riscv-aplic-msi.c
+@@ -32,15 +32,10 @@ static void aplic_msi_irq_unmask(struct irq_data *d)
+ aplic_irq_unmask(d);
+ }
+
+-static void aplic_msi_irq_eoi(struct irq_data *d)
++static void aplic_msi_irq_retrigger_level(struct irq_data *d)
+ {
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+- /*
+- * EOI handling is required only for level-triggered interrupts
+- * when APLIC is in MSI mode.
+- */
+-
+ switch (irqd_get_trigger_type(d)) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_LEVEL_HIGH:
+@@ -59,6 +54,29 @@ static void aplic_msi_irq_eoi(struct irq_data *d)
+ }
+ }
+
++static void aplic_msi_irq_eoi(struct irq_data *d)
++{
++ /*
++ * EOI handling is required only for level-triggered interrupts
++ * when APLIC is in MSI mode.
++ */
++ aplic_msi_irq_retrigger_level(d);
++}
++
++static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type)
++{
++ int rc = aplic_irq_set_type(d, type);
++
++ if (rc)
++ return rc;
++ /*
++ * Updating sourcecfg register for level-triggered interrupts
++ * requires interrupt retriggering when APLIC is in MSI mode.
++ */
++ aplic_msi_irq_retrigger_level(d);
++ return 0;
++}
++
+ static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg)
+ {
+ unsigned int group_index, hart_index, guest_index, val;
+@@ -130,7 +148,7 @@ static const struct msi_domain_template aplic_msi_template = {
+ .name = "APLIC-MSI",
+ .irq_mask = aplic_msi_irq_mask,
+ .irq_unmask = aplic_msi_irq_unmask,
+- .irq_set_type = aplic_irq_set_type,
++ .irq_set_type = aplic_msi_irq_set_type,
+ .irq_eoi = aplic_msi_irq_eoi,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+--
+2.46.0
+
--- /dev/null
+From d73f0f49daa84176c3beee1606e73c7ffb6af8b2 Mon Sep 17 00:00:00 2001
+From: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Date: Fri, 9 Aug 2024 12:32:24 +0530
+Subject: irqchip/xilinx: Fix shift out of bounds
+
+From: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+
+commit d73f0f49daa84176c3beee1606e73c7ffb6af8b2 upstream.
+
+The device tree property 'xlnx,kind-of-intr' is sanity checked that the
+bitmask contains only set bits which are in the range of the number of
+interrupts supported by the controller.
+
+The check is done by shifting the mask right by the number of supported
+interrupts and checking the result for zero.
+
+The data type of the mask is u32 and the number of supported interrupts is
+up to 32. In case of 32 interrupts the shift is out of bounds, resulting in
+a mismatch warning. The out of bounds condition is also reported by UBSAN:
+
+ UBSAN: shift-out-of-bounds in irq-xilinx-intc.c:332:22
+ shift exponent 32 is too large for 32-bit type 'unsigned int'
+
+Fix it by promoting the mask to u64 for the test.
+
+Fixes: d50466c90724 ("microblaze: intc: Refactor DT sanity check")
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/1723186944-3571957-1-git-send-email-radhey.shyam.pandey@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-xilinx-intc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-xilinx-intc.c
++++ b/drivers/irqchip/irq-xilinx-intc.c
+@@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(st
+ irqc->intr_mask = 0;
+ }
+
+- if (irqc->intr_mask >> irqc->nr_irq)
++ if ((u64)irqc->intr_mask >> irqc->nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
--- /dev/null
+From 7d4df2dad312f270d62fecb0e5c8b086c6d7dcfc Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@gmail.com>
+Date: Mon, 29 Jul 2024 04:21:58 +0200
+Subject: kcov: properly check for softirq context
+
+From: Andrey Konovalov <andreyknvl@gmail.com>
+
+commit 7d4df2dad312f270d62fecb0e5c8b086c6d7dcfc upstream.
+
+When collecting coverage from softirqs, KCOV uses in_serving_softirq() to
+check whether the code is running in the softirq context. Unfortunately,
+in_serving_softirq() is > 0 even when the code is running in the hardirq
+or NMI context for hardirqs and NMIs that happened during a softirq.
+
+As a result, if a softirq handler contains a remote coverage collection
+section and a hardirq with another remote coverage collection section
+happens during handling the softirq, KCOV incorrectly detects a nested
+softirq coverage collection section and prints a WARNING, as reported by
+syzbot.
+
+This issue was exposed by commit a7f3813e589f ("usb: gadget: dummy_hcd:
+Switch to hrtimer transfer scheduler"), which switched dummy_hcd to using
+hrtimer and made the timer's callback be executed in the hardirq context.
+
+Change the related checks in KCOV to account for this behavior of
+in_serving_softirq() and make KCOV ignore remote coverage collection
+sections in the hardirq and NMI contexts.
+
+This prevents the WARNING printed by syzbot but does not fix the inability
+of KCOV to collect coverage from the __usb_hcd_giveback_urb when dummy_hcd
+is in use (caused by a7f3813e589f); a separate patch is required for that.
+
+Link: https://lkml.kernel.org/r/20240729022158.92059-1-andrey.konovalov@linux.dev
+Fixes: 5ff3b30ab57d ("kcov: collect coverage from interrupts")
+Signed-off-by: Andrey Konovalov <andreyknvl@gmail.com>
+Reported-by: syzbot+2388cdaeb6b10f0c13ac@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2388cdaeb6b10f0c13ac
+Acked-by: Marco Elver <elver@google.com>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Aleksandr Nogikh <nogikh@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Marcello Sylvester Bauer <sylv@sylv.io>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kcov.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct
+ kmsan_unpoison_memory(&area->list, sizeof(area->list));
+ }
+
++/*
++ * Unlike in_serving_softirq(), this function returns false when called during
++ * a hardirq or an NMI that happened in the softirq context.
++ */
++static inline bool in_softirq_really(void)
++{
++ return in_serving_softirq() && !in_hardirq() && !in_nmi();
++}
++
+ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+ {
+ unsigned int mode;
+@@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum
+ * so we ignore code executed in interrupts, unless we are in a remote
+ * coverage collection section in a softirq.
+ */
+- if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
++ if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
+ return false;
+ mode = READ_ONCE(t->kcov_mode);
+ /*
+@@ -849,7 +858,7 @@ void kcov_remote_start(u64 handle)
+
+ if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
+ return;
+- if (!in_task() && !in_serving_softirq())
++ if (!in_task() && !in_softirq_really())
+ return;
+
+ local_lock_irqsave(&kcov_percpu_data.lock, flags);
+@@ -991,7 +1000,7 @@ void kcov_remote_stop(void)
+ int sequence;
+ unsigned long flags;
+
+- if (!in_task() && !in_serving_softirq())
++ if (!in_task() && !in_softirq_really())
+ return;
+
+ local_lock_irqsave(&kcov_percpu_data.lock, flags);
--- /dev/null
+From e688c220732e518c2eb1639e9ef77d4a9311713c Mon Sep 17 00:00:00 2001
+From: Miao Wang <shankerwangmiao@gmail.com>
+Date: Wed, 7 Aug 2024 17:37:11 +0800
+Subject: LoongArch: Enable general EFI poweroff method
+
+From: Miao Wang <shankerwangmiao@gmail.com>
+
+commit e688c220732e518c2eb1639e9ef77d4a9311713c upstream.
+
+efi_shutdown_init() can register a general sys_off handler named
+efi_power_off(). Enable this by providing efi_poweroff_required(),
+like arm and x86. Since EFI poweroff is also supported on LoongArch,
+this enablement makes the poweroff function usable for hardware
+which lacks ACPI S5.
+
+We prefer ACPI poweroff rather than EFI poweroff (like x86), so we only
+require EFI poweroff if acpi_gbl_reduced_hardware or acpi_no_s5 is true.
+
+Cc: stable@vger.kernel.org
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Miao Wang <shankerwangmiao@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/efi.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/loongarch/kernel/efi.c
++++ b/arch/loongarch/kernel/efi.c
+@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ }
+
++bool efi_poweroff_required(void)
++{
++ return efi_enabled(EFI_RUNTIME_SERVICES) &&
++ (acpi_gbl_reduced_hardware || acpi_no_s5);
++}
++
+ unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+ #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
--- /dev/null
+From 5161b48712dcd08ec427c450399d4d1483e21dea Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Thu, 18 Jul 2024 16:36:07 +0800
+Subject: mm: list_lru: fix UAF for memory cgroup
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 5161b48712dcd08ec427c450399d4d1483e21dea upstream.
+
+The mem_cgroup_from_slab_obj() is supposed to be called under rcu lock or
+cgroup_mutex or others which could prevent returned memcg from being
+freed. Fix it by adding missing rcu read lock.
+
+Found by code inspection.
+
+[songmuchun@bytedance.com: only grab rcu lock when necessary, per Vlastimil]
+ Link: https://lkml.kernel.org/r/20240801024603.1865-1-songmuchun@bytedance.com
+Link: https://lkml.kernel.org/r/20240718083607.42068-1-songmuchun@bytedance.com
+Fixes: 0a97c01cd20b ("list_lru: allow explicit memcg and NUMA node selection")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Nhat Pham <nphamcs@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/list_lru.c | 28 ++++++++++++++++++++++------
+ 1 file changed, 22 insertions(+), 6 deletions(-)
+
+--- a/mm/list_lru.c
++++ b/mm/list_lru.c
+@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru
+ }
+ #endif /* CONFIG_MEMCG_KMEM */
+
++/* The caller must ensure the memcg lifetime. */
+ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+ {
+@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
+
+ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
+ {
++ bool ret;
+ int nid = page_to_nid(virt_to_page(item));
+- struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
+- mem_cgroup_from_slab_obj(item) : NULL;
+
+- return list_lru_add(lru, item, nid, memcg);
++ if (list_lru_memcg_aware(lru)) {
++ rcu_read_lock();
++ ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
++ rcu_read_unlock();
++ } else {
++ ret = list_lru_add(lru, item, nid, NULL);
++ }
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(list_lru_add_obj);
+
++/* The caller must ensure the memcg lifetime. */
+ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+ {
+@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
+
+ bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
+ {
++ bool ret;
+ int nid = page_to_nid(virt_to_page(item));
+- struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
+- mem_cgroup_from_slab_obj(item) : NULL;
+
+- return list_lru_del(lru, item, nid, memcg);
++ if (list_lru_memcg_aware(lru)) {
++ rcu_read_lock();
++ ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
++ rcu_read_unlock();
++ } else {
++ ret = list_lru_del(lru, item, nid, NULL);
++ }
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(list_lru_del_obj);
+
--- /dev/null
+From d67c5649c1541dc93f202eeffc6f49220a4ed71d Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 31 Jul 2024 13:05:53 +0200
+Subject: mptcp: fully established after ADD_ADDR echo on MPJ
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit d67c5649c1541dc93f202eeffc6f49220a4ed71d upstream.
+
+Before this patch, receiving an ADD_ADDR echo on the just connected
+MP_JOIN subflow -- initiator side, after the MP_JOIN 3WHS -- was
+resulting in an MP_RESET. That's because only ACKs with a DSS or
+ADD_ADDRs without the echo bit were allowed.
+
+Not allowing the ADD_ADDR echo after an MP_CAPABLE 3WHS makes sense, as
+we are not supposed to send an ADD_ADDR before because it requires to be
+in full established mode first. For the MP_JOIN 3WHS, that's different:
+the ADD_ADDR can be sent on a previous subflow, and the ADD_ADDR echo
+can be received on the recently created one. The other peer will already
+be in fully established, so it is allowed to send that.
+
+We can then relax the conditions here to accept the ADD_ADDR echo for
+MPJ subflows.
+
+Fixes: 67b12f792d5e ("mptcp: full fully established support after ADD_ADDR")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240731-upstream-net-20240731-mptcp-endp-subflow-signal-v1-1-c8a9b036493b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -958,7 +958,8 @@ static bool check_fully_established(stru
+
+ if (subflow->remote_key_valid &&
+ (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
+- ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) {
++ ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) &&
++ (!mp_opt->echo || subflow->mp_join)))) {
+ /* subflows are fully established as soon as we get any
+ * additional ack, including ADD_ADDR.
+ */
--- /dev/null
+From 14ab4792ee120c022f276a7e4768f4dcb08f0cdd Mon Sep 17 00:00:00 2001
+From: Dmitry Safonov <0x7f454c46@gmail.com>
+Date: Thu, 1 Aug 2024 01:13:28 +0100
+Subject: net/tcp: Disable TCP-AO static key after RCU grace period
+
+From: Dmitry Safonov <0x7f454c46@gmail.com>
+
+commit 14ab4792ee120c022f276a7e4768f4dcb08f0cdd upstream.
+
+The lifetime of TCP-AO static_key is the same as the last
+tcp_ao_info. On the socket destruction tcp_ao_info ceases to be
+with RCU grace period, while tcp-ao static branch is currently deferred
+destructed. The static key definition is
+: DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_ao_needed, HZ);
+
+which means that if RCU grace period is delayed by more than a second
+and tcp_ao_needed is in the process of disablement, other CPUs may
+yet see tcp_ao_info which isn't dead, but soon-to-be.
+And that breaks the assumption of static_key_fast_inc_not_disabled().
+
+See the comment near the definition:
+> * The caller must make sure that the static key can't get disabled while
+> * in this function. It doesn't patch jump labels, only adds a user to
+> * an already enabled static key.
+
+Originally it was introduced in commit eb8c507296f6 ("jump_label:
+Prevent key->enabled int overflow"), which is needed for the atomic
+contexts, one of which would be the creation of a full socket from a
+request socket. In that atomic context, it's known by the presence
+of the key (md5/ao) that the static branch is already enabled.
+So, the ref counter for that static branch is just incremented
+instead of holding the proper mutex.
+static_key_fast_inc_not_disabled() is just a helper for such usage
+case. But it must not be used if the static branch could get disabled
+in parallel as it's not protected by jump_label_mutex and as a result,
+races with jump_label_update() implementation details.
+
+Happened on netdev test-bot[1], so not a theoretical issue:
+
+[] jump_label: Fatal kernel bug, unexpected op at tcp_inbound_hash+0x1a7/0x870 [ffffffffa8c4e9b7] (eb 50 0f 1f 44 != 66 90 0f 1f 00)) size:2 type:1
+[] ------------[ cut here ]------------
+[] kernel BUG at arch/x86/kernel/jump_label.c:73!
+[] Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI
+[] CPU: 3 PID: 243 Comm: kworker/3:3 Not tainted 6.10.0-virtme #1
+[] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+[] Workqueue: events jump_label_update_timeout
+[] RIP: 0010:__jump_label_patch+0x2f6/0x350
+...
+[] Call Trace:
+[] <TASK>
+[] arch_jump_label_transform_queue+0x6c/0x110
+[] __jump_label_update+0xef/0x350
+[] __static_key_slow_dec_cpuslocked.part.0+0x3c/0x60
+[] jump_label_update_timeout+0x2c/0x40
+[] process_one_work+0xe3b/0x1670
+[] worker_thread+0x587/0xce0
+[] kthread+0x28a/0x350
+[] ret_from_fork+0x31/0x70
+[] ret_from_fork_asm+0x1a/0x30
+[] </TASK>
+[] Modules linked in: veth
+[] ---[ end trace 0000000000000000 ]---
+[] RIP: 0010:__jump_label_patch+0x2f6/0x350
+
+[1]: https://netdev-3.bots.linux.dev/vmksft-tcp-ao-dbg/results/696681/5-connect-deny-ipv6/stderr
+
+Cc: stable@kernel.org
+Fixes: 67fa83f7c86a ("net/tcp: Add static_key for TCP-AO")
+Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ao.c | 43 ++++++++++++++++++++++++++++++-------------
+ 1 file changed, 30 insertions(+), 13 deletions(-)
+
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -266,32 +266,49 @@ static void tcp_ao_key_free_rcu(struct r
+ kfree_sensitive(key);
+ }
+
+-void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
++static void tcp_ao_info_free_rcu(struct rcu_head *head)
+ {
+- struct tcp_ao_info *ao;
++ struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
+ struct tcp_ao_key *key;
+ struct hlist_node *n;
+
++ hlist_for_each_entry_safe(key, n, &ao->head, node) {
++ hlist_del(&key->node);
++ tcp_sigpool_release(key->tcp_sigpool_id);
++ kfree_sensitive(key);
++ }
++ kfree(ao);
++ static_branch_slow_dec_deferred(&tcp_ao_needed);
++}
++
++static void tcp_ao_sk_omem_free(struct sock *sk, struct tcp_ao_info *ao)
++{
++ size_t total_ao_sk_mem = 0;
++ struct tcp_ao_key *key;
++
++ hlist_for_each_entry(key, &ao->head, node)
++ total_ao_sk_mem += tcp_ao_sizeof_key(key);
++ atomic_sub(total_ao_sk_mem, &sk->sk_omem_alloc);
++}
++
++void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
++{
++ struct tcp_ao_info *ao;
++
+ if (twsk) {
+ ao = rcu_dereference_protected(tcp_twsk(sk)->ao_info, 1);
+- tcp_twsk(sk)->ao_info = NULL;
++ rcu_assign_pointer(tcp_twsk(sk)->ao_info, NULL);
+ } else {
+ ao = rcu_dereference_protected(tcp_sk(sk)->ao_info, 1);
+- tcp_sk(sk)->ao_info = NULL;
++ rcu_assign_pointer(tcp_sk(sk)->ao_info, NULL);
+ }
+
+ if (!ao || !refcount_dec_and_test(&ao->refcnt))
+ return;
+
+- hlist_for_each_entry_safe(key, n, &ao->head, node) {
+- hlist_del_rcu(&key->node);
+- if (!twsk)
+- atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
+- call_rcu(&key->rcu, tcp_ao_key_free_rcu);
+- }
+-
+- kfree_rcu(ao, rcu);
+- static_branch_slow_dec_deferred(&tcp_ao_needed);
++ if (!twsk)
++ tcp_ao_sk_omem_free(sk, ao);
++ call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
+ }
+
+ void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
--- /dev/null
+From 6d45e1c948a8b7ed6ceddb14319af69424db730c Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Tue, 6 Aug 2024 13:46:47 -0400
+Subject: padata: Fix possible divide-by-0 panic in padata_mt_helper()
+
+From: Waiman Long <longman@redhat.com>
+
+commit 6d45e1c948a8b7ed6ceddb14319af69424db730c upstream.
+
+We are hit with a not easily reproducible divide-by-0 panic in padata.c at
+bootup time.
+
+ [ 10.017908] Oops: divide error: 0000 1 PREEMPT SMP NOPTI
+ [ 10.017908] CPU: 26 PID: 2627 Comm: kworker/u1666:1 Not tainted 6.10.0-15.el10.x86_64 #1
+ [ 10.017908] Hardware name: Lenovo ThinkSystem SR950 [7X12CTO1WW]/[7X12CTO1WW], BIOS [PSE140J-2.30] 07/20/2021
+ [ 10.017908] Workqueue: events_unbound padata_mt_helper
+ [ 10.017908] RIP: 0010:padata_mt_helper+0x39/0xb0
+ :
+ [ 10.017963] Call Trace:
+ [ 10.017968] <TASK>
+ [ 10.018004] ? padata_mt_helper+0x39/0xb0
+ [ 10.018084] process_one_work+0x174/0x330
+ [ 10.018093] worker_thread+0x266/0x3a0
+ [ 10.018111] kthread+0xcf/0x100
+ [ 10.018124] ret_from_fork+0x31/0x50
+ [ 10.018138] ret_from_fork_asm+0x1a/0x30
+ [ 10.018147] </TASK>
+
+Looking at the padata_mt_helper() function, the only way a divide-by-0
+panic can happen is when ps->chunk_size is 0. The way that chunk_size is
+initialized in padata_do_multithreaded(), chunk_size can be 0 when the
+min_chunk in the passed-in padata_mt_job structure is 0.
+
+Fix this divide-by-0 panic by making sure that chunk_size will be at least
+1 no matter what the input parameters are.
+
+Link: https://lkml.kernel.org/r/20240806174647.1050398-1-longman@redhat.com
+Fixes: 004ed42638f4 ("padata: add basic support for multithreaded jobs")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/padata.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -517,6 +517,13 @@ void __init padata_do_multithreaded(stru
+ ps.chunk_size = max(ps.chunk_size, job->min_chunk);
+ ps.chunk_size = roundup(ps.chunk_size, job->align);
+
++ /*
++ * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
++ * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().`
++ */
++ if (!ps.chunk_size)
++ ps.chunk_size = 1U;
++
+ list_for_each_entry(pw, &works, pw_list)
+ if (job->numa_aware) {
+ int old_node = atomic_read(&last_used_nid);
--- /dev/null
+From 7ae04ba36b381bffe2471eff3a93edced843240f Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sat, 27 Jul 2024 20:22:52 +0200
+Subject: parisc: fix a possible DMA corruption
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 7ae04ba36b381bffe2471eff3a93edced843240f upstream.
+
+ARCH_DMA_MINALIGN was defined as 16 - this is too small - it may be
+possible that two unrelated 16-byte allocations share a cache line. If
+one of these allocations is written using DMA and the other is written
+using cached write, the value that was written with DMA may be
+corrupted.
+
+This commit changes ARCH_DMA_MINALIGN to be 128 on PA20 and 32 on PA1.1 -
+that's the largest possible cache line size.
+
+As different parisc microarchitectures have different cache line size, we
+define arch_slab_minalign(), cache_line_size() and
+dma_get_cache_alignment() so that the kernel may tune slab cache
+parameters dynamically, based on the detected cache line size.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/Kconfig | 1 +
+ arch/parisc/include/asm/cache.h | 11 ++++++++++-
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -20,6 +20,7 @@ config PARISC
+ select ARCH_SUPPORTS_HUGETLBFS if PA20
+ select ARCH_SUPPORTS_MEMORY_FAILURE
+ select ARCH_STACKWALK
++ select ARCH_HAS_CACHE_LINE_SIZE
+ select ARCH_HAS_DEBUG_VM_PGTABLE
+ select HAVE_RELIABLE_STACKTRACE
+ select DMA_OPS
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -20,7 +20,16 @@
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
++#ifdef CONFIG_PA20
++#define ARCH_DMA_MINALIGN 128
++#else
++#define ARCH_DMA_MINALIGN 32
++#endif
++#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
++
++#define arch_slab_minalign() ((unsigned)dcache_stride)
++#define cache_line_size() dcache_stride
++#define dma_get_cache_alignment cache_line_size
+
+ #define __read_mostly __section(".data..read_mostly")
+
--- /dev/null
+From 1fd2c10acb7b35d72101a4619ee5b2cddb9efd3a Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sat, 27 Jul 2024 12:11:02 +0200
+Subject: parisc: fix unaligned accesses in BPF
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 1fd2c10acb7b35d72101a4619ee5b2cddb9efd3a upstream.
+
+There were spurious unaligned access warnings when calling BPF code.
+Sometimes, the warnings were triggered with any incoming packet, making
+the machine hard to use.
+
+The reason for the warnings is this: on parisc64, pointers to functions
+are not really pointers to functions, they are pointers to 16-byte
+descriptor. The first 8 bytes of the descriptor is a pointer to the
+function and the next 8 bytes of the descriptor is the content of the
+"dp" register. This descriptor is generated in the function
+bpf_jit_build_prologue.
+
+The problem is that the function bpf_int_jit_compile advertises 4-byte
+alignment when calling bpf_jit_binary_alloc, bpf_jit_binary_alloc
+randomizes the returned array and if the array happens to be not aligned
+on 8-byte boundary, the descriptor generated in bpf_jit_build_prologue is
+also not aligned and this triggers the unaligned access warning.
+
+Fix this by advertising 8-byte alignment on parisc64 when calling
+bpf_jit_binary_alloc.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/net/bpf_jit_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/parisc/net/bpf_jit_core.c
++++ b/arch/parisc/net/bpf_jit_core.c
+@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(str
+ jit_data->header =
+ bpf_jit_binary_alloc(prog_size + extable_size,
+ &jit_data->image,
+- sizeof(u32),
++ sizeof(long),
+ bpf_fill_ill_insns);
+ if (!jit_data->header) {
+ prog = orig_prog;
--- /dev/null
+From b34ce4a59cfe9cd0d6f870e6408e8ec88a964585 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 17 Jul 2024 22:03:32 +0200
+Subject: power: supply: axp288_charger: Fix constant_charge_voltage writes
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit b34ce4a59cfe9cd0d6f870e6408e8ec88a964585 upstream.
+
+info->max_cv is in millivolts, divide the microvolt value being written
+to constant_charge_voltage by 1000 *before* clamping it to info->max_cv.
+
+Before this fix the code always tried to set constant_charge_voltage
+to max_cv / 1000 = 4 millivolt, which ends up in setting it to 4.1V
+which is the lowest supported value.
+
+Fixes: 843735b788a4 ("power: axp288_charger: axp288 charger driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20240717200333.56669-1-hdegoede@redhat.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/axp288_charger.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -337,8 +337,8 @@ static int axp288_charger_usb_set_proper
+ }
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+- scaled_val = min(val->intval, info->max_cv);
+- scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000);
++ scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000);
++ scaled_val = min(scaled_val, info->max_cv);
+ ret = axp288_charger_set_cv(info, scaled_val);
+ if (ret < 0) {
+ dev_warn(&info->pdev->dev, "set charge voltage failed\n");
--- /dev/null
+From 81af7f2342d162e24ac820c10e68684d9f927663 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 17 Jul 2024 22:03:33 +0200
+Subject: power: supply: axp288_charger: Round constant_charge_voltage writes down
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 81af7f2342d162e24ac820c10e68684d9f927663 upstream.
+
+Round constant_charge_voltage writes down to the first supported lower
+value, rather than rounding them up to the first supported higher value.
+
+This fixes e.g. writing 4250000 resulting in a value of 4350000 which
+might be dangerous, instead writing 4250000 will now result in a safe
+4200000 value.
+
+Fixes: 843735b788a4 ("power: axp288_charger: axp288 charger driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20240717200333.56669-2-hdegoede@redhat.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/axp288_charger.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(
+ u8 reg_val;
+ int ret;
+
+- if (cv <= CV_4100MV) {
+- reg_val = CHRG_CCCV_CV_4100MV;
+- cv = CV_4100MV;
+- } else if (cv <= CV_4150MV) {
+- reg_val = CHRG_CCCV_CV_4150MV;
+- cv = CV_4150MV;
+- } else if (cv <= CV_4200MV) {
++ if (cv >= CV_4350MV) {
++ reg_val = CHRG_CCCV_CV_4350MV;
++ cv = CV_4350MV;
++ } else if (cv >= CV_4200MV) {
+ reg_val = CHRG_CCCV_CV_4200MV;
+ cv = CV_4200MV;
++ } else if (cv >= CV_4150MV) {
++ reg_val = CHRG_CCCV_CV_4150MV;
++ cv = CV_4150MV;
+ } else {
+- reg_val = CHRG_CCCV_CV_4350MV;
+- cv = CV_4350MV;
++ reg_val = CHRG_CCCV_CV_4100MV;
++ cv = CV_4100MV;
+ }
+
+ reg_val = reg_val << CHRG_CCCV_CV_BIT_POS;
--- /dev/null
+From bf9d5cb588755ee41ac12a8976dccf44ae18281b Mon Sep 17 00:00:00 2001
+From: Neil Armstrong <neil.armstrong@linaro.org>
+Date: Mon, 15 Jul 2024 14:57:06 +0200
+Subject: power: supply: qcom_battmgr: return EAGAIN when firmware service is not up
+
+From: Neil Armstrong <neil.armstrong@linaro.org>
+
+commit bf9d5cb588755ee41ac12a8976dccf44ae18281b upstream.
+
+The driver returns -ENODEV when the firmware battmrg service hasn't
+started yet, while per-se -ENODEV is fine, we usually use -EAGAIN to
+tell the user to retry again later. And the power supply core uses
+-EAGAIN when the device isn't initialized, let's use the same return.
+
+This notably causes an infinite spam of:
+thermal thermal_zoneXX: failed to read out thermal zone (-19)
+because the thermal core doesn't understand -ENODEV, but only
+considers -EAGAIN as a non-fatal error.
+
+While it didn't appear until now, commit [1] fixes thermal core
+and no more ignores thermal zones returning an error at first
+temperature update.
+
+[1] 5725f40698b9 ("thermal: core: Call monitor_thermal_zone() if zone temperature is invalid")
+
+Link: https://lore.kernel.org/all/2ed4c630-204a-4f80-a37f-f2ca838eb455@linaro.org/
+Cc: stable@vger.kernel.org
+Fixes: 29e8142b5623 ("power: supply: Introduce Qualcomm PMIC GLINK power supply")
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Tested-by: Stephan Gerhold <stephan.gerhold@linaro.org>
+Reviewed-by: Stephan Gerhold <stephan.gerhold@linaro.org>
+Link: https://lore.kernel.org/r/20240715-topic-sm8x50-upstream-fix-battmgr-temp-tz-warn-v1-1-16e842ccead7@linaro.org
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/qcom_battmgr.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/power/supply/qcom_battmgr.c
++++ b/drivers/power/supply/qcom_battmgr.c
+@@ -486,7 +486,7 @@ static int qcom_battmgr_bat_get_property
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+@@ -683,7 +683,7 @@ static int qcom_battmgr_ac_get_property(
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+ if (ret)
+@@ -748,7 +748,7 @@ static int qcom_battmgr_usb_get_property
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+@@ -867,7 +867,7 @@ static int qcom_battmgr_wls_get_property
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
--- /dev/null
+From fe7a11c78d2a9bdb8b50afc278a31ac177000948 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Wed, 3 Jul 2024 11:16:10 +0800
+Subject: sched/core: Fix unbalance set_rq_online/offline() in sched_cpu_deactivate()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit fe7a11c78d2a9bdb8b50afc278a31ac177000948 upstream.
+
+If cpuset_cpu_inactive() fails, set_rq_online() need be called to rollback.
+
+Fixes: 120455c514f7 ("sched: Fix hotplug vs CPU bandwidth control")
+Cc: stable@kernel.org
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240703031610.587047-5-yangyingliang@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9781,6 +9781,7 @@ int sched_cpu_deactivate(unsigned int cp
+ ret = cpuset_cpu_inactive(cpu);
+ if (ret) {
+ sched_smt_present_inc(cpu);
++ sched_set_rq_online(rq, cpu);
+ balance_push_set(cpu, false);
+ set_cpu_active(cpu, true);
+ sched_update_numa(cpu, true);
--- /dev/null
+From 2f027354122f58ee846468a6f6b48672fff92e9b Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Wed, 3 Jul 2024 11:16:09 +0800
+Subject: sched/core: Introduce sched_set_rq_on/offline() helper
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit 2f027354122f58ee846468a6f6b48672fff92e9b upstream.
+
+Introduce sched_set_rq_on/offline() helper, so it can be called
+in normal or error path simply. No functional change.
+
+Cc: stable@kernel.org
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240703031610.587047-4-yangyingliang@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 40 ++++++++++++++++++++++++++--------------
+ 1 file changed, 26 insertions(+), 14 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9604,6 +9604,30 @@ void set_rq_offline(struct rq *rq)
+ }
+ }
+
++static inline void sched_set_rq_online(struct rq *rq, int cpu)
++{
++ struct rq_flags rf;
++
++ rq_lock_irqsave(rq, &rf);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_online(rq);
++ }
++ rq_unlock_irqrestore(rq, &rf);
++}
++
++static inline void sched_set_rq_offline(struct rq *rq, int cpu)
++{
++ struct rq_flags rf;
++
++ rq_lock_irqsave(rq, &rf);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ rq_unlock_irqrestore(rq, &rf);
++}
++
+ /*
+ * used to mark begin/end of suspend/resume:
+ */
+@@ -9673,7 +9697,6 @@ static inline void sched_smt_present_dec
+ int sched_cpu_activate(unsigned int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+- struct rq_flags rf;
+
+ /*
+ * Clear the balance_push callback and prepare to schedule
+@@ -9702,12 +9725,7 @@ int sched_cpu_activate(unsigned int cpu)
+ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+ * domains.
+ */
+- rq_lock_irqsave(rq, &rf);
+- if (rq->rd) {
+- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+- set_rq_online(rq);
+- }
+- rq_unlock_irqrestore(rq, &rf);
++ sched_set_rq_online(rq, cpu);
+
+ return 0;
+ }
+@@ -9715,7 +9733,6 @@ int sched_cpu_activate(unsigned int cpu)
+ int sched_cpu_deactivate(unsigned int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+- struct rq_flags rf;
+ int ret;
+
+ /*
+@@ -9746,12 +9763,7 @@ int sched_cpu_deactivate(unsigned int cp
+ */
+ synchronize_rcu();
+
+- rq_lock_irqsave(rq, &rf);
+- if (rq->rd) {
+- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+- set_rq_offline(rq);
+- }
+- rq_unlock_irqrestore(rq, &rf);
++ sched_set_rq_offline(rq, cpu);
+
+ /*
+ * When going down, decrement the number of cores with SMT present.
--- /dev/null
+From e22f910a26cc2a3ac9c66b8e935ef2a7dd881117 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Wed, 3 Jul 2024 11:16:08 +0800
+Subject: sched/smt: Fix unbalance sched_smt_present dec/inc
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit e22f910a26cc2a3ac9c66b8e935ef2a7dd881117 upstream.
+
+I got the following warn report while doing stress test:
+
+jump label: negative count!
+WARNING: CPU: 3 PID: 38 at kernel/jump_label.c:263 static_key_slow_try_dec+0x9d/0xb0
+Call Trace:
+ <TASK>
+ __static_key_slow_dec_cpuslocked+0x16/0x70
+ sched_cpu_deactivate+0x26e/0x2a0
+ cpuhp_invoke_callback+0x3ad/0x10d0
+ cpuhp_thread_fun+0x3f5/0x680
+ smpboot_thread_fn+0x56d/0x8d0
+ kthread+0x309/0x400
+ ret_from_fork+0x41/0x70
+ ret_from_fork_asm+0x1b/0x30
+ </TASK>
+
+Because when cpuset_cpu_inactive() fails in sched_cpu_deactivate(),
+the cpu offline failed, but sched_smt_present is decremented before
+calling sched_cpu_deactivate(), it leads to unbalanced dec/inc, so
+fix it by incrementing sched_smt_present in the error path.
+
+Fixes: c5511d03ec09 ("sched/smt: Make sched_smt_present track topology")
+Cc: stable@kernel.org
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Chen Yu <yu.c.chen@intel.com>
+Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
+Link: https://lore.kernel.org/r/20240703031610.587047-3-yangyingliang@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9768,6 +9768,7 @@ int sched_cpu_deactivate(unsigned int cp
+ sched_update_numa(cpu, false);
+ ret = cpuset_cpu_inactive(cpu);
+ if (ret) {
++ sched_smt_present_inc(cpu);
+ balance_push_set(cpu, false);
+ set_cpu_active(cpu, true);
+ sched_update_numa(cpu, true);
--- /dev/null
+From 31b164e2e4af84d08d2498083676e7eeaa102493 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Wed, 3 Jul 2024 11:16:07 +0800
+Subject: sched/smt: Introduce sched_smt_present_inc/dec() helper
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit 31b164e2e4af84d08d2498083676e7eeaa102493 upstream.
+
+Introduce sched_smt_present_inc/dec() helper, so it can be called
+in normal or error path simply. No functional change.
+
+Cc: stable@kernel.org
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240703031610.587047-2-yangyingliang@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9654,6 +9654,22 @@ static int cpuset_cpu_inactive(unsigned
+ return 0;
+ }
+
++static inline void sched_smt_present_inc(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++}
++
++static inline void sched_smt_present_dec(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_dec_cpuslocked(&sched_smt_present);
++#endif
++}
++
+ int sched_cpu_activate(unsigned int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+@@ -9665,13 +9681,10 @@ int sched_cpu_activate(unsigned int cpu)
+ */
+ balance_push_set(cpu, false);
+
+-#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going up, increment the number of cores with SMT present.
+ */
+- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+- static_branch_inc_cpuslocked(&sched_smt_present);
+-#endif
++ sched_smt_present_inc(cpu);
+ set_cpu_active(cpu, true);
+
+ if (sched_smp_initialized) {
+@@ -9740,13 +9753,12 @@ int sched_cpu_deactivate(unsigned int cp
+ }
+ rq_unlock_irqrestore(rq, &rf);
+
+-#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+- static_branch_dec_cpuslocked(&sched_smt_present);
++ sched_smt_present_dec(cpu);
+
++#ifdef CONFIG_SCHED_SMT
+ sched_core_cpu_deactivate(cpu);
+ #endif
+
--- /dev/null
+From 30b651c8bc788c068a978dc760e9d5f824f7019e Mon Sep 17 00:00:00 2001
+From: Nico Pache <npache@redhat.com>
+Date: Wed, 24 Jul 2024 15:35:17 -0600
+Subject: selftests: mm: add s390 to ARCH check
+
+From: Nico Pache <npache@redhat.com>
+
+commit 30b651c8bc788c068a978dc760e9d5f824f7019e upstream.
+
+commit 0518dbe97fe6 ("selftests/mm: fix cross compilation with LLVM")
+changed the env variable for the architecture from MACHINE to ARCH.
+
+This is preventing 3 required TEST_GEN_FILES from being included when
+cross compiling s390x and errors when trying to run the test suite. This
+is due to the ARCH variable already being set and the arch folder name
+being s390.
+
+Add "s390" to the filtered list to cover this case and have the 3 files
+included in the build.
+
+Link: https://lkml.kernel.org/r/20240724213517.23918-1-npache@redhat.com
+Fixes: 0518dbe97fe6 ("selftests/mm: fix cross compilation with LLVM")
+Signed-off-by: Nico Pache <npache@redhat.com>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/mm/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/mm/Makefile
++++ b/tools/testing/selftests/mm/Makefile
+@@ -106,7 +106,7 @@ endif
+
+ endif
+
+-ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64))
++ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390))
+ TEST_GEN_FILES += va_high_addr_switch
+ TEST_GEN_FILES += virtual_address_range
+ TEST_GEN_FILES += write_to_hugetlbfs
serial-sc16is7xx-fix-invalid-fifo-access-with-special-register-set.patch
tty-vt-conmakehash-cope-with-abs_srctree-no-longer-in-env.patch
memcg-protect-concurrent-access-to-mem_cgroup_idr.patch
+parisc-fix-unaligned-accesses-in-bpf.patch
+parisc-fix-a-possible-dma-corruption.patch
+asoc-amd-yc-add-quirk-entry-for-omen-by-hp-gaming-laptop-16-n0xxx.patch
+spmi-pmic-arb-pass-the-correct-of_node-to-irq_domain_add_tree.patch
+kcov-properly-check-for-softirq-context.patch
+irqchip-xilinx-fix-shift-out-of-bounds.patch
+irqchip-riscv-aplic-retrigger-msi-interrupt-on-source-configuration.patch
+genirq-irqdesc-honor-caller-provided-affinity-in-alloc_desc.patch
+loongarch-enable-general-efi-poweroff-method.patch
+power-supply-qcom_battmgr-return-eagain-when-firmware-service-is-not-up.patch
+power-supply-axp288_charger-fix-constant_charge_voltage-writes.patch
+power-supply-axp288_charger-round-constant_charge_voltage-writes-down.patch
+tracing-have-format-file-honor-event_file_fl_freed.patch
+tracing-fix-overflow-in-get_free_elt.patch
+padata-fix-possible-divide-by-0-panic-in-padata_mt_helper.patch
+smb3-fix-setting-securityflags-when-encryption-is-required.patch
+eventfs-don-t-return-null-in-eventfs_create_dir.patch
+eventfs-use-srcu-for-freeing-eventfs_inodes.patch
+selftests-mm-add-s390-to-arch-check.patch
+mm-list_lru-fix-uaf-for-memory-cgroup.patch
+net-tcp-disable-tcp-ao-static-key-after-rcu-grace-period.patch
+btrfs-avoid-using-fixed-char-array-size-for-tree-names.patch
+x86-paravirt-fix-incorrect-virt-spinlock-setting-on-bare-metal.patch
+x86-mtrr-check-if-fixed-mtrrs-exist-before-saving-them.patch
+sched-smt-introduce-sched_smt_present_inc-dec-helper.patch
+sched-smt-fix-unbalance-sched_smt_present-dec-inc.patch
+sched-core-introduce-sched_set_rq_on-offline-helper.patch
+sched-core-fix-unbalance-set_rq_online-offline-in-sched_cpu_deactivate.patch
+drm-bridge-analogix_dp-properly-handle-zero-sized-aux-transactions.patch
+drm-dp_mst-skip-csn-if-topology-probing-is-not-done-yet.patch
+drm-lima-mark-simple_ondemand-governor-as-softdep.patch
+drm-mgag200-set-ddc-timeout-in-milliseconds.patch
+drm-mgag200-bind-i2c-lifetime-to-drm-device.patch
+drm-radeon-remove-__counted_by-from-statearray.states.patch
+mptcp-fully-established-after-add_addr-echo-on-mpj.patch
--- /dev/null
+From 1b5487aefb1ce7a6b1f15a33297d1231306b4122 Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Wed, 31 Jul 2024 21:38:50 -0500
+Subject: smb3: fix setting SecurityFlags when encryption is required
+
+From: Steve French <stfrench@microsoft.com>
+
+commit 1b5487aefb1ce7a6b1f15a33297d1231306b4122 upstream.
+
+Setting encryption as required in security flags was broken.
+For example (to require all mounts to be encrypted by setting):
+
+ "echo 0x400c5 > /proc/fs/cifs/SecurityFlags"
+
+Would return "Invalid argument" and log "Unsupported security flags"
+This patch fixes that (e.g. allowing overriding the default for
+SecurityFlags 0x00c5, including 0x40000 to require seal, ie
+SMB3.1.1 encryption) so now that works and forces encryption
+on subsequent mounts.
+
+Acked-by: Bharath SM <bharathsm@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/cifs/usage.rst | 2 +-
+ fs/smb/client/cifs_debug.c | 2 +-
+ fs/smb/client/cifsglob.h | 8 ++++----
+ fs/smb/client/smb2pdu.c | 3 +++
+ 4 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/Documentation/admin-guide/cifs/usage.rst
++++ b/Documentation/admin-guide/cifs/usage.rst
+@@ -742,7 +742,7 @@ SecurityFlags Flags which control secur
+ may use NTLMSSP 0x00080
+ must use NTLMSSP 0x80080
+ seal (packet encryption) 0x00040
+- must seal (not implemented yet) 0x40040
++ must seal 0x40040
+
+ cifsFYI If set to non-zero value, additional debug information
+ will be logged to the system error log. This field
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -1072,7 +1072,7 @@ static int cifs_security_flags_proc_open
+ static void
+ cifs_security_flags_handle_must_flags(unsigned int *flags)
+ {
+- unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
++ unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL);
+
+ if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
+ *flags = CIFSSEC_MUST_KRB5;
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1901,7 +1901,7 @@ static inline bool is_replayable_error(i
+ #define CIFSSEC_MAY_SIGN 0x00001
+ #define CIFSSEC_MAY_NTLMV2 0x00004
+ #define CIFSSEC_MAY_KRB5 0x00008
+-#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
++#define CIFSSEC_MAY_SEAL 0x00040
+ #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */
+
+ #define CIFSSEC_MUST_SIGN 0x01001
+@@ -1911,11 +1911,11 @@ require use of the stronger protocol */
+ #define CIFSSEC_MUST_NTLMV2 0x04004
+ #define CIFSSEC_MUST_KRB5 0x08008
+ #ifdef CONFIG_CIFS_UPCALL
+-#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */
++#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */
+ #else
+-#define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */
++#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */
+ #endif /* UPCALL */
+-#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
++#define CIFSSEC_MUST_SEAL 0x40040
+ #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
+
+ #define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -82,6 +82,9 @@ int smb3_encryption_required(const struc
+ if (tcon->seal &&
+ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ return 1;
++ if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
++ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++ return 1;
+ return 0;
+ }
+
--- /dev/null
+From f38ba5459ced3441852f37f20fcfb7bd39d20f62 Mon Sep 17 00:00:00 2001
+From: Konrad Dybcio <konrad.dybcio@linaro.org>
+Date: Thu, 25 Jul 2024 09:46:32 -0700
+Subject: spmi: pmic-arb: Pass the correct of_node to irq_domain_add_tree
+
+From: Konrad Dybcio <konrad.dybcio@linaro.org>
+
+commit f38ba5459ced3441852f37f20fcfb7bd39d20f62 upstream.
+
+Currently, irqchips for all of the subnodes (which represent a given
+bus master) point to the parent wrapper node. This is no bueno, as
+no interrupts arrive, ever (because nothing references that node).
+
+Fix that by passing a reference to the respective master's of_node.
+
+Worth noting, this is a NOP for devices with only a single master
+described.
+
+Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Link: https://lore.kernel.org/r/20240522-topic-spmi_multi_master_irqfix-v2-1-7ec92a862b9f@linaro.org
+Reviewed-by: Abel Vesa <abel.vesa@linaro.org>
+Tested-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Fixes: 02922ccbb330 ("spmi: pmic-arb: Register controller for bus instead of arbiter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Reviewed-by: Bjorn Andersson <andersson@kernel.org>
+Link: https://lore.kernel.org/r/20240725164636.3362690-3-sboyd@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spmi/spmi-pmic-arb.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index f240fcc5a4e1..b6880c13163c 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -1737,8 +1737,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
+
+ dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index);
+
+- bus->domain = irq_domain_add_tree(dev->of_node,
+- &pmic_arb_irq_domain_ops, bus);
++ bus->domain = irq_domain_add_tree(node, &pmic_arb_irq_domain_ops, bus);
+ if (!bus->domain) {
+ dev_err(&pdev->dev, "unable to create irq_domain\n");
+ return -ENOMEM;
+--
+2.46.0
+
--- /dev/null
+From bcf86c01ca4676316557dd482c8416ece8c2e143 Mon Sep 17 00:00:00 2001
+From: Tze-nan Wu <Tze-nan.Wu@mediatek.com>
+Date: Mon, 5 Aug 2024 13:59:22 +0800
+Subject: tracing: Fix overflow in get_free_elt()
+
+From: Tze-nan Wu <Tze-nan.Wu@mediatek.com>
+
+commit bcf86c01ca4676316557dd482c8416ece8c2e143 upstream.
+
+"tracing_map->next_elt" in get_free_elt() is at risk of overflowing.
+
+Once it overflows, new elements can still be inserted into the tracing_map
+even though the maximum number of elements (`max_elts`) has been reached.
+Continuing to insert elements after the overflow could result in the
+tracing_map containing "tracing_map->max_size" elements, leaving no empty
+entries.
+If any attempt is made to insert an element into a full tracing_map using
+`__tracing_map_insert()`, it will cause an infinite loop with preemption
+disabled, leading to a CPU hang problem.
+
+Fix this by preventing any further increments to "tracing_map->next_elt"
+once it reaches "tracing_map->max_elt".
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Fixes: 08d43a5fa063e ("tracing: Add lock-free tracing_map")
+Co-developed-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com>
+Link: https://lore.kernel.org/20240805055922.6277-1-Tze-nan.Wu@mediatek.com
+Signed-off-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com>
+Signed-off-by: Tze-nan Wu <Tze-nan.Wu@mediatek.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/tracing_map.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_
+ struct tracing_map_elt *elt = NULL;
+ int idx;
+
+- idx = atomic_inc_return(&map->next_elt);
++ idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
+ if (idx < map->max_elts) {
+ elt = *(TRACING_MAP_ELT(map->elts, idx));
+ if (map->ops && map->ops->elt_init)
+@@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_ma
+ {
+ unsigned int i;
+
+- atomic_set(&map->next_elt, -1);
++ atomic_set(&map->next_elt, 0);
+ atomic64_set(&map->hits, 0);
+ atomic64_set(&map->drops, 0);
+
+@@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(u
+
+ map->map_bits = map_bits;
+ map->max_elts = (1 << map_bits);
+- atomic_set(&map->next_elt, -1);
++ atomic_set(&map->next_elt, 0);
+
+ map->map_size = (1 << (map_bits + 1));
+ map->ops = ops;
--- /dev/null
+From b1560408692cd0ab0370cfbe9deb03ce97ab3f6d Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Tue, 30 Jul 2024 11:06:57 -0400
+Subject: tracing: Have format file honor EVENT_FILE_FL_FREED
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit b1560408692cd0ab0370cfbe9deb03ce97ab3f6d upstream.
+
+When eventfs was introduced, special care had to be done to coordinate the
+freeing of the file meta data with the files that are exposed to user
+space. The file meta data would have a ref count that is set when the file
+is created and would be decremented and freed after the last user that
+opened the file closed it. When the file meta data was to be freed, it
+would set a flag (EVENT_FILE_FL_FREED) to denote that the file is freed,
+and any new references made (like new opens or reads) would fail as it is
+marked freed. This allowed other meta data to be freed after this flag was
+set (under the event_mutex).
+
+All the files that were dynamically created in the events directory had a
+pointer to the file meta data and would call event_release() when the last
+reference to the user space file was closed. This would be the time that it
+is safe to free the file meta data.
+
+A shortcut was made for the "format" file. It's i_private would point to
+the "call" entry directly and not point to the file's meta data. This is
+because all format files are the same for the same "call", so it was
+thought there was no reason to differentiate them. The other files
+maintain state (like the "enable", "trigger", etc). But this meant if the
+file were to disappear, the "format" file would be unaware of it.
+
+This caused a race that could be triggered via the user_events test (that
+would create dynamic events and free them), and running a loop that would
+read the user_events format files:
+
+In one console run:
+
+ # cd tools/testing/selftests/user_events
+ # while true; do ./ftrace_test; done
+
+And in another console run:
+
+ # cd /sys/kernel/tracing/
+ # while true; do cat events/user_events/__test_event/format; done 2>/dev/null
+
+With KASAN memory checking, it would trigger a use-after-free bug report
+(which was a real bug). This was because the format file was not checking
+the file's meta data flag "EVENT_FILE_FL_FREED", so it would access the
+event that the file meta data pointed to after the event was freed.
+
+After inspection, there are other locations that were found to not check
+the EVENT_FILE_FL_FREED flag when accessing the trace_event_file. Add a
+new helper function: event_file_file() that will make sure that the
+event_mutex is held, and will return NULL if the trace_event_file has the
+EVENT_FILE_FL_FREED flag set. Have the first reference of the struct file
+pointer use event_file_file() and check for NULL. Later uses can still use
+the event_file_data() helper function if the event_mutex is still held and
+was not released since the event_file_file() call.
+
+Link: https://lore.kernel.org/all/20240719204701.1605950-1-minipli@grsecurity.net/
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Ajay Kaher <ajay.kaher@broadcom.com>
+Cc: Ilkka Naulapää <digirigawa@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Dan Carpenter <dan.carpenter@linaro.org>
+Cc: Beau Belgrave <beaub@linux.microsoft.com>
+Cc: Florian Fainelli <florian.fainelli@broadcom.com>
+Cc: Alexey Makhalov <alexey.makhalov@broadcom.com>
+Cc: Vasavi Sirnapalli <vasavi.sirnapalli@broadcom.com>
+Link: https://lore.kernel.org/20240730110657.3b69d3c1@gandalf.local.home
+Fixes: b63db58e2fa5d ("eventfs/tracing: Add callback for release of an eventfs_inode")
+Reported-by: Mathias Krause <minipli@grsecurity.net>
+Tested-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.h | 23 +++++++++++++++++++++++
+ kernel/trace/trace_events.c | 33 ++++++++++++++++++++-------------
+ kernel/trace/trace_events_hist.c | 4 ++--
+ kernel/trace/trace_events_inject.c | 2 +-
+ kernel/trace/trace_events_trigger.c | 6 +++---
+ 5 files changed, 49 insertions(+), 19 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1573,6 +1573,29 @@ static inline void *event_file_data(stru
+ extern struct mutex event_mutex;
+ extern struct list_head ftrace_events;
+
++/*
++ * When the trace_event_file is the filp->i_private pointer,
++ * it must be taken under the event_mutex lock, and then checked
++ * if the EVENT_FILE_FL_FREED flag is set. If it is, then the
++ * data pointed to by the trace_event_file can not be trusted.
++ *
++ * Use the event_file_file() to access the trace_event_file from
++ * the filp the first time under the event_mutex and check for
++ * NULL. If it is needed to be retrieved again and the event_mutex
++ * is still held, then the event_file_data() can be used and it
++ * is guaranteed to be valid.
++ */
++static inline struct trace_event_file *event_file_file(struct file *filp)
++{
++ struct trace_event_file *file;
++
++ lockdep_assert_held(&event_mutex);
++ file = READ_ONCE(file_inode(filp)->i_private);
++ if (!file || file->flags & EVENT_FILE_FL_FREED)
++ return NULL;
++ return file;
++}
++
+ extern const struct file_operations event_trigger_fops;
+ extern const struct file_operations event_hist_fops;
+ extern const struct file_operations event_hist_debug_fops;
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1386,12 +1386,12 @@ event_enable_read(struct file *filp, cha
+ char buf[4] = "0";
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
++ file = event_file_file(filp);
+ if (likely(file))
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+- if (!file || flags & EVENT_FILE_FL_FREED)
++ if (!file)
+ return -ENODEV;
+
+ if (flags & EVENT_FILE_FL_ENABLED &&
+@@ -1424,8 +1424,8 @@ event_enable_write(struct file *filp, co
+ case 1:
+ ret = -ENODEV;
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
+- if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) {
++ file = event_file_file(filp);
++ if (likely(file)) {
+ ret = tracing_update_buffers(file->tr);
+ if (ret < 0) {
+ mutex_unlock(&event_mutex);
+@@ -1540,7 +1540,8 @@ enum {
+
+ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+- struct trace_event_call *call = event_file_data(m->private);
++ struct trace_event_file *file = event_file_data(m->private);
++ struct trace_event_call *call = file->event_call;
+ struct list_head *common_head = &ftrace_common_fields;
+ struct list_head *head = trace_get_fields(call);
+ struct list_head *node = v;
+@@ -1572,7 +1573,8 @@ static void *f_next(struct seq_file *m,
+
+ static int f_show(struct seq_file *m, void *v)
+ {
+- struct trace_event_call *call = event_file_data(m->private);
++ struct trace_event_file *file = event_file_data(m->private);
++ struct trace_event_call *call = file->event_call;
+ struct ftrace_event_field *field;
+ const char *array_descriptor;
+
+@@ -1627,12 +1629,14 @@ static int f_show(struct seq_file *m, vo
+
+ static void *f_start(struct seq_file *m, loff_t *pos)
+ {
++ struct trace_event_file *file;
+ void *p = (void *)FORMAT_HEADER;
+ loff_t l = 0;
+
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+- if (!event_file_data(m->private))
++ file = event_file_file(m->private);
++ if (!file)
+ return ERR_PTR(-ENODEV);
+
+ while (l < *pos && p)
+@@ -1706,8 +1710,8 @@ event_filter_read(struct file *filp, cha
+ trace_seq_init(s);
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
+- if (file && !(file->flags & EVENT_FILE_FL_FREED))
++ file = event_file_file(filp);
++ if (file)
+ print_event_filter(file, s);
+ mutex_unlock(&event_mutex);
+
+@@ -1736,9 +1740,13 @@ event_filter_write(struct file *filp, co
+ return PTR_ERR(buf);
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
+- if (file)
+- err = apply_event_filter(file, buf);
++ file = event_file_file(filp);
++ if (file) {
++ if (file->flags & EVENT_FILE_FL_FREED)
++ err = -ENODEV;
++ else
++ err = apply_event_filter(file, buf);
++ }
+ mutex_unlock(&event_mutex);
+
+ kfree(buf);
+@@ -2485,7 +2493,6 @@ static int event_callback(const char *na
+ if (strcmp(name, "format") == 0) {
+ *mode = TRACE_MODE_READ;
+ *fops = &ftrace_event_format_fops;
+- *data = call;
+ return 1;
+ }
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -5601,7 +5601,7 @@ static int hist_show(struct seq_file *m,
+
+ mutex_lock(&event_mutex);
+
+- event_file = event_file_data(m->private);
++ event_file = event_file_file(m->private);
+ if (unlikely(!event_file)) {
+ ret = -ENODEV;
+ goto out_unlock;
+@@ -5880,7 +5880,7 @@ static int hist_debug_show(struct seq_fi
+
+ mutex_lock(&event_mutex);
+
+- event_file = event_file_data(m->private);
++ event_file = event_file_file(m->private);
+ if (unlikely(!event_file)) {
+ ret = -ENODEV;
+ goto out_unlock;
+--- a/kernel/trace/trace_events_inject.c
++++ b/kernel/trace/trace_events_inject.c
+@@ -299,7 +299,7 @@ event_inject_write(struct file *filp, co
+ strim(buf);
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
++ file = event_file_file(filp);
+ if (file) {
+ call = file->event_call;
+ size = parse_entry(buf, call, &entry);
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -159,7 +159,7 @@ static void *trigger_start(struct seq_fi
+
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+- event_file = event_file_data(m->private);
++ event_file = event_file_file(m->private);
+ if (unlikely(!event_file))
+ return ERR_PTR(-ENODEV);
+
+@@ -213,7 +213,7 @@ static int event_trigger_regex_open(stru
+
+ mutex_lock(&event_mutex);
+
+- if (unlikely(!event_file_data(file))) {
++ if (unlikely(!event_file_file(file))) {
+ mutex_unlock(&event_mutex);
+ return -ENODEV;
+ }
+@@ -293,7 +293,7 @@ static ssize_t event_trigger_regex_write
+ strim(buf);
+
+ mutex_lock(&event_mutex);
+- event_file = event_file_data(file);
++ event_file = event_file_file(file);
+ if (unlikely(!event_file)) {
+ mutex_unlock(&event_mutex);
+ kfree(buf);
--- /dev/null
+From 919f18f961c03d6694aa726c514184f2311a4614 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 7 Aug 2024 17:02:44 -0700
+Subject: x86/mtrr: Check if fixed MTRRs exist before saving them
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 919f18f961c03d6694aa726c514184f2311a4614 upstream.
+
+MTRRs have an obsolete fixed variant for fine grained caching control
+of the 640K-1MB region that uses separate MSRs. This fixed variant has
+a separate capability bit in the MTRR capability MSR.
+
+So far all x86 CPUs which support MTRR have this separate bit set, so it
+went unnoticed that mtrr_save_state() does not check the capability bit
+before accessing the fixed MTRR MSRs.
+
+Though on a CPU that does not support the fixed MTRR capability this
+results in a #GP. The #GP itself is harmless because the RDMSR fault is
+handled gracefully, but results in a WARN_ON().
+
+Add the missing capability check to prevent this.
+
+Fixes: 2b1f6278d77c ("[PATCH] x86: Save the MTRRs of the BSP before booting an AP")
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20240808000244.946864-1-ak@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/mtrr/mtrr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
+@@ -609,7 +609,7 @@ void mtrr_save_state(void)
+ {
+ int first_cpu;
+
+- if (!mtrr_enabled())
++ if (!mtrr_enabled() || !mtrr_state.have_fixed)
+ return;
+
+ first_cpu = cpumask_first(cpu_online_mask);
--- /dev/null
+From e639222a51196c69c70b49b67098ce2f9919ed08 Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Tue, 6 Aug 2024 19:22:07 +0800
+Subject: x86/paravirt: Fix incorrect virt spinlock setting on bare metal
+
+From: Chen Yu <yu.c.chen@intel.com>
+
+commit e639222a51196c69c70b49b67098ce2f9919ed08 upstream.
+
+The kernel can change spinlock behavior when running as a guest. But this
+guest-friendly behavior causes performance problems on bare metal.
+
+The kernel uses a static key to switch between the two modes.
+
+In theory, the static key is enabled by default (run in guest mode) and
+should be disabled for bare metal (and in some guests that want native
+behavior or paravirt spinlock).
+
+A performance drop is reported when running encode/decode workload and
+BenchSEE cache sub-workload.
+
+Bisect points to commit ce0a1b608bfc ("x86/paravirt: Silence unused
+native_pv_lock_init() function warning"). When CONFIG_PARAVIRT_SPINLOCKS is
+disabled the virt_spin_lock_key is incorrectly set to true on bare
+metal. The qspinlock degenerates to test-and-set spinlock, which decreases
+the performance on bare metal.
+
+Set the default value of virt_spin_lock_key to false. If booting in a VM,
+enable this key. Later during the VM initialization, if other
+high-efficient spinlock is preferred (e.g. paravirt-spinlock), or the user
+wants the native qspinlock (via nopvspin boot commandline), the
+virt_spin_lock_key is disabled accordingly.
+
+This results in the following decision matrix:
+
+X86_FEATURE_HYPERVISOR Y Y Y N
+CONFIG_PARAVIRT_SPINLOCKS Y Y N Y/N
+PV spinlock Y N N Y/N
+
+virt_spin_lock_key N Y/N Y N
+
+Fixes: ce0a1b608bfc ("x86/paravirt: Silence unused native_pv_lock_init() function warning")
+Reported-by: Prem Nath Dey <prem.nath.dey@intel.com>
+Reported-by: Xiaoping Zhou <xiaoping.zhou@intel.com>
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Suggested-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Suggested-by: Nikolay Borisov <nik.borisov@suse.com>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20240806112207.29792-1-yu.c.chen@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/qspinlock.h | 12 +++++++-----
+ arch/x86/kernel/paravirt.c | 7 +++----
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/qspinlock.h
++++ b/arch/x86/include/asm/qspinlock.h
+@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(lon
+
+ #ifdef CONFIG_PARAVIRT
+ /*
+- * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
++ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+- * Native (and PV wanting native due to vCPU pinning) should disable this key.
+- * It is done in this backwards fashion to only have a single direction change,
+- * which removes ordering between native_pv_spin_init() and HV setup.
++ * Native (and PV wanting native due to vCPU pinning) should keep this key
++ * disabled. Native does not touch the key.
++ *
++ * When in a guest then native_pv_lock_init() enables the key first and
++ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
++DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+ /*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -51,13 +51,12 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "s
+ DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
+ #endif
+
+-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
++DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+ void __init native_pv_lock_init(void)
+ {
+- if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+- !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+- static_branch_disable(&virt_spin_lock_key);
++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
++ static_branch_enable(&virt_spin_lock_key);
+ }
+
+ static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)