--- /dev/null
+From 7d4b8c024a65797d6a916ef88178b193aa72126d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Dec 2024 12:06:13 +0200
+Subject: ACPI: resource: Fix memory resource type union access
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 7899ca9f3bd2b008e9a7c41f2a9f1986052d7e96 ]
+
+In acpi_decode_space(), addr->info.mem.caching is checked at the top
+level for every resource type, but addr->info.mem is part of a union
+and is thus only valid if the resource type is a memory range.
+
+Move the check inside the preceding switch/case so it is only executed
+when the union is of the correct type.
+
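+For illustration, the hazard in a nutshell (a schematic union, not the
+actual ACPI resource structs):
+
+  union {
+          struct { u8 caching; } mem;    /* valid for ACPI_MEMORY_RANGE */
+          struct { u8 range_type; } io;  /* valid for ACPI_IO_RANGE     */
+  } info;
+
+Reading info.mem.caching while info actually holds I/O data
+reinterprets the io member, so the check must be gated on the resource
+type.
+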
+Fixes: fcb29bbcd540 ("ACPI: Add prefetch decoding to the address space parser")
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Link: https://patch.msgid.link/20241202100614.20731-1-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/resource.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 5e59344270ba..65567f26d7bd 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
+ switch (addr->resource_type) {
+ case ACPI_MEMORY_RANGE:
+ acpi_dev_memresource_flags(res, len, wp);
++
++ if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
++ res->flags |= IORESOURCE_PREFETCH;
+ break;
+ case ACPI_IO_RANGE:
+ acpi_dev_ioresource_flags(res, len, iodec,
+@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
+ if (addr->producer_consumer == ACPI_PRODUCER)
+ res->flags |= IORESOURCE_WINDOW;
+
+- if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+- res->flags |= IORESOURCE_PREFETCH;
+-
+ return !(res->flags & IORESOURCE_DISABLED);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 7ee23b5169c57afd5b2e461c54f7fb38235c38bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 11:29:54 +0300
+Subject: ACPICA: events/evxfregn: don't release the ContextMutex that was
+ never acquired
+
+From: Daniil Tatianin <d-tatianin@yandex-team.ru>
+
+[ Upstream commit c53d96a4481f42a1635b96d2c1acbb0a126bfd54 ]
+
+This bug was first introduced in c27f3d011b08, where the author of the
+patch probably meant to do DeleteMutex instead of ReleaseMutex. The
+mutex leak was noticed later on and fixed in e4dfe108371, but the bogus
+ReleaseMutex line was never removed, so do it now.
+
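+For reference, the intended pairing of the ACPICA OSL mutex calls is
+roughly (a sketch, not the actual evxfregn.c flow):
+
+  acpi_os_acquire_mutex(m, ACPI_WAIT_FOREVER);
+  /* ... critical section ... */
+  acpi_os_release_mutex(m);    /* undoes exactly one acquire      */
+  ...
+  acpi_os_delete_mutex(m);     /* destroys m once nobody holds it */
+
+Releasing a mutex that was never acquired, as the line removed below
+did, unbalances the underlying lock.
+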
+Link: https://github.com/acpica/acpica/pull/982
+Fixes: c27f3d011b08 ("ACPICA: Fix race in generic_serial_bus (I2C) and GPIO op_region parameter handling")
+Signed-off-by: Daniil Tatianin <d-tatianin@yandex-team.ru>
+Link: https://patch.msgid.link/20241122082954.658356-1-d-tatianin@yandex-team.ru
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/acpica/evxfregn.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index 6e0d2a98c4ad..47265b073e6f 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -201,8 +201,6 @@ acpi_remove_address_space_handler(acpi_handle device,
+
+ /* Now we can delete the handler object */
+
+- acpi_os_release_mutex(handler_obj->address_space.
+- context_mutex);
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
+--
+2.39.5
+
--- /dev/null
+From 327b4d7b451f5add0862001b8853e189c17620f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 16:52:50 +0100
+Subject: batman-adv: Do not let TT changes list grows indefinitely
+
+From: Remi Pommarel <repk@triplefau.lt>
+
+[ Upstream commit fff8f17c1a6fc802ca23bbd3a276abfde8cc58e6 ]
+
+When the TT changes list is too big to fit in a packet due to the MTU
+size, an empty OGM is sent, expecting other nodes to send TT requests
+to get the changes. The issue is that tt.last_changeset was not built,
+thus the originator was responding with the previous changes to those
+TT requests (see batadv_send_my_tt_response). Also, the changes list
+was never cleaned up, effectively growing indefinitely from this point
+onwards, repeatedly sending the same TT response changes over and over,
+and creating a new empty OGM every OGM interval while waiting for the
+local changes to be purged.
+
+When there are more TT changes than can fit in a packet, drop all
+changes, send an empty OGM, and wait for a TT request so we can respond
+with the full table instead.
+
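+The handling added below boils down to (sketch of the new logic):
+
+  if (tt_diff_len > bat_priv->soft_iface->mtu) {
+          tt_diff_len = 0;        /* announce an empty OGM ...      */
+          drop_changes = true;    /* ... and still purge the change */
+  }                               /* list for the next TT round     */
+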
+Fixes: e1bf0c14096f ("batman-adv: tvlv - convert tt data sent within OGMs")
+Signed-off-by: Remi Pommarel <repk@triplefau.lt>
+Acked-by: Antonio Quartulli <Antonio@mandelbit.com>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/translation-table.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index e6d51d5070aa..0c23df3ea0c3 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -1000,6 +1000,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ int tt_diff_len, tt_change_len = 0;
+ int tt_diff_entries_num = 0;
+ int tt_diff_entries_count = 0;
++ bool drop_changes = false;
+ size_t tt_extra_len = 0;
+ u16 tvlv_len;
+
+@@ -1007,10 +1008,17 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ tt_diff_len = batadv_tt_len(tt_diff_entries_num);
+
+ /* if we have too many changes for one packet don't send any
+- * and wait for the tt table request which will be fragmented
++ * and wait for the tt table request so we can reply with the full
++ * (fragmented) table.
++ *
++ * The local change history should still be cleaned up so the next
++ * TT round can start again with a clean state.
+ */
+- if (tt_diff_len > bat_priv->soft_iface->mtu)
++ if (tt_diff_len > bat_priv->soft_iface->mtu) {
+ tt_diff_len = 0;
++ tt_diff_entries_num = 0;
++ drop_changes = true;
++ }
+
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+ &tt_change, &tt_diff_len);
+@@ -1019,7 +1027,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+
+ tt_data->flags = BATADV_TT_OGM_DIFF;
+
+- if (tt_diff_len == 0)
++ if (!drop_changes && tt_diff_len == 0)
+ goto container_register;
+
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+--
+2.39.5
+
--- /dev/null
+From 152555931d1dd2d51809e80b5319818bc911f378 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 16:52:48 +0100
+Subject: batman-adv: Do not send uninitialized TT changes
+
+From: Remi Pommarel <repk@triplefau.lt>
+
+[ Upstream commit f2f7358c3890e7366cbcb7512b4bc8b4394b2d61 ]
+
+The number of TT changes can be less than initially expected in
+batadv_tt_tvlv_container_update() (changes can be removed by
+batadv_tt_local_event() in ADD+DEL sequence between reading
+tt_diff_entries_num and actually iterating the change list under lock).
+
+Thus tt_diff_len could be bigger than the actual changes size that need
+to be sent. Because batadv_send_my_tt_response sends the whole
+packet, uninitialized data can be interpreted as TT changes on other
+nodes leading to weird TT global entries on those nodes such as:
+
+ * 00:00:00:00:00:00 -1 [....] ( 0) 88:12:4e:ad:7e:ba (179) (0x45845380)
+ * 00:00:00:00:78:79 4092 [.W..] ( 0) 88:12:4e:ad:7e:3c (145) (0x8ebadb8b)
+
+All of the above also applies to the OGM tvlv container buffer's
+tvlv_len.
+
+Remove the extra allocated space to avoid sending uninitialized TT
+changes in batadv_send_my_tt_response() and batadv_v_ogm_send_softif().
+
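+The race, schematically (hypothetical helpers, not the exact
+batman-adv code):
+
+  n = atomic_read(&bat_priv->tt.local_changes);  /* e.g. n = 2   */
+  buff = kmalloc(batadv_tt_len(n), GFP_ATOMIC);  /* sized for n  */
+  spin_lock_bh(&bat_priv->tt.changes_list_lock);
+  /* meanwhile an ADD+DEL pair cancelled out: only m < n entries */
+  /* remain, so only batadv_tt_len(m) bytes of buff get filled   */
+  spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+  send(buff, batadv_tt_len(n));   /* the tail is uninitialized   */
+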
+Fixes: e1bf0c14096f ("batman-adv: tvlv - convert tt data sent within OGMs")
+Signed-off-by: Remi Pommarel <repk@triplefau.lt>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/translation-table.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 38930eccd9df..0653ae49efd4 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -1000,6 +1000,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ int tt_diff_len, tt_change_len = 0;
+ int tt_diff_entries_num = 0;
+ int tt_diff_entries_count = 0;
++ size_t tt_extra_len = 0;
+ u16 tvlv_len;
+
+ tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+@@ -1037,6 +1038,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ }
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+
++ tt_extra_len = batadv_tt_len(tt_diff_entries_num -
++ tt_diff_entries_count);
++
+ /* Keep the buffer for possible tt_request */
+ spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+ kfree(bat_priv->tt.last_changeset);
+@@ -1045,6 +1049,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ tt_change_len = batadv_tt_len(tt_diff_entries_count);
+ /* check whether this new OGM has no changes due to size problems */
+ if (tt_diff_entries_count > 0) {
++ tt_diff_len -= tt_extra_len;
+ /* if kmalloc() fails we will reply with the full table
+ * instead of providing the diff
+ */
+@@ -1057,6 +1062,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+ }
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+
++ /* Remove extra packet space for OGM */
++ tvlv_len -= tt_extra_len;
+ container_register:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+ tvlv_len);
+--
+2.39.5
+
--- /dev/null
+From fff2b8f7c5724c7144eb6e07cf90ae7bf68fee9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2024 16:52:49 +0100
+Subject: batman-adv: Remove uninitialized data in full table TT response
+
+From: Remi Pommarel <repk@triplefau.lt>
+
+[ Upstream commit 8038806db64da15721775d6b834990cacbfcf0b2 ]
+
+The number of entries filled by batadv_tt_tvlv_generate() can be less
+than initially expected in batadv_tt_prepare_tvlv_{global,local}_data()
+(changes can be removed by batadv_tt_local_event() in an ADD+DEL
+sequence in the meantime, as the lock is not held during the whole tvlv
+global/local data generation).
+
+Thus tvlv_len could be bigger than the size of the TT entries that
+actually need to be sent, so a full table TT_RESPONSE could hold
+invalid TT entries such as the ones below.
+
+ * 00:00:00:00:00:00 -1 [....] ( 0) 88:12:4e:ad:7e:ba (179) (0x45845380)
+ * 00:00:00:00:78:79 4092 [.W..] ( 0) 88:12:4e:ad:7e:3c (145) (0x8ebadb8b)
+
+Remove the extra allocated space to avoid sending uninitialized entries
+for full table TT_RESPONSE in both batadv_send_other_tt_response() and
+batadv_send_my_tt_response().
+
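+The fix below makes batadv_tt_tvlv_generate() report the unused tail
+of the buffer, so both callers can shrink the response before sending:
+
+  tvlv_len -= batadv_tt_tvlv_generate(...);  /* drop never-filled tail */
+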
+Fixes: 7ea7b4a14275 ("batman-adv: make the TT CRC logic VLAN specific")
+Signed-off-by: Remi Pommarel <repk@triplefau.lt>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/batman-adv/translation-table.c | 37 ++++++++++++++++++------------
+ 1 file changed, 22 insertions(+), 15 deletions(-)
+
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 0653ae49efd4..e6d51d5070aa 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -2986,14 +2986,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
+ *
+ * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
+ * is not provided then this becomes a no-op.
++ *
++ * Return: Remaining unused length in tvlv_buff.
+ */
+-static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+- struct batadv_hashtable *hash,
+- void *tvlv_buff, u16 tt_len,
+- bool (*valid_cb)(const void *,
+- const void *,
+- u8 *flags),
+- void *cb_data)
++static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
++ struct batadv_hashtable *hash,
++ void *tvlv_buff, u16 tt_len,
++ bool (*valid_cb)(const void *,
++ const void *,
++ u8 *flags),
++ void *cb_data)
+ {
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tvlv_tt_change *tt_change;
+@@ -3007,7 +3009,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
+
+ if (!valid_cb)
+- return;
++ return tt_len;
+
+ rcu_read_lock();
+ for (i = 0; i < hash->size; i++) {
+@@ -3033,6 +3035,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ }
+ }
+ rcu_read_unlock();
++
++ return batadv_tt_len(tt_tot - tt_num_entries);
+ }
+
+ /**
+@@ -3310,10 +3314,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ goto out;
+
+ /* fill the rest of the tvlv with the real TT entries */
+- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+- tt_change, tt_len,
+- batadv_tt_global_valid,
+- req_dst_orig_node);
++ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
++ bat_priv->tt.global_hash,
++ tt_change, tt_len,
++ batadv_tt_global_valid,
++ req_dst_orig_node);
+ }
+
+ /* Don't send the response, if larger than fragmented packet. */
+@@ -3439,9 +3444,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ goto out;
+
+ /* fill the rest of the tvlv with the real TT entries */
+- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+- tt_change, tt_len,
+- batadv_tt_local_valid, NULL);
++ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
++ bat_priv->tt.local_hash,
++ tt_change, tt_len,
++ batadv_tt_local_valid,
++ NULL);
+ }
+
+ tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+--
+2.39.5
+
--- /dev/null
+From aaf2d3e0cb412de2a14ba1f5ddba37a8bc2ee936 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Dec 2024 10:13:29 -0700
+Subject: blk-iocost: Avoid using clamp() on inuse in __propagate_weights()
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit 57e420c84f9ab55ba4c5e2ae9c5f6c8e1ea834d2 ]
+
+After a recent change to clamp() and its variants [1], which increases
+the coverage of the check that high is greater than low because it can
+now be done through inlining, certain build configurations (such as
+s390 defconfig) fail to build with clang with:
+
+ block/blk-iocost.c:1101:11: error: call to '__compiletime_assert_557' declared with 'error' attribute: clamp() low limit 1 greater than high limit active
+ 1101 | inuse = clamp_t(u32, inuse, 1, active);
+ | ^
+ include/linux/minmax.h:218:36: note: expanded from macro 'clamp_t'
+ 218 | #define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
+ | ^
+ include/linux/minmax.h:195:2: note: expanded from macro '__careful_clamp'
+ 195 | __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
+ | ^
+ include/linux/minmax.h:188:2: note: expanded from macro '__clamp_once'
+ 188 | BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \
+ | ^
+
+__propagate_weights() is called with an active value of zero in
+ioc_check_iocgs(), which results in the high value being less than the
+low value. The result is undefined because the value returned depends
+on the order of the comparisons.
+
+The purpose of this expression is to ensure inuse is not more than
+active and at least 1. This can be written more simply with a ternary
+expression that uses min(inuse, active) as the condition, so that the
+value of that condition is used when it is non-zero and 1 when it is
+zero. Do this conversion to resolve the error and add a comment to
+deter people from turning this back into clamp().
+
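+As a quick sanity check of the new expression - the GNU "x ?: y"
+extension evaluates to x when x is non-zero, else y:
+
+  inuse = min(inuse, active) ?: 1;
+
+  /* inuse = 5, active = 3  ->  min() = 3  ->  inuse = 3 */
+  /* inuse = 5, active = 0  ->  min() = 0  ->  inuse = 1 */
+  /* inuse = 0, active = 7  ->  min() = 0  ->  inuse = 1 */
+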
+Fixes: 7caa47151ab2 ("blkcg: implement blk-iocost")
+Link: https://lore.kernel.org/r/34d53778977747f19cce2abb287bb3e6@AcuMS.aculab.com/ [1]
+Suggested-by: David Laight <david.laight@aculab.com>
+Reported-by: Linux Kernel Functional Testing <lkft@linaro.org>
+Closes: https://lore.kernel.org/llvm/CA+G9fYsD7mw13wredcZn0L-KBA3yeoVSTuxnss-AEWMN3ha0cA@mail.gmail.com/
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202412120322.3GfVe3vF-lkp@intel.com/
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-iocost.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 1acd96c9d5c0..642b16e59dd0 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -916,7 +916,14 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
+ inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
+ iocg->child_active_sum);
+ } else {
+- inuse = clamp_t(u32, inuse, 1, active);
++ /*
++ * It may be tempting to turn this into a clamp expression with
++ * a lower limit of 1 but active may be 0, which cannot be used
++ * as an upper limit in that situation. This expression allows
++ * active to clamp inuse unless it is 0, in which case inuse
++ * becomes 1.
++ */
++ inuse = min(inuse, active) ?: 1;
+ }
+
+ if (active == iocg->active && inuse == iocg->inuse)
+--
+2.39.5
+
--- /dev/null
+From 067fe0a9230c01e1b8fae56fa64133abb3b9f8ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Sep 2020 14:52:35 -0400
+Subject: blk-iocost: clamp inuse and skip noops in __propagate_weights()
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit db84a72af6be422abf2089a5896293590dda5066 ]
+
+__propagate_weights() currently expects the callers to clamp inuse
+within [1, active], which is needlessly fragile. The inuse adjustment
+logic is going to be revamped; in preparation, let's make
+__propagate_weights() clamp inuse on entry.
+
+Also, make it avoid weight updates altogether if neither active nor
+inuse is changed.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 57e420c84f9a ("blk-iocost: Avoid using clamp() on inuse in __propagate_weights()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-iocost.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index e7d5aafa5e99..75eb90b3241e 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -907,7 +907,10 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
+
+ lockdep_assert_held(&ioc->lock);
+
+- inuse = min(active, inuse);
++ inuse = clamp_t(u32, inuse, 1, active);
++
++ if (active == iocg->active && inuse == iocg->inuse)
++ return;
+
+ for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
+ struct ioc_gq *parent = iocg->ancestors[lvl];
+--
+2.39.5
+
--- /dev/null
+From 502753c9fb3e7242c8c7e3434ffa72e3370c88dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 May 2021 21:38:36 -0400
+Subject: blk-iocost: fix weight updates of inner active iocgs
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit e9f4eee9a0023ba22db9560d4cc6ee63f933dae8 ]
+
+When the weight of an active iocg is updated, weight_updated() is
+called, which in turn calls __propagate_weights() to update the active
+and inuse weights so that the effective hierarchical weights are
+updated accordingly.
+
+The current implementation is incorrect for inner active nodes. For an
+active leaf iocg, inuse can be any value between 1 and active and the
+difference represents how much the iocg is donating. When weight is updated,
+as long as inuse is clamped between 1 and the new weight, we're alright and
+this is what __propagate_weights() currently implements.
+
+However, that's not how an active inner node's inuse is set. An inner
+node's inuse is solely determined by the ratio between the sums of the
+inuse and active weights of its children - i.e. it is the result of
+propagating the leaves' active and inuse weights upwards.
+__propagate_weights() incorrectly applies the same clamping as for a
+leaf when an active inner node's weight is updated. Consider a
+hierarchy which looks like the following, with saturating workloads in
+AA and BB.
+
+ R
+ / \
+ A B
+ | |
+ AA BB
+
+1. For both A and B, active=100, inuse=100, hwa=0.5, hwi=0.5.
+
+2. echo 200 > A/io.weight
+
+3. __propagate_weights() updates A's active to 200 and leaves inuse at 100 as
+ it's already between 1 and the new active, making A:active=200,
+ A:inuse=100. As R's active_sum is updated along with A's active,
+ A:hwa=2/3, B:hwa=1/3. However, because the inuses didn't change, the
+ hwi's remain unchanged at 0.5.
+
+4. The weight of A is now twice that of B but AA and BB still have the same
+ hwi of 0.5 and thus are doing the same amount of IOs.
+
+Fix it by making __propagate_weights() always calculate the inuse of an
+active inner iocg based on the ratio of child_inuse_sum to child_active_sum.
+
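+As a back-of-the-envelope check of the fix (assuming AA stays at
+active=100, inuse=100): step 3 above would now recompute A's inuse as
+
+  inuse = DIV64_U64_ROUND_UP(active * child_inuse_sum, child_active_sum)
+        = DIV64_U64_ROUND_UP(200 * 100, 100) = 200
+
+so A:hwi becomes 2/3 and AA ends up doing twice the IO of BB, as the
+doubled weight intends.
+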
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Dan Schatzberg <dschatzberg@fb.com>
+Fixes: 7caa47151ab2 ("blkcg: implement blk-iocost")
+Cc: stable@vger.kernel.org # v5.4+
+Link: https://lore.kernel.org/r/YJsxnLZV1MnBcqjj@slm.duckdns.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 57e420c84f9a ("blk-iocost: Avoid using clamp() on inuse in __propagate_weights()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-iocost.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 75eb90b3241e..1acd96c9d5c0 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -907,7 +907,17 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
+
+ lockdep_assert_held(&ioc->lock);
+
+- inuse = clamp_t(u32, inuse, 1, active);
++ /*
++ * For an active leaf node, its inuse shouldn't be zero or exceed
++ * @active. An active internal node's inuse is solely determined by the
++ * inuse to active ratio of its children regardless of @inuse.
++ */
++ if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
++ inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
++ iocg->child_active_sum);
++ } else {
++ inuse = clamp_t(u32, inuse, 1, active);
++ }
+
+ if (active == iocg->active && inuse == iocg->inuse)
+ return;
+@@ -920,7 +930,7 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
+ /* update the level sums */
+ parent->child_active_sum += (s32)(active - child->active);
+ parent->child_inuse_sum += (s32)(inuse - child->inuse);
+- /* apply the udpates */
++ /* apply the updates */
+ child->active = active;
+ child->inuse = inuse;
+
+--
+2.39.5
+
--- /dev/null
+From 7eaa78d8b16c4dede8410942cf556b9dd3d46ad4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Dec 2024 14:10:31 +0000
+Subject: net: lapb: increase LAPB_HEADER_LEN
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a6d75ecee2bf828ac6a1b52724aba0a977e4eaf4 ]
+
+It is unclear whether the net/lapb code is supposed to be ready for
+8021q.
+
+We can at least avoid crashes like the following:
+
+skbuff: skb_under_panic: text:ffffffff8aabe1f6 len:24 put:20 head:ffff88802824a400 data:ffff88802824a3fe tail:0x16 end:0x140 dev:nr0.2
+------------[ cut here ]------------
+ kernel BUG at net/core/skbuff.c:206 !
+Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI
+CPU: 1 UID: 0 PID: 5508 Comm: dhcpcd Not tainted 6.12.0-rc7-syzkaller-00144-g66418447d27b #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/30/2024
+ RIP: 0010:skb_panic net/core/skbuff.c:206 [inline]
+ RIP: 0010:skb_under_panic+0x14b/0x150 net/core/skbuff.c:216
+Code: 0d 8d 48 c7 c6 2e 9e 29 8e 48 8b 54 24 08 8b 0c 24 44 8b 44 24 04 4d 89 e9 50 41 54 41 57 41 56 e8 1a 6f 37 02 48 83 c4 20 90 <0f> 0b 0f 1f 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3
+RSP: 0018:ffffc90002ddf638 EFLAGS: 00010282
+RAX: 0000000000000086 RBX: dffffc0000000000 RCX: 7a24750e538ff600
+RDX: 0000000000000000 RSI: 0000000000000201 RDI: 0000000000000000
+RBP: ffff888034a86650 R08: ffffffff8174b13c R09: 1ffff920005bbe60
+R10: dffffc0000000000 R11: fffff520005bbe61 R12: 0000000000000140
+R13: ffff88802824a400 R14: ffff88802824a3fe R15: 0000000000000016
+FS: 00007f2a5990d740(0000) GS:ffff8880b8700000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000110c2631fd CR3: 0000000029504000 CR4: 00000000003526f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ skb_push+0xe5/0x100 net/core/skbuff.c:2636
+ nr_header+0x36/0x320 net/netrom/nr_dev.c:69
+ dev_hard_header include/linux/netdevice.h:3148 [inline]
+ vlan_dev_hard_header+0x359/0x480 net/8021q/vlan_dev.c:83
+ dev_hard_header include/linux/netdevice.h:3148 [inline]
+ lapbeth_data_transmit+0x1f6/0x2a0 drivers/net/wan/lapbether.c:257
+ lapb_data_transmit+0x91/0xb0 net/lapb/lapb_iface.c:447
+ lapb_transmit_buffer+0x168/0x1f0 net/lapb/lapb_out.c:149
+ lapb_establish_data_link+0x84/0xd0
+ lapb_device_event+0x4e0/0x670
+ notifier_call_chain+0x19f/0x3e0 kernel/notifier.c:93
+ __dev_notify_flags+0x207/0x400
+ dev_change_flags+0xf0/0x1a0 net/core/dev.c:8922
+ devinet_ioctl+0xa4e/0x1aa0 net/ipv4/devinet.c:1188
+ inet_ioctl+0x3d7/0x4f0 net/ipv4/af_inet.c:1003
+ sock_do_ioctl+0x158/0x460 net/socket.c:1227
+ sock_ioctl+0x626/0x8e0 net/socket.c:1346
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl+0xf9/0x170 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
+
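+The crash is a headroom underflow: each layer stacked on top of the
+LAPB device pushes its own header in front of the data, and a fixed
+20 bytes cannot cover them all once 8021q is involved. Schematically
+(illustrative call sequence, not the exact syzkaller setup):
+
+  skb_reserve(skb, 20);   /* old LAPB_HEADER_LEN of headroom           */
+  skb_push(skb, 4);       /* 8021q tag                                 */
+  skb_push(skb, 20);      /* NET/ROM hard header: 24 > 20, under-panic */
+
+Using MAX_HEADER instead reserves enough room for such stacked
+link-layer headers.
+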
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+fb99d1b0c0f81d94a5e2@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/67506220.050a0220.17bd51.006c.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20241204141031.4030267-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/lapb.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/lapb.h b/include/net/lapb.h
+index ccc3d1f020b0..c4417a631013 100644
+--- a/include/net/lapb.h
++++ b/include/net/lapb.h
+@@ -4,7 +4,7 @@
+ #include <linux/lapb.h>
+ #include <linux/refcount.h>
+
+-#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
++#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */
+
+ #define LAPB_ACK_PENDING_CONDITION 0x01
+ #define LAPB_REJECT_CONDITION 0x02
+--
+2.39.5
+
--- /dev/null
+From c806b6c0c83d21dedcb899308f405c495747c515 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Dec 2024 14:14:11 +0100
+Subject: net/sched: netem: account for backlog updates from child qdisc
+
+From: Martin Ottens <martin.ottens@fau.de>
+
+[ Upstream commit f8d4bc455047cf3903cd6f85f49978987dbb3027 ]
+
+In general, 'qlen' of any classful qdisc should keep track of the
+number of packets that the qdisc itself and all of its children hold.
+In the case of netem, 'qlen' only accounts for the packets in its internal
+tfifo. When netem is used with a child qdisc, the child qdisc can use
+'qdisc_tree_reduce_backlog' to inform its parent, netem, about created
+or dropped SKBs. This function updates 'qlen' and the backlog statistics
+of netem, but netem does not account for changes made by a child qdisc.
+'qlen' then indicates the wrong number of packets in the tfifo.
+If a child qdisc creates new SKBs during enqueue and informs its parent
+about this, netem's 'qlen' value is increased. When netem dequeues the
+newly created SKBs from the child, the 'qlen' in netem is not updated.
+If 'qlen' reaches the configured sch->limit, the enqueue function stops
+working, even though the tfifo is not full.
+
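+In other words, the invariant a classful qdisc is expected to keep is
+roughly:
+
+  sch->q.qlen == packets in tfifo + packets in child qdisc
+
+while the tfifo limit check needs the tfifo count alone - which is what
+the t_len counter introduced below provides.
+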
+Reproduce the bug:
+Ensure that the sender machine has GSO enabled. Configure netem as root
+qdisc and tbf as its child on the outgoing interface of the machine
+as follows:
+$ tc qdisc add dev <oif> root handle 1: netem delay 100ms limit 100
+$ tc qdisc add dev <oif> parent 1:0 tbf rate 50Mbit burst 1542 latency 50ms
+
+Send bulk TCP traffic out via this interface, e.g., by running an iPerf3
+client on the machine. Check the qdisc statistics:
+$ tc -s qdisc show dev <oif>
+
+Statistics after 10s of iPerf3 TCP test before the fix (note that
+netem's backlog > limit, netem stopped accepting packets):
+qdisc netem 1: root refcnt 2 limit 1000 delay 100ms
+ Sent 2767766 bytes 1848 pkt (dropped 652, overlimits 0 requeues 0)
+ backlog 4294528236b 1155p requeues 0
+qdisc tbf 10: parent 1:1 rate 50Mbit burst 1537b lat 50ms
+ Sent 2767766 bytes 1848 pkt (dropped 327, overlimits 7601 requeues 0)
+ backlog 0b 0p requeues 0
+
+Statistics after the fix:
+qdisc netem 1: root refcnt 2 limit 1000 delay 100ms
+ Sent 37766372 bytes 24974 pkt (dropped 9, overlimits 0 requeues 0)
+ backlog 0b 0p requeues 0
+qdisc tbf 10: parent 1:1 rate 50Mbit burst 1537b lat 50ms
+ Sent 37766372 bytes 24974 pkt (dropped 327, overlimits 96017 requeues 0)
+ backlog 0b 0p requeues 0
+
+tbf segments the GSO SKBs (tbf_segment) and updates netem's 'qlen'.
+The interface fully stops transferring packets and "locks up". In this
+case, the child qdisc and tfifo are empty, but 'qlen' indicates the
+tfifo is at its limit and no more packets are accepted.
+
+This patch adds a counter for the entries in the tfifo. Netem's 'qlen' is
+only decreased when a packet is returned by its dequeue function, and not
+during enqueuing into the child qdisc. External updates to 'qlen' are thus
+accounted for and only the behavior of the backlog statistics changes. As
+in other qdiscs, 'qlen' then keeps track of how many packets are held in
+netem and all of its children. As before, sch->limit remains as the
+maximum number of packets in the tfifo. The same applies to netem's
+backlog statistics.
+
+Fixes: 50612537e9ab ("netem: fix classful handling")
+Signed-off-by: Martin Ottens <martin.ottens@fau.de>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20241210131412.1837202-1-martin.ottens@fau.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_netem.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 9913bf87e598..73c952d48275 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -77,6 +77,8 @@ struct netem_sched_data {
+ struct sk_buff *t_head;
+ struct sk_buff *t_tail;
+
++ u32 t_len;
++
+ /* optional qdisc for classful handling (NULL at netem init) */
+ struct Qdisc *qdisc;
+
+@@ -373,6 +375,7 @@ static void tfifo_reset(struct Qdisc *sch)
+ rtnl_kfree_skbs(q->t_head, q->t_tail);
+ q->t_head = NULL;
+ q->t_tail = NULL;
++ q->t_len = 0;
+ }
+
+ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+@@ -402,6 +405,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ rb_link_node(&nskb->rbnode, parent, p);
+ rb_insert_color(&nskb->rbnode, &q->t_root);
+ }
++ q->t_len++;
+ sch->q.qlen++;
+ }
+
+@@ -508,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 1<<(prandom_u32() % 8);
+ }
+
+- if (unlikely(sch->q.qlen >= sch->limit)) {
++ if (unlikely(q->t_len >= sch->limit)) {
+ /* re-link segs, so that qdisc_drop_all() frees them all */
+ skb->next = segs;
+ qdisc_drop_all(skb, sch, to_free);
+@@ -692,8 +696,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ tfifo_dequeue:
+ skb = __qdisc_dequeue_head(&sch->q);
+ if (skb) {
+- qdisc_qstats_backlog_dec(sch, skb);
+ deliver:
++ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ return skb;
+ }
+@@ -709,8 +713,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+
+ if (time_to_send <= now && q->slot.slot_next <= now) {
+ netem_erase_head(q, skb);
+- sch->q.qlen--;
+- qdisc_qstats_backlog_dec(sch, skb);
++ q->t_len--;
+ skb->next = NULL;
+ skb->prev = NULL;
+ /* skb->dev shares skb->rbnode area,
+@@ -737,16 +740,21 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
++ sch->qstats.backlog -= pkt_len;
++ sch->q.qlen--;
+ }
+ goto tfifo_dequeue;
+ }
++ sch->q.qlen--;
+ goto deliver;
+ }
+
+ if (q->qdisc) {
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+- if (skb)
++ if (skb) {
++ sch->q.qlen--;
+ goto deliver;
++ }
+ }
+
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+@@ -756,8 +764,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+
+ if (q->qdisc) {
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+- if (skb)
++ if (skb) {
++ sch->q.qlen--;
+ goto deliver;
++ }
+ }
+ return NULL;
+ }
+--
+2.39.5
+
--- /dev/null
+From 2aae098340db7fe4943ec3e3855e09beb1a75040 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Dec 2024 19:46:42 +0100
+Subject: qca_spi: Fix clock speed for multiple QCA7000
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 4dba406fac06b009873fe7a28231b9b7e4288b09 ]
+
+Storing the maximum clock speed in module parameter qcaspi_clkspeed
+has the unintended side effect that the first probed instance
+defines the value for all other instances. Fix this issue by storing
+it in max_speed_hz of the relevant SPI device.
+
+This fix keeps the priority of the speed parameter (module parameter,
+device tree property, driver default). Btw, this takes the opportunity
+to get rid of the now unused member clkspeed.
+
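+Module parameters are per module, not per device; the old code wrote
+the probed speed back into the parameter, roughly:
+
+  static int qcaspi_clkspeed;             /* one copy for the module */
+  module_param(qcaspi_clkspeed, int, 0);
+  ...
+  if (qcaspi_clkspeed == 0)               /* first probe latches the */
+          qcaspi_clkspeed = spi->max_speed_hz;  /* value for all     */
+
+so every later instance saw a seemingly user-supplied value.
+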
+Fixes: 291ab06ecf67 ("net: qualcomm: new Ethernet over SPI driver for QCA7000")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://patch.msgid.link/20241206184643.123399-2-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qualcomm/qca_spi.c | 24 ++++++++++--------------
+ drivers/net/ethernet/qualcomm/qca_spi.h | 1 -
+ 2 files changed, 10 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 036ab9dfe7fc..ff477e27a3fe 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -823,7 +823,6 @@ qcaspi_netdev_init(struct net_device *dev)
+
+ dev->mtu = QCAFRM_MAX_MTU;
+ dev->type = ARPHRD_ETHER;
+- qca->clkspeed = qcaspi_clkspeed;
+ qca->burst_len = qcaspi_burst_len;
+ qca->spi_thread = NULL;
+ qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+@@ -912,17 +911,15 @@ qca_spi_probe(struct spi_device *spi)
+ legacy_mode = of_property_read_bool(spi->dev.of_node,
+ "qca,legacy-mode");
+
+- if (qcaspi_clkspeed == 0) {
+- if (spi->max_speed_hz)
+- qcaspi_clkspeed = spi->max_speed_hz;
+- else
+- qcaspi_clkspeed = QCASPI_CLK_SPEED;
+- }
++ if (qcaspi_clkspeed)
++ spi->max_speed_hz = qcaspi_clkspeed;
++ else if (!spi->max_speed_hz)
++ spi->max_speed_hz = QCASPI_CLK_SPEED;
+
+- if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
+- (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
+- dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+- qcaspi_clkspeed);
++ if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
++ spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
++ dev_err(&spi->dev, "Invalid clkspeed: %u\n",
++ spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+@@ -947,14 +944,13 @@ qca_spi_probe(struct spi_device *spi)
+ return -EINVAL;
+ }
+
+- dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
++ dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
+ QCASPI_DRV_VERSION,
+- qcaspi_clkspeed,
++ spi->max_speed_hz,
+ qcaspi_burst_len,
+ qcaspi_pluggable);
+
+ spi->mode = SPI_MODE_3;
+- spi->max_speed_hz = qcaspi_clkspeed;
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device\n");
+ return -EFAULT;
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index d13a67e20d65..203941d5d72a 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -101,7 +101,6 @@ struct qcaspi {
+ #endif
+
+ /* user configurable options */
+- u32 clkspeed;
+ u8 legacy_mode;
+ u16 burst_len;
+ };
+--
+2.39.5
+
--- /dev/null
+From 4f0cce32dfcb8acec29f6b62c186a64e1699e4c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Dec 2024 19:46:43 +0100
+Subject: qca_spi: Make driver probing reliable
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit becc6399ce3b724cffe9ccb7ef0bff440bb1b62b ]
+
+The module parameter qcaspi_pluggable controls whether the QCA7000
+signature should be checked at driver probe (the current default) or
+not. Unfortunately this check could fail if the chip is temporarily in
+reset, which isn't under the total control of the Linux host. So
+disable this check by default in order to avoid unexpected probe
+failures.
+
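+Setups that still want the probe-time signature check can request it
+explicitly via the module parameter, e.g.:
+
+  $ modprobe qca_spi qcaspi_pluggable=0
+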
+Fixes: 291ab06ecf67 ("net: qualcomm: new Ethernet over SPI driver for QCA7000")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://patch.msgid.link/20241206184643.123399-3-wahrenst@gmx.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qualcomm/qca_spi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index ff477e27a3fe..f051086074a9 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -65,7 +65,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
+
+ #define QCASPI_PLUGGABLE_MIN 0
+ #define QCASPI_PLUGGABLE_MAX 1
+-static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
++static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
+ module_param(qcaspi_pluggable, int, 0);
+ MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
+
+--
+2.39.5
+
usb-gadget-u_serial-fix-the-issue-that-gs_start_io-crashed-due-to-accessing-null-pointer.patch
xfs-don-t-drop-errno-values-when-we-fail-to-ficlone-the-entire-range.patch
bpf-sockmap-fix-update-element-with-same.patch
+batman-adv-do-not-send-uninitialized-tt-changes.patch
+batman-adv-remove-uninitialized-data-in-full-table-t.patch
+batman-adv-do-not-let-tt-changes-list-grows-indefini.patch
+tipc-fix-null-deref-in-cleanup_bearer.patch
+net-lapb-increase-lapb_header_len.patch
+acpi-resource-fix-memory-resource-type-union-access.patch
+qca_spi-fix-clock-speed-for-multiple-qca7000.patch
+qca_spi-make-driver-probing-reliable.patch
+net-sched-netem-account-for-backlog-updates-from-chi.patch
+acpica-events-evxfregn-don-t-release-the-contextmute.patch
+blk-iocost-clamp-inuse-and-skip-noops-in-__propagate.patch
+blk-iocost-fix-weight-updates-of-inner-active-iocgs.patch
+blk-iocost-avoid-using-clamp-on-inuse-in-__propagate.patch
--- /dev/null
+From 888bd7c566e07cdf7278744b00f8d17553e14abb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Dec 2024 17:05:48 +0000
+Subject: tipc: fix NULL deref in cleanup_bearer()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b04d86fff66b15c07505d226431f808c15b1703c ]
+
+syzbot found [1] that after the blamed commit, ub->ubsock->sk
+was NULL when attempting the atomic_dec():
+
+atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+
+Fix this by caching the tipc_net pointer.
+
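+The problematic ordering in cleanup_bearer(), schematically:
+
+  udp_tunnel_sock_release(ub->ubsock);   /* releases the socket ... */
+  ...
+  atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+                                         /* ... ->sk is NULL here   */
+
+Hence the tn pointer is computed below before the release.
+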
+[1]
+
+Oops: general protection fault, probably for non-canonical address 0xdffffc0000000006: 0000 [#1] PREEMPT SMP KASAN PTI
+KASAN: null-ptr-deref in range [0x0000000000000030-0x0000000000000037]
+CPU: 0 UID: 0 PID: 5896 Comm: kworker/0:3 Not tainted 6.13.0-rc1-next-20241203-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
+Workqueue: events cleanup_bearer
+ RIP: 0010:read_pnet include/net/net_namespace.h:387 [inline]
+ RIP: 0010:sock_net include/net/sock.h:655 [inline]
+ RIP: 0010:cleanup_bearer+0x1f7/0x280 net/tipc/udp_media.c:820
+Code: 18 48 89 d8 48 c1 e8 03 42 80 3c 28 00 74 08 48 89 df e8 3c f7 99 f6 48 8b 1b 48 83 c3 30 e8 f0 e4 60 00 48 89 d8 48 c1 e8 03 <42> 80 3c 28 00 74 08 48 89 df e8 1a f7 99 f6 49 83 c7 e8 48 8b 1b
+RSP: 0018:ffffc9000410fb70 EFLAGS: 00010206
+RAX: 0000000000000006 RBX: 0000000000000030 RCX: ffff88802fe45a00
+RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffffc9000410f900
+RBP: ffff88807e1f0908 R08: ffffc9000410f907 R09: 1ffff92000821f20
+R10: dffffc0000000000 R11: fffff52000821f21 R12: ffff888031d19980
+R13: dffffc0000000000 R14: dffffc0000000000 R15: ffff88807e1f0918
+FS: 0000000000000000(0000) GS:ffff8880b8600000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000556ca050b000 CR3: 0000000031c0c000 CR4: 00000000003526f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+
+Fixes: 6a2fa13312e5 ("tipc: Fix use-after-free of kernel socket in cleanup_bearer().")
+Reported-by: syzbot+46aa5474f179dacd1a3b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/67508b5f.050a0220.17bd51.0070.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://patch.msgid.link/20241204170548.4152658-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/udp_media.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 696a21208b24..5f278c25462e 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -798,6 +798,7 @@ static void cleanup_bearer(struct work_struct *work)
+ {
+ struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+ struct udp_replicast *rcast, *tmp;
++ struct tipc_net *tn;
+
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ dst_cache_destroy(&rcast->dst_cache);
+@@ -805,10 +806,14 @@ static void cleanup_bearer(struct work_struct *work)
+ kfree_rcu(rcast, rcu);
+ }
+
++ tn = tipc_net(sock_net(ub->ubsock->sk));
++
+ dst_cache_destroy(&ub->rcast.dst_cache);
+ udp_tunnel_sock_release(ub->ubsock);
++
++ /* Note: could use a call_rcu() to avoid another synchronize_net() */
+ synchronize_net();
+- atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
++ atomic_dec(&tn->wq_count);
+ kfree(ub);
+ }
+
+--
+2.39.5
+