--- /dev/null
+From 2932095c114b98cbb40ccf34fc00d613cb17cead Mon Sep 17 00:00:00 2001
+From: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+Date: Fri, 30 Jan 2026 14:23:53 +0200
+Subject: counter: rz-mtu3-cnt: do not use struct rz_mtu3_channel's dev member
+
+From: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+
+commit 2932095c114b98cbb40ccf34fc00d613cb17cead upstream.
+
+The counter driver can use HW channels 1 and 2, while the PWM driver can
+use HW channels 0, 1, 2, 3, 4, 6, 7.
+
+The dev member is assigned both by the counter driver and the PWM driver
+for channels 1 and 2, to their own struct device instance, overwriting
+the previous value.
+
+The sub-drivers race to assign their own struct device pointer to the
+same struct rz_mtu3_channel's dev member.
+
+The dev member of struct rz_mtu3_channel is used by the counter
+sub-driver for runtime PM.
+
+Depending on the probe order of the counter and PWM sub-drivers, the
+dev member may point to the wrong struct device instance, causing the
+counter sub-driver to do runtime PM actions on the wrong device.
+
+To fix this, use the parent pointer of the counter, which is assigned
+during probe to the correct struct device, not the struct device pointer
+inside the shared struct rz_mtu3_channel.
+
+Cc: stable@vger.kernel.org
+Fixes: 0be8907359df ("counter: Add Renesas RZ/G2L MTU3a counter driver")
+Signed-off-by: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+Link: https://lore.kernel.org/r/20260130122353.2263273-6-cosmin-gabriel.tanislav.xa@renesas.com
+Signed-off-by: William Breathitt Gray <wbg@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/counter/rz-mtu3-cnt.c | 55 ++++++++++++++++++++----------------------
+ 1 file changed, 27 insertions(+), 28 deletions(-)
+
+--- a/drivers/counter/rz-mtu3-cnt.c
++++ b/drivers/counter/rz-mtu3-cnt.c
+@@ -107,9 +107,9 @@ static bool rz_mtu3_is_counter_invalid(s
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ unsigned long tmdr;
+
+- pm_runtime_get_sync(priv->ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+- pm_runtime_put(priv->ch->dev);
++ pm_runtime_put(counter->parent);
+
+ if (id == RZ_MTU3_32_BIT_CH && test_bit(RZ_MTU3_TMDR3_LWA, &tmdr))
+ return false;
+@@ -165,12 +165,12 @@ static int rz_mtu3_count_read(struct cou
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ *val = rz_mtu3_32bit_ch_read(ch, RZ_MTU3_TCNTLW);
+ else
+ *val = rz_mtu3_16bit_ch_read(ch, RZ_MTU3_TCNT);
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+@@ -187,26 +187,26 @@ static int rz_mtu3_count_write(struct co
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TCNTLW, val);
+ else
+ rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TCNT, val);
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+ }
+
+ static int rz_mtu3_count_function_read_helper(struct rz_mtu3_channel *const ch,
+- struct rz_mtu3_cnt *const priv,
++ struct counter_device *const counter,
+ enum counter_function *function)
+ {
+ u8 timer_mode;
+
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ timer_mode = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TMDR1);
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+
+ switch (timer_mode & RZ_MTU3_TMDR1_PH_CNT_MODE_MASK) {
+ case RZ_MTU3_TMDR1_PH_CNT_MODE_1:
+@@ -240,7 +240,7 @@ static int rz_mtu3_count_function_read(s
+ if (ret)
+ return ret;
+
+- ret = rz_mtu3_count_function_read_helper(ch, priv, function);
++ ret = rz_mtu3_count_function_read_helper(ch, counter, function);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+@@ -279,9 +279,9 @@ static int rz_mtu3_count_function_write(
+ return -EINVAL;
+ }
+
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TMDR1, timer_mode);
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+@@ -300,9 +300,9 @@ static int rz_mtu3_count_direction_read(
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ tsr = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TSR);
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+
+ *direction = (tsr & RZ_MTU3_TSR_TCFD) ?
+ COUNTER_COUNT_DIRECTION_FORWARD : COUNTER_COUNT_DIRECTION_BACKWARD;
+@@ -377,14 +377,14 @@ static int rz_mtu3_count_ceiling_write(s
+ return -EINVAL;
+ }
+
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ if (count->id == RZ_MTU3_32_BIT_CH)
+ rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TGRALW, ceiling);
+ else
+ rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, ceiling);
+
+ rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+@@ -495,7 +495,6 @@ static int rz_mtu3_count_enable_read(str
+ static int rz_mtu3_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
+ {
+- struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret = 0;
+
+@@ -505,14 +504,14 @@ static int rz_mtu3_count_enable_write(st
+ goto exit;
+
+ if (enable) {
+- pm_runtime_get_sync(ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ ret = rz_mtu3_initialize_counter(counter, count->id);
+ if (ret == 0)
+ priv->count_is_enabled[count->id] = true;
+ } else {
+ rz_mtu3_terminate_counter(counter, count->id);
+ priv->count_is_enabled[count->id] = false;
+- pm_runtime_put(ch->dev);
++ pm_runtime_put(counter->parent);
+ }
+
+ exit:
+@@ -544,9 +543,9 @@ static int rz_mtu3_cascade_counts_enable
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(priv->ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+- pm_runtime_put(priv->ch->dev);
++ pm_runtime_put(counter->parent);
+ *cascade_enable = test_bit(RZ_MTU3_TMDR3_LWA, &tmdr);
+ mutex_unlock(&priv->lock);
+
+@@ -563,10 +562,10 @@ static int rz_mtu3_cascade_counts_enable
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(priv->ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3,
+ RZ_MTU3_TMDR3_LWA, cascade_enable);
+- pm_runtime_put(priv->ch->dev);
++ pm_runtime_put(counter->parent);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+@@ -583,9 +582,9 @@ static int rz_mtu3_ext_input_phase_clock
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(priv->ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
+- pm_runtime_put(priv->ch->dev);
++ pm_runtime_put(counter->parent);
+ *ext_input_phase_clock_select = test_bit(RZ_MTU3_TMDR3_PHCKSEL, &tmdr);
+ mutex_unlock(&priv->lock);
+
+@@ -602,11 +601,11 @@ static int rz_mtu3_ext_input_phase_clock
+ if (ret)
+ return ret;
+
+- pm_runtime_get_sync(priv->ch->dev);
++ pm_runtime_get_sync(counter->parent);
+ rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3,
+ RZ_MTU3_TMDR3_PHCKSEL,
+ ext_input_phase_clock_select);
+- pm_runtime_put(priv->ch->dev);
++ pm_runtime_put(counter->parent);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+@@ -644,7 +643,7 @@ static int rz_mtu3_action_read(struct co
+ if (ret)
+ return ret;
+
+- ret = rz_mtu3_count_function_read_helper(ch, priv, &function);
++ ret = rz_mtu3_count_function_read_helper(ch, counter, &function);
+ if (ret) {
+ mutex_unlock(&priv->lock);
+ return ret;
--- /dev/null
+From 67c3f99bed6f422ba343d2b70a2eeeccdfd91bef Mon Sep 17 00:00:00 2001
+From: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+Date: Fri, 30 Jan 2026 14:23:52 +0200
+Subject: counter: rz-mtu3-cnt: prevent counter from being toggled multiple times
+
+From: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+
+commit 67c3f99bed6f422ba343d2b70a2eeeccdfd91bef upstream.
+
+Runtime PM counter is incremented / decremented each time the sysfs
+enable file is written to.
+
+If user writes 0 to the sysfs enable file multiple times, runtime PM
+usage count underflows, generating the following message.
+
+rz-mtu3-counter rz-mtu3-counter.0: Runtime PM usage count underflow!
+
+At the same time, hardware registers end up being accessed with clocks
+off in rz_mtu3_terminate_counter() to disable an already disabled
+channel.
+
+If user writes 1 to the sysfs enable file multiple times, runtime PM
+usage count will be incremented each time, requiring the same number of
+0 writes to get it back to 0.
+
+If user writes 0 to the sysfs enable file while PWM is in progress, PWM
+is stopped without counter being the owner of the underlying MTU3
+channel.
+
+Check against the cached count_is_enabled value and exit if the user
+is trying to set the same enable value.
+
+Cc: stable@vger.kernel.org
+Fixes: 0be8907359df ("counter: Add Renesas RZ/G2L MTU3a counter driver")
+Signed-off-by: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+Link: https://lore.kernel.org/r/20260130122353.2263273-5-cosmin-gabriel.tanislav.xa@renesas.com
+Signed-off-by: William Breathitt Gray <wbg@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/counter/rz-mtu3-cnt.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/counter/rz-mtu3-cnt.c
++++ b/drivers/counter/rz-mtu3-cnt.c
+@@ -499,21 +499,25 @@ static int rz_mtu3_count_enable_write(st
+ struct rz_mtu3_cnt *const priv = counter_priv(counter);
+ int ret = 0;
+
++ mutex_lock(&priv->lock);
++
++ if (priv->count_is_enabled[count->id] == enable)
++ goto exit;
++
+ if (enable) {
+- mutex_lock(&priv->lock);
+ pm_runtime_get_sync(ch->dev);
+ ret = rz_mtu3_initialize_counter(counter, count->id);
+ if (ret == 0)
+ priv->count_is_enabled[count->id] = true;
+- mutex_unlock(&priv->lock);
+ } else {
+- mutex_lock(&priv->lock);
+ rz_mtu3_terminate_counter(counter, count->id);
+ priv->count_is_enabled[count->id] = false;
+ pm_runtime_put(ch->dev);
+- mutex_unlock(&priv->lock);
+ }
+
++exit:
++ mutex_unlock(&priv->lock);
++
+ return ret;
+ }
+
--- /dev/null
+From 6dcf9d0064ce2f3e3dfe5755f98b93abe6a98e1e Mon Sep 17 00:00:00 2001
+From: Guangshuo Li <lgs201920130244@gmail.com>
+Date: Wed, 1 Apr 2026 10:45:35 +0800
+Subject: cpufreq: governor: fix double free in cpufreq_dbs_governor_init() error path
+
+From: Guangshuo Li <lgs201920130244@gmail.com>
+
+commit 6dcf9d0064ce2f3e3dfe5755f98b93abe6a98e1e upstream.
+
+When kobject_init_and_add() fails, cpufreq_dbs_governor_init() calls
+kobject_put(&dbs_data->attr_set.kobj).
+
+The kobject release callback cpufreq_dbs_data_release() calls
+gov->exit(dbs_data) and kfree(dbs_data), but the current error path
+then calls gov->exit(dbs_data) and kfree(dbs_data) again, causing a
+double free.
+
+Keep the direct kfree(dbs_data) for the gov->init() failure path, but
+after kobject_init_and_add() has been called, let kobject_put() handle
+the cleanup through cpufreq_dbs_data_release().
+
+Fixes: 4ebe36c94aed ("cpufreq: Fix kobject memleak")
+Signed-off-by: Guangshuo Li <lgs201920130244@gmail.com>
+Reviewed-by: Zhongqiu Han <zhongqiu.han@oss.qualcomm.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20260401024535.1395801-1-lgs201920130244@gmail.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cpufreq_governor.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -468,13 +468,13 @@ int cpufreq_dbs_governor_init(struct cpu
+ /* Failure, so roll back. */
+ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+
+- kobject_put(&dbs_data->attr_set.kobj);
+-
+ policy->governor_data = NULL;
+
+ if (!have_governor_per_policy())
+ gov->gdbs_data = NULL;
+- gov->exit(dbs_data);
++
++ kobject_put(&dbs_data->attr_set.kobj);
++ goto free_policy_dbs_info;
+
+ free_dbs_data:
+ kfree(dbs_data);
--- /dev/null
+From 4b56770d345524fc2acc143a2b85539cf7d74bc1 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Mon, 16 Mar 2026 13:21:19 -0700
+Subject: crypto: tegra - Add missing CRYPTO_ALG_ASYNC
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 4b56770d345524fc2acc143a2b85539cf7d74bc1 upstream.
+
+The tegra crypto driver failed to set the CRYPTO_ALG_ASYNC on its
+asynchronous algorithms, causing the crypto API to select them for users
+that request only synchronous algorithms. This causes crashes (at
+least). Fix this by adding the flag like what the other drivers do.
+Also remove the unnecessary CRYPTO_ALG_TYPE_* flags, since those just
+get ignored and overridden by the registration function anyway.
+
+Reported-by: Zorro Lang <zlang@redhat.com>
+Closes: https://lore.kernel.org/r/20260314080937.pghb4aa7d4je3mhh@dell-per750-06-vm-08.rhts.eng.pek2.redhat.com
+Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver")
+Cc: stable@vger.kernel.org
+Cc: Akhil R <akhilrajeev@nvidia.com>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/tegra/tegra-se-aes.c | 11 +++++++----
+ drivers/crypto/tegra/tegra-se-hash.c | 30 +++++++++++++++++-------------
+ 2 files changed, 24 insertions(+), 17 deletions(-)
+
+--- a/drivers/crypto/tegra/tegra-se-aes.c
++++ b/drivers/crypto/tegra/tegra-se-aes.c
+@@ -529,7 +529,7 @@ static struct tegra_se_alg tegra_aes_alg
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 500,
+- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+@@ -550,7 +550,7 @@ static struct tegra_se_alg tegra_aes_alg
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 500,
+- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+@@ -572,7 +572,7 @@ static struct tegra_se_alg tegra_aes_alg
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-tegra",
+ .cra_priority = 500,
+- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+@@ -594,6 +594,7 @@ static struct tegra_se_alg tegra_aes_alg
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-tegra",
+ .cra_priority = 500,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = (__alignof__(u64) - 1),
+@@ -1922,6 +1923,7 @@ static struct tegra_se_alg tegra_aead_al
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-tegra",
+ .cra_priority = 500,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aead_ctx),
+ .cra_alignmask = 0xf,
+@@ -1944,6 +1946,7 @@ static struct tegra_se_alg tegra_aead_al
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-tegra",
+ .cra_priority = 500,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aead_ctx),
+ .cra_alignmask = 0xf,
+@@ -1971,7 +1974,7 @@ static struct tegra_se_alg tegra_cmac_al
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "tegra-se-cmac",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_cmac_ctx),
+ .cra_alignmask = 0,
+--- a/drivers/crypto/tegra/tegra-se-hash.c
++++ b/drivers/crypto/tegra/tegra-se-hash.c
+@@ -761,7 +761,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha1",
+ .cra_driver_name = "tegra-se-sha1",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -786,7 +786,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha224",
+ .cra_driver_name = "tegra-se-sha224",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -811,7 +811,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha256",
+ .cra_driver_name = "tegra-se-sha256",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -836,7 +836,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha384",
+ .cra_driver_name = "tegra-se-sha384",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -861,7 +861,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha512",
+ .cra_driver_name = "tegra-se-sha512",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -886,7 +886,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha3-224",
+ .cra_driver_name = "tegra-se-sha3-224",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -911,7 +911,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha3-256",
+ .cra_driver_name = "tegra-se-sha3-256",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -936,7 +936,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha3-384",
+ .cra_driver_name = "tegra-se-sha3-384",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -961,7 +961,7 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "sha3-512",
+ .cra_driver_name = "tegra-se-sha3-512",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
++ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -988,7 +988,8 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "tegra-se-hmac-sha224",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -1015,7 +1016,8 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "tegra-se-hmac-sha256",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -1042,7 +1044,8 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "tegra-se-hmac-sha384",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+@@ -1069,7 +1072,8 @@ static struct tegra_se_alg tegra_hash_al
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "tegra-se-hmac-sha512",
+ .cra_priority = 300,
+- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
--- /dev/null
+From 16fdabe143fce2cbf89139677728e17e21b46c28 Mon Sep 17 00:00:00 2001
+From: Tzung-Bi Shih <tzungbi@kernel.org>
+Date: Thu, 5 Feb 2026 09:28:40 +0000
+Subject: gpio: Fix resource leaks on errors in gpiochip_add_data_with_key()
+
+From: Tzung-Bi Shih <tzungbi@kernel.org>
+
+commit 16fdabe143fce2cbf89139677728e17e21b46c28 upstream.
+
+Since commit aab5c6f20023 ("gpio: set device type for GPIO chips"),
+`gdev->dev.release` is unset. As a result, the reference count to
+`gdev->dev` isn't dropped on the error handling paths.
+
+Drop the reference on errors.
+
+Also reorder the instructions to make the error handling simpler.
+Now gpiochip_add_data_with_key() roughly looks like:
+
+ >>> Some memory allocation. Go to ERR ZONE 1 on errors.
+ >>> device_initialize().
+
+ gpiodev_release() takes over the responsibility for freeing the
+ resources of `gdev->dev`. The subsequent error handling paths
+ shouldn't go through ERR ZONE 1 again which leads to double free.
+
+ >>> Some initialization mainly on `gdev`.
+ >>> The rest of initialization. Go to ERR ZONE 2 on errors.
+ >>> Chip registration success and exit.
+
+ >>> ERR ZONE 2. gpio_device_put() and exit.
+ >>> ERR ZONE 1.
+
+Cc: stable@vger.kernel.org
+Fixes: aab5c6f20023 ("gpio: set device type for GPIO chips")
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Tzung-Bi Shih <tzungbi@kernel.org>
+Link: https://patch.msgid.link/20260205092840.2574840-1-tzungbi@kernel.org
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib.c | 101 +++++++++++++++++++++++--------------------------
+ 1 file changed, 48 insertions(+), 53 deletions(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -882,13 +882,15 @@ static const struct device_type gpio_dev
+ #define gcdev_unregister(gdev) device_del(&(gdev)->dev)
+ #endif
+
++/*
++ * An initial reference count has been held in gpiochip_add_data_with_key().
++ * The caller should drop the reference via gpio_device_put() on errors.
++ */
+ static int gpiochip_setup_dev(struct gpio_device *gdev)
+ {
+ struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev);
+ int ret;
+
+- device_initialize(&gdev->dev);
+-
+ /*
+ * If fwnode doesn't belong to another device, it's safe to clear its
+ * initialized flag.
+@@ -954,9 +956,11 @@ static void gpiochip_setup_devs(void)
+ list_for_each_entry_srcu(gdev, &gpio_devices, list,
+ srcu_read_lock_held(&gpio_devices_srcu)) {
+ ret = gpiochip_setup_dev(gdev);
+- if (ret)
++ if (ret) {
++ gpio_device_put(gdev);
+ dev_err(&gdev->dev,
+ "Failed to initialize gpio device (%d)\n", ret);
++ }
+ }
+ }
+
+@@ -1037,71 +1041,72 @@ int gpiochip_add_data_with_key(struct gp
+ int base = 0;
+ int ret;
+
+- /*
+- * First: allocate and populate the internal stat container, and
+- * set up the struct device.
+- */
+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+-
+- gdev->dev.type = &gpio_dev_type;
+- gdev->dev.bus = &gpio_bus_type;
+- gdev->dev.parent = gc->parent;
+- rcu_assign_pointer(gdev->chip, gc);
+-
+ gc->gpiodev = gdev;
+ gpiochip_set_data(gc, data);
+
+- device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
+-
+ ret = ida_alloc(&gpio_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_gdev;
+ gdev->id = ret;
+
+- ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
++ ret = init_srcu_struct(&gdev->srcu);
+ if (ret)
+ goto err_free_ida;
++ rcu_assign_pointer(gdev->chip, gc);
+
+- if (gc->parent && gc->parent->driver)
+- gdev->owner = gc->parent->driver->owner;
+- else if (gc->owner)
+- /* TODO: remove chip->owner */
+- gdev->owner = gc->owner;
+- else
+- gdev->owner = THIS_MODULE;
++ ret = init_srcu_struct(&gdev->desc_srcu);
++ if (ret)
++ goto err_cleanup_gdev_srcu;
++
++ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
++ if (ret)
++ goto err_cleanup_desc_srcu;
++
++ device_initialize(&gdev->dev);
++ /*
++ * After this point any allocated resources to `gdev` will be
++ * free():ed by gpiodev_release(). If you add new resources
++ * then make sure they get free():ed there.
++ */
++ gdev->dev.type = &gpio_dev_type;
++ gdev->dev.bus = &gpio_bus_type;
++ gdev->dev.parent = gc->parent;
++ device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
+
+ ret = gpiochip_get_ngpios(gc, &gdev->dev);
+ if (ret)
+- goto err_free_dev_name;
++ goto err_put_device;
++ gdev->ngpio = gc->ngpio;
+
+ gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL);
+ if (!gdev->descs) {
+ ret = -ENOMEM;
+- goto err_free_dev_name;
++ goto err_put_device;
+ }
+
+ gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL);
+ if (!gdev->label) {
+ ret = -ENOMEM;
+- goto err_free_descs;
++ goto err_put_device;
+ }
+
+- gdev->ngpio = gc->ngpio;
+ gdev->can_sleep = gc->can_sleep;
+-
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
+-
+- ret = init_srcu_struct(&gdev->srcu);
+- if (ret)
+- goto err_free_label;
+-
+- ret = init_srcu_struct(&gdev->desc_srcu);
+- if (ret)
+- goto err_cleanup_gdev_srcu;
++#ifdef CONFIG_PINCTRL
++ INIT_LIST_HEAD(&gdev->pin_ranges);
++#endif
++ if (gc->parent && gc->parent->driver)
++ gdev->owner = gc->parent->driver->owner;
++ else if (gc->owner)
++ /* TODO: remove chip->owner */
++ gdev->owner = gc->owner;
++ else
++ gdev->owner = THIS_MODULE;
+
+ scoped_guard(mutex, &gpio_devices_lock) {
+ /*
+@@ -1117,7 +1122,7 @@ int gpiochip_add_data_with_key(struct gp
+ if (base < 0) {
+ ret = base;
+ base = 0;
+- goto err_cleanup_desc_srcu;
++ goto err_put_device;
+ }
+
+ /*
+@@ -1137,14 +1142,10 @@ int gpiochip_add_data_with_key(struct gp
+ ret = gpiodev_add_to_list_unlocked(gdev);
+ if (ret) {
+ gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n");
+- goto err_cleanup_desc_srcu;
++ goto err_put_device;
+ }
+ }
+
+-#ifdef CONFIG_PINCTRL
+- INIT_LIST_HEAD(&gdev->pin_ranges);
+-#endif
+-
+ if (gc->names)
+ gpiochip_set_desc_names(gc);
+
+@@ -1231,25 +1232,19 @@ err_remove_from_list:
+ scoped_guard(mutex, &gpio_devices_lock)
+ list_del_rcu(&gdev->list);
+ synchronize_srcu(&gpio_devices_srcu);
+- if (gdev->dev.release) {
+- /* release() has been registered by gpiochip_setup_dev() */
+- gpio_device_put(gdev);
+- goto err_print_message;
+- }
++err_put_device:
++ gpio_device_put(gdev);
++ goto err_print_message;
++
+ err_cleanup_desc_srcu:
+ cleanup_srcu_struct(&gdev->desc_srcu);
+ err_cleanup_gdev_srcu:
+ cleanup_srcu_struct(&gdev->srcu);
+-err_free_label:
+- kfree_const(gdev->label);
+-err_free_descs:
+- kfree(gdev->descs);
+-err_free_dev_name:
+- kfree(dev_name(&gdev->dev));
+ err_free_ida:
+ ida_free(&gpio_ida, gdev->id);
+ err_free_gdev:
+ kfree(gdev);
++
+ err_print_message:
+ /* failures here can mean systems won't boot... */
+ if (ret != -EPROBE_DEFER) {
--- /dev/null
+From c720fb57d56274213d027b3c5ab99080cf62a306 Mon Sep 17 00:00:00 2001
+From: Shenwei Wang <shenwei.wang@nxp.com>
+Date: Tue, 24 Mar 2026 14:21:29 -0500
+Subject: gpio: mxc: map Both Edge pad wakeup to Rising Edge
+
+From: Shenwei Wang <shenwei.wang@nxp.com>
+
+commit c720fb57d56274213d027b3c5ab99080cf62a306 upstream.
+
+Suspend may fail on i.MX8QM when Falling Edge is used as a pad wakeup
+trigger due to a hardware bug in the detection logic. Since the hardware
+does not support Both Edge wakeup, remap requests for Both Edge to Rising
+Edge by default to avoid hitting this issue.
+
+A warning is emitted when Falling Edge is selected on i.MX8QM.
+
+Fixes: f60c9eac54af ("gpio: mxc: enable pad wakeup on i.MX8x platforms")
+cc: stable@vger.kernel.org
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
+Link: https://patch.msgid.link/20260324192129.2797237-1-shenwei.wang@nxp.com
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-mxc.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -584,12 +584,13 @@ static bool mxc_gpio_set_pad_wakeup(stru
+ unsigned long config;
+ bool ret = false;
+ int i, type;
++ bool is_imx8qm = of_device_is_compatible(port->dev->of_node, "fsl,imx8qm-gpio");
+
+ static const u32 pad_type_map[] = {
+ IMX_SCU_WAKEUP_OFF, /* 0 */
+ IMX_SCU_WAKEUP_RISE_EDGE, /* IRQ_TYPE_EDGE_RISING */
+ IMX_SCU_WAKEUP_FALL_EDGE, /* IRQ_TYPE_EDGE_FALLING */
+- IMX_SCU_WAKEUP_FALL_EDGE, /* IRQ_TYPE_EDGE_BOTH */
++ IMX_SCU_WAKEUP_RISE_EDGE, /* IRQ_TYPE_EDGE_BOTH */
+ IMX_SCU_WAKEUP_HIGH_LVL, /* IRQ_TYPE_LEVEL_HIGH */
+ IMX_SCU_WAKEUP_OFF, /* 5 */
+ IMX_SCU_WAKEUP_OFF, /* 6 */
+@@ -604,6 +605,13 @@ static bool mxc_gpio_set_pad_wakeup(stru
+ config = pad_type_map[type];
+ else
+ config = IMX_SCU_WAKEUP_OFF;
++
++ if (is_imx8qm && config == IMX_SCU_WAKEUP_FALL_EDGE) {
++ dev_warn_once(port->dev,
++ "No falling-edge support for wakeup on i.MX8QM\n");
++ config = IMX_SCU_WAKEUP_OFF;
++ }
++
+ ret |= mxc_gpio_generic_config(port, i, config);
+ }
+ }
--- /dev/null
+From 6a502776f4a4f80fb839b22f12aeaf0267fca344 Mon Sep 17 00:00:00 2001
+From: Xingjing Deng <micro6947@gmail.com>
+Date: Sat, 31 Jan 2026 14:55:39 +0800
+Subject: misc: fastrpc: check qcom_scm_assign_mem() return in rpmsg_probe
+
+From: Xingjing Deng <micro6947@gmail.com>
+
+commit 6a502776f4a4f80fb839b22f12aeaf0267fca344 upstream.
+
+In the SDSP probe path, qcom_scm_assign_mem() is used to assign the
+reserved memory to the configured VMIDs, but its return value was not checked.
+
+Fail the probe if the SCM call fails to avoid continuing with an
+unexpected/incorrect memory permission configuration.
+
+This issue was found by an in-house analysis workflow that extracts AST-based
+information and runs static checks, with LLM assistance for triage, and was
+confirmed by manual code review.
+No hardware testing was performed.
+
+Fixes: c3c0363bc72d4 ("misc: fastrpc: support complete DMA pool access to the DSP")
+Cc: stable@vger.kernel.org # 6.11-rc1
+Signed-off-by: Xingjing Deng <xjdeng@buaa.edu.cn>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260131065539.2124047-1-xjdeng@buaa.edu.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -2338,8 +2338,10 @@ static int fastrpc_rpmsg_probe(struct rp
+ if (!err) {
+ src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+- qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms,
++ err = qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms,
+ data->vmperms, data->vmcount);
++ if (err)
++ goto err_free_data;
+ }
+
+ }
--- /dev/null
+From ba2c83167b215da30fa2aae56b140198cf8d8408 Mon Sep 17 00:00:00 2001
+From: Xingjing Deng <micro6947@gmail.com>
+Date: Fri, 30 Jan 2026 07:41:40 +0800
+Subject: misc: fastrpc: possible double-free of cctx->remote_heap
+
+From: Xingjing Deng <micro6947@gmail.com>
+
+commit ba2c83167b215da30fa2aae56b140198cf8d8408 upstream.
+
+fastrpc_init_create_static_process() may free cctx->remote_heap on the
+err_map path but does not clear the pointer. Later, fastrpc_rpmsg_remove()
+frees cctx->remote_heap again if it is non-NULL, which can lead to a
+double-free if the INIT_CREATE_STATIC ioctl hits the error path and the rpmsg
+device is subsequently removed/unbound.
+Clear cctx->remote_heap after freeing it in the error path to prevent the
+later cleanup from freeing it again.
+
+This issue was found by an in-house analysis workflow that extracts AST-based
+information and runs static checks, with LLM assistance for triage, and was
+confirmed by manual code review.
+No hardware testing was performed.
+
+Fixes: 0871561055e66 ("misc: fastrpc: Add support for audiopd")
+Cc: stable@vger.kernel.org # 6.2+
+Signed-off-by: Xingjing Deng <xjdeng@buaa.edu.cn>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260129234140.410983-1-xjdeng@buaa.edu.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1370,6 +1370,7 @@ err_invoke:
+ }
+ err_map:
+ fastrpc_buf_free(fl->cctx->remote_heap);
++ fl->cctx->remote_heap = NULL;
+ err_name:
+ kfree(name);
+ err:
--- /dev/null
+From 976ff48c2ac6e6b25b01428c9d7997bcd0fb2949 Mon Sep 17 00:00:00 2001
+From: "Sven Eckelmann (Plasma Cloud)" <se@simonwunderlich.de>
+Date: Tue, 24 Mar 2026 09:36:01 +0100
+Subject: net: ethernet: mtk_ppe: avoid NULL deref when gmac0 is disabled
+
+From: Sven Eckelmann (Plasma Cloud) <se@simonwunderlich.de>
+
+commit 976ff48c2ac6e6b25b01428c9d7997bcd0fb2949 upstream.
+
+If the gmac0 is disabled, the precheck for a valid ingress device will
+cause a NULL pointer deref and crash the system. This happens because
+eth->netdev[0] will be NULL but the code will directly try to access
+netdev_ops.
+
+Instead of just checking for the first net_device, it must be checked if
+any of the mtk_eth net_devices is matching the netdev_ops of the ingress
+device.
+
+Cc: stable@vger.kernel.org
+Fixes: 73cfd947dbdb ("net: ethernet: mtk_eth_soc: ppe: prevent ppe update for non-mtk devices")
+Signed-off-by: Sven Eckelmann (Plasma Cloud) <se@simonwunderlich.de>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260324-wed-crash-gmac0-disabled-v1-1-3bc388aee565@simonwunderlich.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -244,6 +244,25 @@ out:
+ return 0;
+ }
+
++static bool
++mtk_flow_is_valid_idev(const struct mtk_eth *eth, const struct net_device *idev)
++{
++ size_t i;
++
++ if (!idev)
++ return false;
++
++ for (i = 0; i < ARRAY_SIZE(eth->netdev); i++) {
++ if (!eth->netdev[i])
++ continue;
++
++ if (idev->netdev_ops == eth->netdev[i]->netdev_ops)
++ return true;
++ }
++
++ return false;
++}
++
+ static int
+ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ int ppe_index)
+@@ -270,7 +289,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ flow_rule_match_meta(rule, &match);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
+- if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
++ if (mtk_flow_is_valid_idev(eth, idev)) {
+ struct mtk_mac *mac = netdev_priv(idev);
+
+ if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
--- /dev/null
+From c0fd0fe745f5e8c568d898cd1513d0083e46204a Mon Sep 17 00:00:00 2001
+From: Yufan Chen <yufan.chen@linux.dev>
+Date: Sun, 29 Mar 2026 00:32:57 +0800
+Subject: net: ftgmac100: fix ring allocation unwind on open failure
+
+From: Yufan Chen <yufan.chen@linux.dev>
+
+commit c0fd0fe745f5e8c568d898cd1513d0083e46204a upstream.
+
+ftgmac100_alloc_rings() allocates rx_skbs, tx_skbs, rxdes, txdes, and
+rx_scratch in stages. On intermediate failures it returned -ENOMEM
+directly, leaking resources allocated earlier in the function.
+
+Rework the failure path to use staged local unwind labels and free
+allocated resources in reverse order before returning -ENOMEM. This
+matches common netdev allocation cleanup style.
+
+Fixes: d72e01a0430f ("ftgmac100: Use a scratch buffer for failed RX allocations")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yufan Chen <yufan.chen@linux.dev>
+Link: https://patch.msgid.link/20260328163257.60836-1-yufan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/faraday/ftgmac100.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -964,19 +964,19 @@ static int ftgmac100_alloc_rings(struct
+ priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
+ GFP_KERNEL);
+ if (!priv->tx_skbs)
+- return -ENOMEM;
++ goto err_free_rx_skbs;
+
+ /* Allocate descriptors */
+ priv->rxdes = dma_alloc_coherent(priv->dev,
+ MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
+ &priv->rxdes_dma, GFP_KERNEL);
+ if (!priv->rxdes)
+- return -ENOMEM;
++ goto err_free_tx_skbs;
+ priv->txdes = dma_alloc_coherent(priv->dev,
+ MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
+ &priv->txdes_dma, GFP_KERNEL);
+ if (!priv->txdes)
+- return -ENOMEM;
++ goto err_free_rxdes;
+
+ /* Allocate scratch packet buffer */
+ priv->rx_scratch = dma_alloc_coherent(priv->dev,
+@@ -984,9 +984,29 @@ static int ftgmac100_alloc_rings(struct
+ &priv->rx_scratch_dma,
+ GFP_KERNEL);
+ if (!priv->rx_scratch)
+- return -ENOMEM;
++ goto err_free_txdes;
+
+ return 0;
++
++err_free_txdes:
++ dma_free_coherent(priv->dev,
++ MAX_TX_QUEUE_ENTRIES *
++ sizeof(struct ftgmac100_txdes),
++ priv->txdes, priv->txdes_dma);
++ priv->txdes = NULL;
++err_free_rxdes:
++ dma_free_coherent(priv->dev,
++ MAX_RX_QUEUE_ENTRIES *
++ sizeof(struct ftgmac100_rxdes),
++ priv->rxdes, priv->rxdes_dma);
++ priv->rxdes = NULL;
++err_free_tx_skbs:
++ kfree(priv->tx_skbs);
++ priv->tx_skbs = NULL;
++err_free_rx_skbs:
++ kfree(priv->rx_skbs);
++ priv->rx_skbs = NULL;
++ return -ENOMEM;
+ }
+
+ static void ftgmac100_init_rings(struct ftgmac100 *priv)
--- /dev/null
+From 0c4a59df370bea245695c00aaae6ae75747139bd Mon Sep 17 00:00:00 2001
+From: Changwoo Min <changwoo@igalia.com>
+Date: Thu, 2 Apr 2026 11:31:50 +0900
+Subject: sched_ext: Fix is_bpf_migration_disabled() false negative on non-PREEMPT_RCU
+
+From: Changwoo Min <changwoo@igalia.com>
+
+commit 0c4a59df370bea245695c00aaae6ae75747139bd upstream.
+
+Since commit 8e4f0b1ebcf2 ("bpf: use rcu_read_lock_dont_migrate() for
+trampoline.c"), the BPF prolog (__bpf_prog_enter) calls migrate_disable()
+only when CONFIG_PREEMPT_RCU is enabled, via rcu_read_lock_dont_migrate().
+Without CONFIG_PREEMPT_RCU, the prolog never touches migration_disabled,
+so migration_disabled == 1 always means the task is truly
+migration-disabled regardless of whether it is the current task.
+
+The old unconditional p == current check was a false negative in this
+case, potentially allowing a migration-disabled task to be dispatched to
+a remote CPU and triggering scx_error in task_can_run_on_remote_rq().
+
+Only apply the p == current disambiguation when CONFIG_PREEMPT_RCU is
+enabled, where the ambiguity with the BPF prolog still exists.
+
+Fixes: 8e4f0b1ebcf2 ("bpf: use rcu_read_lock_dont_migrate() for trampoline.c")
+Cc: stable@vger.kernel.org # v6.18+
+Link: https://lore.kernel.org/lkml/20250821090609.42508-8-dongml2@chinatelecom.cn/
+Signed-off-by: Changwoo Min <changwoo@igalia.com>
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext_idle.c | 31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+--- a/kernel/sched/ext_idle.c
++++ b/kernel/sched/ext_idle.c
+@@ -861,25 +861,32 @@ static bool check_builtin_idle_enabled(s
+ * code.
+ *
+ * We can't simply check whether @p->migration_disabled is set in a
+- * sched_ext callback, because migration is always disabled for the current
+- * task while running BPF code.
++ * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable
++ * migration for the current task while running BPF code.
+ *
+- * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively
+- * disable and re-enable migration. For this reason, the current task
+- * inside a sched_ext callback is always a migration-disabled task.
++ * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU
++ * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for
++ * the current task is ambiguous only in that case: it could be from the BPF
++ * prolog rather than a real migrate_disable() call.
+ *
+- * Therefore, when @p->migration_disabled == 1, check whether @p is the
+- * current task or not: if it is, then migration was not disabled before
+- * entering the callback, otherwise migration was disabled.
++ * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(),
++ * so migration_disabled == 1 always means the task is truly
++ * migration-disabled.
++ *
++ * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled,
++ * check whether @p is the current task or not: if it is, then migration was
++ * not disabled before entering the callback, otherwise migration was disabled.
+ *
+ * Returns true if @p is migration-disabled, false otherwise.
+ */
+ static bool is_bpf_migration_disabled(const struct task_struct *p)
+ {
+- if (p->migration_disabled == 1)
+- return p != current;
+- else
+- return p->migration_disabled;
++ if (p->migration_disabled == 1) {
++ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
++ return p != current;
++ return true;
++ }
++ return p->migration_disabled;
+ }
+
+ static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
nvmem-zynqmp_nvmem-fix-buffer-size-in-dma-and-memcpy.patch
netfilter-ipset-drop-logically-empty-buckets-in-mtype_del.patch
gpib-fix-fluke-driver-s390-compile-issue.patch
+vt-discard-stale-unicode-buffer-on-alt-screen-exit-after-resize.patch
+vt-resize-saved-unicode-buffer-on-alt-screen-exit-after-resize.patch
+counter-rz-mtu3-cnt-prevent-counter-from-being-toggled-multiple-times.patch
+counter-rz-mtu3-cnt-do-not-use-struct-rz_mtu3_channel-s-dev-member.patch
+crypto-tegra-add-missing-crypto_alg_async.patch
+vxlan-validate-nd-option-lengths-in-vxlan_na_create.patch
+net-ftgmac100-fix-ring-allocation-unwind-on-open-failure.patch
+net-ethernet-mtk_ppe-avoid-null-deref-when-gmac0-is-disabled.patch
+virtio_net-clamp-rss_max_key_size-to-netdev_rss_key_len.patch
+cpufreq-governor-fix-double-free-in-cpufreq_dbs_governor_init-error-path.patch
+sched_ext-fix-is_bpf_migration_disabled-false-negative-on-non-preempt_rcu.patch
+gpio-mxc-map-both-edge-pad-wakeup-to-rising-edge.patch
+gpio-fix-resource-leaks-on-errors-in-gpiochip_add_data_with_key.patch
+thermal-core-address-thermal-zone-removal-races-with-resume.patch
+thermal-core-fix-thermal-zone-device-registration-error-path.patch
+misc-fastrpc-possible-double-free-of-cctx-remote_heap.patch
+misc-fastrpc-check-qcom_scm_assign_mem-return-in-rpmsg_probe.patch
+usb-typec-thunderbolt-set-enter_vdo-during-initialization.patch
+thunderbolt-fix-property-read-in-nhi_wake_supported.patch
+usb-dummy-hcd-fix-locking-synchronization-error.patch
+usb-dummy-hcd-fix-interrupt-synchronization-error.patch
+usb-gadget-dummy_hcd-fix-premature-urb-completion-when-zlp-follows-partial-transfer.patch
+usb-typec-ucsi-validate-connector-number-in-ucsi_notify_common.patch
--- /dev/null
+From 45b859b0728267a6199ee5002d62e6c6f3e8c89d Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Fri, 27 Mar 2026 10:49:52 +0100
+Subject: thermal: core: Address thermal zone removal races with resume
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 45b859b0728267a6199ee5002d62e6c6f3e8c89d upstream.
+
+Since thermal_zone_pm_complete() and thermal_zone_device_resume()
+re-initialize the poll_queue delayed work for the given thermal zone,
+the cancel_delayed_work_sync() in thermal_zone_device_unregister()
+may miss some already running work items and the thermal zone may
+be freed prematurely [1].
+
+There are two failing scenarios that both start with
+running thermal_pm_notify_complete() right before invoking
+thermal_zone_device_unregister() for one of the thermal zones.
+
+In the first scenario, there is a work item already running for
+the given thermal zone when thermal_pm_notify_complete() calls
+thermal_zone_pm_complete() for that thermal zone and it continues to
+run when thermal_zone_device_unregister() starts. Since the poll_queue
+delayed work has been re-initialized by thermal_pm_notify_complete(), the
+running work item will be missed by the cancel_delayed_work_sync() in
+thermal_zone_device_unregister() and if it continues to run past the
+freeing of the thermal zone object, a use-after-free will occur.
+
+In the second scenario, thermal_zone_device_resume() queued up by
+thermal_pm_notify_complete() runs right after the thermal_zone_exit()
+called by thermal_zone_device_unregister() has returned. The poll_queue
+delayed work is re-initialized by it before cancel_delayed_work_sync() is
+called by thermal_zone_device_unregister(), so it may continue to run
+after the freeing of the thermal zone object, which also leads to a
+use-after-free.
+
+Address the first failing scenario by ensuring that no thermal work
+items will be running when thermal_pm_notify_complete() is called.
+For this purpose, first move the cancel_delayed_work() call from
+thermal_zone_pm_complete() to thermal_zone_pm_prepare() to prevent
+new work from entering the workqueue going forward. Next, switch
+over to using a dedicated workqueue for thermal events and update
+the code in thermal_pm_notify() to flush that workqueue after
+thermal_pm_notify_prepare() has returned which will take care of
+all leftover thermal work already on the workqueue (that leftover
+work would do nothing useful anyway because all of the thermal zones
+have been flagged as suspended).
+
+The second failing scenario is addressed by adding a tz->state check
+to thermal_zone_device_resume() to prevent it from re-initializing
+the poll_queue delayed work if the thermal zone is going away.
+
+Note that the above changes will also facilitate relocating the suspend
+and resume of thermal zones closer to the suspend and resume of devices,
+respectively.
+
+Fixes: 5a5efdaffda5 ("thermal: core: Resume thermal zones asynchronously")
+Reported-by: syzbot+3b3852c6031d0f30dfaf@syzkaller.appspotmail.com
+Closes: https://syzbot.org/bug?extid=3b3852c6031d0f30dfaf
+Reported-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Closes: https://lore.kernel.org/linux-pm/20260324-thermal-core-uaf-init_delayed_work-v1-1-6611ae76a8a1@igalia.com/ [1]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Tested-by: Mauricio Faria de Oliveira <mfo@igalia.com>
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/6267615.lOV4Wx5bFT@rafael.j.wysocki
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/thermal_core.c | 31 ++++++++++++++++++++++++++-----
+ 1 file changed, 26 insertions(+), 5 deletions(-)
+
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -41,6 +41,8 @@ static struct thermal_governor *def_gove
+
+ static bool thermal_pm_suspended;
+
++static struct workqueue_struct *thermal_wq __ro_after_init;
++
+ /*
+ * Governor section: set of functions to handle thermal governors
+ *
+@@ -313,7 +315,7 @@ static void thermal_zone_device_set_poll
+ if (delay > HZ)
+ delay = round_jiffies_relative(delay);
+
+- mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay);
++ mod_delayed_work(thermal_wq, &tz->poll_queue, delay);
+ }
+
+ static void thermal_zone_recheck(struct thermal_zone_device *tz, int error)
+@@ -1781,6 +1783,10 @@ static void thermal_zone_device_resume(s
+
+ guard(thermal_zone)(tz);
+
++ /* If the thermal zone is going away, there's nothing to do. */
++ if (tz->state & TZ_STATE_FLAG_EXIT)
++ return;
++
+ tz->state &= ~(TZ_STATE_FLAG_SUSPENDED | TZ_STATE_FLAG_RESUMING);
+
+ thermal_debug_tz_resume(tz);
+@@ -1807,6 +1813,9 @@ static void thermal_zone_pm_prepare(stru
+ }
+
+ tz->state |= TZ_STATE_FLAG_SUSPENDED;
++
++ /* Prevent new work from getting to the workqueue subsequently. */
++ cancel_delayed_work(&tz->poll_queue);
+ }
+
+ static void thermal_pm_notify_prepare(void)
+@@ -1825,8 +1834,6 @@ static void thermal_zone_pm_complete(str
+ {
+ guard(thermal_zone)(tz);
+
+- cancel_delayed_work(&tz->poll_queue);
+-
+ reinit_completion(&tz->resume);
+ tz->state |= TZ_STATE_FLAG_RESUMING;
+
+@@ -1836,7 +1843,7 @@ static void thermal_zone_pm_complete(str
+ */
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume);
+ /* Queue up the work without a delay. */
+- mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, 0);
++ mod_delayed_work(thermal_wq, &tz->poll_queue, 0);
+ }
+
+ static void thermal_pm_notify_complete(void)
+@@ -1859,6 +1866,11 @@ static int thermal_pm_notify(struct noti
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ thermal_pm_notify_prepare();
++ /*
++ * Allow any leftover thermal work items already on the
++ * workqueue to complete so they don't get in the way later.
++ */
++ flush_workqueue(thermal_wq);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+@@ -1891,9 +1903,16 @@ static int __init thermal_init(void)
+ if (result)
+ goto error;
+
++ thermal_wq = alloc_workqueue("thermal_events",
++ WQ_FREEZABLE | WQ_POWER_EFFICIENT | WQ_PERCPU, 0);
++ if (!thermal_wq) {
++ result = -ENOMEM;
++ goto unregister_netlink;
++ }
++
+ result = thermal_register_governors();
+ if (result)
+- goto unregister_netlink;
++ goto destroy_workqueue;
+
+ thermal_class = kzalloc(sizeof(*thermal_class), GFP_KERNEL);
+ if (!thermal_class) {
+@@ -1920,6 +1939,8 @@ static int __init thermal_init(void)
+
+ unregister_governors:
+ thermal_unregister_governors();
++destroy_workqueue:
++ destroy_workqueue(thermal_wq);
+ unregister_netlink:
+ thermal_netlink_exit();
+ error:
--- /dev/null
+From 9e07e3b81807edd356e1f794cffa00a428eff443 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 1 Apr 2026 16:33:53 +0200
+Subject: thermal: core: Fix thermal zone device registration error path
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 9e07e3b81807edd356e1f794cffa00a428eff443 upstream.
+
+If thermal_zone_device_register_with_trips() fails after registering
+a thermal zone device, it needs to wait for the tz->removal completion
+like thermal_zone_device_unregister(), in case user space has managed
+to take a reference to the thermal zone device's kobject, in which case
+thermal_release() may not be called by the error path itself and tz may
+be freed prematurely.
+
+Add the missing wait_for_completion() call to the thermal zone device
+registration error path.
+
+Fixes: 04e6ccfc93c5 ("thermal: core: Fix NULL pointer dereference in zone registration error path")
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: All applicable <stable@vger.kernel.org>
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+Tested-by: Lukasz Luba <lukasz.luba@arm.com>
+Link: https://patch.msgid.link/2849815.mvXUDI8C0e@rafael.j.wysocki
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/thermal_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1638,6 +1638,7 @@ unregister:
+ device_del(&tz->device);
+ release_device:
+ put_device(&tz->device);
++ wait_for_completion(&tz->removal);
+ remove_id:
+ ida_free(&thermal_tz_ida, id);
+ free_tzp:
--- /dev/null
+From 73a505dc48144ec72e25874e2b2a72487b02d3bc Mon Sep 17 00:00:00 2001
+From: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Date: Mon, 9 Mar 2026 10:39:49 +0100
+Subject: thunderbolt: Fix property read in nhi_wake_supported()
+
+From: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+
+commit 73a505dc48144ec72e25874e2b2a72487b02d3bc upstream.
+
+device_property_read_foo() returns 0 on success and only then modifies
+'val'. Currently, val is left uninitialized if the aforementioned
+function returns non-zero, making nhi_wake_supported() return true
+almost always (random != 0) if the property is not present in device
+firmware.
+
+Invert the check to make it make sense.
+
+Fixes: 3cdb9446a117 ("thunderbolt: Add support for Intel Ice Lake")
+Cc: stable@vger.kernel.org
+Signed-off-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/nhi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1020,7 +1020,7 @@ static bool nhi_wake_supported(struct pc
+ * If power rails are sustainable for wakeup from S4 this
+ * property is set by the BIOS.
+ */
+- if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
++ if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+ return !!val;
+
+ return true;
--- /dev/null
+From 2ca9e46f8f1f5a297eb0ac83f79d35d5b3a02541 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Sun, 15 Mar 2026 14:31:00 -0400
+Subject: USB: dummy-hcd: Fix interrupt synchronization error
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 2ca9e46f8f1f5a297eb0ac83f79d35d5b3a02541 upstream.
+
+This fixes an error in synchronization in the dummy-hcd driver. The
+error has a somewhat involved history. The synchronization mechanism
+was introduced by commit 7dbd8f4cabd9 ("USB: dummy-hcd: Fix erroneous
+synchronization change"), which added an emulated "interrupts enabled"
+flag together with code emulating synchronize_irq() (it waits until
+all current handler callbacks have returned).
+
+But the emulated interrupt-disable occurred too late, after the driver
+containing the handler callback routines had been told that it was
+unbound and no more callbacks would occur. Commit 4a5d797a9f9c ("usb:
+gadget: dummy_hcd: fix gpf in gadget_setup") tried to fix this by
+moving the synchronize_irq() emulation code from dummy_stop() to
+dummy_pullup(), which runs before the unbind callback.
+
+There still were races, though, because the emulated interrupt-disable
+still occurred too late. It couldn't be moved to dummy_pullup(),
+because that routine can be called for reasons other than an impending
+unbind. Therefore commits 7dc0c55e9f30 ("USB: UDC core: Add
+udc_async_callbacks gadget op") and 04145a03db9d ("USB: UDC: Implement
+udc_async_callbacks in dummy-hcd") added an API allowing the UDC core
+to tell dummy-hcd exactly when emulated interrupts and their callbacks
+should be disabled.
+
+That brings us to the current state of things, which is still wrong
+because the emulated synchronize_irq() occurs before the emulated
+interrupt-disable! That's no good, because it means that more emulated
+interrupts can occur after the synchronize_irq() emulation has run,
+leading to the possibility that a callback handler may be running when
+the gadget driver is unbound.
+
+To fix this, we have to move the synchronize_irq() emulation code yet
+again, to the dummy_udc_async_callbacks() routine, which takes care of
+enabling and disabling emulated interrupt requests. The
+synchronization will now run immediately after emulated interrupts are
+disabled, which is where it belongs.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Fixes: 04145a03db9d ("USB: UDC: Implement udc_async_callbacks in dummy-hcd")
+Cc: stable <stable@kernel.org>
+Link: https://patch.msgid.link/c7bc93fe-4241-4d04-bd56-27c12ba35c97@rowland.harvard.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 29 ++++++++++++++---------------
+ 1 file changed, 14 insertions(+), 15 deletions(-)
+
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -913,21 +913,6 @@ static int dummy_pullup(struct usb_gadge
+ spin_lock_irqsave(&dum->lock, flags);
+ dum->pullup = (value != 0);
+ set_link_state(dum_hcd);
+- if (value == 0) {
+- /*
+- * Emulate synchronize_irq(): wait for callbacks to finish.
+- * This seems to be the best place to emulate the call to
+- * synchronize_irq() that's in usb_gadget_remove_driver().
+- * Doing it in dummy_udc_stop() would be too late since it
+- * is called after the unbind callback and unbind shouldn't
+- * be invoked until all the other callbacks are finished.
+- */
+- while (dum->callback_usage > 0) {
+- spin_unlock_irqrestore(&dum->lock, flags);
+- usleep_range(1000, 2000);
+- spin_lock_irqsave(&dum->lock, flags);
+- }
+- }
+ spin_unlock_irqrestore(&dum->lock, flags);
+
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+@@ -950,6 +935,20 @@ static void dummy_udc_async_callbacks(st
+
+ spin_lock_irq(&dum->lock);
+ dum->ints_enabled = enable;
++ if (!enable) {
++ /*
++ * Emulate synchronize_irq(): wait for callbacks to finish.
++ * This has to happen after emulated interrupts are disabled
++ * (dum->ints_enabled is clear) and before the unbind callback,
++ * just like the call to synchronize_irq() in
++ * gadget/udc/core:gadget_unbind_driver().
++ */
++ while (dum->callback_usage > 0) {
++ spin_unlock_irq(&dum->lock);
++ usleep_range(1000, 2000);
++ spin_lock_irq(&dum->lock);
++ }
++ }
+ spin_unlock_irq(&dum->lock);
+ }
+
--- /dev/null
+From 616a63ff495df12863692ab3f9f7b84e3fa7a66d Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Sun, 15 Mar 2026 14:30:43 -0400
+Subject: USB: dummy-hcd: Fix locking/synchronization error
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 616a63ff495df12863692ab3f9f7b84e3fa7a66d upstream.
+
+Syzbot testing was able to provoke an addressing exception and crash
+in the usb_gadget_udc_reset() routine in
+drivers/usb/gadgets/udc/core.c, resulting from the fact that the
+routine was called with a second ("driver") argument of NULL. The bad
+caller was set_link_state() in dummy_hcd.c, and the problem arose
+because of a race between a USB reset and driver unbind.
+
+These sorts of races were not supposed to be possible; commit
+7dbd8f4cabd9 ("USB: dummy-hcd: Fix erroneous synchronization change"),
+along with a few followup commits, was written specifically to prevent
+them. As it turns out, there are (at least) two errors remaining in
+the code. Another patch will address the second error; this one is
+concerned with the first.
+
+The error responsible for the syzbot crash occurred because the
+stop_activity() routine will sometimes drop and then re-acquire the
+dum->lock spinlock. A call to stop_activity() occurs in
+set_link_state() when handling an emulated USB reset, after the test
+of dum->ints_enabled and before the increment of dum->callback_usage.
+This allowed another thread (doing a driver unbind) to sneak in and
+grab the spinlock, and then clear dum->ints_enabled and dum->driver.
+Normally this other thread would have to wait for dum->callback_usage
+to go down to 0 before it would clear dum->driver, but in this case it
+didn't have to wait since dum->callback_usage had not yet been
+incremented.
+
+The fix is to increment dum->callback_usage _before_ calling
+stop_activity() instead of after. Then the thread doing the unbind
+will not clear dum->driver until after the call to
+usb_gadget_udc_reset() safely returns and dum->callback_usage has been
+decremented again.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Reported-by: syzbot+19bed92c97bee999e5db@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-usb/68fc7c9c.050a0220.346f24.023c.GAE@google.com/
+Tested-by: syzbot+19bed92c97bee999e5db@syzkaller.appspotmail.com
+Fixes: 7dbd8f4cabd9 ("USB: dummy-hcd: Fix erroneous synchronization change")
+Cc: stable <stable@kernel.org>
+Link: https://patch.msgid.link/46135f42-fdbe-46b5-aac0-6ca70492af15@rowland.harvard.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -462,8 +462,13 @@ static void set_link_state(struct dummy_
+
+ /* Report reset and disconnect events to the driver */
+ if (dum->ints_enabled && (disconnect || reset)) {
+- stop_activity(dum);
+ ++dum->callback_usage;
++ /*
++ * stop_activity() can drop dum->lock, so it must
++ * not come between the dum->ints_enabled test
++ * and the ++dum->callback_usage.
++ */
++ stop_activity(dum);
+ spin_unlock(&dum->lock);
+ if (reset)
+ usb_gadget_udc_reset(&dum->gadget, dum->driver);
--- /dev/null
+From f50200dd44125e445a6164e88c217472fa79cdbc Mon Sep 17 00:00:00 2001
+From: Sebastian Urban <surban@surban.net>
+Date: Sun, 15 Mar 2026 16:10:45 +0100
+Subject: usb: gadget: dummy_hcd: fix premature URB completion when ZLP follows partial transfer
+
+From: Sebastian Urban <surban@surban.net>
+
+commit f50200dd44125e445a6164e88c217472fa79cdbc upstream.
+
+When a gadget request is only partially transferred in transfer()
+because the per-frame bandwidth budget is exhausted, the loop advances
+to the next queued request. If that next request is a zero-length
+packet (ZLP), len evaluates to zero and the code takes the
+unlikely(len == 0) path, which sets is_short = 1. This bypasses the
+bandwidth guard ("limit < ep->ep.maxpacket && limit < len") that
+lives in the else branch and would otherwise break out of the loop for
+non-zero requests. The is_short path then completes the URB before all
+data from the first request has been transferred.
+
+Reproducer (bulk IN, high speed):
+
+ Device side (FunctionFS with Linux AIO):
+ 1. Queue a 65024-byte write via io_submit (127 * 512, i.e. a
+ multiple of the HS bulk max packet size).
+ 2. Immediately queue a zero-length write (ZLP) via io_submit.
+
+ Host side:
+ 3. Submit a 65536-byte bulk IN URB.
+
+ Expected: URB completes with actual_length = 65024.
+ Actual: URB completes with actual_length = 53248, losing 11776
+ bytes that leak into subsequent URBs.
+
+At high speed the per-frame budget is 53248 bytes (512 * 13 * 8).
+The 65024-byte request exhausts this budget after 53248 bytes, leaving
+the request incomplete (req->req.actual < req->req.length). Neither
+the request nor the URB is finished, and rescan is 0, so the loop
+advances to the ZLP. For the ZLP, dev_len = 0, so len = min(12288, 0)
+= 0, taking the unlikely(len == 0) path and setting is_short = 1.
+The is_short handler then sets *status = 0, completing the URB with
+only 53248 of the expected 65024 bytes.
+
+Fix this by breaking out of the loop when the current request has
+remaining data (req->req.actual < req->req.length). The request
+resumes on the next timer tick, preserving correct data ordering.
+
+Signed-off-by: Sebastian Urban <surban@surban.net>
+Cc: stable <stable@kernel.org>
+Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
+Link: https://patch.msgid.link/20260315151045.1155850-1-surban@surban.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -1538,6 +1538,12 @@ top:
+ /* rescan to continue with any other queued i/o */
+ if (rescan)
+ goto top;
++
++ /* request not fully transferred; stop iterating to
++ * preserve data ordering across queued requests.
++ */
++ if (req->req.actual < req->req.length)
++ break;
+ }
+ return sent;
+ }
--- /dev/null
+From 3b8ae9817686efb3ea789ca9d4efdff2ce9c1c04 Mon Sep 17 00:00:00 2001
+From: Andrei Kuchynski <akuchynski@chromium.org>
+Date: Tue, 24 Mar 2026 10:30:12 +0000
+Subject: usb: typec: thunderbolt: Set enter_vdo during initialization
+
+From: Andrei Kuchynski <akuchynski@chromium.org>
+
+commit 3b8ae9817686efb3ea789ca9d4efdff2ce9c1c04 upstream.
+
+In the current implementation, if a cable's alternate mode enter operation
+is not supported, the tbt->plug[TYPEC_PLUG_SOP_P] pointer is cleared by the
+time tbt_enter_mode() is called. This prevents the driver from identifying
+the cable's VDO.
+
+As a result, the Thunderbolt connection falls back to the default
+TBT_CABLE_USB3_PASSIVE speed, even if the cable supports higher speeds.
+To ensure the correct VDO value is used during mode entry, calculate and
+store the enter_vdo earlier during the initialization phase in tbt_ready().
+
+Cc: stable <stable@kernel.org>
+Fixes: 100e25738659 ("usb: typec: Add driver for Thunderbolt 3 Alternate Mode")
+Tested-by: Madhu M <madhu.m@intel.corp-partner.google.com>
+Signed-off-by: Andrei Kuchynski <akuchynski@chromium.org>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Reviewed-by: Benson Leung <bleung@chromium.org>
+Link: https://patch.msgid.link/20260324103012.1417616-1-akuchynski@chromium.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/altmodes/thunderbolt.c | 44 +++++++++++++++----------------
+ 1 file changed, 22 insertions(+), 22 deletions(-)
+
+--- a/drivers/usb/typec/altmodes/thunderbolt.c
++++ b/drivers/usb/typec/altmodes/thunderbolt.c
+@@ -39,28 +39,7 @@ static bool tbt_ready(struct typec_altmo
+
+ static int tbt_enter_mode(struct tbt_altmode *tbt)
+ {
+- struct typec_altmode *plug = tbt->plug[TYPEC_PLUG_SOP_P];
+- u32 vdo;
+-
+- vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1);
+- vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0;
+- vdo |= TBT_MODE;
+-
+- if (plug) {
+- if (typec_cable_is_active(tbt->cable))
+- vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+-
+- vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo));
+- vdo |= plug->vdo & TBT_CABLE_ROUNDED;
+- vdo |= plug->vdo & TBT_CABLE_OPTICAL;
+- vdo |= plug->vdo & TBT_CABLE_RETIMER;
+- vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING;
+- } else {
+- vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE);
+- }
+-
+- tbt->enter_vdo = vdo;
+- return typec_altmode_enter(tbt->alt, &vdo);
++ return typec_altmode_enter(tbt->alt, &tbt->enter_vdo);
+ }
+
+ static void tbt_altmode_work(struct work_struct *work)
+@@ -337,6 +316,7 @@ static bool tbt_ready(struct typec_altmo
+ {
+ struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
+ struct typec_altmode *plug;
++ u32 vdo;
+
+ if (tbt->cable)
+ return true;
+@@ -364,6 +344,26 @@ static bool tbt_ready(struct typec_altmo
+ tbt->plug[i] = plug;
+ }
+
++ vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1);
++ vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0;
++ vdo |= TBT_MODE;
++ plug = tbt->plug[TYPEC_PLUG_SOP_P];
++
++ if (plug) {
++ if (typec_cable_is_active(tbt->cable))
++ vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
++
++ vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo));
++ vdo |= plug->vdo & TBT_CABLE_ROUNDED;
++ vdo |= plug->vdo & TBT_CABLE_OPTICAL;
++ vdo |= plug->vdo & TBT_CABLE_RETIMER;
++ vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING;
++ } else {
++ vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE);
++ }
++
++ tbt->enter_vdo = vdo;
++
+ return true;
+ }
+
--- /dev/null
+From d2d8c17ac01a1b1f638ea5d340a884ccc5015186 Mon Sep 17 00:00:00 2001
+From: Nathan Rebello <nathan.c.rebello@gmail.com>
+Date: Fri, 13 Mar 2026 18:24:53 -0400
+Subject: usb: typec: ucsi: validate connector number in ucsi_notify_common()
+
+From: Nathan Rebello <nathan.c.rebello@gmail.com>
+
+commit d2d8c17ac01a1b1f638ea5d340a884ccc5015186 upstream.
+
+The connector number extracted from CCI via UCSI_CCI_CONNECTOR() is a
+7-bit field (0-127) that is used to index into the connector array in
+ucsi_connector_change(). However, the array is only allocated for the
+number of connectors reported by the device (typically 2-4 entries).
+
+A malicious or malfunctioning device could report an out-of-range
+connector number in the CCI, causing an out-of-bounds array access in
+ucsi_connector_change().
+
+Add a bounds check in ucsi_notify_common(), the central point where CCI
+is parsed after arriving from hardware, so that bogus connector numbers
+are rejected before they propagate further.
+
+Fixes: bdc62f2bae8f ("usb: typec: ucsi: Simplified registration and I/O API")
+Cc: stable <stable@kernel.org>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Nathan Rebello <nathan.c.rebello@gmail.com>
+Link: https://patch.msgid.link/20260313222453.123-1-nathan.c.rebello@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -42,8 +42,13 @@ void ucsi_notify_common(struct ucsi *ucs
+ if (cci & UCSI_CCI_BUSY)
+ return;
+
+- if (UCSI_CCI_CONNECTOR(cci))
+- ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
++ if (UCSI_CCI_CONNECTOR(cci)) {
++ if (UCSI_CCI_CONNECTOR(cci) <= ucsi->cap.num_connectors)
++ ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
++ else
++ dev_err(ucsi->dev, "bogus connector number in CCI: %lu\n",
++ UCSI_CCI_CONNECTOR(cci));
++ }
+
+ if (cci & UCSI_CCI_ACK_COMPLETE &&
+ test_and_clear_bit(ACK_PENDING, &ucsi->flags))
--- /dev/null
+From b4e5f04c58a29c499faa85d12952ca9a4faf1cb9 Mon Sep 17 00:00:00 2001
+From: Srujana Challa <schalla@marvell.com>
+Date: Thu, 26 Mar 2026 19:53:44 +0530
+Subject: virtio_net: clamp rss_max_key_size to NETDEV_RSS_KEY_LEN
+
+From: Srujana Challa <schalla@marvell.com>
+
+commit b4e5f04c58a29c499faa85d12952ca9a4faf1cb9 upstream.
+
+rss_max_key_size in the virtio spec is the maximum key size supported by
+the device, not a mandatory size the driver must use. Also the value 40
+is a spec minimum, not a spec maximum.
+
+The current code rejects RSS and can fail probe when the device reports a
+larger rss_max_key_size than the driver buffer limit. Instead, clamp the
+effective key length to min(device rss_max_key_size, NETDEV_RSS_KEY_LEN)
+and keep RSS enabled.
+
+This keeps probe working on devices that advertise larger maximum key sizes
+while respecting the netdev RSS key buffer size limit.
+
+Fixes: 3f7d9c1964fc ("virtio_net: Add hash_key_length check")
+Cc: stable@vger.kernel.org
+Signed-off-by: Srujana Challa <schalla@marvell.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://patch.msgid.link/20260326142344.1171317-1-schalla@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/virtio_net.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -381,8 +381,6 @@ struct receive_queue {
+ struct xdp_buff **xsk_buffs;
+ };
+
+-#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
+-
+ /* Control VQ buffers: protected by the rtnl lock */
+ struct control_buf {
+ struct virtio_net_ctrl_hdr hdr;
+@@ -495,7 +493,7 @@ struct virtnet_info {
+
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct virtio_net_rss_config_trailer, rss_trailer, hash_key_data,
+- u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
++ u8 rss_hash_key_data[NETDEV_RSS_KEY_LEN];
+ );
+ };
+ static_assert(offsetof(struct virtnet_info, rss_trailer.hash_key_data) ==
+@@ -6794,6 +6792,7 @@ static int virtnet_probe(struct virtio_d
+ struct virtnet_info *vi;
+ u16 max_queue_pairs;
+ int mtu = 0;
++ u16 key_sz;
+
+ /* Find if host supports multiqueue/rss virtio_net device */
+ max_queue_pairs = 1;
+@@ -6929,14 +6928,13 @@ static int virtnet_probe(struct virtio_d
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
+- vi->rss_key_size =
+- virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+- if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
+- dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
+- vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
+- err = -EINVAL;
+- goto free;
+- }
++ key_sz = virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
++
++ vi->rss_key_size = min_t(u16, key_sz, NETDEV_RSS_KEY_LEN);
++ if (key_sz > vi->rss_key_size)
++ dev_warn(&vdev->dev,
++ "rss_max_key_size=%u exceeds driver limit %u, clamping\n",
++ key_sz, vi->rss_key_size);
+
+ vi->rss_hash_types_supported =
+ virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
--- /dev/null
+From 40014493cece72a0be5672cd86763e53fb3ec613 Mon Sep 17 00:00:00 2001
+From: Liav Mordouch <liavmordouch@gmail.com>
+Date: Fri, 27 Mar 2026 20:02:04 +0300
+Subject: vt: discard stale unicode buffer on alt screen exit after resize
+
+From: Liav Mordouch <liavmordouch@gmail.com>
+
+commit 40014493cece72a0be5672cd86763e53fb3ec613 upstream.
+
+When enter_alt_screen() saves vc_uni_lines into vc_saved_uni_lines and
+sets vc_uni_lines to NULL, a subsequent console resize via vc_do_resize()
+skips reallocating the unicode buffer because vc_uni_lines is NULL.
+However, vc_saved_uni_lines still points to the old buffer allocated for
+the original dimensions.
+
+When leave_alt_screen() later restores vc_saved_uni_lines, the buffer
+dimensions no longer match vc_rows/vc_cols. Any operation that iterates
+over the unicode buffer using the current dimensions (e.g. csi_J clearing
+the screen) will access memory out of bounds, causing a kernel oops:
+
+ BUG: unable to handle page fault for address: 0x0000002000000020
+ RIP: 0010:csi_J+0x133/0x2d0
+
+The faulting address 0x0000002000000020 is two adjacent u32 space
+characters (0x20) interpreted as a pointer, read from the row data area
+past the end of the 25-entry pointer array in a buffer allocated for
+80x25 but accessed with 240x67 dimensions.
+
+Fix this by checking whether the console dimensions changed while in the
+alternate screen. If they did, free the stale saved buffer instead of
+restoring it. The unicode screen will be lazily rebuilt via
+vc_uniscr_check() when next needed.
+
+Fixes: 5eb608319bb5 ("vt: save/restore unicode screen buffer for alternate screen")
+Cc: stable <stable@kernel.org>
+Tested-by: Liav Mordouch <liavmordouch@gmail.com>
+Signed-off-by: Liav Mordouch <liavmordouch@gmail.com>
+Reviewed-by: Nicolas Pitre <nico@fluxnic.net>
+Link: https://patch.msgid.link/20260327170204.29706-1-liavmordouch@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/vt/vt.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1907,6 +1907,7 @@ static void leave_alt_screen(struct vc_d
+ unsigned int rows = min(vc->vc_saved_rows, vc->vc_rows);
+ unsigned int cols = min(vc->vc_saved_cols, vc->vc_cols);
+ u16 *src, *dest;
++ bool uni_lines_stale;
+
+ if (vc->vc_saved_screen == NULL)
+ return; /* Not inside an alt-screen */
+@@ -1915,7 +1916,18 @@ static void leave_alt_screen(struct vc_d
+ dest = ((u16 *)vc->vc_origin) + r * vc->vc_cols;
+ memcpy(dest, src, 2 * cols);
+ }
+- vc_uniscr_set(vc, vc->vc_saved_uni_lines);
++ /*
++ * If the console was resized while in the alternate screen,
++ * vc_saved_uni_lines was allocated for the old dimensions.
++ * Restoring it would cause out-of-bounds accesses. Discard it
++ * and let the unicode screen be lazily rebuilt.
++ */
++ uni_lines_stale = vc->vc_saved_rows != vc->vc_rows ||
++ vc->vc_saved_cols != vc->vc_cols;
++ if (uni_lines_stale)
++ vc_uniscr_free(vc->vc_saved_uni_lines);
++ else
++ vc_uniscr_set(vc, vc->vc_saved_uni_lines);
+ vc->vc_saved_uni_lines = NULL;
+ restore_cur(vc);
+ /* Update the entire screen */
--- /dev/null
+From 3ddbea7542ae529c1a88ef9a8b1ce169126211f6 Mon Sep 17 00:00:00 2001
+From: Nicolas Pitre <nico@fluxnic.net>
+Date: Fri, 27 Mar 2026 23:09:47 -0400
+Subject: vt: resize saved unicode buffer on alt screen exit after resize
+
+From: Nicolas Pitre <nico@fluxnic.net>
+
+commit 3ddbea7542ae529c1a88ef9a8b1ce169126211f6 upstream.
+
+Instead of discarding the saved unicode buffer when the console was
+resized while in the alternate screen, resize it to the current
+dimensions using vc_uniscr_copy_area() to preserve its content. This
+properly restores the unicode screen on alt screen exit rather than
+lazily rebuilding it from a lossy reverse glyph translation.
+
+On allocation failure the stale buffer is freed and vc_uni_lines is
+set to NULL so it gets lazily rebuilt via vc_uniscr_check() when next
+needed.
+
+Fixes: 40014493cece ("vt: discard stale unicode buffer on alt screen exit after resize")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Nicolas Pitre <nico@fluxnic.net>
+Link: https://patch.msgid.link/3nsr334n-079q-125n-7807-n4nq818758ns@syhkavp.arg
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/vt/vt.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1907,7 +1907,6 @@ static void leave_alt_screen(struct vc_d
+ unsigned int rows = min(vc->vc_saved_rows, vc->vc_rows);
+ unsigned int cols = min(vc->vc_saved_cols, vc->vc_cols);
+ u16 *src, *dest;
+- bool uni_lines_stale;
+
+ if (vc->vc_saved_screen == NULL)
+ return; /* Not inside an alt-screen */
+@@ -1918,16 +1917,23 @@ static void leave_alt_screen(struct vc_d
+ }
+ /*
+ * If the console was resized while in the alternate screen,
+- * vc_saved_uni_lines was allocated for the old dimensions.
+- * Restoring it would cause out-of-bounds accesses. Discard it
+- * and let the unicode screen be lazily rebuilt.
++ * resize the saved unicode buffer to the current dimensions.
++ * On allocation failure new_uniscr is NULL, causing the old
++ * buffer to be freed and vc_uni_lines to be lazily rebuilt
++ * via vc_uniscr_check() when next needed.
+ */
+- uni_lines_stale = vc->vc_saved_rows != vc->vc_rows ||
+- vc->vc_saved_cols != vc->vc_cols;
+- if (uni_lines_stale)
++ if (vc->vc_saved_uni_lines &&
++ (vc->vc_saved_rows != vc->vc_rows ||
++ vc->vc_saved_cols != vc->vc_cols)) {
++ u32 **new_uniscr = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows);
++
++ if (new_uniscr)
++ vc_uniscr_copy_area(new_uniscr, vc->vc_cols, vc->vc_rows,
++ vc->vc_saved_uni_lines, cols, 0, rows);
+ vc_uniscr_free(vc->vc_saved_uni_lines);
+- else
+- vc_uniscr_set(vc, vc->vc_saved_uni_lines);
++ vc->vc_saved_uni_lines = new_uniscr;
++ }
++ vc_uniscr_set(vc, vc->vc_saved_uni_lines);
+ vc->vc_saved_uni_lines = NULL;
+ restore_cur(vc);
+ /* Update the entire screen */
--- /dev/null
+From afa9a05e6c4971bd5586f1b304e14d61fb3d9385 Mon Sep 17 00:00:00 2001
+From: Yang Yang <n05ec@lzu.edu.cn>
+Date: Thu, 26 Mar 2026 03:44:41 +0000
+Subject: vxlan: validate ND option lengths in vxlan_na_create
+
+From: Yang Yang <n05ec@lzu.edu.cn>
+
+commit afa9a05e6c4971bd5586f1b304e14d61fb3d9385 upstream.
+
+vxlan_na_create() walks ND options according to option-provided
+lengths. A malformed option can make the parser advance beyond the
+computed option span or use a too-short source LLADDR option payload.
+
+Validate option lengths against the remaining NS option area before
+advancing, and only read source LLADDR when the option is large enough
+for an Ethernet address.
+
+Fixes: 4b29dba9c085 ("vxlan: fix nonfunctional neigh_reduce()")
+Cc: stable@vger.kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Tested-by: Ao Zhou <n05ec@lzu.edu.cn>
+Co-developed-by: Yuan Tan <tanyuan98@outlook.com>
+Signed-off-by: Yuan Tan <tanyuan98@outlook.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Yang Yang <n05ec@lzu.edu.cn>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260326034441.2037420-4-n05ec@lzu.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1965,12 +1965,14 @@ static struct sk_buff *vxlan_na_create(s
+ ns_olen = request->len - skb_network_offset(request) -
+ sizeof(struct ipv6hdr) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+- if (!ns->opt[i + 1]) {
++ if (!ns->opt[i + 1] || i + (ns->opt[i + 1] << 3) > ns_olen) {
+ kfree_skb(reply);
+ return NULL;
+ }
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+- daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
++ if ((ns->opt[i + 1] << 3) >=
++ sizeof(struct nd_opt_hdr) + ETH_ALEN)
++ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+ }
+ }