--- /dev/null
+From 84ad9ac8d9ca29033d589e79a991866b38e23b85 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:28 +0100
+Subject: arm64: dts: qcom: sdm845: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 84ad9ac8d9ca29033d589e79a991866b38e23b85 upstream.
+
+The DP/DM wakeup interrupts are edge triggered, and which edge to trigger
+on depends on the use-case and on whether a low-speed or full/high-speed
+device is connected.
+
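+For illustration only (not part of the change itself): a glue driver
+arming such a wakeup interrupt picks the concrete edge at runtime;
+'wakeup_irq' and the edge chosen below are hypothetical:
+
+  /* sketch: the edge depends on the use-case and attached device speed */
+  irq_set_irq_type(wakeup_irq, IRQ_TYPE_EDGE_RISING);  /* or EDGE_FALLING */
+  enable_irq_wake(wakeup_irq);
+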
+Fixes: ca4db2b538a1 ("arm64: dts: qcom: sdm845: Add USB-related nodes")
+Cc: stable@vger.kernel.org # 4.20
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-9-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sdm845.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -3567,8 +3567,8 @@
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -3615,8 +3615,8 @@
+
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 490 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 491 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From 7d4b5d7a37bdd63a5a3371b988744b060d5bb86f Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 27 Dec 2023 21:38:23 +0100
+Subject: async: Introduce async_schedule_dev_nocall()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 7d4b5d7a37bdd63a5a3371b988744b060d5bb86f upstream.
+
+In preparation for subsequent changes, introduce a specialized variant
+of async_schedule_dev() that will not invoke the argument function
+synchronously when it cannot be scheduled for asynchronous execution.
+
+The new function, async_schedule_dev_nocall(), will be used for fixing
+possible deadlocks in the system-wide power management core code.
+
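+As a hypothetical caller sketch ('async_resume' and 'dev' are
+placeholders, not taken from this patch): the point of the new helper is
+that the caller keeps control when scheduling fails, instead of having
+the function run synchronously in the caller's own context:
+
+  /* sketch: fall back explicitly, e.g. outside any problematic locks */
+  if (!async_schedule_dev_nocall(async_resume, dev))
+          async_resume(dev, 0);
+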
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com> for the series.
+Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/async.h | 2 ++
+ kernel/async.c | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+--- a/include/linux/async.h
++++ b/include/linux/async.h
+@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, st
+ return async_schedule_node(func, dev, dev_to_node(dev));
+ }
+
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
++
+ /**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -244,6 +244,35 @@ async_cookie_t async_schedule_node(async
+ EXPORT_SYMBOL_GPL(async_schedule_node);
+
+ /**
++ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
++ * @func: function to execute asynchronously
++ * @dev: device argument to be passed to function
++ *
++ * @dev is used as both the argument for the function and to provide NUMA
++ * context for where to run the function.
++ *
++ * If the asynchronous execution of @func is scheduled successfully, return
++ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
++ * that will run the function synchronously then.
++ */
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
++{
++ struct async_entry *entry;
++
++ entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
++
++ /* Give up if there is no memory or too much work. */
++ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
++ kfree(entry);
++ return false;
++ }
++
++ __async_schedule_node_domain(func, dev, dev_to_node(dev),
++ &async_dfl_domain, entry);
++ return true;
++}
++
++/**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+ * This function waits until all asynchronous function calls have been done.
--- /dev/null
+From 6aa09a5bccd8e224d917afdb4c278fc66aacde4d Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 27 Dec 2023 21:37:02 +0100
+Subject: async: Split async_schedule_node_domain()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 6aa09a5bccd8e224d917afdb4c278fc66aacde4d upstream.
+
+In preparation for subsequent changes, split async_schedule_node_domain()
+into two pieces so as to allow the bottom part of it to be called from a
+somewhat different code path.
+
+No functional impact.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/async.c | 56 ++++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 34 insertions(+), 22 deletions(-)
+
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct wo
+ wake_up(&async_done);
+ }
+
++static async_cookie_t __async_schedule_node_domain(async_func_t func,
++ void *data, int node,
++ struct async_domain *domain,
++ struct async_entry *entry)
++{
++ async_cookie_t newcookie;
++ unsigned long flags;
++
++ INIT_LIST_HEAD(&entry->domain_list);
++ INIT_LIST_HEAD(&entry->global_list);
++ INIT_WORK(&entry->work, async_run_entry_fn);
++ entry->func = func;
++ entry->data = data;
++ entry->domain = domain;
++
++ spin_lock_irqsave(&async_lock, flags);
++
++ /* allocate cookie and queue */
++ newcookie = entry->cookie = next_cookie++;
++
++ list_add_tail(&entry->domain_list, &domain->pending);
++ if (domain->registered)
++ list_add_tail(&entry->global_list, &async_global_pending);
++
++ atomic_inc(&entry_count);
++ spin_unlock_irqrestore(&async_lock, flags);
++
++ /* schedule for execution */
++ queue_work_node(node, system_unbound_wq, &entry->work);
++
++ return newcookie;
++}
++
+ /**
+ * async_schedule_node_domain - NUMA specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domai
+ func(data, newcookie);
+ return newcookie;
+ }
+- INIT_LIST_HEAD(&entry->domain_list);
+- INIT_LIST_HEAD(&entry->global_list);
+- INIT_WORK(&entry->work, async_run_entry_fn);
+- entry->func = func;
+- entry->data = data;
+- entry->domain = domain;
+-
+- spin_lock_irqsave(&async_lock, flags);
+
+- /* allocate cookie and queue */
+- newcookie = entry->cookie = next_cookie++;
+-
+- list_add_tail(&entry->domain_list, &domain->pending);
+- if (domain->registered)
+- list_add_tail(&entry->global_list, &async_global_pending);
+-
+- atomic_inc(&entry_count);
+- spin_unlock_irqrestore(&async_lock, flags);
+-
+- /* schedule for execution */
+- queue_work_node(node, system_unbound_wq, &entry->work);
+-
+- return newcookie;
++ return __async_schedule_node_domain(func, data, node, domain, entry);
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node_domain);
+
--- /dev/null
+From 01bd694ac2f682fb8017e16148b928482bc8fa4b Mon Sep 17 00:00:00 2001
+From: Qiang Yu <quic_qianyu@quicinc.com>
+Date: Mon, 11 Dec 2023 14:42:52 +0800
+Subject: bus: mhi: host: Drop chan lock before queuing buffers
+
+From: Qiang Yu <quic_qianyu@quicinc.com>
+
+commit 01bd694ac2f682fb8017e16148b928482bc8fa4b upstream.
+
+Ensure read and write locks for the channel are not taken in succession by
+dropping the read lock from parse_xfer_event() such that a callback given
+to the client can queue buffers and acquire the write lock in the process.
+Any queueing of buffers should be done without the channel read lock
+acquired, as it can otherwise result in nested locking and a soft lockup.
+
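+Sketch of the pattern being avoided (illustration, not code quoted from
+this driver): invoking the client callback with the read side of the
+rwlock held lets the callback re-enter on the write side of the same lock:
+
+  read_lock_bh(&mhi_chan->lock);
+  mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+      /* client callback calls mhi_queue_buf(), which takes         */
+      /* write_lock_bh(&mhi_chan->lock) -> nested lock, soft lockup */
+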
+Cc: <stable@vger.kernel.org> # 5.7
+Fixes: 1d3173a3bae7 ("bus: mhi: core: Add support for processing events from client device")
+Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/1702276972-41296-3-git-send-email-quic_qianyu@quicinc.com
+[mani: added fixes tag and cc'ed stable]
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bus/mhi/host/main.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -569,6 +569,8 @@ static int parse_xfer_event(struct mhi_c
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
++ read_unlock_bh(&mhi_chan->lock);
++
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+@@ -591,6 +593,8 @@ static int parse_xfer_event(struct mhi_c
+ kfree(buf_info->cb_buf);
+ }
+ }
++
++ read_lock_bh(&mhi_chan->lock);
+ }
+ break;
+ } /* CC_EOT */
--- /dev/null
+From 27016f75f5ed47e2d8e0ca75a8ff1f40bc1a5e27 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 7 Dec 2023 18:36:57 +0800
+Subject: crypto: api - Disallow identical driver names
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 27016f75f5ed47e2d8e0ca75a8ff1f40bc1a5e27 upstream.
+
+Disallow registration of two algorithms with identical driver names.
+
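+Hypothetical illustration (names made up): with this check in place,
+registering a second algorithm that reuses an existing driver name,
+e.g. two modules both declaring
+
+  .cra_name        = "sha256",
+  .cra_driver_name = "sha256-generic",
+
+now fails registration (-EEXIST) instead of leaving two entries that a
+driver-name lookup cannot tell apart.
+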
+Cc: <stable@vger.kernel.org>
+Reported-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algapi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -258,6 +258,7 @@ static struct crypto_larval *__crypto_re
+ }
+
+ if (!strcmp(q->cra_driver_name, alg->cra_name) ||
++ !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ !strcmp(q->cra_name, alg->cra_driver_name))
+ goto err;
+ }
--- /dev/null
+From d07f951903fa9922c375b8ab1ce81b18a0034e3b Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 28 Nov 2023 14:22:13 +0800
+Subject: crypto: s390/aes - Fix buffer overread in CTR mode
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit d07f951903fa9922c375b8ab1ce81b18a0034e3b upstream.
+
+When processing the last block, the s390 ctr code will always read
+a whole block, even if there isn't a whole block of data left. Fix
+this by using the actual length left and copying it into a buffer first
+for processing.
+
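+Generic sketch of the fix pattern (assumes 'nbytes' is the residue of
+the final partial block and 'buf' is a block-sized scratch buffer):
+
+  memset(buf, 0, AES_BLOCK_SIZE);  /* pad the partial block */
+  memcpy(buf, src, nbytes);        /* read only what is really there */
+  /* run the CTR engine on 'buf' for one full block, in place */
+  memcpy(dst, buf, nbytes);        /* copy back only the residue */
+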
+Fixes: 0200f3ecc196 ("crypto: s390 - add System z hardware support for CTR mode")
+Cc: <stable@vger.kernel.org>
+Reported-by: Guangwu Zhang <guazhang@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Harald Freudenberger <freude@de.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/crypto/aes_s390.c | 4 +++-
+ arch/s390/crypto/paes_s390.c | 4 +++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -600,7 +600,9 @@ static int ctr_aes_crypt(struct skcipher
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+- cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
++ cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
+ AES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -676,9 +676,11 @@ static int ctr_paes_crypt(struct skciphe
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
+ while (1) {
+ if (cpacf_kmctr(ctx->fc, &param, buf,
+- walk.src.virt.addr, AES_BLOCK_SIZE,
++ buf, AES_BLOCK_SIZE,
+ walk.iv) == AES_BLOCK_SIZE)
+ break;
+ if (__paes_convert_key(ctx))
--- /dev/null
+From 7c784d624819acbeefb0018bac89e632467cca5a Mon Sep 17 00:00:00 2001
+From: Suraj Jitindar Singh <surajjs@amazon.com>
+Date: Wed, 13 Dec 2023 16:16:35 +1100
+Subject: ext4: allow for the last group to be marked as trimmed
+
+From: Suraj Jitindar Singh <surajjs@amazon.com>
+
+commit 7c784d624819acbeefb0018bac89e632467cca5a upstream.
+
+The ext4 filesystem tracks the trim status of blocks at the group
+level. When an entire group has been trimmed, it is marked as such,
+and subsequent trim invocations with the same minimum trim size are
+not attempted on that group unless it is marked as trimmable again,
+for example when a block is freed.
+
+Currently the last group can't be marked as trimmed due to incorrect
+logic in ext4_last_grp_cluster(). ext4_last_grp_cluster() is supposed
+to return the zero based index of the last cluster in a group. This is
+then used by ext4_try_to_trim_range() to determine if the trim
+operation spans the entire group and as such if the trim status of the
+group should be recorded.
+
+ext4_last_grp_cluster() takes a 0 based group index, thus the valid
+values for grp are 0..(ext4_get_groups_count - 1). Any group index
+less than (ext4_get_groups_count - 1) is not the last group and must
+have EXT4_CLUSTERS_PER_GROUP(sb) clusters. For the last group we need
+to calculate the number of clusters based on the number of blocks in
+the group. Finally subtract 1 from the number of clusters as zero
+based indexing is expected. Rearrange the function slightly to make
+it clear what we are calculating and returning.
+
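+Worked example with the reproducer geometry below (4 KiB blocks, 8192
+blocks per group, 8191 blocks total, so a single short group): group 0
+is the last group and holds 8191 clusters, so the last cluster index is
+8190. The old code returned EXT4_CLUSTERS_PER_GROUP(sb) - 1 = 8191 for
+it, one past the end, so the trim range never appeared to cover the
+whole group and the trimmed bit was never set.
+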
+Reproducer:
+// Create file system where the last group has fewer blocks than
+// blocks per group
+$ mkfs.ext4 -b 4096 -g 8192 /dev/nvme0n1 8191
+$ mount /dev/nvme0n1 /mnt
+
+Before Patch:
+$ fstrim -v /mnt
+/mnt: 25.9 MiB (27156480 bytes) trimmed
+// Group not marked as trimmed so second invocation still discards blocks
+$ fstrim -v /mnt
+/mnt: 25.9 MiB (27156480 bytes) trimmed
+
+After Patch:
+$ fstrim -v /mnt
+/mnt: 25.9 MiB (27156480 bytes) trimmed
+// Group marked as trimmed so second invocation DOESN'T discard any blocks
+$ fstrim -v /mnt
+/mnt: 0 B (0 bytes) trimmed
+
+Fixes: 45e4ab320c9b ("ext4: move setting of trimmed bit into ext4_try_to_trim_range()")
+Cc: <stable@vger.kernel.org> # 4.19+
+Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20231213051635.37731-1-surajjs@amazon.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -5895,11 +5895,16 @@ __acquires(bitlock)
+ static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+ {
+- if (grp < ext4_get_groups_count(sb))
+- return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+- return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+- ext4_group_first_block_no(sb, grp) - 1) >>
+- EXT4_CLUSTER_BITS(sb);
++ unsigned long nr_clusters_in_group;
++
++ if (grp < (ext4_get_groups_count(sb) - 1))
++ nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
++ else
++ nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++ ext4_group_first_block_no(sb, grp))
++ >> EXT4_CLUSTER_BITS(sb);
++
++ return nr_clusters_in_group - 1;
+ }
+
+ static bool ext4_trim_interrupted(void)
--- /dev/null
+From 78aafb3884f6bc6636efcc1760c891c8500b9922 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 2 Dec 2023 09:01:54 +0800
+Subject: hwrng: core - Fix page fault dead lock on mmap-ed hwrng
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 78aafb3884f6bc6636efcc1760c891c8500b9922 upstream.
+
+There is a dead-lock in the hwrng device read path. This triggers
+when the user reads from /dev/hwrng into memory also mmap-ed from
+/dev/hwrng. The resulting page fault triggers a recursive read
+which then dead-locks.
+
+Fix this by using a stack buffer when calling copy_to_user.
+
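+Illustrative sketch of the trap (simplified, hypothetical control flow):
+with the shared rng_buffer handed straight to copy_to_user(), a fault on
+the destination can re-enter the read path while reading_mutex is held:
+
+  mutex_lock(&reading_mutex);
+  copy_to_user(buf, rng_buffer, len);
+      /* 'buf' faults; the fault path re-enters rng_dev_read(), */
+      /* which blocks on mutex_lock(&reading_mutex): dead-lock  */
+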
+Reported-by: Edward Adam Davis <eadavis@qq.com>
+Reported-by: syzbot+c52ab18308964d248092@syzkaller.appspotmail.com
+Fixes: 9996508b3353 ("hwrng: core - Replace u32 in driver API with byte array")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/hw_random/core.c | 34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -24,10 +24,13 @@
+ #include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+
+ #define RNG_MODULE_NAME "hw_random"
+
++#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
++
+ static struct hwrng *current_rng;
+ /* the current rng has been explicitly chosen by user via sysfs */
+ static int cur_rng_set_by_user;
+@@ -59,7 +62,7 @@ static inline int rng_get_data(struct hw
+
+ static size_t rng_buffer_size(void)
+ {
+- return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++ return RNG_BUFFER_SIZE;
+ }
+
+ static void add_early_randomness(struct hwrng *rng)
+@@ -206,6 +209,7 @@ static inline int rng_get_data(struct hw
+ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+ {
++ u8 buffer[RNG_BUFFER_SIZE];
+ ssize_t ret = 0;
+ int err = 0;
+ int bytes_read, len;
+@@ -233,34 +237,37 @@ static ssize_t rng_dev_read(struct file
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock_reading;
++ } else if (bytes_read == 0 &&
++ (filp->f_flags & O_NONBLOCK)) {
++ err = -EAGAIN;
++ goto out_unlock_reading;
+ }
++
+ data_avail = bytes_read;
+ }
+
+- if (!data_avail) {
+- if (filp->f_flags & O_NONBLOCK) {
+- err = -EAGAIN;
+- goto out_unlock_reading;
+- }
+- } else {
+- len = data_avail;
++ len = data_avail;
++ if (len) {
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+- if (copy_to_user(buf + ret, rng_buffer + data_avail,
+- len)) {
++ memcpy(buffer, rng_buffer + data_avail, len);
++ }
++ mutex_unlock(&reading_mutex);
++ put_rng(rng);
++
++ if (len) {
++ if (copy_to_user(buf + ret, buffer, len)) {
+ err = -EFAULT;
+- goto out_unlock_reading;
++ goto out;
+ }
+
+ size -= len;
+ ret += len;
+ }
+
+- mutex_unlock(&reading_mutex);
+- put_rng(rng);
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+@@ -271,6 +278,7 @@ static ssize_t rng_dev_read(struct file
+ }
+ }
+ out:
++ memzero_explicit(buffer, sizeof(buffer));
+ return ret ? : err;
+
+ out_unlock_reading:
--- /dev/null
+From 735ae74f73e55c191d48689bd11ff4a06ea0508f Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Wed, 3 Jan 2024 21:02:16 +0100
+Subject: parisc/firmware: Fix F-extend for PDC addresses
+
+From: Helge Deller <deller@gmx.de>
+
+commit 735ae74f73e55c191d48689bd11ff4a06ea0508f upstream.
+
+When running with narrow firmware (64-bit kernel using a 32-bit
+firmware), extend PDC addresses into the 0xfffffff0.00000000
+region instead of the 0xf0f0f0f0.00000000 region.
+
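+Worked example (illustrative): a 32-bit PDC address such as 0xf0001234
+now extends as
+
+  (0xfffffff0UL << 32) | (u32)0xf0001234 == 0xfffffff0f0001234UL
+
+whereas the old code produced 0xf0f0f0f0f0001234UL, which only aliases
+to the right location on CPUs that decode 40 or fewer virtual address
+bits.
+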
+This fixes the power button on the C3700 machine in qemu (64-bit CPU
+with 32-bit firmware), and my assumption is that the previous code was
+really never used (because most 64-bit machines have a 64-bit firmware),
+or that it just worked on very old machines because they may only decode
+40 bits of the virtual address.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/firmware.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned l
+ #ifdef CONFIG_64BIT
+ if(unlikely(parisc_narrow_firmware)) {
+ if((address & 0xff000000) == 0xf0000000)
+- return 0xf0f0f0f000000000UL | (u32)address;
++ return (0xfffffff0UL << 32) | (u32)address;
+
+ if((address & 0xf0000000) == 0xf0000000)
+- return 0xffffffff00000000UL | (u32)address;
++ return (0xffffffffUL << 32) | (u32)address;
+ }
+ #endif
+ return address;
--- /dev/null
+From 71cd7e80cfde548959952eac7063aeaea1f2e1c6 Mon Sep 17 00:00:00 2001
+From: Hongchen Zhang <zhanghongchen@loongson.cn>
+Date: Thu, 16 Nov 2023 08:56:09 +0800
+Subject: PM: hibernate: Enforce ordering during image compression/decompression
+
+From: Hongchen Zhang <zhanghongchen@loongson.cn>
+
+commit 71cd7e80cfde548959952eac7063aeaea1f2e1c6 upstream.
+
+An S4 (suspend to disk) test on the LoongArch 3A6000 platform sometimes
+fails with the following error message in the dmesg log:
+
+ Invalid LZO compressed length
+
+That happens because when compressing/decompressing the image, the
+synchronization between the control thread and the compress/decompress/crc
+thread is based on a relaxed ordering interface, which is unreliable, and the
+following situation may occur:
+
+CPU 0                                  CPU 1
+save_image_lzo                         lzo_compress_threadfn
+                                       atomic_set(&d->stop, 1);
+atomic_read(&data[thr].stop)
+data[thr].cmp = data[thr].cmp_len;
+                                       WRITE data[thr].cmp_len
+
+Then CPU0 gets a stale cmp_len and writes it to disk. During resume from S4,
+the wrong cmp_len is loaded.
+
+To maintain data consistency between the two threads, use the acquire/release
+variants of atomic set and read operations.
+
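+The resulting pairing, sketched generically (illustration, not a quote
+of the patched code; use() stands for the consumer's read):
+
+  /* compress thread (producer) */
+  d->cmp_len = len;
+  atomic_set_release(&d->stop, 1);   /* cmp_len visible before stop */
+  wake_up(&d->done);
+
+  /* control thread (consumer) */
+  wait_event(d->done, atomic_read_acquire(&d->stop));
+  use(d->cmp_len);                   /* reads the up-to-date value */
+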
+Fixes: 081a9d043c98 ("PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Hongchen Zhang <zhanghongchen@loongson.cn>
+Co-developed-by: Weihao Li <liweihao@loongson.cn>
+Signed-off-by: Weihao Li <liweihao@loongson.cn>
+[ rjw: Subject rewrite and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/swap.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -603,11 +603,11 @@ static int crc32_threadfn(void *data)
+ unsigned i;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -616,7 +616,7 @@ static int crc32_threadfn(void *data)
+ for (i = 0; i < d->run_threads; i++)
+ *d->crc32 = crc32_le(*d->crc32,
+ d->unc[i], *d->unc_len[i]);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -646,12 +646,12 @@ static int lzo_compress_threadfn(void *d
+ struct cmp_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -660,7 +660,7 @@ static int lzo_compress_threadfn(void *d
+ d->ret = lzo1x_1_compress(d->unc, d->unc_len,
+ d->cmp + LZO_HEADER, &d->cmp_len,
+ d->wrk);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -798,7 +798,7 @@ static int save_image_lzo(struct swap_ma
+
+ data[thr].unc_len = off;
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -806,12 +806,12 @@ static int save_image_lzo(struct swap_ma
+ break;
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -850,7 +850,7 @@ static int save_image_lzo(struct swap_ma
+ }
+ }
+
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+
+@@ -1132,12 +1132,12 @@ static int lzo_decompress_threadfn(void
+ struct dec_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -1150,7 +1150,7 @@ static int lzo_decompress_threadfn(void
+ flush_icache_range((unsigned long)d->unc,
+ (unsigned long)d->unc + d->unc_len);
+
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -1338,7 +1338,7 @@ static int load_image_lzo(struct swap_ma
+ }
+
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ crc->run_threads = 0;
+ }
+@@ -1374,7 +1374,7 @@ static int load_image_lzo(struct swap_ma
+ pg = 0;
+ }
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -1393,7 +1393,7 @@ static int load_image_lzo(struct swap_ma
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -1424,7 +1424,7 @@ static int load_image_lzo(struct swap_ma
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0) {
+ crc->run_threads = thr + 1;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ goto out_finish;
+ }
+@@ -1432,13 +1432,13 @@ static int load_image_lzo(struct swap_ma
+ }
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ }
+
+ out_finish:
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+ stop = ktime_get();
--- /dev/null
+From d5362c37e1f8a40096452fc201c30e705750e687 Mon Sep 17 00:00:00 2001
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+Date: Fri, 15 Dec 2023 10:00:49 +0800
+Subject: rpmsg: virtio: Free driver_override when rpmsg_remove()
+
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+
+commit d5362c37e1f8a40096452fc201c30e705750e687 upstream.
+
+Free driver_override when rpmsg_remove() is called; otherwise the
+following memory leak occurs:
+
+unreferenced object 0xffff0000d55d7080 (size 128):
+ comm "kworker/u8:2", pid 56, jiffies 4294893188 (age 214.272s)
+ hex dump (first 32 bytes):
+ 72 70 6d 73 67 5f 6e 73 00 00 00 00 00 00 00 00 rpmsg_ns........
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<000000009c94c9c1>] __kmem_cache_alloc_node+0x1f8/0x320
+ [<000000002300d89b>] __kmalloc_node_track_caller+0x44/0x70
+ [<00000000228a60c3>] kstrndup+0x4c/0x90
+ [<0000000077158695>] driver_set_override+0xd0/0x164
+ [<000000003e9c4ea5>] rpmsg_register_device_override+0x98/0x170
+ [<000000001c0c89a8>] rpmsg_ns_register_device+0x24/0x30
+ [<000000008bbf8fa2>] rpmsg_probe+0x2e0/0x3ec
+ [<00000000e65a68df>] virtio_dev_probe+0x1c0/0x280
+ [<00000000443331cc>] really_probe+0xbc/0x2dc
+ [<00000000391064b1>] __driver_probe_device+0x78/0xe0
+ [<00000000a41c9a5b>] driver_probe_device+0xd8/0x160
+ [<000000009c3bd5df>] __device_attach_driver+0xb8/0x140
+ [<0000000043cd7614>] bus_for_each_drv+0x7c/0xd4
+ [<000000003b929a36>] __device_attach+0x9c/0x19c
+ [<00000000a94e0ba8>] device_initial_probe+0x14/0x20
+ [<000000003c999637>] bus_probe_device+0xa0/0xac
+
+Signed-off-by: Xiaolei Wang <xiaolei.wang@windriver.com>
+Fixes: b0b03b811963 ("rpmsg: Release rpmsg devices in backends")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20231215020049.78750-1-xiaolei.wang@windriver.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rpmsg/virtio_rpmsg_bus.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/rpmsg/virtio_rpmsg_bus.c
++++ b/drivers/rpmsg/virtio_rpmsg_bus.c
+@@ -387,6 +387,7 @@ static void virtio_rpmsg_release_device(
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
++ kfree(rpdev->driver_override);
+ kfree(vch);
+ }
+
iio-adc-ad7091r-enable-internal-vref-if-external-vre.patch
dmaengine-fix-null-pointer-in-channel-unregistration.patch
iio-adc-ad7091r-move-exports-into-iio_ad7091r-namespace.patch
+ext4-allow-for-the-last-group-to-be-marked-as-trimmed.patch
+crypto-api-disallow-identical-driver-names.patch
+pm-hibernate-enforce-ordering-during-image-compression-decompression.patch
+hwrng-core-fix-page-fault-dead-lock-on-mmap-ed-hwrng.patch
+crypto-s390-aes-fix-buffer-overread-in-ctr-mode.patch
+rpmsg-virtio-free-driver_override-when-rpmsg_remove.patch
+bus-mhi-host-drop-chan-lock-before-queuing-buffers.patch
+parisc-firmware-fix-f-extend-for-pdc-addresses.patch
+async-split-async_schedule_node_domain.patch
+async-introduce-async_schedule_dev_nocall.patch
+arm64-dts-qcom-sdm845-fix-usb-wakeup-interrupt-types.patch