--- /dev/null
+From 300a90b2cb5d442879e6398920c49aebbd5c8e40 Mon Sep 17 00:00:00 2001
+From: Song Liu <song@kernel.org>
+Date: Tue, 10 Sep 2024 22:55:08 -0700
+Subject: bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0
+
+From: Song Liu <song@kernel.org>
+
+commit 300a90b2cb5d442879e6398920c49aebbd5c8e40 upstream.
+
+bpf task local storage is now using task_struct->bpf_storage, so
+bpf_lsm_blob_sizes.lbs_task is no longer needed. Remove it to save some
+memory.
+
+Fixes: a10787e6d58c ("bpf: Enable task local storage for tracing programs")
+Cc: stable@vger.kernel.org
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Matt Bobrowski <mattbobrowski@google.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Acked-by: Matt Bobrowski <mattbobrowski@google.com>
+Link: https://lore.kernel.org/r/20240911055508.9588-1-song@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/bpf/hooks.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/security/bpf/hooks.c
++++ b/security/bpf/hooks.c
+@@ -24,7 +24,6 @@ static int __init bpf_lsm_init(void)
+
+ struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = {
+ .lbs_inode = sizeof(struct bpf_storage_blob),
+- .lbs_task = sizeof(struct bpf_storage_blob),
+ };
+
+ DEFINE_LSM(bpf) = {
--- /dev/null
+From e6a3531dd542cb127c8de32ab1e54a48ae19962b Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 24 Sep 2024 15:18:29 +0200
+Subject: dm-verity: restart or panic on an I/O error
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit e6a3531dd542cb127c8de32ab1e54a48ae19962b upstream.
+
+Maxim Suhanov reported that dm-verity doesn't crash if an I/O error
+happens. In theory, this could be used to subvert security, because an
+attacker can create sectors that return error with the Write Uncorrectable
+command. Some programs may misbehave if they have to deal with EIO.
+
+This commit fixes dm-verity, so that if "panic_on_corruption" or
+"restart_on_corruption" was specified and an I/O error happens, the
+machine will panic or restart.
+
+This commit also changes kernel_restart to emergency_restart -
+kernel_restart calls reboot notifiers and these reboot notifiers may wait
+for the bio that failed. emergency_restart doesn't call the notifiers.
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-verity-target.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -264,8 +264,10 @@ out:
+ if (v->mode == DM_VERITY_MODE_LOGGING)
+ return 0;
+
+- if (v->mode == DM_VERITY_MODE_RESTART)
+- kernel_restart("dm-verity device corrupted");
++ if (v->mode == DM_VERITY_MODE_RESTART) {
++ pr_emerg("dm-verity device corrupted\n");
++ emergency_restart();
++ }
+
+ if (v->mode == DM_VERITY_MODE_PANIC)
+ panic("dm-verity device corrupted");
+@@ -689,6 +691,23 @@ static void verity_finish_io(struct dm_v
+ if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ verity_fec_finish_io(io);
+
++ if (unlikely(status != BLK_STS_OK) &&
++ unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
++ !verity_is_system_shutting_down()) {
++ if (v->mode == DM_VERITY_MODE_RESTART ||
++ v->mode == DM_VERITY_MODE_PANIC)
++ DMERR_LIMIT("%s has error: %s", v->data_dev->name,
++ blk_status_to_str(status));
++
++ if (v->mode == DM_VERITY_MODE_RESTART) {
++ pr_emerg("dm-verity device corrupted\n");
++ emergency_restart();
++ }
++
++ if (v->mode == DM_VERITY_MODE_PANIC)
++ panic("dm-verity device corrupted");
++ }
++
+ bio_endio(bio);
+ }
+
--- /dev/null
+From 93701d3b84ac5f3ea07259d4ced405c53d757985 Mon Sep 17 00:00:00 2001
+From: Tommy Huang <tommy_huang@aspeedtech.com>
+Date: Wed, 11 Sep 2024 17:39:51 +0800
+Subject: i2c: aspeed: Update the stop sw state when the bus recovery occurs
+
+From: Tommy Huang <tommy_huang@aspeedtech.com>
+
+commit 93701d3b84ac5f3ea07259d4ced405c53d757985 upstream.
+
+When i2c bus recovery occurs, the driver sends the i2c stop command
+while SCL is low. In this case the sw state is left unchanged from its
+original value. Under multi-master usage, i2c bus recovery is invoked
+when an i2c transfer timeout occurs. Issue the stop command via the
+aspeed_i2c_do_stop() function so that master_state is also updated.
+
+Fixes: f327c686d3ba ("i2c: aspeed: added driver for Aspeed I2C")
+Cc: stable@vger.kernel.org # v4.13+
+Signed-off-by: Tommy Huang <tommy_huang@aspeedtech.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-aspeed.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
+
+ static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
+
++/* precondition: bus.lock has been acquired. */
++static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
++{
++ bus->master_state = ASPEED_I2C_MASTER_STOP;
++ writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++}
++
+ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
+ {
+ unsigned long time_left, flags;
+@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct
+ command);
+
+ reinit_completion(&bus->cmd_complete);
+- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++ aspeed_i2c_do_stop(bus);
+ spin_unlock_irqrestore(&bus->lock, flags);
+
+ time_left = wait_for_completion_timeout(
+@@ -391,13 +398,6 @@ static void aspeed_i2c_do_start(struct a
+ }
+
+ /* precondition: bus.lock has been acquired. */
+-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+-{
+- bus->master_state = ASPEED_I2C_MASTER_STOP;
+- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+-}
+-
+-/* precondition: bus.lock has been acquired. */
+ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
+ {
+ if (bus->msgs_index + 1 < bus->msgs_count) {
--- /dev/null
+From 1db4da55070d6a2754efeb3743f5312fc32f5961 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 11 Sep 2024 18:39:14 +0300
+Subject: i2c: isch: Add missed 'else'
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 1db4da55070d6a2754efeb3743f5312fc32f5961 upstream.
+
+In accordance with the existing comment, and based on code analysis,
+it is quite likely that an 'else' is missing in the path taken when
+the adapter times out. Add it.
+
+Fixes: 5bc1200852c3 ("i2c: Add Intel SCH SMBus support")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v2.6.27+
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-isch.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-isch.c
++++ b/drivers/i2c/busses/i2c-isch.c
+@@ -99,8 +99,7 @@ static int sch_transaction(void)
+ if (retries > MAX_RETRIES) {
+ dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
+ result = -ETIMEDOUT;
+- }
+- if (temp & 0x04) {
++ } else if (temp & 0x04) {
+ result = -EIO;
+ dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
+ "locked until next hard reset. (sorry!)\n");
--- /dev/null
+From a6f88ac32c6e63e69c595bfae220d8641704c9b7 Mon Sep 17 00:00:00 2001
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Date: Thu, 20 Jun 2024 22:54:34 +0000
+Subject: lockdep: fix deadlock issue between lockdep and rcu
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+commit a6f88ac32c6e63e69c595bfae220d8641704c9b7 upstream.
+
+There is a deadlock scenario between lockdep and rcu when
+rcu nocb feature is enabled, just as following call stack:
+
+ rcuop/x
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
+-001|queued_spin_lock(inline) // try to hold nocb_gp_lock
+-001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
+-002|__raw_spin_lock_irqsave(inline)
+-002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
+-003|wake_nocb_gp_defer(inline)
+-003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
+-004|__call_rcu_common(inline)
+-004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
+-005|call_rcu_zapped(inline)
+-005|free_zapped_rcu(ch = ?)// hold graph lock
+-006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
+-007|nocb_cb_wait(inline)
+-007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
+-008|kthread(_create = 0xFFFFFF80803122C0)
+-009|ret_from_fork(asm)
+
+ rcuop/y
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
+-001|queued_spin_lock()
+-001|lockdep_lock()
+-001|graph_lock() // try to hold graph lock
+-002|lookup_chain_cache_add()
+-002|validate_chain()
+-003|lock_acquire
+-004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
+-005|lock_timer_base(inline)
+-006|mod_timer(inline)
+-006|wake_nocb_gp_defer(inline)// hold nocb_gp_lock
+-006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
+-007|__call_rcu_common(inline)
+-007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
+-008|call_rcu_hurry(inline)
+-008|rcu_sync_call(inline)
+-008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
+-009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
+-010|nocb_cb_wait(inline)
+-010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
+-011|kthread(_create = 0xFFFFFF8080363740)
+-012|ret_from_fork(asm)
+
+rcuop/x and rcuop/y are rcu nocb threads with the same nocb gp thread.
+This patch releases the graph lock before lockdep calls call_rcu().
+
+Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
+Cc: stable@vger.kernel.org
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: Carlos Llamas <cmllamas@google.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Reviewed-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Link: https://lore.kernel.org/r/20240620225436.3127927-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/lockdep.c | 48 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 16 deletions(-)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -6184,25 +6184,27 @@ static struct pending_free *get_pending_
+ static void free_zapped_rcu(struct rcu_head *cb);
+
+ /*
+- * Schedule an RCU callback if no RCU callback is pending. Must be called with
+- * the graph lock held.
+- */
+-static void call_rcu_zapped(struct pending_free *pf)
++* See if we need to queue an RCU callback, must called with
++* the lockdep lock held, returns false if either we don't have
++* any pending free or the callback is already scheduled.
++* Otherwise, a call_rcu() must follow this function call.
++*/
++static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ {
+ WARN_ON_ONCE(inside_selftest());
+
+ if (list_empty(&pf->zapped))
+- return;
++ return false;
+
+ if (delayed_free.scheduled)
+- return;
++ return false;
+
+ delayed_free.scheduled = true;
+
+ WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+ delayed_free.index ^= 1;
+
+- call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++ return true;
+ }
+
+ /* The caller must hold the graph lock. May be called from RCU context. */
+@@ -6228,6 +6230,7 @@ static void free_zapped_rcu(struct rcu_h
+ {
+ struct pending_free *pf;
+ unsigned long flags;
++ bool need_callback;
+
+ if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+ return;
+@@ -6239,14 +6242,18 @@ static void free_zapped_rcu(struct rcu_h
+ pf = delayed_free.pf + (delayed_free.index ^ 1);
+ __free_zapped_classes(pf);
+ delayed_free.scheduled = false;
++ need_callback =
++ prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
++ lockdep_unlock();
++ raw_local_irq_restore(flags);
+
+ /*
+- * If there's anything on the open list, close and start a new callback.
+- */
+- call_rcu_zapped(delayed_free.pf + delayed_free.index);
++ * If there's pending free and its callback has not been scheduled,
++ * queue an RCU callback.
++ */
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
+- lockdep_unlock();
+- raw_local_irq_restore(flags);
+ }
+
+ /*
+@@ -6286,6 +6293,7 @@ static void lockdep_free_key_range_reg(v
+ {
+ struct pending_free *pf;
+ unsigned long flags;
++ bool need_callback;
+
+ init_data_structures_once();
+
+@@ -6293,10 +6301,11 @@ static void lockdep_free_key_range_reg(v
+ lockdep_lock();
+ pf = get_pending_free();
+ __lockdep_free_key_range(pf, start, size);
+- call_rcu_zapped(pf);
++ need_callback = prepare_call_rcu_zapped(pf);
+ lockdep_unlock();
+ raw_local_irq_restore(flags);
+-
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ /*
+ * Wait for any possible iterators from look_up_lock_class() to pass
+ * before continuing to free the memory they refer to.
+@@ -6390,6 +6399,7 @@ static void lockdep_reset_lock_reg(struc
+ struct pending_free *pf;
+ unsigned long flags;
+ int locked;
++ bool need_callback = false;
+
+ raw_local_irq_save(flags);
+ locked = graph_lock();
+@@ -6398,11 +6408,13 @@ static void lockdep_reset_lock_reg(struc
+
+ pf = get_pending_free();
+ __lockdep_reset_lock(pf, lock);
+- call_rcu_zapped(pf);
++ need_callback = prepare_call_rcu_zapped(pf);
+
+ graph_unlock();
+ out_irq:
+ raw_local_irq_restore(flags);
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ }
+
+ /*
+@@ -6446,6 +6458,7 @@ void lockdep_unregister_key(struct lock_
+ struct pending_free *pf;
+ unsigned long flags;
+ bool found = false;
++ bool need_callback = false;
+
+ might_sleep();
+
+@@ -6466,11 +6479,14 @@ void lockdep_unregister_key(struct lock_
+ if (found) {
+ pf = get_pending_free();
+ __lockdep_free_key_range(pf, key, 1);
+- call_rcu_zapped(pf);
++ need_callback = prepare_call_rcu_zapped(pf);
+ }
+ lockdep_unlock();
+ raw_local_irq_restore(flags);
+
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++
+ /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+ synchronize_rcu();
+ }
--- /dev/null
+From fb497d6db7c19c797cbd694b52d1af87c4eebcc6 Mon Sep 17 00:00:00 2001
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Date: Wed, 4 Sep 2024 17:12:04 -0700
+Subject: mm/damon/vaddr: protect vma traversal in __damon_va_thre_regions() with rcu read lock
+
+From: Liam R. Howlett <Liam.Howlett@oracle.com>
+
+commit fb497d6db7c19c797cbd694b52d1af87c4eebcc6 upstream.
+
+Traversing VMAs of a given maple tree should be protected by rcu read
+lock. However, __damon_va_three_regions() is not doing the protection.
+Hold the lock.
+
+Link: https://lkml.kernel.org/r/20240905001204.1481-1-sj@kernel.org
+Fixes: d0cf3dd47f0d ("damon: convert __damon_va_three_regions to use the VMA iterator")
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Closes: https://lore.kernel.org/b83651a0-5b24-4206-b860-cb54ffdf209b@roeck-us.net
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/vaddr.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -126,6 +126,7 @@ static int __damon_va_three_regions(stru
+ * If this is too slow, it can be optimised to examine the maple
+ * tree gaps.
+ */
++ rcu_read_lock();
+ for_each_vma(vmi, vma) {
+ unsigned long gap;
+
+@@ -146,6 +147,7 @@ static int __damon_va_three_regions(stru
+ next:
+ prev = vma;
+ }
++ rcu_read_unlock();
+
+ if (!sz_range(&second_gap) || !sz_range(&first_gap))
+ return -EINVAL;
--- /dev/null
+From 69b50d4351ed924f29e3d46b159e28f70dfc707f Mon Sep 17 00:00:00 2001
+From: David Gow <davidgow@google.com>
+Date: Sat, 3 Aug 2024 15:46:41 +0800
+Subject: mm: only enforce minimum stack gap size if it's sensible
+
+From: David Gow <davidgow@google.com>
+
+commit 69b50d4351ed924f29e3d46b159e28f70dfc707f upstream.
+
+The generic mmap_base code tries to leave a gap between the top of the
+stack and the mmap base address, but enforces a minimum gap size (MIN_GAP)
+of 128MB, which is too large on some setups. In particular, on arm tasks
+without ADDR_LIMIT_32BIT, the STACK_TOP value is less than 128MB, so it's
+impossible to fit such a gap in.
+
+Only enforce this minimum if MIN_GAP < MAX_GAP, as we'd prefer to honour
+MAX_GAP, which is defined proportionally, so scales better and always
+leaves us with both _some_ stack space and some room for mmap.
+
+This fixes the usercopy KUnit test suite on 32-bit arm, as it doesn't set
+any personality flags so gets the default (in this case 26-bit) task size.
+This test can be run with: ./tools/testing/kunit/kunit.py run --arch arm
+usercopy --make_options LLVM=1
+
+Link: https://lkml.kernel.org/r/20240803074642.1849623-2-davidgow@google.com
+Fixes: dba79c3df4a2 ("arm: use generic mmap top-down layout and brk randomization")
+Signed-off-by: David Gow <davidgow@google.com>
+Reviewed-by: Kees Cook <kees@kernel.org>
+Cc: Alexandre Ghiti <alex@ghiti.fr>
+Cc: Linus Walleij <linus.walleij@linaro.org>
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/util.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -434,7 +434,7 @@ static unsigned long mmap_base(unsigned
+ if (gap + pad > gap)
+ gap += pad;
+
+- if (gap < MIN_GAP)
++ if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
--- /dev/null
+From f34d086fb7102fec895fd58b9e816b981b284c17 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Tue, 11 Jun 2024 09:50:32 +0200
+Subject: module: Fix KCOV-ignored file name
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit f34d086fb7102fec895fd58b9e816b981b284c17 upstream.
+
+module.c was renamed to main.c, but the Makefile directive was copy-pasted
+verbatim with the old file name. Fix up the file name.
+
+Fixes: cfc1d277891e ("module: Move all into module/")
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Reviewed-by: Marco Elver <elver@google.com>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/bc0cf790b4839c5e38e2fafc64271f620568a39e.1718092070.git.dvyukov@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/module/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/module/Makefile
++++ b/kernel/module/Makefile
+@@ -5,7 +5,7 @@
+
+ # These are called from save_stack_trace() on slub debug path,
+ # and produce insane amounts of uninteresting coverage.
+-KCOV_INSTRUMENT_module.o := n
++KCOV_INSTRUMENT_main.o := n
+
+ obj-y += main.o
+ obj-y += strict_rwx.o
mm-filemap-return-early-if-failed-to-allocate-memory-for-split.patch
lib-xarray-introduce-a-new-helper-xas_get_order.patch
mm-filemap-optimize-filemap-folio-adding.patch
+bpf-lsm-set-bpf_lsm_blob_sizes.lbs_task-to-0.patch
+dm-verity-restart-or-panic-on-an-i-o-error.patch
+lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
+mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch
+spi-fspi-add-support-for-imx8ulp.patch
+module-fix-kcov-ignored-file-name.patch
+mm-damon-vaddr-protect-vma-traversal-in-__damon_va_thre_regions-with-rcu-read-lock.patch
+i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
+i2c-isch-add-missed-else.patch
--- /dev/null
+From 9228956a620553d7fd17f703a37a26c91e4d92ab Mon Sep 17 00:00:00 2001
+From: Haibo Chen <haibo.chen@nxp.com>
+Date: Thu, 5 Sep 2024 17:43:37 +0800
+Subject: spi: fspi: add support for imx8ulp
+
+From: Haibo Chen <haibo.chen@nxp.com>
+
+commit 9228956a620553d7fd17f703a37a26c91e4d92ab upstream.
+
+The flexspi on imx8ulp only has 16 LUTs, different with others which
+have up to 32 LUTs.
+
+Add a separate compatible string and nxp_fspi_devtype_data to support
+flexspi on imx8ulp.
+
+Fixes: ef89fd56bdfc ("arm64: dts: imx8ulp: add flexspi node")
+Cc: stable@kernel.org
+Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20240905094338.1986871-4-haibo.chen@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-nxp-fspi.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -371,6 +371,15 @@ static struct nxp_fspi_devtype_data imx8
+ .little_endian = true, /* little-endian */
+ };
+
++static struct nxp_fspi_devtype_data imx8ulp_data = {
++ .rxfifo = SZ_512, /* (64 * 64 bits) */
++ .txfifo = SZ_1K, /* (128 * 64 bits) */
++ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
++ .quirks = 0,
++ .lut_num = 16,
++ .little_endian = true, /* little-endian */
++};
++
+ struct nxp_fspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+@@ -1297,6 +1306,7 @@ static const struct of_device_id nxp_fsp
+ { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, },
+ { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
+ { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, },
++ { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);