--- /dev/null
+From 73254a297c2dd094abec7c9efee32455ae875bdf Mon Sep 17 00:00:00 2001
+From: Hagar Hemdan <hagarhem@amazon.com>
+Date: Tue, 4 Jun 2024 13:05:27 +0000
+Subject: io_uring: fix possible deadlock in io_register_iowq_max_workers()
+
+From: Hagar Hemdan <hagarhem@amazon.com>
+
+commit 73254a297c2dd094abec7c9efee32455ae875bdf upstream.
+
+The io_register_iowq_max_workers() function calls io_put_sq_data(),
+which acquires the sqd->lock without releasing the uring_lock.
+Similar to the commit 009ad9f0c6ee ("io_uring: drop ctx->uring_lock
+before acquiring sqd->lock"), this can lead to a potential deadlock
+situation.
+
+To resolve this issue, the uring_lock is released before calling
+io_put_sq_data(), and then it is re-acquired after the function call.
+
+This change ensures that the locks are acquired in the correct
+order, preventing the possibility of a deadlock.
+
+Suggested-by: Maximilian Heyne <mheyne@amazon.de>
+Signed-off-by: Hagar Hemdan <hagarhem@amazon.com>
+Link: https://lore.kernel.org/r/20240604130527.3597-1-hagarhem@amazon.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -10822,8 +10822,10 @@ static int io_register_iowq_max_workers(
+ }
+
+ if (sqd) {
++ mutex_unlock(&ctx->uring_lock);
+ mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
++ mutex_lock(&ctx->uring_lock);
+ }
+
+ if (copy_to_user(arg, new_count, sizeof(new_count)))
+@@ -10848,8 +10850,11 @@ static int io_register_iowq_max_workers(
+ return 0;
+ err:
+ if (sqd) {
++ mutex_unlock(&ctx->uring_lock);
+ mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
++ mutex_lock(&ctx->uring_lock);
++
+ }
+ return ret;
+ }
--- /dev/null
+From 59f8f0b54c8ffb4521f6bbd1cb6f4dfa5022e75e Mon Sep 17 00:00:00 2001
+From: Li Nan <linan122@huawei.com>
+Date: Sat, 27 May 2023 15:22:16 +0800
+Subject: md/raid10: improve code of mrdev in raid10_sync_request
+
+From: Li Nan <linan122@huawei.com>
+
+commit 59f8f0b54c8ffb4521f6bbd1cb6f4dfa5022e75e upstream.
+
+'need_recover' and 'mrdev' are equivalent in raid10_sync_request(), and
+incrementing mrdev->nr_pending is unreasonable if recovery is not needed. Replace
+'need_recover' with 'mrdev', and only inc nr_pending when needed.
+
+Signed-off-by: Li Nan <linan122@huawei.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20230527072218.2365857-3-linan666@huaweicloud.com
+Cc: Hagar Gamal Halim <hagarhem@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid10.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3443,7 +3443,6 @@ static sector_t raid10_sync_request(stru
+ sector_t sect;
+ int must_sync;
+ int any_working;
+- int need_recover = 0;
+ struct raid10_info *mirror = &conf->mirrors[i];
+ struct md_rdev *mrdev, *mreplace;
+
+@@ -3451,14 +3450,13 @@ static sector_t raid10_sync_request(stru
+ mrdev = rcu_dereference(mirror->rdev);
+ mreplace = rcu_dereference(mirror->replacement);
+
+- if (mrdev != NULL &&
+- !test_bit(Faulty, &mrdev->flags) &&
+- !test_bit(In_sync, &mrdev->flags))
+- need_recover = 1;
++ if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
++ test_bit(In_sync, &mrdev->flags)))
++ mrdev = NULL;
+ if (mreplace && test_bit(Faulty, &mreplace->flags))
+ mreplace = NULL;
+
+- if (!need_recover && !mreplace) {
++ if (!mrdev && !mreplace) {
+ rcu_read_unlock();
+ continue;
+ }
+@@ -3492,7 +3490,8 @@ static sector_t raid10_sync_request(stru
+ rcu_read_unlock();
+ continue;
+ }
+- atomic_inc(&mrdev->nr_pending);
++ if (mrdev)
++ atomic_inc(&mrdev->nr_pending);
+ if (mreplace)
+ atomic_inc(&mreplace->nr_pending);
+ rcu_read_unlock();
+@@ -3579,7 +3578,7 @@ static sector_t raid10_sync_request(stru
+ r10_bio->devs[1].devnum = i;
+ r10_bio->devs[1].addr = to_addr;
+
+- if (need_recover) {
++ if (mrdev) {
+ bio = r10_bio->devs[1].bio;
+ bio->bi_next = biolist;
+ biolist = bio;
+@@ -3624,7 +3623,7 @@ static sector_t raid10_sync_request(stru
+ for (k = 0; k < conf->copies; k++)
+ if (r10_bio->devs[k].devnum == i)
+ break;
+- if (!test_bit(In_sync,
++ if (mrdev && !test_bit(In_sync,
+ &mrdev->flags)
+ && !rdev_set_badblocks(
+ mrdev,
+@@ -3650,12 +3649,14 @@ static sector_t raid10_sync_request(stru
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
+- rdev_dec_pending(mrdev, mddev);
++ if (mrdev)
++ rdev_dec_pending(mrdev, mddev);
+ if (mreplace)
+ rdev_dec_pending(mreplace, mddev);
+ break;
+ }
+- rdev_dec_pending(mrdev, mddev);
++ if (mrdev)
++ rdev_dec_pending(mrdev, mddev);
+ if (mreplace)
+ rdev_dec_pending(mreplace, mddev);
+ if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
--- /dev/null
+From 704573851b51808b45dae2d62059d1d8189138a2 Mon Sep 17 00:00:00 2001
+From: Qun-Wei Lin <qun-wei.lin@mediatek.com>
+Date: Fri, 25 Oct 2024 16:58:11 +0800
+Subject: mm: krealloc: Fix MTE false alarm in __do_krealloc
+
+From: Qun-Wei Lin <qun-wei.lin@mediatek.com>
+
+commit 704573851b51808b45dae2d62059d1d8189138a2 upstream.
+
+This patch addresses an issue introduced by commit 1a83a716ec233 ("mm:
+krealloc: consider spare memory for __GFP_ZERO") which causes MTE
+(Memory Tagging Extension) to falsely report a slab-out-of-bounds error.
+
+The problem occurs when zeroing out spare memory in __do_krealloc. The
+original code only considered software-based KASAN and did not account
+for MTE. It does not reset the KASAN tag before calling memset, leading
+to a mismatch between the pointer tag and the memory tag, resulting
+in a false positive.
+
+Example of the error:
+==================================================================
+swapper/0: BUG: KASAN: slab-out-of-bounds in __memset+0x84/0x188
+swapper/0: Write at addr f4ffff8005f0fdf0 by task swapper/0/1
+swapper/0: Pointer tag: [f4], memory tag: [fe]
+swapper/0:
+swapper/0: CPU: 4 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.12.
+swapper/0: Hardware name: MT6991(ENG) (DT)
+swapper/0: Call trace:
+swapper/0: dump_backtrace+0xfc/0x17c
+swapper/0: show_stack+0x18/0x28
+swapper/0: dump_stack_lvl+0x40/0xa0
+swapper/0: print_report+0x1b8/0x71c
+swapper/0: kasan_report+0xec/0x14c
+swapper/0: __do_kernel_fault+0x60/0x29c
+swapper/0: do_bad_area+0x30/0xdc
+swapper/0: do_tag_check_fault+0x20/0x34
+swapper/0: do_mem_abort+0x58/0x104
+swapper/0: el1_abort+0x3c/0x5c
+swapper/0: el1h_64_sync_handler+0x80/0xcc
+swapper/0: el1h_64_sync+0x68/0x6c
+swapper/0: __memset+0x84/0x188
+swapper/0: btf_populate_kfunc_set+0x280/0x3d8
+swapper/0: __register_btf_kfunc_id_set+0x43c/0x468
+swapper/0: register_btf_kfunc_id_set+0x48/0x60
+swapper/0: register_nf_nat_bpf+0x1c/0x40
+swapper/0: nf_nat_init+0xc0/0x128
+swapper/0: do_one_initcall+0x184/0x464
+swapper/0: do_initcall_level+0xdc/0x1b0
+swapper/0: do_initcalls+0x70/0xc0
+swapper/0: do_basic_setup+0x1c/0x28
+swapper/0: kernel_init_freeable+0x144/0x1b8
+swapper/0: kernel_init+0x20/0x1a8
+swapper/0: ret_from_fork+0x10/0x20
+==================================================================
+
+Fixes: 1a83a716ec233 ("mm: krealloc: consider spare memory for __GFP_ZERO")
+Signed-off-by: Qun-Wei Lin <qun-wei.lin@mediatek.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slab_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -1213,7 +1213,7 @@ static __always_inline void *__do_kreall
+ /* Zero out spare memory. */
+ if (want_init_on_alloc(flags)) {
+ kasan_disable_current();
+- memset((void *)p + new_size, 0, ks - new_size);
++ memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
+ kasan_enable_current();
+ }
+
--- /dev/null
+From vbabka@suse.cz Fri Nov 15 06:17:06 2024
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Wed, 13 Nov 2024 17:31:19 +0100
+Subject: mm/memory: add non-anonymous page check in the copy_present_page()
+To: stable@vger.kernel.org
+Cc: David Hildenbrand <david@redhat.com>, Peter Xu <peterx@redhat.com>, linux-mm@kvack.org, Yuanzheng Song <songyuanzheng@huawei.com>, Vlastimil Babka <vbabka@suse.cz>
+Message-ID: <20241113163118.54834-2-vbabka@suse.cz>
+
+From: Yuanzheng Song <songyuanzheng@huawei.com>
+
+The vma->anon_vma of the child process may be NULL because
+the entire vma does not contain anonymous pages. In this
+case, a BUG will occur when the copy_present_page() passes
+a copy of a non-anonymous page of that vma to the
+page_add_new_anon_rmap() to set up new anonymous rmap.
+
+------------[ cut here ]------------
+kernel BUG at mm/rmap.c:1052!
+Internal error: Oops - BUG: 0 [#1] SMP
+Modules linked in:
+CPU: 4 PID: 4652 Comm: test Not tainted 5.15.75 #1
+Hardware name: linux,dummy-virt (DT)
+pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : __page_set_anon_rmap+0xc0/0xe8
+lr : __page_set_anon_rmap+0xc0/0xe8
+sp : ffff80000e773860
+x29: ffff80000e773860 x28: fffffc13cf006ec0 x27: ffff04f3ccd68000
+x26: ffff04f3c5c33248 x25: 0000000010100073 x24: ffff04f3c53c0a80
+x23: 0000000020000000 x22: 0000000000000001 x21: 0000000020000000
+x20: fffffc13cf006ec0 x19: 0000000000000000 x18: 0000000000000000
+x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000
+x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
+x11: 0000000000000000 x10: 0000000000000000 x9 : ffffdddc5581377c
+x8 : 0000000000000000 x7 : 0000000000000011 x6 : ffff2717a8433000
+x5 : ffff80000e773810 x4 : ffffdddc55400000 x3 : 0000000000000000
+x2 : ffffdddc56b20000 x1 : ffff04f3c9a48040 x0 : 0000000000000000
+Call trace:
+ __page_set_anon_rmap+0xc0/0xe8
+ page_add_new_anon_rmap+0x13c/0x200
+ copy_pte_range+0x6b8/0x1018
+ copy_page_range+0x3a8/0x5e0
+ dup_mmap+0x3a0/0x6e8
+ dup_mm+0x78/0x140
+ copy_process+0x1528/0x1b08
+ kernel_clone+0xac/0x610
+ __do_sys_clone+0x78/0xb0
+ __arm64_sys_clone+0x30/0x40
+ invoke_syscall+0x68/0x170
+ el0_svc_common.constprop.0+0x80/0x250
+ do_el0_svc+0x48/0xb8
+ el0_svc+0x48/0x1a8
+ el0t_64_sync_handler+0xb0/0xb8
+ el0t_64_sync+0x1a0/0x1a4
+Code: 97f899f4 f9400273 17ffffeb 97f899f1 (d4210000)
+---[ end trace dc65e5edd0f362fa ]---
+Kernel panic - not syncing: Oops - BUG: Fatal exception
+SMP: stopping secondary CPUs
+Kernel Offset: 0x5ddc4d400000 from 0xffff800008000000
+PHYS_OFFSET: 0xfffffb0c80000000
+CPU features: 0x44000cf1,00000806
+Memory Limit: none
+---[ end Kernel panic - not syncing: Oops - BUG: Fatal exception ]---
+
+This problem has been fixed by the commit <fb3d824d1a46>
+("mm/rmap: split page_dup_rmap() into page_dup_file_rmap()
+and page_try_dup_anon_rmap()"), but still exists in the
+linux-5.15.y branch.
+
+That upstream fix is not applicable to this version
+because of the large version differences. Therefore, fix it by
+adding a non-anonymous page check in the copy_present_page().
+
+Cc: stable@vger.kernel.org
+Fixes: 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
+Signed-off-by: Yuanzheng Song <songyuanzheng@huawei.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -903,6 +903,17 @@ copy_present_page(struct vm_area_struct
+ if (likely(!page_needs_cow_for_dma(src_vma, page)))
+ return 1;
+
++ /*
++ * The vma->anon_vma of the child process may be NULL
++ * because the entire vma does not contain anonymous pages.
++ * A BUG will occur when the copy_present_page() passes
++ * a copy of a non-anonymous page of that vma to the
++ * page_add_new_anon_rmap() to set up new anonymous rmap.
++ * Return 1 if the page is not an anonymous page.
++ */
++ if (!PageAnon(page))
++ return 1;
++
+ new_page = *prealloc;
+ if (!new_page)
+ return -EAGAIN;
hid-multitouch-add-quirk-for-logitech-bolt-receiver-.patch
hid-lenovo-add-support-for-thinkpad-x1-tablet-gen-3-.patch
net-usb-qmi_wwan-add-fibocom-fg132-0x0112-compositio.patch
+md-raid10-improve-code-of-mrdev-in-raid10_sync_request.patch
+io_uring-fix-possible-deadlock-in-io_register_iowq_max_workers.patch
+mm-krealloc-fix-mte-false-alarm-in-__do_krealloc.patch
+mm-memory-add-non-anonymous-page-check-in-the-copy_present_page.patch
+udf-allocate-name-buffer-in-directory-iterator-on-heap.patch
+udf-avoid-directory-type-conversion-failure-due-to-enomem.patch
--- /dev/null
+From 0aba4860b0d0216a1a300484ff536171894d49d8 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 20 Dec 2022 12:38:45 +0100
+Subject: udf: Allocate name buffer in directory iterator on heap
+
+From: Jan Kara <jack@suse.cz>
+
+commit 0aba4860b0d0216a1a300484ff536171894d49d8 upstream.
+
+Currently we allocate name buffer in directory iterators (struct
+udf_fileident_iter) on stack. These structures are relatively large
+(some 360 bytes on 64-bit architectures). For udf_rename() which needs
+to keep three of these structures in parallel the stack usage becomes
+rather heavy - 1536 bytes in total. Allocate the name buffer in the
+iterator from heap to avoid excessive stack usage.
+
+Link: https://lore.kernel.org/all/202212200558.lK9x1KW0-lkp@intel.com
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+[Add extra include linux/slab.h]
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/directory.c | 24 ++++++++++++++++--------
+ fs/udf/udfdecl.h | 2 +-
+ 2 files changed, 17 insertions(+), 9 deletions(-)
+
+--- a/fs/udf/directory.c
++++ b/fs/udf/directory.c
+@@ -19,6 +19,7 @@
+ #include <linux/bio.h>
+ #include <linux/crc-itu-t.h>
+ #include <linux/iversion.h>
++#include <linux/slab.h>
+
+ static int udf_verify_fi(struct udf_fileident_iter *iter)
+ {
+@@ -248,9 +249,14 @@ int udf_fiiter_init(struct udf_fileident
+ iter->elen = 0;
+ iter->epos.bh = NULL;
+ iter->name = NULL;
++ iter->namebuf = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL);
++ if (!iter->namebuf)
++ return -ENOMEM;
+
+- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+- return udf_copy_fi(iter);
++ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
++ err = udf_copy_fi(iter);
++ goto out;
++ }
+
+ if (inode_bmap(dir, iter->pos >> dir->i_blkbits, &iter->epos,
+ &iter->eloc, &iter->elen, &iter->loffset) !=
+@@ -260,17 +266,17 @@ int udf_fiiter_init(struct udf_fileident
+ udf_err(dir->i_sb,
+ "position %llu not allocated in directory (ino %lu)\n",
+ (unsigned long long)pos, dir->i_ino);
+- return -EFSCORRUPTED;
++ err = -EFSCORRUPTED;
++ goto out;
+ }
+ err = udf_fiiter_load_bhs(iter);
+ if (err < 0)
+- return err;
++ goto out;
+ err = udf_copy_fi(iter);
+- if (err < 0) {
++out:
++ if (err < 0)
+ udf_fiiter_release(iter);
+- return err;
+- }
+- return 0;
++ return err;
+ }
+
+ int udf_fiiter_advance(struct udf_fileident_iter *iter)
+@@ -307,6 +313,8 @@ void udf_fiiter_release(struct udf_filei
+ brelse(iter->bh[0]);
+ brelse(iter->bh[1]);
+ iter->bh[0] = iter->bh[1] = NULL;
++ kfree(iter->namebuf);
++ iter->namebuf = NULL;
+ }
+
+ static void udf_copy_to_bufs(void *buf1, int len1, void *buf2, int len2,
+--- a/fs/udf/udfdecl.h
++++ b/fs/udf/udfdecl.h
+@@ -99,7 +99,7 @@ struct udf_fileident_iter {
+ struct extent_position epos; /* Position after the above extent */
+ struct fileIdentDesc fi; /* Copied directory entry */
+ uint8_t *name; /* Pointer to entry name */
+- uint8_t namebuf[UDF_NAME_LEN_CS0]; /* Storage for entry name in case
++ uint8_t *namebuf; /* Storage for entry name in case
+ * the name is split between two blocks
+ */
+ };
--- /dev/null
+From df97f64dfa317a5485daf247b6c043a584ef95f9 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 9 Feb 2023 10:33:09 +0100
+Subject: udf: Avoid directory type conversion failure due to ENOMEM
+
+From: Jan Kara <jack@suse.cz>
+
+commit df97f64dfa317a5485daf247b6c043a584ef95f9 upstream.
+
+When converting directory from in-ICB to normal format, the last
+iteration through the directory fixing up directory entries can fail
+due to ENOMEM. We do not expect this iteration to fail since the
+directory is already verified to be correct and it is difficult to undo
+the conversion at this point. So just use GFP_NOFAIL to make sure the
+small allocation cannot fail.
+
+Reported-by: syzbot+111eaa994ff74f8d440f@syzkaller.appspotmail.com
+Fixes: 0aba4860b0d0 ("udf: Allocate name buffer in directory iterator on heap")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/directory.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/fs/udf/directory.c
++++ b/fs/udf/directory.c
+@@ -249,9 +249,12 @@ int udf_fiiter_init(struct udf_fileident
+ iter->elen = 0;
+ iter->epos.bh = NULL;
+ iter->name = NULL;
+- iter->namebuf = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL);
+- if (!iter->namebuf)
+- return -ENOMEM;
++ /*
++ * When directory is verified, we don't expect directory iteration to
++ * fail and it can be difficult to undo without corrupting filesystem.
++ * So just do not allow memory allocation failures here.
++ */
++ iter->namebuf = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL | __GFP_NOFAIL);
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ err = udf_copy_fi(iter);