From: Sasha Levin Date: Thu, 19 May 2022 13:51:40 +0000 (-0400) Subject: Fixes for 5.10 X-Git-Tag: v4.9.316~55 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9ddbb84f4a1b56222968ef48abe2805ecff866b5;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 5.10 Signed-off-by: Sasha Levin --- diff --git a/queue-5.10/alsa-hda-realtek-enable-headset-mic-on-lenovo-p360.patch b/queue-5.10/alsa-hda-realtek-enable-headset-mic-on-lenovo-p360.patch new file mode 100644 index 00000000000..db0d7d07048 --- /dev/null +++ b/queue-5.10/alsa-hda-realtek-enable-headset-mic-on-lenovo-p360.patch @@ -0,0 +1,35 @@ +From f790625f26f4f772aa30827dcdabac6378ca3ff1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 26 Mar 2022 00:05:00 +0800 +Subject: ALSA: hda/realtek: Enable headset mic on Lenovo P360 + +From: Kai-Heng Feng + +[ Upstream commit 5a8738571747c1e275a40b69a608657603867b7e ] + +Lenovo P360 is another platform equipped with ALC897, and it needs +ALC897_FIXUP_HEADSET_MIC_PIN quirk to make its headset mic work. + +Signed-off-by: Kai-Heng Feng +Link: https://lore.kernel.org/r/20220325160501.705221-1-kai.heng.feng@canonical.com +Signed-off-by: Takashi Iwai +Signed-off-by: Sasha Levin +--- + sound/pci/hda/patch_realtek.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index a5b1fd62a99c..3f880c4fd5e0 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10876,6 +10876,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { + SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), + SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), + SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), ++ SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), +-- +2.35.1 + diff --git a/queue-5.10/arm-9191-1-arm-stacktrace-kasan-silence-kasan-warnin.patch b/queue-5.10/arm-9191-1-arm-stacktrace-kasan-silence-kasan-warnin.patch new file mode 100644 index 00000000000..e2b97bc4175 --- /dev/null +++ b/queue-5.10/arm-9191-1-arm-stacktrace-kasan-silence-kasan-warnin.patch @@ -0,0 +1,105 @@ +From 3f17c70d099ff3a8e86e9c40bf632aefc4ba15b7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Apr 2022 10:52:47 +0100 +Subject: ARM: 9191/1: arm/stacktrace, kasan: Silence KASAN warnings in + unwind_frame() + +From: linyujun + +[ Upstream commit 9be4c88bb7924f68f88cfd47d925c2d046f51a73 ] + +The following KASAN warning is detected by QEMU. 
+ +================================================================== +BUG: KASAN: stack-out-of-bounds in unwind_frame+0x508/0x870 +Read of size 4 at addr c36bba90 by task cat/163 + +CPU: 1 PID: 163 Comm: cat Not tainted 5.10.0-rc1 #40 +Hardware name: ARM-Versatile Express +[] (unwind_backtrace) from [] (show_stack+0x10/0x14) +[] (show_stack) from [] (dump_stack+0x98/0xb0) +[] (dump_stack) from [] (print_address_description.constprop.0+0x58/0x4bc) +[] (print_address_description.constprop.0) from [] (kasan_report+0x154/0x170) +[] (kasan_report) from [] (unwind_frame+0x508/0x870) +[] (unwind_frame) from [] (__save_stack_trace+0x110/0x134) +[] (__save_stack_trace) from [] (stack_trace_save+0x8c/0xb4) +[] (stack_trace_save) from [] (kasan_set_track+0x38/0x60) +[] (kasan_set_track) from [] (kasan_set_free_info+0x20/0x2c) +[] (kasan_set_free_info) from [] (__kasan_slab_free+0xec/0x120) +[] (__kasan_slab_free) from [] (kmem_cache_free+0x7c/0x334) +[] (kmem_cache_free) from [] (rcu_core+0x390/0xccc) +[] (rcu_core) from [] (__do_softirq+0x180/0x518) +[] (__do_softirq) from [] (irq_exit+0x9c/0xe0) +[] (irq_exit) from [] (__handle_domain_irq+0xb0/0x110) +[] (__handle_domain_irq) from [] (gic_handle_irq+0xa0/0xb8) +[] (gic_handle_irq) from [] (__irq_svc+0x6c/0x94) +Exception stack(0xc36bb928 to 0xc36bb970) +b920: c36bb9c0 00000000 c0126919 c0101228 c36bb9c0 b76d7730 +b940: c36b8000 c36bb9a0 c3335b00 c01ce0d8 00000003 c36bba3c c36bb940 c36bb978 +b960: c010e298 c011373c 60000013 ffffffff +[] (__irq_svc) from [] (unwind_frame+0x0/0x870) +[] (unwind_frame) from [<00000000>] (0x0) + +The buggy address belongs to the page: +page:(ptrval) refcount:0 mapcount:0 mapping:00000000 index:0x0 pfn:0x636bb +flags: 0x0() +raw: 00000000 00000000 ef867764 00000000 00000000 00000000 ffffffff 00000000 +page dumped because: kasan: bad access detected + +addr c36bba90 is located in stack of task cat/163 at offset 48 in frame: + stack_trace_save+0x0/0xb4 + +this frame has 1 object: + [32, 48) 'trace' + +Memory state around the buggy address: + c36bb980: f1 f1 f1 f1 00 04 f2 f2 00 00 f3 f3 00 00 00 00 + c36bba00: 00 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 +>c36bba80: 00 00 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00 + ^ + c36bbb00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + c36bbb80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +================================================================== + +There is a same issue on x86 and has been resolved by the commit f7d27c35ddff +("x86/mm, kasan: Silence KASAN warnings in get_wchan()"). +The solution could be applied to arm architecture too. 
+ +Signed-off-by: Lin Yujun +Reported-by: He Ying +Signed-off-by: Russell King (Oracle) +Signed-off-by: Sasha Levin +--- + arch/arm/kernel/stacktrace.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c +index db798eac7431..824774999825 100644 +--- a/arch/arm/kernel/stacktrace.c ++++ b/arch/arm/kernel/stacktrace.c +@@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame) + return -EINVAL; + + frame->sp = frame->fp; +- frame->fp = *(unsigned long *)(fp); +- frame->pc = *(unsigned long *)(fp + 4); ++ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); ++ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4)); + #else + /* check current frame pointer is within bounds */ + if (fp < low + 12 || fp > high - 4) + return -EINVAL; + + /* restore the registers from the stack frame */ +- frame->fp = *(unsigned long *)(fp - 12); +- frame->sp = *(unsigned long *)(fp - 8); +- frame->pc = *(unsigned long *)(fp - 4); ++ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12)); ++ frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8)); ++ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4)); + #endif + + return 0; +-- +2.35.1 + diff --git a/queue-5.10/crypto-stm32-fix-reference-leak-in-stm32_crc_remove.patch b/queue-5.10/crypto-stm32-fix-reference-leak-in-stm32_crc_remove.patch new file mode 100644 index 00000000000..11cd8f8d1a4 --- /dev/null +++ b/queue-5.10/crypto-stm32-fix-reference-leak-in-stm32_crc_remove.patch @@ -0,0 +1,39 @@ +From 6ce18e20f40c6df809afbc22d92664f70b04dcc7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 17 Mar 2022 13:16:13 +0000 +Subject: crypto: stm32 - fix reference leak in stm32_crc_remove + +From: Zheng Yongjun + +[ Upstream commit e9a36feecee0ee5845f2e0656f50f9942dd0bed3 ] + +pm_runtime_get_sync() will increment pm usage counter even it +failed. Forgetting to call pm_runtime_put_noidle will result +in reference leak in stm32_crc_remove, so we should fix it. + +Signed-off-by: Zheng Yongjun +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + drivers/crypto/stm32/stm32-crc32.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c +index be1bf39a317d..90a920e7f664 100644 +--- a/drivers/crypto/stm32/stm32-crc32.c ++++ b/drivers/crypto/stm32/stm32-crc32.c +@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev) + struct stm32_crc *crc = platform_get_drvdata(pdev); + int ret = pm_runtime_get_sync(crc->dev); + +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(crc->dev); + return ret; ++ } + + spin_lock(&crc_list.lock); + list_del(&crc->list); +-- +2.35.1 + diff --git a/queue-5.10/crypto-x86-chacha20-avoid-spurious-jumps-to-other-fu.patch b/queue-5.10/crypto-x86-chacha20-avoid-spurious-jumps-to-other-fu.patch new file mode 100644 index 00000000000..e0786675e46 --- /dev/null +++ b/queue-5.10/crypto-x86-chacha20-avoid-spurious-jumps-to-other-fu.patch @@ -0,0 +1,52 @@ +From 49e9b8ad625a8aff694d343f8fc51aa3a891f915 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 22 Mar 2022 12:48:10 +0100 +Subject: crypto: x86/chacha20 - Avoid spurious jumps to other functions + +From: Peter Zijlstra + +[ Upstream commit 4327d168515fd8b5b92fa1efdf1d219fb6514460 ] + +The chacha_Nblock_xor_avx512vl() functions all have their own, +identical, .LdoneN label, however in one particular spot {2,4} jump to +the 8 version instead of their own. 
Resulting in: + + arch/x86/crypto/chacha-x86_64.o: warning: objtool: chacha_2block_xor_avx512vl() falls through to next function chacha_8block_xor_avx512vl() + arch/x86/crypto/chacha-x86_64.o: warning: objtool: chacha_4block_xor_avx512vl() falls through to next function chacha_8block_xor_avx512vl() + +Make each function consistently use its own done label. + +Reported-by: Stephen Rothwell +Signed-off-by: Peter Zijlstra (Intel) +Reviewed-by: Martin Willi +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +--- + arch/x86/crypto/chacha-avx512vl-x86_64.S | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S +index bb193fde123a..8713c16c2501 100644 +--- a/arch/x86/crypto/chacha-avx512vl-x86_64.S ++++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S +@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl) + # xor remaining bytes from partial register into output + mov %rcx,%rax + and $0xf,%rcx +- jz .Ldone8 ++ jz .Ldone2 + mov %rax,%r9 + and $~0xf,%r9 + +@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl) + # xor remaining bytes from partial register into output + mov %rcx,%rax + and $0xf,%rcx +- jz .Ldone8 ++ jz .Ldone4 + mov %rax,%r9 + and $~0xf,%r9 + +-- +2.35.1 + diff --git a/queue-5.10/drbd-remove-usage-of-list-iterator-variable-after-lo.patch b/queue-5.10/drbd-remove-usage-of-list-iterator-variable-after-lo.patch new file mode 100644 index 00000000000..2aad6be8616 --- /dev/null +++ b/queue-5.10/drbd-remove-usage-of-list-iterator-variable-after-lo.patch @@ -0,0 +1,57 @@ +From 4ceb58a2016ab439717fbad837b76f975e9a8d94 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Apr 2022 00:03:48 +0200 +Subject: drbd: remove usage of list iterator variable after loop + +From: Jakob Koschel + +[ Upstream commit 901aeda62efa21f2eae937bccb71b49ae531be06 ] + +In preparation to limit the scope of a list iterator to the list +traversal loop, use a dedicated pointer to iterate through the list [1]. + +Since that variable should not be used past the loop iteration, a +separate variable is used to 'remember the current location within the +loop'. + +To either continue iterating from that position or skip the iteration +(if the previous iteration was complete) list_prepare_entry() is used. + +Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ [1] +Signed-off-by: Jakob Koschel +Link: https://lore.kernel.org/r/20220331220349.885126-1-jakobkoschel@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + drivers/block/drbd/drbd_main.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c +index 65b95aef8dbc..3cdbd81f983f 100644 +--- a/drivers/block/drbd/drbd_main.c ++++ b/drivers/block/drbd/drbd_main.c +@@ -184,7 +184,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, + unsigned int set_size) + { + struct drbd_request *r; +- struct drbd_request *req = NULL; ++ struct drbd_request *req = NULL, *tmp = NULL; + int expect_epoch = 0; + int expect_size = 0; + +@@ -238,8 +238,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, + * to catch requests being barrier-acked "unexpectedly". + * It usually should find the same req again, or some READ preceding it. 
*/ + list_for_each_entry(req, &connection->transfer_log, tl_requests) +- if (req->epoch == expect_epoch) ++ if (req->epoch == expect_epoch) { ++ tmp = req; + break; ++ } ++ req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests); + list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) { + if (req->epoch != expect_epoch) + break; +-- +2.35.1 + diff --git a/queue-5.10/fs-fix-an-infinite-loop-in-iomap_fiemap.patch b/queue-5.10/fs-fix-an-infinite-loop-in-iomap_fiemap.patch new file mode 100644 index 00000000000..aa25feb74ef --- /dev/null +++ b/queue-5.10/fs-fix-an-infinite-loop-in-iomap_fiemap.patch @@ -0,0 +1,72 @@ +From 7021d9662fa27bb67b4042bb8ea360619b2fc7b7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 30 Mar 2022 09:49:28 -0700 +Subject: fs: fix an infinite loop in iomap_fiemap + +From: Guo Xuenan + +[ Upstream commit 49df34221804cfd6384135b28b03c9461a31d024 ] + +when get fiemap starting from MAX_LFS_FILESIZE, (maxbytes - *len) < start +will always true , then *len set zero. because of start offset is beyond +file size, for erofs filesystem it will always return iomap.length with +zero,iomap iterate will enter infinite loop. it is necessary cover this +corner case to avoid this situation. + +------------[ cut here ]------------ +WARNING: CPU: 7 PID: 905 at fs/iomap/iter.c:35 iomap_iter+0x97f/0xc70 +Modules linked in: xfs erofs +CPU: 7 PID: 905 Comm: iomap Tainted: G W 5.17.0-rc8 #27 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014 +RIP: 0010:iomap_iter+0x97f/0xc70 +Code: 85 a1 fc ff ff e8 71 be 9c ff 0f 1f 44 00 00 e9 92 fc ff ff e8 62 be 9c ff 0f 0b b8 fb ff ff ff e9 fc f8 ff ff e8 51 be 9c ff <0f> 0b e9 2b fc ff ff e8 45 be 9c ff 0f 0b e9 e1 fb ff ff e8 39 be +RSP: 0018:ffff888060a37ab0 EFLAGS: 00010293 +RAX: 0000000000000000 RBX: ffff888060a37bb0 RCX: 0000000000000000 +RDX: ffff88807e19a900 RSI: ffffffff81a7da7f RDI: ffff888060a37be0 +RBP: 7fffffffffffffff R08: 0000000000000000 R09: ffff888060a37c20 +R10: ffff888060a37c67 R11: ffffed100c146f8c R12: 7fffffffffffffff +R13: 0000000000000000 R14: ffff888060a37bd8 R15: ffff888060a37c20 +FS: 00007fd3cca01540(0000) GS:ffff888108780000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 0000000020010820 CR3: 0000000054b92000 CR4: 00000000000006e0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +Call Trace: + + iomap_fiemap+0x1c9/0x2f0 + erofs_fiemap+0x64/0x90 [erofs] + do_vfs_ioctl+0x40d/0x12e0 + __x64_sys_ioctl+0xaa/0x1c0 + do_syscall_64+0x35/0x80 + entry_SYSCALL_64_after_hwframe+0x44/0xae + +---[ end trace 0000000000000000 ]--- +watchdog: BUG: soft lockup - CPU#7 stuck for 26s! [iomap:905] + +Reported-by: Hulk Robot +Signed-off-by: Guo Xuenan +Reviewed-by: Christoph Hellwig +[djwong: fix some typos] +Reviewed-by: Darrick J. Wong +Signed-off-by: Darrick J. 
Wong +Signed-off-by: Sasha Levin +--- + fs/ioctl.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/ioctl.c b/fs/ioctl.c +index 4e6cc0a7d69c..7bcc60091287 100644 +--- a/fs/ioctl.c ++++ b/fs/ioctl.c +@@ -170,7 +170,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, + + if (*len == 0) + return -EINVAL; +- if (start > maxbytes) ++ if (start >= maxbytes) + return -EFBIG; + + /* +-- +2.35.1 + diff --git a/queue-5.10/gfs2-disable-page-faults-during-lockless-buffered-re.patch b/queue-5.10/gfs2-disable-page-faults-during-lockless-buffered-re.patch new file mode 100644 index 00000000000..88faa8d04e2 --- /dev/null +++ b/queue-5.10/gfs2-disable-page-faults-during-lockless-buffered-re.patch @@ -0,0 +1,50 @@ +From 3a8a20cf9a808f94f4bcbd53b3c04b7cf18b3ab4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 14 Mar 2022 18:32:02 +0100 +Subject: gfs2: Disable page faults during lockless buffered reads + +From: Andreas Gruenbacher + +[ Upstream commit 52f3f033a5dbd023307520af1ff551cadfd7f037 ] + +During lockless buffered reads, filemap_read() holds page cache page +references while trying to copy data to the user-space buffer. The +calling process isn't holding the inode glock, but the page references +it holds prevent those pages from being removed from the page cache, and +that prevents the underlying inode glock from being moved to another +node. Thus, we can end up in the same kinds of distributed deadlock +situations as with normal (non-lockless) buffered reads. + +Fix that by disabling page faults during lockless reads as well. + +Signed-off-by: Andreas Gruenbacher +Signed-off-by: Sasha Levin +--- + fs/gfs2/file.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c +index 2e6f622ed428..55a8eb3c1963 100644 +--- a/fs/gfs2/file.c ++++ b/fs/gfs2/file.c +@@ -858,14 +858,16 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) + return ret; + iocb->ki_flags &= ~IOCB_DIRECT; + } ++ pagefault_disable(); + iocb->ki_flags |= IOCB_NOIO; + ret = generic_file_read_iter(iocb, to); + iocb->ki_flags &= ~IOCB_NOIO; ++ pagefault_enable(); + if (ret >= 0) { + if (!iov_iter_count(to)) + return ret; + written = ret; +- } else { ++ } else if (ret != -EFAULT) { + if (ret != -EAGAIN) + return ret; + if (iocb->ki_flags & IOCB_NOWAIT) +-- +2.35.1 + diff --git a/queue-5.10/input-add-bounds-checking-to-input_set_capability.patch b/queue-5.10/input-add-bounds-checking-to-input_set_capability.patch new file mode 100644 index 00000000000..9fca39e2ab5 --- /dev/null +++ b/queue-5.10/input-add-bounds-checking-to-input_set_capability.patch @@ -0,0 +1,65 @@ +From 393165ce516b5df82159c4395c56763b096977b2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 20 Mar 2022 21:55:27 -0700 +Subject: Input: add bounds checking to input_set_capability() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jeff LaBundy + +[ Upstream commit 409353cbe9fe48f6bc196114c442b1cff05a39bc ] + +Update input_set_capability() to prevent kernel panic in case the +event code exceeds the bitmap for the given event type. 
+ +Suggested-by: Tomasz Moń +Signed-off-by: Jeff LaBundy +Reviewed-by: Tomasz Moń +Link: https://lore.kernel.org/r/20220320032537.545250-1-jeff@labundy.com +Signed-off-by: Dmitry Torokhov +Signed-off-by: Sasha Levin +--- + drivers/input/input.c | 19 +++++++++++++++++++ + 1 file changed, 19 insertions(+) + +diff --git a/drivers/input/input.c b/drivers/input/input.c +index 3cfd2c18eebd..49504dcd5dc6 100644 +--- a/drivers/input/input.c ++++ b/drivers/input/input.c +@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex); + + static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; + ++static const unsigned int input_max_code[EV_CNT] = { ++ [EV_KEY] = KEY_MAX, ++ [EV_REL] = REL_MAX, ++ [EV_ABS] = ABS_MAX, ++ [EV_MSC] = MSC_MAX, ++ [EV_SW] = SW_MAX, ++ [EV_LED] = LED_MAX, ++ [EV_SND] = SND_MAX, ++ [EV_FF] = FF_MAX, ++}; ++ + static inline int is_event_supported(unsigned int code, + unsigned long *bm, unsigned int max) + { +@@ -1976,6 +1987,14 @@ EXPORT_SYMBOL(input_get_timestamp); + */ + void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) + { ++ if (type < EV_CNT && input_max_code[type] && ++ code > input_max_code[type]) { ++ pr_err("%s: invalid code %u for type %u\n", __func__, code, ++ type); ++ dump_stack(); ++ return; ++ } ++ + switch (type) { + case EV_KEY: + __set_bit(code, dev->keybit); +-- +2.35.1 + diff --git a/queue-5.10/input-stmfts-fix-reference-leak-in-stmfts_input_open.patch b/queue-5.10/input-stmfts-fix-reference-leak-in-stmfts_input_open.patch new file mode 100644 index 00000000000..94adc0fd8ab --- /dev/null +++ b/queue-5.10/input-stmfts-fix-reference-leak-in-stmfts_input_open.patch @@ -0,0 +1,53 @@ +From f176b215c3252adfb6ec14a0c09bcef20e79ae30 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 20 Mar 2022 21:56:38 -0700 +Subject: Input: stmfts - fix reference leak in stmfts_input_open + +From: Zheng Yongjun + +[ Upstream commit 26623eea0da3476446909af96c980768df07bbd9 ] + +pm_runtime_get_sync() will increment pm usage counter even it +failed. Forgetting to call pm_runtime_put_noidle will result +in reference leak in stmfts_input_open, so we should fix it. 
+ +Signed-off-by: Zheng Yongjun +Link: https://lore.kernel.org/r/20220317131604.53538-1-zhengyongjun3@huawei.com +Signed-off-by: Dmitry Torokhov +Signed-off-by: Sasha Levin +--- + drivers/input/touchscreen/stmfts.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c +index 9a64e1dbc04a..64b690a72d10 100644 +--- a/drivers/input/touchscreen/stmfts.c ++++ b/drivers/input/touchscreen/stmfts.c +@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev) + + err = pm_runtime_get_sync(&sdata->client->dev); + if (err < 0) +- return err; ++ goto out; + + err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON); + if (err) +- return err; ++ goto out; + + mutex_lock(&sdata->mutex); + sdata->running = true; +@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev) + "failed to enable touchkey\n"); + } + +- return 0; ++out: ++ pm_runtime_put_noidle(&sdata->client->dev); ++ return err; + } + + static void stmfts_input_close(struct input_dev *dev) +-- +2.35.1 + diff --git a/queue-5.10/mips-lantiq-check-the-return-value-of-kzalloc.patch b/queue-5.10/mips-lantiq-check-the-return-value-of-kzalloc.patch new file mode 100644 index 00000000000..3a86e0a067e --- /dev/null +++ b/queue-5.10/mips-lantiq-check-the-return-value-of-kzalloc.patch @@ -0,0 +1,135 @@ +From 06d5cb4dc6418bc41c75551189921da7092fd5ec Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 25 Mar 2022 19:49:41 +0800 +Subject: MIPS: lantiq: check the return value of kzalloc() + +From: Xiaoke Wang + +[ Upstream commit 34123208bbcc8c884a0489f543a23fe9eebb5514 ] + +kzalloc() is a memory allocation function which can return NULL when +some internal memory errors happen. So it is better to check the +return value of it to prevent potential wrong memory access or +memory leak. 
+ +Signed-off-by: Xiaoke Wang +Signed-off-by: Thomas Bogendoerfer +Signed-off-by: Sasha Levin +--- + arch/mips/lantiq/falcon/sysctrl.c | 2 ++ + arch/mips/lantiq/xway/gptu.c | 2 ++ + arch/mips/lantiq/xway/sysctrl.c | 46 ++++++++++++++++++++----------- + 3 files changed, 34 insertions(+), 16 deletions(-) + +diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c +index 42222f849bd2..446a2536999b 100644 +--- a/arch/mips/lantiq/falcon/sysctrl.c ++++ b/arch/mips/lantiq/falcon/sysctrl.c +@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module, + { + struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + ++ if (!clk) ++ return; + clk->cl.dev_id = dev; + clk->cl.con_id = NULL; + clk->cl.clk = clk; +diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c +index 3d5683e75cf1..200fe9ff641d 100644 +--- a/arch/mips/lantiq/xway/gptu.c ++++ b/arch/mips/lantiq/xway/gptu.c +@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con, + { + struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + ++ if (!clk) ++ return; + clk->cl.dev_id = dev_name(dev); + clk->cl.con_id = con; + clk->cl.clk = clk; +diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c +index 917fac1636b7..084f6caba5f2 100644 +--- a/arch/mips/lantiq/xway/sysctrl.c ++++ b/arch/mips/lantiq/xway/sysctrl.c +@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate, + { + struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + ++ if (!clk) ++ return; + clk->cl.dev_id = dev; + clk->cl.con_id = con; + clk->cl.clk = clk; +@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con, + { + struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + ++ if (!clk) ++ return; + clk->cl.dev_id = dev; + clk->cl.con_id = con; + clk->cl.clk = clk; +@@ -356,24 +360,28 @@ static void clkdev_add_pci(void) + struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL); + + /* main pci clock */ +- clk->cl.dev_id = "17000000.pci"; +- clk->cl.con_id = NULL; +- clk->cl.clk = clk; +- clk->rate = CLOCK_33M; +- clk->rates = valid_pci_rates; +- clk->enable = pci_enable; +- clk->disable = pmu_disable; +- clk->module = 0; +- clk->bits = PMU_PCI; +- clkdev_add(&clk->cl); ++ if (clk) { ++ clk->cl.dev_id = "17000000.pci"; ++ clk->cl.con_id = NULL; ++ clk->cl.clk = clk; ++ clk->rate = CLOCK_33M; ++ clk->rates = valid_pci_rates; ++ clk->enable = pci_enable; ++ clk->disable = pmu_disable; ++ clk->module = 0; ++ clk->bits = PMU_PCI; ++ clkdev_add(&clk->cl); ++ } + + /* use internal/external bus clock */ +- clk_ext->cl.dev_id = "17000000.pci"; +- clk_ext->cl.con_id = "external"; +- clk_ext->cl.clk = clk_ext; +- clk_ext->enable = pci_ext_enable; +- clk_ext->disable = pci_ext_disable; +- clkdev_add(&clk_ext->cl); ++ if (clk_ext) { ++ clk_ext->cl.dev_id = "17000000.pci"; ++ clk_ext->cl.con_id = "external"; ++ clk_ext->cl.clk = clk_ext; ++ clk_ext->enable = pci_ext_enable; ++ clk_ext->disable = pci_ext_disable; ++ clkdev_add(&clk_ext->cl); ++ } + } + + /* xway socs can generate clocks on gpio pins */ +@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void) + char *name; + + name = kzalloc(sizeof("clkout0"), GFP_KERNEL); ++ if (!name) ++ continue; + sprintf(name, "clkout%d", i); + + clk = kzalloc(sizeof(struct clk), GFP_KERNEL); ++ if (!clk) { ++ kfree(name); ++ continue; ++ } + clk->cl.dev_id = "1f103000.cgu"; + clk->cl.con_id = name; + clk->cl.clk = clk; +-- +2.35.1 + diff 
--git a/queue-5.10/nilfs2-fix-lockdep-warnings-during-disk-space-reclam.patch b/queue-5.10/nilfs2-fix-lockdep-warnings-during-disk-space-reclam.patch new file mode 100644 index 00000000000..ebda2c0a6ec --- /dev/null +++ b/queue-5.10/nilfs2-fix-lockdep-warnings-during-disk-space-reclam.patch @@ -0,0 +1,349 @@ +From baab2716c764ca79471ac6f5b7ff5ebf385e79e6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Apr 2022 11:28:21 -0700 +Subject: nilfs2: fix lockdep warnings during disk space reclamation + +From: Ryusuke Konishi + +[ Upstream commit 6e211930f79aa45d422009a5f2e5467d2369ffe5 ] + +During disk space reclamation, nilfs2 still emits the following lockdep +warning due to page/folio operations on shadowed page caches that nilfs2 +uses to get a snapshot of DAT file in memory: + + WARNING: CPU: 0 PID: 2643 at include/linux/backing-dev.h:272 __folio_mark_dirty+0x645/0x670 + ... + RIP: 0010:__folio_mark_dirty+0x645/0x670 + ... + Call Trace: + filemap_dirty_folio+0x74/0xd0 + __set_page_dirty_nobuffers+0x85/0xb0 + nilfs_copy_dirty_pages+0x288/0x510 [nilfs2] + nilfs_mdt_save_to_shadow_map+0x50/0xe0 [nilfs2] + nilfs_clean_segments+0xee/0x5d0 [nilfs2] + nilfs_ioctl_clean_segments.isra.19+0xb08/0xf40 [nilfs2] + nilfs_ioctl+0xc52/0xfb0 [nilfs2] + __x64_sys_ioctl+0x11d/0x170 + +This fixes the remaining warning by using inode objects to hold those +page caches. + +Link: https://lkml.kernel.org/r/1647867427-30498-3-git-send-email-konishi.ryusuke@gmail.com +Signed-off-by: Ryusuke Konishi +Tested-by: Ryusuke Konishi +Cc: Matthew Wilcox +Cc: David Hildenbrand +Cc: Hao Sun +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + fs/nilfs2/dat.c | 4 ++- + fs/nilfs2/inode.c | 63 ++++++++++++++++++++++++++++++++++++++++++++--- + fs/nilfs2/mdt.c | 38 +++++++++++++++++++--------- + fs/nilfs2/mdt.h | 6 ++--- + fs/nilfs2/nilfs.h | 2 ++ + 5 files changed, 92 insertions(+), 21 deletions(-) + +diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c +index 8bccdf1158fc..1a3d183027b9 100644 +--- a/fs/nilfs2/dat.c ++++ b/fs/nilfs2/dat.c +@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size, + di = NILFS_DAT_I(dat); + lockdep_set_class(&di->mi.mi_sem, &dat_lock_key); + nilfs_palloc_setup_cache(dat, &di->palloc_cache); +- nilfs_mdt_setup_shadow_map(dat, &di->shadow); ++ err = nilfs_mdt_setup_shadow_map(dat, &di->shadow); ++ if (err) ++ goto failed; + + err = nilfs_read_inode_common(dat, raw_inode); + if (err) +diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c +index c3e4e677c679..95684fa3c985 100644 +--- a/fs/nilfs2/inode.c ++++ b/fs/nilfs2/inode.c +@@ -30,6 +30,7 @@ + * @root: pointer on NILFS root object (mounted checkpoint) + * @for_gc: inode for GC flag + * @for_btnc: inode for B-tree node cache flag ++ * @for_shadow: inode for shadowed page cache flag + */ + struct nilfs_iget_args { + u64 ino; +@@ -37,6 +38,7 @@ struct nilfs_iget_args { + struct nilfs_root *root; + bool for_gc; + bool for_btnc; ++ bool for_shadow; + }; + + static int nilfs_iget_test(struct inode *inode, void *opaque); +@@ -317,7 +319,7 @@ static int nilfs_insert_inode_locked(struct inode *inode, + { + struct nilfs_iget_args args = { + .ino = ino, .root = root, .cno = 0, .for_gc = false, +- .for_btnc = false ++ .for_btnc = false, .for_shadow = false + }; + + return insert_inode_locked4(inode, ino, nilfs_iget_test, &args); +@@ -536,6 +538,12 @@ static int nilfs_iget_test(struct inode *inode, void *opaque) + } else if (args->for_btnc) { + return 0; + } ++ if (test_bit(NILFS_I_SHADOW, 
&ii->i_state)) { ++ if (!args->for_shadow) ++ return 0; ++ } else if (args->for_shadow) { ++ return 0; ++ } + + if (!test_bit(NILFS_I_GCINODE, &ii->i_state)) + return !args->for_gc; +@@ -557,6 +565,8 @@ static int nilfs_iget_set(struct inode *inode, void *opaque) + NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE); + if (args->for_btnc) + NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC); ++ if (args->for_shadow) ++ NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW); + return 0; + } + +@@ -565,7 +575,7 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root, + { + struct nilfs_iget_args args = { + .ino = ino, .root = root, .cno = 0, .for_gc = false, +- .for_btnc = false ++ .for_btnc = false, .for_shadow = false + }; + + return ilookup5(sb, ino, nilfs_iget_test, &args); +@@ -576,7 +586,7 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root, + { + struct nilfs_iget_args args = { + .ino = ino, .root = root, .cno = 0, .for_gc = false, +- .for_btnc = false ++ .for_btnc = false, .for_shadow = false + }; + + return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args); +@@ -608,7 +618,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, + { + struct nilfs_iget_args args = { + .ino = ino, .root = NULL, .cno = cno, .for_gc = true, +- .for_btnc = false ++ .for_btnc = false, .for_shadow = false + }; + struct inode *inode; + int err; +@@ -655,6 +665,7 @@ int nilfs_attach_btree_node_cache(struct inode *inode) + args.cno = ii->i_cno; + args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0; + args.for_btnc = true; ++ args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0; + + btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, + nilfs_iget_set, &args); +@@ -690,6 +701,50 @@ void nilfs_detach_btree_node_cache(struct inode *inode) + } + } + ++/** ++ * nilfs_iget_for_shadow - obtain inode for shadow mapping ++ * @inode: inode object that uses shadow mapping ++ * ++ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page ++ * caches for shadow mapping. The page cache for data pages is set up ++ * in one inode and the one for b-tree node pages is set up in the ++ * other inode, which is attached to the former inode. ++ * ++ * Return Value: On success, a pointer to the inode for data pages is ++ * returned. On errors, one of the following negative error code is returned ++ * in a pointer type. ++ * ++ * %-ENOMEM - Insufficient memory available. 
++ */ ++struct inode *nilfs_iget_for_shadow(struct inode *inode) ++{ ++ struct nilfs_iget_args args = { ++ .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false, ++ .for_btnc = false, .for_shadow = true ++ }; ++ struct inode *s_inode; ++ int err; ++ ++ s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, ++ nilfs_iget_set, &args); ++ if (unlikely(!s_inode)) ++ return ERR_PTR(-ENOMEM); ++ if (!(s_inode->i_state & I_NEW)) ++ return inode; ++ ++ NILFS_I(s_inode)->i_flags = 0; ++ memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap)); ++ mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS); ++ ++ err = nilfs_attach_btree_node_cache(s_inode); ++ if (unlikely(err)) { ++ iget_failed(s_inode); ++ return ERR_PTR(err); ++ } ++ unlock_new_inode(s_inode); ++ return s_inode; ++} ++ + void nilfs_write_inode_common(struct inode *inode, + struct nilfs_inode *raw_inode, int has_bmap) + { +diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c +index d3f6cb9c32a0..e80ef2c0a785 100644 +--- a/fs/nilfs2/mdt.c ++++ b/fs/nilfs2/mdt.c +@@ -469,9 +469,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) + void nilfs_mdt_clear(struct inode *inode) + { + struct nilfs_mdt_info *mdi = NILFS_MDT(inode); ++ struct nilfs_shadow_map *shadow = mdi->mi_shadow; + + if (mdi->mi_palloc_cache) + nilfs_palloc_destroy_cache(inode); ++ ++ if (shadow) { ++ struct inode *s_inode = shadow->inode; ++ ++ shadow->inode = NULL; ++ iput(s_inode); ++ mdi->mi_shadow = NULL; ++ } + } + + /** +@@ -505,12 +514,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, + struct nilfs_shadow_map *shadow) + { + struct nilfs_mdt_info *mi = NILFS_MDT(inode); ++ struct inode *s_inode; + + INIT_LIST_HEAD(&shadow->frozen_buffers); +- address_space_init_once(&shadow->frozen_data); +- nilfs_mapping_init(&shadow->frozen_data, inode); +- address_space_init_once(&shadow->frozen_btnodes); +- nilfs_mapping_init(&shadow->frozen_btnodes, inode); ++ ++ s_inode = nilfs_iget_for_shadow(inode); ++ if (IS_ERR(s_inode)) ++ return PTR_ERR(s_inode); ++ ++ shadow->inode = s_inode; + mi->mi_shadow = shadow; + return 0; + } +@@ -524,13 +536,14 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode) + struct nilfs_mdt_info *mi = NILFS_MDT(inode); + struct nilfs_inode_info *ii = NILFS_I(inode); + struct nilfs_shadow_map *shadow = mi->mi_shadow; ++ struct inode *s_inode = shadow->inode; + int ret; + +- ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping); ++ ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping); + if (ret) + goto out; + +- ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes, ++ ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping, + ii->i_assoc_inode->i_mapping); + if (ret) + goto out; +@@ -547,7 +560,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh) + struct page *page; + int blkbits = inode->i_blkbits; + +- page = grab_cache_page(&shadow->frozen_data, bh->b_page->index); ++ page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index); + if (!page) + return -ENOMEM; + +@@ -579,7 +592,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh) + struct page *page; + int n; + +- page = find_lock_page(&shadow->frozen_data, bh->b_page->index); ++ page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index); + if (page) { + if (page_has_buffers(page)) { + n = bh_offset(bh) >> inode->i_blkbits; +@@ -620,11 +633,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode) + nilfs_palloc_clear_cache(inode); + + 
nilfs_clear_dirty_pages(inode->i_mapping, true); +- nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data); ++ nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping); + + nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true); + nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping, +- &shadow->frozen_btnodes); ++ NILFS_I(shadow->inode)->i_assoc_inode->i_mapping); + + nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store); + +@@ -639,10 +652,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode) + { + struct nilfs_mdt_info *mi = NILFS_MDT(inode); + struct nilfs_shadow_map *shadow = mi->mi_shadow; ++ struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode; + + down_write(&mi->mi_sem); + nilfs_release_frozen_buffers(shadow); +- truncate_inode_pages(&shadow->frozen_data, 0); +- truncate_inode_pages(&shadow->frozen_btnodes, 0); ++ truncate_inode_pages(shadow->inode->i_mapping, 0); ++ truncate_inode_pages(shadow_btnc_inode->i_mapping, 0); + up_write(&mi->mi_sem); + } +diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h +index e77aea4bb921..9d8ac0d27c16 100644 +--- a/fs/nilfs2/mdt.h ++++ b/fs/nilfs2/mdt.h +@@ -18,14 +18,12 @@ + /** + * struct nilfs_shadow_map - shadow mapping of meta data file + * @bmap_store: shadow copy of bmap state +- * @frozen_data: shadowed dirty data pages +- * @frozen_btnodes: shadowed dirty b-tree nodes' pages ++ * @inode: holder of page caches used in shadow mapping + * @frozen_buffers: list of frozen buffers + */ + struct nilfs_shadow_map { + struct nilfs_bmap_store bmap_store; +- struct address_space frozen_data; +- struct address_space frozen_btnodes; ++ struct inode *inode; + struct list_head frozen_buffers; + }; + +diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h +index 635383b30d67..9ca165bc97d2 100644 +--- a/fs/nilfs2/nilfs.h ++++ b/fs/nilfs2/nilfs.h +@@ -92,6 +92,7 @@ enum { + NILFS_I_BMAP, /* has bmap and btnode_cache */ + NILFS_I_GCINODE, /* inode for GC, on memory only */ + NILFS_I_BTNC, /* inode for btree node cache */ ++ NILFS_I_SHADOW, /* inode for shadowed page cache */ + }; + + /* +@@ -260,6 +261,7 @@ extern struct inode *nilfs_iget_for_gc(struct super_block *sb, + unsigned long ino, __u64 cno); + int nilfs_attach_btree_node_cache(struct inode *inode); + void nilfs_detach_btree_node_cache(struct inode *inode); ++struct inode *nilfs_iget_for_shadow(struct inode *inode); + extern void nilfs_update_inode(struct inode *, struct buffer_head *, int); + extern void nilfs_truncate(struct inode *); + extern void nilfs_evict_inode(struct inode *); +-- +2.35.1 + diff --git a/queue-5.10/nilfs2-fix-lockdep-warnings-in-page-operations-for-b.patch b/queue-5.10/nilfs2-fix-lockdep-warnings-in-page-operations-for-b.patch new file mode 100644 index 00000000000..f7837b13438 --- /dev/null +++ b/queue-5.10/nilfs2-fix-lockdep-warnings-in-page-operations-for-b.patch @@ -0,0 +1,624 @@ +From 453495bcc0851d3b17f36e4afc77534d181ad6fd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Apr 2022 11:28:18 -0700 +Subject: nilfs2: fix lockdep warnings in page operations for btree nodes + +From: Ryusuke Konishi + +[ Upstream commit e897be17a441fa637cd166fc3de1445131e57692 ] + +Patch series "nilfs2 lockdep warning fixes". + +The first two are to resolve the lockdep warning issue, and the last one +is the accompanying cleanup and low priority. + +Based on your comment, this series solves the issue by separating inode +object as needed. 
Since I was worried about the impact of the object +composition changes, I tested the series carefully not to cause +regressions especially for delicate functions such like disk space +reclamation and snapshots. + +This patch (of 3): + +If CONFIG_LOCKDEP is enabled, nilfs2 hits lockdep warnings at +inode_to_wb() during page/folio operations for btree nodes: + + WARNING: CPU: 0 PID: 6575 at include/linux/backing-dev.h:269 inode_to_wb include/linux/backing-dev.h:269 [inline] + WARNING: CPU: 0 PID: 6575 at include/linux/backing-dev.h:269 folio_account_dirtied mm/page-writeback.c:2460 [inline] + WARNING: CPU: 0 PID: 6575 at include/linux/backing-dev.h:269 __folio_mark_dirty+0xa7c/0xe30 mm/page-writeback.c:2509 + Modules linked in: + ... + RIP: 0010:inode_to_wb include/linux/backing-dev.h:269 [inline] + RIP: 0010:folio_account_dirtied mm/page-writeback.c:2460 [inline] + RIP: 0010:__folio_mark_dirty+0xa7c/0xe30 mm/page-writeback.c:2509 + ... + Call Trace: + __set_page_dirty include/linux/pagemap.h:834 [inline] + mark_buffer_dirty+0x4e6/0x650 fs/buffer.c:1145 + nilfs_btree_propagate_p fs/nilfs2/btree.c:1889 [inline] + nilfs_btree_propagate+0x4ae/0xea0 fs/nilfs2/btree.c:2085 + nilfs_bmap_propagate+0x73/0x170 fs/nilfs2/bmap.c:337 + nilfs_collect_dat_data+0x45/0xd0 fs/nilfs2/segment.c:625 + nilfs_segctor_apply_buffers+0x14a/0x470 fs/nilfs2/segment.c:1009 + nilfs_segctor_scan_file+0x47a/0x700 fs/nilfs2/segment.c:1048 + nilfs_segctor_collect_blocks fs/nilfs2/segment.c:1224 [inline] + nilfs_segctor_collect fs/nilfs2/segment.c:1494 [inline] + nilfs_segctor_do_construct+0x14f3/0x6c60 fs/nilfs2/segment.c:2036 + nilfs_segctor_construct+0x7a7/0xb30 fs/nilfs2/segment.c:2372 + nilfs_segctor_thread_construct fs/nilfs2/segment.c:2480 [inline] + nilfs_segctor_thread+0x3c3/0xf90 fs/nilfs2/segment.c:2563 + kthread+0x405/0x4f0 kernel/kthread.c:327 + ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295 + +This is because nilfs2 uses two page caches for each inode and +inode->i_mapping never points to one of them, the btree node cache. + +This causes inode_to_wb(inode) to refer to a different page cache than +the caller page/folio operations such like __folio_start_writeback(), +__folio_end_writeback(), or __folio_mark_dirty() acquired the lock. + +This patch resolves the issue by allocating and using an additional +inode to hold the page cache of btree nodes. The inode is attached +one-to-one to the traditional nilfs2 inode if it requires a block +mapping with b-tree. This setup change is in memory only and does not +affect the disk format. 
+ +Link: https://lkml.kernel.org/r/1647867427-30498-1-git-send-email-konishi.ryusuke@gmail.com +Link: https://lkml.kernel.org/r/1647867427-30498-2-git-send-email-konishi.ryusuke@gmail.com +Link: https://lore.kernel.org/r/YXrYvIo8YRnAOJCj@casper.infradead.org +Link: https://lore.kernel.org/r/9a20b33d-b38f-b4a2-4742-c1eb5b8e4d6c@redhat.com +Signed-off-by: Ryusuke Konishi +Reported-by: syzbot+0d5b462a6f07447991b3@syzkaller.appspotmail.com +Reported-by: syzbot+34ef28bb2aeb28724aa0@syzkaller.appspotmail.com +Reported-by: Hao Sun +Reported-by: David Hildenbrand +Tested-by: Ryusuke Konishi +Cc: Matthew Wilcox +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + fs/nilfs2/btnode.c | 23 ++++++++-- + fs/nilfs2/btnode.h | 1 + + fs/nilfs2/btree.c | 27 ++++++++---- + fs/nilfs2/gcinode.c | 7 +-- + fs/nilfs2/inode.c | 104 ++++++++++++++++++++++++++++++++++++++------ + fs/nilfs2/mdt.c | 7 +-- + fs/nilfs2/nilfs.h | 14 +++--- + fs/nilfs2/page.c | 7 ++- + fs/nilfs2/segment.c | 9 ++-- + fs/nilfs2/super.c | 5 +-- + 10 files changed, 154 insertions(+), 50 deletions(-) + +diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c +index 4391fd3abd8f..e00e184b1261 100644 +--- a/fs/nilfs2/btnode.c ++++ b/fs/nilfs2/btnode.c +@@ -20,6 +20,23 @@ + #include "page.h" + #include "btnode.h" + ++ ++/** ++ * nilfs_init_btnc_inode - initialize B-tree node cache inode ++ * @btnc_inode: inode to be initialized ++ * ++ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache. ++ */ ++void nilfs_init_btnc_inode(struct inode *btnc_inode) ++{ ++ struct nilfs_inode_info *ii = NILFS_I(btnc_inode); ++ ++ btnc_inode->i_mode = S_IFREG; ++ ii->i_flags = 0; ++ memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap)); ++ mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS); ++} ++ + void nilfs_btnode_cache_clear(struct address_space *btnc) + { + invalidate_mapping_pages(btnc, 0, -1); +@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc) + struct buffer_head * + nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) + { +- struct inode *inode = NILFS_BTNC_I(btnc); ++ struct inode *inode = btnc->host; + struct buffer_head *bh; + + bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); +@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, + struct buffer_head **pbh, sector_t *submit_ptr) + { + struct buffer_head *bh; +- struct inode *inode = NILFS_BTNC_I(btnc); ++ struct inode *inode = btnc->host; + struct page *page; + int err; + +@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, + struct nilfs_btnode_chkey_ctxt *ctxt) + { + struct buffer_head *obh, *nbh; +- struct inode *inode = NILFS_BTNC_I(btnc); ++ struct inode *inode = btnc->host; + __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; + int err; + +diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h +index 0f88dbc9bcb3..05ab64d354dc 100644 +--- a/fs/nilfs2/btnode.h ++++ b/fs/nilfs2/btnode.h +@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt { + struct buffer_head *newbh; + }; + ++void nilfs_init_btnc_inode(struct inode *btnc_inode); + void nilfs_btnode_cache_clear(struct address_space *); + struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, + __u64 blocknr); +diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c +index f42ab57201e7..77efd69213a3 100644 +--- a/fs/nilfs2/btree.c ++++ b/fs/nilfs2/btree.c +@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path) + static int 
nilfs_btree_get_new_block(const struct nilfs_bmap *btree, + __u64 ptr, struct buffer_head **bhp) + { +- struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; ++ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; ++ struct address_space *btnc = btnc_inode->i_mapping; + struct buffer_head *bh; + + bh = nilfs_btnode_create_block(btnc, ptr); +@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, + struct buffer_head **bhp, + const struct nilfs_btree_readahead_info *ra) + { +- struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; ++ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; ++ struct address_space *btnc = btnc_inode->i_mapping; + struct buffer_head *bh, *ra_bh; + sector_t submit_ptr = 0; + int ret; +@@ -1742,6 +1744,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, + dat = nilfs_bmap_get_dat(btree); + } + ++ ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode); ++ if (ret < 0) ++ return ret; ++ + ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat); + if (ret < 0) + return ret; +@@ -1914,7 +1920,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, + path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr; + path[level].bp_ctxt.bh = path[level].bp_bh; + ret = nilfs_btnode_prepare_change_key( +- &NILFS_BMAP_I(btree)->i_btnode_cache, ++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, + &path[level].bp_ctxt); + if (ret < 0) { + nilfs_dat_abort_update(dat, +@@ -1940,7 +1946,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, + + if (buffer_nilfs_node(path[level].bp_bh)) { + nilfs_btnode_commit_change_key( +- &NILFS_BMAP_I(btree)->i_btnode_cache, ++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, + &path[level].bp_ctxt); + path[level].bp_bh = path[level].bp_ctxt.bh; + } +@@ -1959,7 +1965,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree, + &path[level].bp_newreq.bpr_req); + if (buffer_nilfs_node(path[level].bp_bh)) + nilfs_btnode_abort_change_key( +- &NILFS_BMAP_I(btree)->i_btnode_cache, ++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, + &path[level].bp_ctxt); + } + +@@ -2135,7 +2141,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, + static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree, + struct list_head *listp) + { +- struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache; ++ struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; ++ struct address_space *btcache = btnc_inode->i_mapping; + struct list_head lists[NILFS_BTREE_LEVEL_MAX]; + struct pagevec pvec; + struct buffer_head *bh, *head; +@@ -2189,12 +2196,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree, + path[level].bp_ctxt.newkey = blocknr; + path[level].bp_ctxt.bh = *bh; + ret = nilfs_btnode_prepare_change_key( +- &NILFS_BMAP_I(btree)->i_btnode_cache, ++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, + &path[level].bp_ctxt); + if (ret < 0) + return ret; + nilfs_btnode_commit_change_key( +- &NILFS_BMAP_I(btree)->i_btnode_cache, ++ NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, + &path[level].bp_ctxt); + *bh = path[level].bp_ctxt.bh; + } +@@ -2399,6 +2406,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap) + + if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode)) + ret = -EIO; ++ else ++ ret = nilfs_attach_btree_node_cache( ++ &NILFS_BMAP_I(bmap)->vfs_inode); ++ + return ret; + } + +diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c +index 
448320496856..aadea660c66c 100644 +--- a/fs/nilfs2/gcinode.c ++++ b/fs/nilfs2/gcinode.c +@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, + int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, + __u64 vbn, struct buffer_head **out_bh) + { ++ struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode; + int ret; + +- ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, ++ ret = nilfs_btnode_submit_block(btnc_inode->i_mapping, + vbn ? : pbn, pbn, REQ_OP_READ, 0, + out_bh, &pbn); + if (ret == -EEXIST) /* internal code (cache hit) */ +@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode) + ii->i_flags = 0; + nilfs_bmap_init_gc(ii->i_bmap); + +- return 0; ++ return nilfs_attach_btree_node_cache(inode); + } + + /** +@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) + ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); + list_del_init(&ii->i_dirty); + truncate_inode_pages(&ii->vfs_inode.i_data, 0); +- nilfs_btnode_cache_clear(&ii->i_btnode_cache); ++ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); + iput(&ii->vfs_inode); + } + } +diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c +index 745d371d6fea..c3e4e677c679 100644 +--- a/fs/nilfs2/inode.c ++++ b/fs/nilfs2/inode.c +@@ -29,12 +29,14 @@ + * @cno: checkpoint number + * @root: pointer on NILFS root object (mounted checkpoint) + * @for_gc: inode for GC flag ++ * @for_btnc: inode for B-tree node cache flag + */ + struct nilfs_iget_args { + u64 ino; + __u64 cno; + struct nilfs_root *root; +- int for_gc; ++ bool for_gc; ++ bool for_btnc; + }; + + static int nilfs_iget_test(struct inode *inode, void *opaque); +@@ -314,7 +316,8 @@ static int nilfs_insert_inode_locked(struct inode *inode, + unsigned long ino) + { + struct nilfs_iget_args args = { +- .ino = ino, .root = root, .cno = 0, .for_gc = 0 ++ .ino = ino, .root = root, .cno = 0, .for_gc = false, ++ .for_btnc = false + }; + + return insert_inode_locked4(inode, ino, nilfs_iget_test, &args); +@@ -527,6 +530,13 @@ static int nilfs_iget_test(struct inode *inode, void *opaque) + return 0; + + ii = NILFS_I(inode); ++ if (test_bit(NILFS_I_BTNC, &ii->i_state)) { ++ if (!args->for_btnc) ++ return 0; ++ } else if (args->for_btnc) { ++ return 0; ++ } ++ + if (!test_bit(NILFS_I_GCINODE, &ii->i_state)) + return !args->for_gc; + +@@ -538,15 +548,15 @@ static int nilfs_iget_set(struct inode *inode, void *opaque) + struct nilfs_iget_args *args = opaque; + + inode->i_ino = args->ino; +- if (args->for_gc) { ++ NILFS_I(inode)->i_cno = args->cno; ++ NILFS_I(inode)->i_root = args->root; ++ if (args->root && args->ino == NILFS_ROOT_INO) ++ nilfs_get_root(args->root); ++ ++ if (args->for_gc) + NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE); +- NILFS_I(inode)->i_cno = args->cno; +- NILFS_I(inode)->i_root = NULL; +- } else { +- if (args->root && args->ino == NILFS_ROOT_INO) +- nilfs_get_root(args->root); +- NILFS_I(inode)->i_root = args->root; +- } ++ if (args->for_btnc) ++ NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC); + return 0; + } + +@@ -554,7 +564,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root, + unsigned long ino) + { + struct nilfs_iget_args args = { +- .ino = ino, .root = root, .cno = 0, .for_gc = 0 ++ .ino = ino, .root = root, .cno = 0, .for_gc = false, ++ .for_btnc = false + }; + + return ilookup5(sb, ino, nilfs_iget_test, &args); +@@ -564,7 +575,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root, + unsigned long ino) + 
{ + struct nilfs_iget_args args = { +- .ino = ino, .root = root, .cno = 0, .for_gc = 0 ++ .ino = ino, .root = root, .cno = 0, .for_gc = false, ++ .for_btnc = false + }; + + return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args); +@@ -595,7 +607,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, + __u64 cno) + { + struct nilfs_iget_args args = { +- .ino = ino, .root = NULL, .cno = cno, .for_gc = 1 ++ .ino = ino, .root = NULL, .cno = cno, .for_gc = true, ++ .for_btnc = false + }; + struct inode *inode; + int err; +@@ -615,6 +628,68 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, + return inode; + } + ++/** ++ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode ++ * @inode: inode object ++ * ++ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode, ++ * or does nothing if the inode already has it. This function allocates ++ * an additional inode to maintain page cache of B-tree nodes one-on-one. ++ * ++ * Return Value: On success, 0 is returned. On errors, one of the following ++ * negative error code is returned. ++ * ++ * %-ENOMEM - Insufficient memory available. ++ */ ++int nilfs_attach_btree_node_cache(struct inode *inode) ++{ ++ struct nilfs_inode_info *ii = NILFS_I(inode); ++ struct inode *btnc_inode; ++ struct nilfs_iget_args args; ++ ++ if (ii->i_assoc_inode) ++ return 0; ++ ++ args.ino = inode->i_ino; ++ args.root = ii->i_root; ++ args.cno = ii->i_cno; ++ args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0; ++ args.for_btnc = true; ++ ++ btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, ++ nilfs_iget_set, &args); ++ if (unlikely(!btnc_inode)) ++ return -ENOMEM; ++ if (btnc_inode->i_state & I_NEW) { ++ nilfs_init_btnc_inode(btnc_inode); ++ unlock_new_inode(btnc_inode); ++ } ++ NILFS_I(btnc_inode)->i_assoc_inode = inode; ++ NILFS_I(btnc_inode)->i_bmap = ii->i_bmap; ++ ii->i_assoc_inode = btnc_inode; ++ ++ return 0; ++} ++ ++/** ++ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode ++ * @inode: inode object ++ * ++ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its ++ * holder inode bound to @inode, or does nothing if @inode doesn't have it. 
++ */ ++void nilfs_detach_btree_node_cache(struct inode *inode) ++{ ++ struct nilfs_inode_info *ii = NILFS_I(inode); ++ struct inode *btnc_inode = ii->i_assoc_inode; ++ ++ if (btnc_inode) { ++ NILFS_I(btnc_inode)->i_assoc_inode = NULL; ++ ii->i_assoc_inode = NULL; ++ iput(btnc_inode); ++ } ++} ++ + void nilfs_write_inode_common(struct inode *inode, + struct nilfs_inode *raw_inode, int has_bmap) + { +@@ -762,7 +837,8 @@ static void nilfs_clear_inode(struct inode *inode) + if (test_bit(NILFS_I_BMAP, &ii->i_state)) + nilfs_bmap_clear(ii->i_bmap); + +- nilfs_btnode_cache_clear(&ii->i_btnode_cache); ++ if (!test_bit(NILFS_I_BTNC, &ii->i_state)) ++ nilfs_detach_btree_node_cache(inode); + + if (ii->i_root && inode->i_ino == NILFS_ROOT_INO) + nilfs_put_root(ii->i_root); +diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c +index c0361ce45f62..d3f6cb9c32a0 100644 +--- a/fs/nilfs2/mdt.c ++++ b/fs/nilfs2/mdt.c +@@ -531,7 +531,7 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode) + goto out; + + ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes, +- &ii->i_btnode_cache); ++ ii->i_assoc_inode->i_mapping); + if (ret) + goto out; + +@@ -622,8 +622,9 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode) + nilfs_clear_dirty_pages(inode->i_mapping, true); + nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data); + +- nilfs_clear_dirty_pages(&ii->i_btnode_cache, true); +- nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes); ++ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true); ++ nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping, ++ &shadow->frozen_btnodes); + + nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store); + +diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h +index f8450ee3fd06..635383b30d67 100644 +--- a/fs/nilfs2/nilfs.h ++++ b/fs/nilfs2/nilfs.h +@@ -28,7 +28,7 @@ + * @i_xattr: + * @i_dir_start_lookup: page index of last successful search + * @i_cno: checkpoint number for GC inode +- * @i_btnode_cache: cached pages of b-tree nodes ++ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer) + * @i_dirty: list for connecting dirty files + * @xattr_sem: semaphore for extended attributes processing + * @i_bh: buffer contains disk inode +@@ -43,7 +43,7 @@ struct nilfs_inode_info { + __u64 i_xattr; /* sector_t ??? 
*/ + __u32 i_dir_start_lookup; + __u64 i_cno; /* check point number for GC inode */ +- struct address_space i_btnode_cache; ++ struct inode *i_assoc_inode; + struct list_head i_dirty; /* List for connecting dirty files */ + + #ifdef CONFIG_NILFS_XATTR +@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap) + return container_of(bmap, struct nilfs_inode_info, i_bmap_data); + } + +-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc) +-{ +- struct nilfs_inode_info *ii = +- container_of(btnc, struct nilfs_inode_info, i_btnode_cache); +- return &ii->vfs_inode; +-} +- + /* + * Dynamic state flags of NILFS on-memory inode (i_state) + */ +@@ -98,6 +91,7 @@ enum { + NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */ + NILFS_I_BMAP, /* has bmap and btnode_cache */ + NILFS_I_GCINODE, /* inode for GC, on memory only */ ++ NILFS_I_BTNC, /* inode for btree node cache */ + }; + + /* +@@ -264,6 +258,8 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root, + unsigned long ino); + extern struct inode *nilfs_iget_for_gc(struct super_block *sb, + unsigned long ino, __u64 cno); ++int nilfs_attach_btree_node_cache(struct inode *inode); ++void nilfs_detach_btree_node_cache(struct inode *inode); + extern void nilfs_update_inode(struct inode *, struct buffer_head *, int); + extern void nilfs_truncate(struct inode *); + extern void nilfs_evict_inode(struct inode *); +diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c +index 171fb5cd427f..d1a148f0cae3 100644 +--- a/fs/nilfs2/page.c ++++ b/fs/nilfs2/page.c +@@ -448,10 +448,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) + /* + * NILFS2 needs clear_page_dirty() in the following two cases: + * +- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears +- * page dirty flags when it copies back pages from the shadow cache +- * (gcdat->{i_mapping,i_btnode_cache}) to its original cache +- * (dat->{i_mapping,i_btnode_cache}). ++ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty ++ * flag of pages when it copies back pages from shadow cache to the ++ * original cache. + * + * 2) Some B-tree operations like insertion or deletion may dispose buffers + * in dirty state, and this needs to cancel the dirty state of their pages. 
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index e3726aca28ed..8350c2eaee75 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -738,15 +738,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode, + struct list_head *listp) + { + struct nilfs_inode_info *ii = NILFS_I(inode); +- struct address_space *mapping = &ii->i_btnode_cache; ++ struct inode *btnc_inode = ii->i_assoc_inode; + struct pagevec pvec; + struct buffer_head *bh, *head; + unsigned int i; + pgoff_t index = 0; + ++ if (!btnc_inode) ++ return; ++ + pagevec_init(&pvec); + +- while (pagevec_lookup_tag(&pvec, mapping, &index, ++ while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index, + PAGECACHE_TAG_DIRTY)) { + for (i = 0; i < pagevec_count(&pvec); i++) { + bh = head = page_buffers(pvec.pages[i]); +@@ -2415,7 +2418,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) + continue; + list_del_init(&ii->i_dirty); + truncate_inode_pages(&ii->vfs_inode.i_data, 0); +- nilfs_btnode_cache_clear(&ii->i_btnode_cache); ++ nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); + iput(&ii->vfs_inode); + } + } +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c +index 4abd928b0bc8..b9d30e8c43b0 100644 +--- a/fs/nilfs2/super.c ++++ b/fs/nilfs2/super.c +@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb) + ii->i_bh = NULL; + ii->i_state = 0; + ii->i_cno = 0; +- nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode); ++ ii->i_assoc_inode = NULL; ++ ii->i_bmap = &ii->i_bmap_data; + return &ii->vfs_inode; + } + +@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj) + #ifdef CONFIG_NILFS_XATTR + init_rwsem(&ii->xattr_sem); + #endif +- address_space_init_once(&ii->i_btnode_cache); +- ii->i_bmap = &ii->i_bmap_data; + inode_init_once(&ii->vfs_inode); + } + +-- +2.35.1 + diff --git a/queue-5.10/nvme-multipath-fix-hang-when-disk-goes-live-over-rec.patch b/queue-5.10/nvme-multipath-fix-hang-when-disk-goes-live-over-rec.patch new file mode 100644 index 00000000000..4ed72a2a786 --- /dev/null +++ b/queue-5.10/nvme-multipath-fix-hang-when-disk-goes-live-over-rec.patch @@ -0,0 +1,165 @@ +From 4939519be5763d83b856a0a7cbcddf688f53a2cc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 24 Mar 2022 13:05:11 -0600 +Subject: nvme-multipath: fix hang when disk goes live over reconnect + +From: Anton Eidelman + +[ Upstream commit a4a6f3c8f61c3cfbda4998ad94596059ad7e4332 ] + +nvme_mpath_init_identify() invoked from nvme_init_identify() fetches a +fresh ANA log from the ctrl. This is essential to have an up to date +path states for both existing namespaces and for those scan_work may +discover once the ctrl is up. + +This happens in the following cases: + 1) A new ctrl is being connected. + 2) An existing ctrl is successfully reconnected. + 3) An existing ctrl is being reset. + +While in (1) ctrl->namespaces is empty, (2 & 3) may have namespaces, and +nvme_read_ana_log() may call nvme_update_ns_ana_state(). + +This result in a hang when the ANA state of an existing namespace changes +and makes the disk live: nvme_mpath_set_live() issues IO to the namespace +through the ctrl, which does NOT have IO queues yet. + +See sample hang below. 
+ +Solution: +- nvme_update_ns_ana_state() to call set_live only if ctrl is live +- nvme_read_ana_log() call from nvme_mpath_init_identify() + therefore only fetches and parses the ANA log; + any erros in this process will fail the ctrl setup as appropriate; +- a separate function nvme_mpath_update() + is called in nvme_start_ctrl(); + this parses the ANA log without fetching it. + At this point the ctrl is live, + therefore, disks can be set live normally. + +Sample failure: + nvme nvme0: starting error recovery + nvme nvme0: Reconnecting in 10 seconds... + block nvme0n6: no usable path - requeuing I/O + INFO: task kworker/u8:3:312 blocked for more than 122 seconds. + Tainted: G E 5.14.5-1.el7.elrepo.x86_64 #1 + Workqueue: nvme-wq nvme_tcp_reconnect_ctrl_work [nvme_tcp] + Call Trace: + __schedule+0x2a2/0x7e0 + schedule+0x4e/0xb0 + io_schedule+0x16/0x40 + wait_on_page_bit_common+0x15c/0x3e0 + do_read_cache_page+0x1e0/0x410 + read_cache_page+0x12/0x20 + read_part_sector+0x46/0x100 + read_lba+0x121/0x240 + efi_partition+0x1d2/0x6a0 + bdev_disk_changed.part.0+0x1df/0x430 + bdev_disk_changed+0x18/0x20 + blkdev_get_whole+0x77/0xe0 + blkdev_get_by_dev+0xd2/0x3a0 + __device_add_disk+0x1ed/0x310 + device_add_disk+0x13/0x20 + nvme_mpath_set_live+0x138/0x1b0 [nvme_core] + nvme_update_ns_ana_state+0x2b/0x30 [nvme_core] + nvme_update_ana_state+0xca/0xe0 [nvme_core] + nvme_parse_ana_log+0xac/0x170 [nvme_core] + nvme_read_ana_log+0x7d/0xe0 [nvme_core] + nvme_mpath_init_identify+0x105/0x150 [nvme_core] + nvme_init_identify+0x2df/0x4d0 [nvme_core] + nvme_init_ctrl_finish+0x8d/0x3b0 [nvme_core] + nvme_tcp_setup_ctrl+0x337/0x390 [nvme_tcp] + nvme_tcp_reconnect_ctrl_work+0x24/0x40 [nvme_tcp] + process_one_work+0x1bd/0x360 + worker_thread+0x50/0x3d0 + +Signed-off-by: Anton Eidelman +Reviewed-by: Sagi Grimberg +Signed-off-by: Christoph Hellwig +Signed-off-by: Sasha Levin +--- + drivers/nvme/host/core.c | 1 + + drivers/nvme/host/multipath.c | 25 +++++++++++++++++++++++-- + drivers/nvme/host/nvme.h | 4 ++++ + 3 files changed, 28 insertions(+), 2 deletions(-) + +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index ad4f1cfbad2e..e73a5c62a858 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -4420,6 +4420,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) + if (ctrl->queue_count > 1) { + nvme_queue_scan(ctrl); + nvme_start_queues(ctrl); ++ nvme_mpath_update(ctrl); + } + } + EXPORT_SYMBOL_GPL(nvme_start_ctrl); +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 18a756444d5a..a9e15c8f907b 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -484,8 +484,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, + ns->ana_grpid = le32_to_cpu(desc->grpid); + ns->ana_state = desc->state; + clear_bit(NVME_NS_ANA_PENDING, &ns->flags); +- +- if (nvme_state_is_live(ns->ana_state)) ++ /* ++ * nvme_mpath_set_live() will trigger I/O to the multipath path device ++ * and in turn to this path device. However we cannot accept this I/O ++ * if the controller is not live. This may deadlock if called from ++ * nvme_mpath_init_identify() and the ctrl will never complete ++ * initialization, preventing I/O from completing. For this case we ++ * will reprocess the ANA log page in nvme_mpath_update() once the ++ * controller is ready. 
++ */ ++ if (nvme_state_is_live(ns->ana_state) && ++ ns->ctrl->state == NVME_CTRL_LIVE) + nvme_mpath_set_live(ns); + } + +@@ -572,6 +581,18 @@ static void nvme_ana_work(struct work_struct *work) + nvme_read_ana_log(ctrl); + } + ++void nvme_mpath_update(struct nvme_ctrl *ctrl) ++{ ++ u32 nr_change_groups = 0; ++ ++ if (!ctrl->ana_log_buf) ++ return; ++ ++ mutex_lock(&ctrl->ana_lock); ++ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); ++ mutex_unlock(&ctrl->ana_lock); ++} ++ + static void nvme_anatt_timeout(struct timer_list *t) + { + struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index 10e5ae3a8c0d..95b9657cabaf 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -712,6 +712,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); + void nvme_mpath_remove_disk(struct nvme_ns_head *head); + int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); + void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); ++void nvme_mpath_update(struct nvme_ctrl *ctrl); + void nvme_mpath_uninit(struct nvme_ctrl *ctrl); + void nvme_mpath_stop(struct nvme_ctrl *ctrl); + bool nvme_mpath_clear_current_path(struct nvme_ns *ns); +@@ -798,6 +799,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, + "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); + return 0; + } ++static inline void nvme_mpath_update(struct nvme_ctrl *ctrl) ++{ ++} + static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) + { + } +-- +2.35.1 + diff --git a/queue-5.10/nvme-pci-add-quirks-for-samsung-x5-ssds.patch b/queue-5.10/nvme-pci-add-quirks-for-samsung-x5-ssds.patch new file mode 100644 index 00000000000..cdcca590d75 --- /dev/null +++ b/queue-5.10/nvme-pci-add-quirks-for-samsung-x5-ssds.patch @@ -0,0 +1,38 @@ +From 5f9ce3b875fd393288e0b75703248f66aac065d2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 16 Mar 2022 13:24:49 +0530 +Subject: nvme-pci: add quirks for Samsung X5 SSDs + +From: Monish Kumar R + +[ Upstream commit bc360b0b1611566e1bd47384daf49af6a1c51837 ] + +Add quirks to not fail the initialization and to have quick resume +latency after cold/warm reboot. 
+ +Signed-off-by: Monish Kumar R +Signed-off-by: Christoph Hellwig +Signed-off-by: Sasha Levin +--- + drivers/nvme/host/pci.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 6939b03a16c5..a36db0701d17 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3265,7 +3265,10 @@ static const struct pci_device_id nvme_id_table[] = { + NVME_QUIRK_128_BYTES_SQES | + NVME_QUIRK_SHARED_TAGS | + NVME_QUIRK_SKIP_CID_GEN }, +- ++ { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */ ++ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY| ++ NVME_QUIRK_NO_DEEPEST_PS | ++ NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, + { 0, } + }; +-- +2.35.1 + diff --git a/queue-5.10/platform-chrome-cros_ec_debugfs-detach-log-reader-wq.patch b/queue-5.10/platform-chrome-cros_ec_debugfs-detach-log-reader-wq.patch new file mode 100644 index 00000000000..26914469f7b --- /dev/null +++ b/queue-5.10/platform-chrome-cros_ec_debugfs-detach-log-reader-wq.patch @@ -0,0 +1,120 @@ +From 9b2cb8ec2140f2c4acd967d31f9acac1fd2175c8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 9 Feb 2022 13:11:30 +0800 +Subject: platform/chrome: cros_ec_debugfs: detach log reader wq from devm + +From: Tzung-Bi Shih + +[ Upstream commit 0e8eb5e8acbad19ac2e1856b2fb2320184299b33 ] + +Debugfs console_log uses devm memory (e.g. debug_info in +cros_ec_console_log_poll()). However, lifecycles of device and debugfs +are independent. An use-after-free issue is observed if userland +program operates the debugfs after the memory has been freed. + +The call trace: + do_raw_spin_lock + _raw_spin_lock_irqsave + remove_wait_queue + ep_unregister_pollwait + ep_remove + do_epoll_ctl + +A Python example to reproduce the issue: +... import select +... p = select.epoll() +... f = open('/sys/kernel/debug/cros_scp/console_log') +... p.register(f, select.POLLIN) +... p.poll(1) +[(4, 1)] # 4=fd, 1=select.POLLIN + +[ shutdown cros_scp at the point ] + +... p.poll(1) +[(4, 16)] # 4=fd, 16=select.POLLHUP +... p.unregister(f) + +An use-after-free issue raises here. It called epoll_ctl with +EPOLL_CTL_DEL which in turn to use the workqueue in the devm (i.e. +log_wq). + +Detaches log reader's workqueue from devm to make sure it is persistent +even if the device has been removed. + +Signed-off-by: Tzung-Bi Shih +Reviewed-by: Guenter Roeck +Link: https://lore.kernel.org/r/20220209051130.386175-1-tzungbi@google.com +Signed-off-by: Benson Leung +Signed-off-by: Sasha Levin +--- + drivers/platform/chrome/cros_ec_debugfs.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c +index 272c89837d74..0dbceee87a4b 100644 +--- a/drivers/platform/chrome/cros_ec_debugfs.c ++++ b/drivers/platform/chrome/cros_ec_debugfs.c +@@ -25,6 +25,9 @@ + + #define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1)) + ++/* waitqueue for log readers */ ++static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq); ++ + /** + * struct cros_ec_debugfs - EC debugging information. 
+ * +@@ -33,7 +36,6 @@ + * @log_buffer: circular buffer for console log information + * @read_msg: preallocated EC command and buffer to read console log + * @log_mutex: mutex to protect circular buffer +- * @log_wq: waitqueue for log readers + * @log_poll_work: recurring task to poll EC for new console log data + * @panicinfo_blob: panicinfo debugfs blob + */ +@@ -44,7 +46,6 @@ struct cros_ec_debugfs { + struct circ_buf log_buffer; + struct cros_ec_command *read_msg; + struct mutex log_mutex; +- wait_queue_head_t log_wq; + struct delayed_work log_poll_work; + /* EC panicinfo */ + struct debugfs_blob_wrapper panicinfo_blob; +@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work) + buf_space--; + } + +- wake_up(&debug_info->log_wq); ++ wake_up(&cros_ec_debugfs_log_wq); + } + + mutex_unlock(&debug_info->log_mutex); +@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf, + + mutex_unlock(&debug_info->log_mutex); + +- ret = wait_event_interruptible(debug_info->log_wq, ++ ret = wait_event_interruptible(cros_ec_debugfs_log_wq, + CIRC_CNT(cb->head, cb->tail, LOG_SIZE)); + if (ret < 0) + return ret; +@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file, + struct cros_ec_debugfs *debug_info = file->private_data; + __poll_t mask = 0; + +- poll_wait(file, &debug_info->log_wq, wait); ++ poll_wait(file, &cros_ec_debugfs_log_wq, wait); + + mutex_lock(&debug_info->log_mutex); + if (CIRC_CNT(debug_info->log_buffer.head, +@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info) + debug_info->log_buffer.tail = 0; + + mutex_init(&debug_info->log_mutex); +- init_waitqueue_head(&debug_info->log_wq); + + debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir, + debug_info, &cros_ec_console_log_fops); +-- +2.35.1 + diff --git a/queue-5.10/reinstate-some-of-swiotlb-rework-fix-info-leak-with-.patch b/queue-5.10/reinstate-some-of-swiotlb-rework-fix-info-leak-with-.patch new file mode 100644 index 00000000000..f93a4ad9af5 --- /dev/null +++ b/queue-5.10/reinstate-some-of-swiotlb-rework-fix-info-leak-with-.patch @@ -0,0 +1,63 @@ +From 585d1062fd2a1b878f9456e31eae017cbbbc93e8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 28 Mar 2022 11:37:05 -0700 +Subject: Reinstate some of "swiotlb: rework "fix info leak with + DMA_FROM_DEVICE"" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Linus Torvalds + +[ Upstream commit 901c7280ca0d5e2b4a8929fbe0bfb007ac2a6544 ] + +Halil Pasic points out [1] that the full revert of that commit (revert +in bddac7c1e02b), and that a partial revert that only reverts the +problematic case, but still keeps some of the cleanups is probably +better.  + +And that partial revert [2] had already been verified by Oleksandr +Natalenko to also fix the issue, I had just missed that in the long +discussion. + +So let's reinstate the cleanups from commit aa6f8dcbab47 ("swiotlb: +rework "fix info leak with DMA_FROM_DEVICE""), and effectively only +revert the part that caused problems. 
+ +Link: https://lore.kernel.org/all/20220328013731.017ae3e3.pasic@linux.ibm.com/ [1] +Link: https://lore.kernel.org/all/20220324055732.GB12078@lst.de/ [2] +Link: https://lore.kernel.org/all/4386660.LvFx2qVVIh@natalenko.name/ [3] +Suggested-by: Halil Pasic +Tested-by: Oleksandr Natalenko +Cc: Christoph Hellwig" +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + kernel/dma/swiotlb.c | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c +index 0ed0e1f215c7..274587a57717 100644 +--- a/kernel/dma/swiotlb.c ++++ b/kernel/dma/swiotlb.c +@@ -597,9 +597,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, + io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); + + tlb_addr = slot_addr(io_tlb_start, index) + offset; +- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && +- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) +- swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); ++ /* ++ * When dir == DMA_FROM_DEVICE we could omit the copy from the orig ++ * to the tlb buffer, if we knew for sure the device will ++ * overwirte the entire current content. But we don't. Thus ++ * unconditional bounce may prevent leaking swiotlb content (i.e. ++ * kernel memory) to user-space. ++ */ ++ swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); + return tlb_addr; + } + +-- +2.35.1 + diff --git a/queue-5.10/revert-swiotlb-fix-info-leak-with-dma_from_device.patch b/queue-5.10/revert-swiotlb-fix-info-leak-with-dma_from_device.patch new file mode 100644 index 00000000000..2962a2e38c1 --- /dev/null +++ b/queue-5.10/revert-swiotlb-fix-info-leak-with-dma_from_device.patch @@ -0,0 +1,70 @@ +From 9f27512d8cf6811ab95770bcba7ed0ffe0e5c24d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 18 May 2022 15:28:18 -0400 +Subject: Revert "swiotlb: fix info leak with DMA_FROM_DEVICE" + +This reverts commit d4d975e7921079f877f828099bb8260af335508f. + +Upstream had a follow-up fix, revert, and a semi-reverted-revert. +Instead of going through this chain which is more painful to backport, +I'm just going to revert this original commit and pick the final one. + +Signed-off-by: Sasha Levin +--- + Documentation/core-api/dma-attributes.rst | 8 -------- + include/linux/dma-mapping.h | 8 -------- + kernel/dma/swiotlb.c | 3 +-- + 3 files changed, 1 insertion(+), 18 deletions(-) + +diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst +index 17706dc91ec9..1887d92e8e92 100644 +--- a/Documentation/core-api/dma-attributes.rst ++++ b/Documentation/core-api/dma-attributes.rst +@@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged + subsystem that the buffer is fully accessible at the elevated privilege + level (and ideally inaccessible or at least read-only at the + lesser-privileged levels). +- +-DMA_ATTR_OVERWRITE +------------------- +- +-This is a hint to the DMA-mapping subsystem that the device is expected to +-overwrite the entire mapped size, thus the caller does not require any of the +-previous buffer contents to be preserved. This allows bounce-buffering +-implementations to optimise DMA_FROM_DEVICE transfers. 
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h +index a9361178c5db..a7d70cdee25e 100644 +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -61,14 +61,6 @@ + */ + #define DMA_ATTR_PRIVILEGED (1UL << 9) + +-/* +- * This is a hint to the DMA-mapping subsystem that the device is expected +- * to overwrite the entire mapped size, thus the caller does not require any +- * of the previous buffer contents to be preserved. This allows +- * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers. +- */ +-#define DMA_ATTR_OVERWRITE (1UL << 10) +- + /* + * A dma_addr_t can hold any valid DMA or bus address for the platform. It can + * be given to a device to use as a DMA source or target. It is specific to a +diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c +index 62b1e5fa8673..0ed0e1f215c7 100644 +--- a/kernel/dma/swiotlb.c ++++ b/kernel/dma/swiotlb.c +@@ -598,8 +598,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, + + tlb_addr = slot_addr(io_tlb_start, index) + offset; + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && +- (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE || +- dir == DMA_BIDIRECTIONAL)) ++ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) + swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); + return tlb_addr; + } +-- +2.35.1 + diff --git a/queue-5.10/rtc-fix-use-after-free-on-device-removal.patch b/queue-5.10/rtc-fix-use-after-free-on-device-removal.patch new file mode 100644 index 00000000000..00c1cdc7bb5 --- /dev/null +++ b/queue-5.10/rtc-fix-use-after-free-on-device-removal.patch @@ -0,0 +1,80 @@ +From 14c4a1b4be3f452aefc2891acab5b4b5dea436a4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Dec 2021 17:09:51 +0100 +Subject: rtc: fix use-after-free on device removal + +From: Vincent Whitchurch + +[ Upstream commit c8fa17d9f08a448184f03d352145099b5beb618e ] + +If the irqwork is still scheduled or running while the RTC device is +removed, a use-after-free occurs in rtc_timer_do_work(). Cleanup the +timerqueue and ensure the work is stopped to fix this. + + BUG: KASAN: use-after-free in mutex_lock+0x94/0x110 + Write of size 8 at addr ffffff801d846338 by task kworker/3:1/41 + + Workqueue: events rtc_timer_do_work + Call trace: + mutex_lock+0x94/0x110 + rtc_timer_do_work+0xec/0x630 + process_one_work+0x5fc/0x1344 + ... + + Allocated by task 551: + kmem_cache_alloc_trace+0x384/0x6e0 + devm_rtc_allocate_device+0xf0/0x574 + devm_rtc_device_register+0x2c/0x12c + ... + + Freed by task 572: + kfree+0x114/0x4d0 + rtc_device_release+0x64/0x80 + device_release+0x8c/0x1f4 + kobject_put+0x1c4/0x4b0 + put_device+0x20/0x30 + devm_rtc_release_device+0x1c/0x30 + devm_action_release+0x54/0x90 + release_nodes+0x124/0x310 + devres_release_group+0x170/0x240 + i2c_device_remove+0xd8/0x314 + ... + + Last potentially related work creation: + insert_work+0x5c/0x330 + queue_work_on+0xcc/0x154 + rtc_set_time+0x188/0x5bc + rtc_dev_ioctl+0x2ac/0xbd0 + ... 
+ +Signed-off-by: Vincent Whitchurch +Signed-off-by: Alexandre Belloni +Link: https://lore.kernel.org/r/20211210160951.7718-1-vincent.whitchurch@axis.com +Signed-off-by: Sasha Levin +--- + drivers/rtc/class.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c +index 7c88d190c51f..625effe6cb65 100644 +--- a/drivers/rtc/class.c ++++ b/drivers/rtc/class.c +@@ -26,6 +26,15 @@ struct class *rtc_class; + static void rtc_device_release(struct device *dev) + { + struct rtc_device *rtc = to_rtc_device(dev); ++ struct timerqueue_head *head = &rtc->timerqueue; ++ struct timerqueue_node *node; ++ ++ mutex_lock(&rtc->ops_lock); ++ while ((node = timerqueue_getnext(head))) ++ timerqueue_del(head, node); ++ mutex_unlock(&rtc->ops_lock); ++ ++ cancel_work_sync(&rtc->irqwork); + + ida_simple_remove(&rtc_ida, rtc->id); + kfree(rtc); +-- +2.35.1 + diff --git a/queue-5.10/rtc-mc146818-lib-fix-the-altcentury-for-amd-platform.patch b/queue-5.10/rtc-mc146818-lib-fix-the-altcentury-for-amd-platform.patch new file mode 100644 index 00000000000..294bd02e2a0 --- /dev/null +++ b/queue-5.10/rtc-mc146818-lib-fix-the-altcentury-for-amd-platform.patch @@ -0,0 +1,109 @@ +From 6f4c28746ce479a8e1c3a06bed6af94d06f5571a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 11 Jan 2022 16:57:50 -0600 +Subject: rtc: mc146818-lib: Fix the AltCentury for AMD platforms + +From: Mario Limonciello + +[ Upstream commit 3ae8fd41573af4fb3a490c9ed947fc936ba87190 ] + +Setting the century forward has been failing on AMD platforms. +There was a previous attempt at fixing this for family 0x17 as part of +commit 7ad295d5196a ("rtc: Fix the AltCentury value on AMD/Hygon +platform") but this was later reverted due to some problems reported +that appeared to stem from an FW bug on a family 0x17 desktop system. + +The same comments mentioned in the previous commit continue to apply +to the newer platforms as well. + +``` +MC146818 driver use function mc146818_set_time() to set register +RTC_FREQ_SELECT(RTC_REG_A)'s bit4-bit6 field which means divider stage +reset value on Intel platform to 0x7. + +While AMD/Hygon RTC_REG_A(0Ah)'s bit4 is defined as DV0 [Reference]: +DV0 = 0 selects Bank 0, DV0 = 1 selects Bank 1. Bit5-bit6 is defined +as reserved. + +DV0 is set to 1, it will select Bank 1, which will disable AltCentury +register(0x32) access. As UEFI pass acpi_gbl_FADT.century 0x32 +(AltCentury), the CMOS write will be failed on code: +CMOS_WRITE(century, acpi_gbl_FADT.century). + +Correct RTC_REG_A bank select bit(DV0) to 0 on AMD/Hygon CPUs, it will +enable AltCentury(0x32) register writing and finally setup century as +expected. +``` + +However in closer examination the change previously submitted was also +modifying bits 5 & 6 which are declared reserved in the AMD documentation. +So instead modify just the DV0 bank selection bit. + +Being cognizant that there was a failure reported before, split the code +change out to a static function that can also be used for exclusions if +any regressions such as Mikhail's pop up again. 
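+
+An illustrative C sketch (not part of the upstream change) of the register
+arithmetic described above; save_freq_select holds a made-up RTC_REG_A value:
+
+  #include <stdio.h>
+
+  #define RTC_DIV_RESET2      0x70  /* Intel: divider reset, bits 4-6 */
+  #define RTC_AMD_BANK_SELECT 0x10  /* AMD/Hygon: DV0 bank select, bit 4 */
+
+  int main(void)
+  {
+          unsigned char save_freq_select = 0x36;  /* hypothetical REG_A value */
+
+          /* old behaviour: sets DV0=1 on AMD, selecting bank 1 and hiding
+           * the AltCentury register at 0x32 */
+          printf("Intel-style write: 0x%02x\n",
+                 save_freq_select | RTC_DIV_RESET2);
+
+          /* patched behaviour on AMD/Hygon: clear only DV0 and leave the
+           * reserved bits 5-6 untouched */
+          printf("AMD-style write:   0x%02x\n",
+                 save_freq_select & ~RTC_AMD_BANK_SELECT);
+          return 0;
+  }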
+ +Cc: Jinke Fan +Cc: Mikhail Gavrilov +Link: https://lore.kernel.org/all/CABXGCsMLob0DC25JS8wwAYydnDoHBSoMh2_YLPfqm3TTvDE-Zw@mail.gmail.com/ +Link: https://www.amd.com/system/files/TechDocs/51192_Bolton_FCH_RRG.pdf +Signed-off-by: Raul E Rangel +Signed-off-by: Mario Limonciello +Signed-off-by: Alexandre Belloni +Link: https://lore.kernel.org/r/20220111225750.1699-1-mario.limonciello@amd.com +Signed-off-by: Sasha Levin +--- + drivers/rtc/rtc-mc146818-lib.c | 16 +++++++++++++++- + include/linux/mc146818rtc.h | 2 ++ + 2 files changed, 17 insertions(+), 1 deletion(-) + +diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c +index 5add637c9ad2..b036ff33fbe6 100644 +--- a/drivers/rtc/rtc-mc146818-lib.c ++++ b/drivers/rtc/rtc-mc146818-lib.c +@@ -99,6 +99,17 @@ unsigned int mc146818_get_time(struct rtc_time *time) + } + EXPORT_SYMBOL_GPL(mc146818_get_time); + ++/* AMD systems don't allow access to AltCentury with DV1 */ ++static bool apply_amd_register_a_behavior(void) ++{ ++#ifdef CONFIG_X86 ++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || ++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) ++ return true; ++#endif ++ return false; ++} ++ + /* Set the current date and time in the real time clock. */ + int mc146818_set_time(struct rtc_time *time) + { +@@ -172,7 +183,10 @@ int mc146818_set_time(struct rtc_time *time) + save_control = CMOS_READ(RTC_CONTROL); + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); +- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); ++ if (apply_amd_register_a_behavior()) ++ CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); ++ else ++ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + #ifdef CONFIG_MACH_DECSTATION + CMOS_WRITE(real_yrs, RTC_DEC_YEAR); +diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h +index 0661af17a758..1e0205811394 100644 +--- a/include/linux/mc146818rtc.h ++++ b/include/linux/mc146818rtc.h +@@ -86,6 +86,8 @@ struct cmos_rtc_board_info { + /* 2 values for divider stage reset, others for "testing purposes only" */ + # define RTC_DIV_RESET1 0x60 + # define RTC_DIV_RESET2 0x70 ++ /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */ ++# define RTC_AMD_BANK_SELECT 0x10 + /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ + # define RTC_RATE_SELECT 0x0F + +-- +2.35.1 + diff --git a/queue-5.10/rtc-pcf2127-fix-bug-when-reading-alarm-registers.patch b/queue-5.10/rtc-pcf2127-fix-bug-when-reading-alarm-registers.patch new file mode 100644 index 00000000000..bd9d196a4e7 --- /dev/null +++ b/queue-5.10/rtc-pcf2127-fix-bug-when-reading-alarm-registers.patch @@ -0,0 +1,45 @@ +From 972be1b30dd8148e0478914dd2de96ada20be147 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 8 Feb 2022 11:29:07 -0500 +Subject: rtc: pcf2127: fix bug when reading alarm registers + +From: Hugo Villeneuve + +[ Upstream commit 73ce05302007eece23a6acb7dc124c92a2209087 ] + +The first bug is that reading the 5 alarm registers results in a read +operation of 20 bytes. The reason is because the destination buffer is +defined as an array of "unsigned int", and we use the sizeof() +operator on this array to define the bulk read count. + +The second bug is that the read value is invalid, because we are +indexing the destination buffer as integers (4 bytes), instead of +indexing it as u8. + +Changing the destination buffer type to u8 fixes both problems. 
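+
+A small standalone C sketch (not part of the fix itself) of why sizeof() on
+the old buffer type over-reads; wide_buf and byte_buf are hypothetical names:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          unsigned int wide_buf[5]; /* old type handed to the bulk read */
+          uint8_t byte_buf[5];      /* new type, matches the 8-bit registers */
+
+          /* the bulk read count was taken from sizeof(buf) */
+          printf("sizeof(wide_buf) = %zu\n", sizeof(wide_buf)); /* typically 20 */
+          printf("sizeof(byte_buf) = %zu\n", sizeof(byte_buf)); /* 5 */
+          return 0;
+  }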
+ +Signed-off-by: Hugo Villeneuve +Signed-off-by: Alexandre Belloni +Link: https://lore.kernel.org/r/20220208162908.3182581-1-hugo@hugovil.com +Signed-off-by: Sasha Levin +--- + drivers/rtc/rtc-pcf2127.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c +index f0a6861ff3ae..715513311ece 100644 +--- a/drivers/rtc/rtc-pcf2127.c ++++ b/drivers/rtc/rtc-pcf2127.c +@@ -366,7 +366,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127) + static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) + { + struct pcf2127 *pcf2127 = dev_get_drvdata(dev); +- unsigned int buf[5], ctrl2; ++ u8 buf[5]; ++ unsigned int ctrl2; + int ret; + + ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2); +-- +2.35.1 + diff --git a/queue-5.10/rtc-sun6i-fix-time-overflow-handling.patch b/queue-5.10/rtc-sun6i-fix-time-overflow-handling.patch new file mode 100644 index 00000000000..df0ec749be1 --- /dev/null +++ b/queue-5.10/rtc-sun6i-fix-time-overflow-handling.patch @@ -0,0 +1,80 @@ +From 78eeabad3a0b4dfbc723438ef39f6ed2f56a61f5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 11 Feb 2022 12:26:28 +0000 +Subject: rtc: sun6i: Fix time overflow handling + +From: Andre Przywara + +[ Upstream commit 9f6cd82eca7e91a0d0311242a87c6aa3c2737968 ] + +Using "unsigned long" for UNIX timestamps is never a good idea, and +comparing the value of such a variable against U32_MAX does not do +anything useful on 32-bit systems. + +Use the proper time64_t type when dealing with timestamps, and avoid +cutting down the time range unnecessarily. This also fixes the flawed +check for the alarm time being too far into the future. + +The check for this condition is actually somewhat theoretical, as the +RTC counts till 2033 only anyways, and 2^32 seconds from now is not +before the year 2157 - at which point I hope nobody will be using this +hardware anymore. 
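+
+A tiny C illustration (not from the upstream commit) of why the old check is
+dead code when unsigned long is 32 bits wide; the timestamp values are made up:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          int64_t time_now = 2000000000;               /* hypothetical "now" */
+          int64_t time_set = time_now + 0x100000010LL; /* > U32_MAX seconds away */
+
+          /* what the old code computed with 32-bit unsigned long: the
+           * difference wraps, so "> U32_MAX" can never trigger */
+          uint32_t gap32 = (uint32_t)time_set - (uint32_t)time_now;
+
+          /* what the patched code effectively compares */
+          int64_t gap64 = time_set - time_now;
+
+          printf("32-bit gap: 0x%x (passes the check)\n", (unsigned)gap32);
+          printf("64-bit gap: 0x%llx (> U32_MAX, rejected)\n",
+                 (unsigned long long)gap64);
+          return 0;
+  }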
+ +Signed-off-by: Andre Przywara +Reviewed-by: Jernej Skrabec +Signed-off-by: Alexandre Belloni +Link: https://lore.kernel.org/r/20220211122643.1343315-4-andre.przywara@arm.com +Signed-off-by: Sasha Levin +--- + drivers/rtc/rtc-sun6i.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c +index f2818cdd11d8..52b36b7c6129 100644 +--- a/drivers/rtc/rtc-sun6i.c ++++ b/drivers/rtc/rtc-sun6i.c +@@ -138,7 +138,7 @@ struct sun6i_rtc_dev { + const struct sun6i_rtc_clk_data *data; + void __iomem *base; + int irq; +- unsigned long alarm; ++ time64_t alarm; + + struct clk_hw hw; + struct clk_hw *int_osc; +@@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) + struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); + struct rtc_time *alrm_tm = &wkalrm->time; + struct rtc_time tm_now; +- unsigned long time_now = 0; +- unsigned long time_set = 0; +- unsigned long time_gap = 0; +- int ret = 0; ++ time64_t time_now, time_set; ++ int ret; + + ret = sun6i_rtc_gettime(dev, &tm_now); + if (ret < 0) { +@@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) + return -EINVAL; + } + +- time_gap = time_set - time_now; +- +- if (time_gap > U32_MAX) { ++ if ((time_set - time_now) > U32_MAX) { + dev_err(dev, "Date too far in the future\n"); + return -EINVAL; + } +@@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) + writel(0, chip->base + SUN6I_ALRM_COUNTER); + usleep_range(100, 300); + +- writel(time_gap, chip->base + SUN6I_ALRM_COUNTER); ++ writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER); + chip->alarm = time_set; + + sun6i_rtc_setaie(wkalrm->enabled, chip); +-- +2.35.1 + diff --git a/queue-5.10/s390-pci-improve-zpci_dev-reference-counting.patch b/queue-5.10/s390-pci-improve-zpci_dev-reference-counting.patch new file mode 100644 index 00000000000..c7e0dd3cfdb --- /dev/null +++ b/queue-5.10/s390-pci-improve-zpci_dev-reference-counting.patch @@ -0,0 +1,135 @@ +From 7f4554066ba24afcd5d756353f201d634127db14 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 20 Sep 2021 09:32:21 +0200 +Subject: s390/pci: improve zpci_dev reference counting + +From: Niklas Schnelle + +[ Upstream commit c122383d221dfa2f41cfe5e672540595de986fde ] + +Currently zpci_dev uses kref based reference counting but only accounts +for one original reference plus one reference from an added pci_dev to +its underlying zpci_dev. Counting just the original reference worked +until the pci_dev reference was added in commit 2a671f77ee49 ("s390/pci: +fix use after free of zpci_dev") because once a zpci_dev goes away, i.e. +enters the reserved state, it would immediately get released. However +with the pci_dev reference this is no longer the case and the zpci_dev +may still appear in multiple availability events indicating that it was +reserved. This was solved by detecting when the zpci_dev is already on +its way out but still hanging around. This has however shown some light +on how unusual our zpci_dev reference counting is. + +Improve upon this by modelling zpci_dev reference counting on pci_dev. +Analogous to pci_get_slot() increment the reference count in +get_zdev_by_fid(). Thus all users of get_zdev_by_fid() must drop the +reference once they are done with the zpci_dev. 
+ +Similar to pci_scan_single_device(), zpci_create_device() returns the +device with an initial count of 1 and the device added to the zpci_list +(analogous to the PCI bus' device_list). In turn users of +zpci_create_device() must only drop the reference once the device is +gone from the point of view of the zPCI subsystem, it might still be +referenced by the common PCI subsystem though. + +Reviewed-by: Matthew Rosato +Signed-off-by: Niklas Schnelle +Signed-off-by: Vasily Gorbik +Signed-off-by: Sasha Levin +--- + arch/s390/pci/pci.c | 1 + + arch/s390/pci/pci_bus.h | 3 ++- + arch/s390/pci/pci_clp.c | 9 +++++++-- + arch/s390/pci/pci_event.c | 7 ++++++- + 4 files changed, 16 insertions(+), 4 deletions(-) + +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c +index e14e4a3a647a..74799439b259 100644 +--- a/arch/s390/pci/pci.c ++++ b/arch/s390/pci/pci.c +@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid) + list_for_each_entry(tmp, &zpci_list, entry) { + if (tmp->fid == fid) { + zdev = tmp; ++ zpci_zdev_get(zdev); + break; + } + } +diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h +index 55c9488e504c..8d2fcd091ca7 100644 +--- a/arch/s390/pci/pci_bus.h ++++ b/arch/s390/pci/pci_bus.h +@@ -13,7 +13,8 @@ void zpci_bus_device_unregister(struct zpci_dev *zdev); + void zpci_release_device(struct kref *kref); + static inline void zpci_zdev_put(struct zpci_dev *zdev) + { +- kref_put(&zdev->kref, zpci_release_device); ++ if (zdev) ++ kref_put(&zdev->kref, zpci_release_device); + } + + static inline void zpci_zdev_get(struct zpci_dev *zdev) +diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c +index 0a0e8b8293be..d1a5c80a41cb 100644 +--- a/arch/s390/pci/pci_clp.c ++++ b/arch/s390/pci/pci_clp.c +@@ -22,6 +22,8 @@ + #include + #include + ++#include "pci_bus.h" ++ + bool zpci_unique_uid; + + void update_uid_checking(bool new) +@@ -372,8 +374,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) + return; + + zdev = get_zdev_by_fid(entry->fid); +- if (!zdev) +- zpci_create_device(entry->fid, entry->fh, entry->config_state); ++ if (zdev) { ++ zpci_zdev_put(zdev); ++ return; ++ } ++ zpci_create_device(entry->fid, entry->fh, entry->config_state); + } + + int clp_scan_pci_devices(void) +diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c +index b7cfde7e80a8..6ced44b5be8a 100644 +--- a/arch/s390/pci/pci_event.c ++++ b/arch/s390/pci/pci_event.c +@@ -61,10 +61,12 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) + pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); + + if (!pdev) +- return; ++ goto no_pdev; + + pdev->error_state = pci_channel_io_perm_failure; + pci_dev_put(pdev); ++no_pdev: ++ zpci_zdev_put(zdev); + } + + void zpci_event_error(void *data) +@@ -76,6 +78,7 @@ void zpci_event_error(void *data) + static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) + { + struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); ++ bool existing_zdev = !!zdev; + enum zpci_state state; + struct pci_dev *pdev; + int ret; +@@ -161,6 +164,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) + default: + break; + } ++ if (existing_zdev) ++ zpci_zdev_put(zdev); + } + + void zpci_event_availability(void *data) +-- +2.35.1 + diff --git a/queue-5.10/series b/queue-5.10/series index f11f7c6a3a1..39bbfaeff88 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -5,3 +5,28 @@ revert-drm-i915-opregion-check-port-number-bounds-for-swsci-display-power-state. 
igc-remove-_i_phy_id-checking.patch igc-remove-phy-type-checking.patch igc-update-i226_k-device-id.patch +rtc-fix-use-after-free-on-device-removal.patch +rtc-pcf2127-fix-bug-when-reading-alarm-registers.patch +um-cleanup-syscall_handler_t-definition-cast-fix-war.patch +input-add-bounds-checking-to-input_set_capability.patch +input-stmfts-fix-reference-leak-in-stmfts_input_open.patch +nvme-pci-add-quirks-for-samsung-x5-ssds.patch +gfs2-disable-page-faults-during-lockless-buffered-re.patch +rtc-sun6i-fix-time-overflow-handling.patch +crypto-stm32-fix-reference-leak-in-stm32_crc_remove.patch +crypto-x86-chacha20-avoid-spurious-jumps-to-other-fu.patch +alsa-hda-realtek-enable-headset-mic-on-lenovo-p360.patch +s390-pci-improve-zpci_dev-reference-counting.patch +vhost_vdpa-don-t-setup-irq-offloading-when-irq_num-0.patch +tools-virtio-compile-with-pthread.patch +nvme-multipath-fix-hang-when-disk-goes-live-over-rec.patch +rtc-mc146818-lib-fix-the-altcentury-for-amd-platform.patch +fs-fix-an-infinite-loop-in-iomap_fiemap.patch +mips-lantiq-check-the-return-value-of-kzalloc.patch +drbd-remove-usage-of-list-iterator-variable-after-lo.patch +platform-chrome-cros_ec_debugfs-detach-log-reader-wq.patch +arm-9191-1-arm-stacktrace-kasan-silence-kasan-warnin.patch +nilfs2-fix-lockdep-warnings-in-page-operations-for-b.patch +nilfs2-fix-lockdep-warnings-during-disk-space-reclam.patch +revert-swiotlb-fix-info-leak-with-dma_from_device.patch +reinstate-some-of-swiotlb-rework-fix-info-leak-with-.patch diff --git a/queue-5.10/tools-virtio-compile-with-pthread.patch b/queue-5.10/tools-virtio-compile-with-pthread.patch new file mode 100644 index 00000000000..7bc6a7bf9b0 --- /dev/null +++ b/queue-5.10/tools-virtio-compile-with-pthread.patch @@ -0,0 +1,38 @@ +From 4bc94dcd865405db72a343d0098182eda913823e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 20 Mar 2022 07:02:14 -0400 +Subject: tools/virtio: compile with -pthread + +From: Michael S. Tsirkin + +[ Upstream commit f03560a57c1f60db6ac23ffd9714e1c69e2f95c7 ] + +When using pthreads, one has to compile and link with -lpthread, +otherwise e.g. glibc is not guaranteed to be reentrant. + +This replaces -lpthread. + +Reported-by: Matthew Wilcox +Signed-off-by: Michael S. Tsirkin +Signed-off-by: Sasha Levin +--- + tools/virtio/Makefile | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile +index 0d7bbe49359d..1b25cc7c64bb 100644 +--- a/tools/virtio/Makefile ++++ b/tools/virtio/Makefile +@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o + vringh_test: vringh_test.o vringh.o virtio_ring.o + + CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h +-LDFLAGS += -lpthread ++CFLAGS += -pthread ++LDFLAGS += -pthread + vpath %.c ../../drivers/virtio ../../drivers/vhost + mod: + ${MAKE} -C `pwd`/../.. 
M=`pwd`/vhost_test V=${V} +-- +2.35.1 + diff --git a/queue-5.10/um-cleanup-syscall_handler_t-definition-cast-fix-war.patch b/queue-5.10/um-cleanup-syscall_handler_t-definition-cast-fix-war.patch new file mode 100644 index 00000000000..ea746c1747b --- /dev/null +++ b/queue-5.10/um-cleanup-syscall_handler_t-definition-cast-fix-war.patch @@ -0,0 +1,70 @@ +From ce4875991c90f922ecf662ba6265781e10618564 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 10 Feb 2022 11:43:53 +0800 +Subject: um: Cleanup syscall_handler_t definition/cast, fix warning +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: David Gow + +[ Upstream commit f4f03f299a56ce4d73c5431e0327b3b6cb55ebb9 ] + +The syscall_handler_t type for x86_64 was defined as 'long (*)(void)', +but always cast to 'long (*)(long, long, long, long, long, long)' before +use. This now triggers a warning (see below). + +Define syscall_handler_t as the latter instead, and remove the cast. +This simplifies the code, and fixes the warning. + +Warning: +In file included from ../arch/um/include/asm/processor-generic.h:13 + from ../arch/x86/um/asm/processor.h:41 + from ../include/linux/rcupdate.h:30 + from ../include/linux/rculist.h:11 + from ../include/linux/pid.h:5 + from ../include/linux/sched.h:14 + from ../include/linux/ptrace.h:6 + from ../arch/um/kernel/skas/syscall.c:7: +../arch/um/kernel/skas/syscall.c: In function ‘handle_syscall’: +../arch/x86/um/shared/sysdep/syscalls_64.h:18:11: warning: cast between incompatible function types from ‘long int (*)(void)’ to ‘long int (*)(long int, long int, long int, long int, long int, long int)’ [ +-Wcast-function-type] + 18 | (((long (*)(long, long, long, long, long, long)) \ + | ^ +../arch/x86/um/asm/ptrace.h:36:62: note: in definition of macro ‘PT_REGS_SET_SYSCALL_RETURN’ + 36 | #define PT_REGS_SET_SYSCALL_RETURN(r, res) (PT_REGS_AX(r) = (res)) + | ^~~ +../arch/um/kernel/skas/syscall.c:46:33: note: in expansion of macro ‘EXECUTE_SYSCALL’ + 46 | EXECUTE_SYSCALL(syscall, regs)); + | ^~~~~~~~~~~~~~~ + +Signed-off-by: David Gow +Signed-off-by: Richard Weinberger +Signed-off-by: Sasha Levin +--- + arch/x86/um/shared/sysdep/syscalls_64.h | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h +index 8a7d5e1da98e..1e6875b4ffd8 100644 +--- a/arch/x86/um/shared/sysdep/syscalls_64.h ++++ b/arch/x86/um/shared/sysdep/syscalls_64.h +@@ -10,13 +10,12 @@ + #include + #include + +-typedef long syscall_handler_t(void); ++typedef long syscall_handler_t(long, long, long, long, long, long); + + extern syscall_handler_t *sys_call_table[]; + + #define EXECUTE_SYSCALL(syscall, regs) \ +- (((long (*)(long, long, long, long, long, long)) \ +- (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ ++ (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ + UPT_SYSCALL_ARG2(®s->regs), \ + UPT_SYSCALL_ARG3(®s->regs), \ + UPT_SYSCALL_ARG4(®s->regs), \ +-- +2.35.1 + diff --git a/queue-5.10/vhost_vdpa-don-t-setup-irq-offloading-when-irq_num-0.patch b/queue-5.10/vhost_vdpa-don-t-setup-irq-offloading-when-irq_num-0.patch new file mode 100644 index 00000000000..adb583adf5a --- /dev/null +++ b/queue-5.10/vhost_vdpa-don-t-setup-irq-offloading-when-irq_num-0.patch @@ -0,0 +1,41 @@ +From 628867663a15f0e1b90d912ca733724b5b09a610 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 22 Feb 2022 19:54:25 +0800 +Subject: vhost_vdpa: don't setup irq offloading when irq_num < 0 + 
+From: Zhu Lingshan + +[ Upstream commit cce0ab2b2a39072d81f98017f7b076f3410ef740 ] + +When irq number is negative(e.g., -EINVAL), the virtqueue +may be disabled or the virtqueues are sharing a device irq. +In such case, we should not setup irq offloading for a virtqueue. + +Signed-off-by: Zhu Lingshan +Link: https://lore.kernel.org/r/20220222115428.998334-3-lingshan.zhu@intel.com +Signed-off-by: Michael S. Tsirkin +Signed-off-by: Sasha Levin +--- + drivers/vhost/vdpa.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c +index e4d60009d908..04578aa87e4d 100644 +--- a/drivers/vhost/vdpa.c ++++ b/drivers/vhost/vdpa.c +@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) + return; + + irq = ops->get_vq_irq(vdpa, qid); ++ if (irq < 0) ++ return; ++ + irq_bypass_unregister_producer(&vq->call_ctx.producer); +- if (!vq->call_ctx.ctx || irq < 0) ++ if (!vq->call_ctx.ctx) + return; + + vq->call_ctx.producer.token = vq->call_ctx.ctx; +-- +2.35.1 +