From: Greg Kroah-Hartman
Date: Sat, 8 Jan 2022 13:22:15 +0000 (+0100)
Subject: 5.15-stable patches
X-Git-Tag: v4.4.299~20
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4e87fe8689a2d95308f11e8a06f3af8bf169e68b;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
    edac-i10nm-release-mdev-mbase-when-failing-to-detect-hbm.patch
    kvm-x86-check-for-rmaps-allocation.patch
    md-raid1-fix-missing-bitmap-update-w-o-writemostly-devices.patch
---

diff --git a/queue-5.15/edac-i10nm-release-mdev-mbase-when-failing-to-detect-hbm.patch b/queue-5.15/edac-i10nm-release-mdev-mbase-when-failing-to-detect-hbm.patch
new file mode 100644
index 00000000000..e48266ed432
--- /dev/null
+++ b/queue-5.15/edac-i10nm-release-mdev-mbase-when-failing-to-detect-hbm.patch
@@ -0,0 +1,53 @@
+From c370baa328022cbd46c59c821d1b467a97f047be Mon Sep 17 00:00:00 2001
+From: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Date: Fri, 24 Dec 2021 04:11:26 -0500
+Subject: EDAC/i10nm: Release mdev/mbase when failing to detect HBM
+
+From: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+
+commit c370baa328022cbd46c59c821d1b467a97f047be upstream.
+
+On systems without HBM (High Bandwidth Memory) mdev/mbase are not
+released/unmapped.
+
+Add the code to release mdev/mbase when failing to detect HBM.
+
+[Tony: re-word commit message]
+
+Cc: <stable@vger.kernel.org>
+Fixes: c945088384d0 ("EDAC/i10nm: Add support for high bandwidth memory")
+Reported-by: kernel test robot
+Reported-by: Dan Carpenter
+Signed-off-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Signed-off-by: Tony Luck
+Link: https://lore.kernel.org/r/20211224091126.1246-1-qiuxu.zhuo@intel.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/edac/i10nm_base.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -358,6 +358,9 @@ static int i10nm_get_hbm_munits(void)
+ 
+                 mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
+                 if (!mbase) {
++                        pci_dev_put(d->imc[lmc].mdev);
++                        d->imc[lmc].mdev = NULL;
++
+                         i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
+                                      base + off);
+                         return -ENOMEM;
+@@ -368,6 +371,12 @@ static int i10nm_get_hbm_munits(void)
+ 
+                 mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
+                 if (!I10NM_IS_HBM_IMC(mcmtr)) {
++                        iounmap(d->imc[lmc].mbase);
++                        d->imc[lmc].mbase = NULL;
++                        d->imc[lmc].hbm_mc = false;
++                        pci_dev_put(d->imc[lmc].mdev);
++                        d->imc[lmc].mdev = NULL;
++
+                         i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
+                         return -ENODEV;
+                 }
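
The shape of this fix is the kernel's standard unwind-on-error pattern:
every resource acquired before the failing step is released again, in
reverse acquisition order, before the error code is returned. Below is a
minimal userspace sketch of the same shape; probe_one(), grab_device(),
map_regs() and friends are hypothetical stand-ins for the driver's
pci_get_device()/ioremap() steps, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev  { int id; };       /* stands in for struct pci_dev        */
    struct regs { int mapped; };   /* stands in for an ioremap()'d region */

    static struct dev *grab_device(void)      { return malloc(sizeof(struct dev)); }
    static void put_device_ref(struct dev *d) { free(d); }
    static struct regs *map_regs(void)        { return malloc(sizeof(struct regs)); }
    static void unmap_regs(struct regs *r)    { free(r); }
    static int looks_like_hbm(struct regs *r) { (void)r; return 0; /* pretend: no HBM */ }

    static int probe_one(void)
    {
            struct dev *mdev = grab_device();           /* step 1 */
            if (!mdev)
                    return -1;

            struct regs *mbase = map_regs();            /* step 2 */
            if (!mbase) {
                    put_device_ref(mdev);               /* step 2 failed: undo step 1 */
                    return -1;
            }

            if (!looks_like_hbm(mbase)) {               /* detection failed:   */
                    unmap_regs(mbase);                  /* undo step 2,        */
                    put_device_ref(mdev);               /* then undo step 1    */
                    return -1;
            }

            unmap_regs(mbase);                          /* demo success path */
            put_device_ref(mdev);
            return 0;
    }

    int main(void)
    {
            printf("probe_one() = %d\n", probe_one());
            return 0;
    }
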
diff --git a/queue-5.15/kvm-x86-check-for-rmaps-allocation.patch b/queue-5.15/kvm-x86-check-for-rmaps-allocation.patch
new file mode 100644
index 00000000000..81ee9b3caab
--- /dev/null
+++ b/queue-5.15/kvm-x86-check-for-rmaps-allocation.patch
@@ -0,0 +1,59 @@
+From fffb5323780786c81ba005f8b8603d4a558aad28 Mon Sep 17 00:00:00 2001
+From: Nikunj A Dadhania <nikunj@amd.com>
+Date: Wed, 5 Jan 2022 09:33:37 +0530
+Subject: KVM: x86: Check for rmaps allocation
+
+From: Nikunj A Dadhania <nikunj@amd.com>
+
+commit fffb5323780786c81ba005f8b8603d4a558aad28 upstream.
+
+With TDP MMU being the default now, access to mmu_rmaps_stat debugfs
+file causes the following oops:
+
+BUG: kernel NULL pointer dereference, address: 0000000000000000
+PGD 0 P4D 0
+Oops: 0000 [#1] PREEMPT SMP NOPTI
+CPU: 7 PID: 3185 Comm: cat Not tainted 5.16.0-rc4+ #204
+RIP: 0010:pte_list_count+0x6/0x40
+ Call Trace:
+  <TASK>
+  ? kvm_mmu_rmaps_stat_show+0x15e/0x320
+  seq_read_iter+0x126/0x4b0
+  ? aa_file_perm+0x124/0x490
+  seq_read+0xf5/0x140
+  full_proxy_read+0x5c/0x80
+  vfs_read+0x9f/0x1a0
+  ksys_read+0x67/0xe0
+  __x64_sys_read+0x19/0x20
+  do_syscall_64+0x3b/0xc0
+  entry_SYSCALL_64_after_hwframe+0x44/0xae
+ RIP: 0033:0x7fca6fc13912
+
+Return early when rmaps are not present.
+
+Reported-by: Vasant Hegde
+Tested-by: Vasant Hegde
+Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
+Reviewed-by: Peter Xu
+Reviewed-by: Sean Christopherson
+Message-Id: <20220105040337.4234-1-nikunj@amd.com>
+Cc: stable@vger.kernel.org
+Fixes: 3bcd0662d66f ("KVM: X86: Introduce mmu_rmaps_stat per-vm debugfs file")
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kvm/debugfs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/debugfs.c
++++ b/arch/x86/kvm/debugfs.c
+@@ -95,6 +95,9 @@ static int kvm_mmu_rmaps_stat_show(struc
+         unsigned int *log[KVM_NR_PAGE_SIZES], *cur;
+         int i, j, k, l, ret;
+ 
++        if (!kvm_memslots_have_rmaps(kvm))
++                return 0;
++
+         ret = -ENOMEM;
+         memset(log, 0, sizeof(log));
+         for (i = 0; i < KVM_NR_PAGE_SIZES; i++) {
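
The one-line guard follows a general rule for debugfs show handlers: state
that is only conditionally allocated must be checked before the dump walks
it, because the file can be read in configurations where that state never
existed. A minimal sketch of the same guard, using a hypothetical struct vm
rather than the real KVM types (kvm_memslots_have_rmaps() in the patch is
the real check; everything below is illustrative):

    #include <stdio.h>
    #include <stddef.h>

    struct vm {
            int   *rmaps;   /* NULL when this MMU mode never allocates rmaps */
            size_t n;
    };

    static int rmaps_stat_show(const struct vm *vm)
    {
            if (!vm->rmaps)         /* the added check: nothing to dump */
                    return 0;

            for (size_t i = 0; i < vm->n; i++)
                    printf("rmap[%zu] = %d\n", i, vm->rmaps[i]);
            return 0;
    }

    int main(void)
    {
            struct vm tdp_vm = { .rmaps = NULL, .n = 4 };
            /* Safe with the guard: prints nothing and returns 0 instead of
             * dereferencing a NULL table. */
            return rmaps_stat_show(&tdp_vm);
    }
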
diff --git a/queue-5.15/md-raid1-fix-missing-bitmap-update-w-o-writemostly-devices.patch b/queue-5.15/md-raid1-fix-missing-bitmap-update-w-o-writemostly-devices.patch
new file mode 100644
index 00000000000..5cf32be9545
--- /dev/null
+++ b/queue-5.15/md-raid1-fix-missing-bitmap-update-w-o-writemostly-devices.patch
@@ -0,0 +1,93 @@
+From 46669e8616c649c71c4cfcd712fd3d107e771380 Mon Sep 17 00:00:00 2001
+From: Song Liu
+Date: Mon, 3 Jan 2022 13:49:36 -0800
+Subject: md/raid1: fix missing bitmap update w/o WriteMostly devices
+
+From: Song Liu
+
+commit 46669e8616c649c71c4cfcd712fd3d107e771380 upstream.
+
+commit [1] causes missing bitmap updates when there aren't any WriteMostly
+devices.
+
+Detailed steps to reproduce by Norbert (which somehow didn't make it to lore):
+
+ # setup md10 (raid1) with two drives (1 GByte sparse files)
+ dd if=/dev/zero of=disk1 bs=1024k seek=1024 count=0
+ dd if=/dev/zero of=disk2 bs=1024k seek=1024 count=0
+
+ losetup /dev/loop11 disk1
+ losetup /dev/loop12 disk2
+
+ mdadm --create /dev/md10 --level=1 --raid-devices=2 /dev/loop11 /dev/loop12
+
+ # add bitmap (aka write-intent log)
+ mdadm /dev/md10 --grow --bitmap=internal
+
+ echo check > /sys/block/md10/md/sync_action
+
+ root:# cat /sys/block/md10/md/mismatch_cnt
+ 0
+ root:#
+
+ # remove member drive disk2 (loop12)
+ mdadm /dev/md10 -f loop12 ; mdadm /dev/md10 -r loop12
+
+ # modify degraded md device
+ dd if=/dev/urandom of=/dev/md10 bs=512 count=1
+
+ # no blocks recorded as out of sync on the remaining member disk1/loop11
+ root:# mdadm -X /dev/loop11 | grep Bitmap
+           Bitmap : 16 bits (chunks), 0 dirty (0.0%)
+ root:#
+
+ # re-add disk2, nothing synced because of empty bitmap
+ mdadm /dev/md10 --re-add /dev/loop12
+
+ # check integrity again
+ echo check > /sys/block/md10/md/sync_action
+
+ # disk1 and disk2 are no longer in sync, reads return different data
+ root:# cat /sys/block/md10/md/mismatch_cnt
+ 128
+ root:#
+
+ # clean up
+ mdadm -S /dev/md10
+ losetup -d /dev/loop11
+ losetup -d /dev/loop12
+ rm disk1 disk2
+
+Fix this by moving the WriteMostly check to the if condition for
+alloc_behind_master_bio().
+
+[1] commit fd3b6975e9c1 ("md/raid1: only allocate write behind bio for WriteMostly device")
+Fixes: fd3b6975e9c1 ("md/raid1: only allocate write behind bio for WriteMostly device")
+Cc: stable@vger.kernel.org # v5.12+
+Cc: Guoqing Jiang
+Cc: Jens Axboe
+Reported-by: Norbert Warmuth
+Suggested-by: Linus Torvalds
+Signed-off-by: Song Liu
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/md/raid1.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1496,12 +1496,13 @@ static void raid1_write_request(struct m
+                 if (!r1_bio->bios[i])
+                         continue;
+ 
+-                if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
++                if (first_clone) {
+                         /* do behind I/O ?
+                          * Not if there are too many, or cannot
+                          * allocate memory, or a reader on WriteMostly
+                          * is waiting for behind writes to flush */
+                         if (bitmap &&
++                            test_bit(WriteMostly, &rdev->flags) &&
+                             (atomic_read(&bitmap->behind_writes)
+                              < mddev->bitmap_info.max_write_behind) &&
+                             !waitqueue_active(&bitmap->behind_wait)) {
diff --git a/queue-5.15/series b/queue-5.15/series
index 4a919f995ce..f58f12e40ae 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -29,3 +29,6 @@ sch_qfq-prevent-shift-out-of-bounds-in-qfq_init_qdisc.patch
 net-ena-fix-undefined-state-when-tx-request-id-is-out-of-bounds.patch
 net-ena-fix-wrong-rx-request-id-by-resetting-device.patch
 net-ena-fix-error-handling-when-calculating-max-io-queues-number.patch
+md-raid1-fix-missing-bitmap-update-w-o-writemostly-devices.patch
+edac-i10nm-release-mdev-mbase-when-failing-to-detect-hbm.patch
+kvm-x86-check-for-rmaps-allocation.patch
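
The md/raid1 fix above is purely a control-flow change: the first_clone
block also records the write in the bitmap, so the WriteMostly test may only
gate the behind-bio allocation inside the block, never entry to the block
itself. A small standalone sketch of the before/after shape (flag and
function names are illustrative, not the raid1 code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool bitmap_updated, behind_allocated;

    static void write_first_clone(bool write_mostly)
    {
            bool first_clone = true;

            /* Buggy shape:  if (first_clone && write_mostly) { ... }
             * skipped the bitmap update whenever no device was WriteMostly. */
            if (first_clone) {                   /* fixed: always entered       */
                    if (write_mostly)            /* moved test: behind I/O only */
                            behind_allocated = true;
                    bitmap_updated = true;       /* must happen for every write */
            }
    }

    int main(void)
    {
            write_first_clone(false);            /* no WriteMostly devices */
            printf("bitmap_updated=%d behind_allocated=%d\n",
                   bitmap_updated, behind_allocated);  /* 1 0 with the fix */
            return 0;
    }
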