--- /dev/null
+From 332bd0778775d0cf105c4b9e03e460b590749916 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Tue, 28 Jun 2022 00:37:22 +0200
+Subject: dm raid: fix accesses beyond end of raid member array
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit 332bd0778775d0cf105c4b9e03e460b590749916 upstream.
+
+On dm-raid table load (using raid_ctr), dm-raid allocates an array
+rs->dev[rs->raid_disks] for the raid device members. rs->raid_disks
+is defined by the number of raid metadata and image tuples passed
+into the target's constructor.
+
+In the case of RAID layout changes being requested, that number can be
+different from the current number of members for existing raid sets as
+defined in their superblocks. Example RAID layout changes include:
+- raid1 legs being added/removed
+- raid4/5/6/10 number of stripes changed (stripe reshaping)
+- takeover to higher raid level (e.g. raid5 -> raid6)
+
+When accessing array members, rs->raid_disks must be used in control
+loops instead of the potentially larger value in rs->md.raid_disks.
+Otherwise the accesses run beyond the end of the rs->dev[] array.
+
+Fix this by changing code that is prone to out-of-bounds access.
+Also fix validate_raid_redundancy() to validate all devices that are
+added, and use braces to clean up raid_iterate_devices().
+
+The out-of-bounds memory accesses were discovered using KASAN.
+
+This commit was verified to pass all LVM2 RAID tests (with KASAN
+enabled).
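+
+As a purely illustrative userspace C sketch (stub types and values are
+made up, this is not kernel code): clamping the loop bound with the
+smaller of the two counts, the same pattern the patch applies via
+raid_disks = min(rs->raid_disks, rs->md.raid_disks), keeps iteration
+within the allocation:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct raid_dev_stub { int in_sync; };  /* stand-in for struct raid_dev */
+
+  int main(void)
+  {
+          unsigned int raid_disks = 4;     /* members allocated (rs->raid_disks) */
+          unsigned int md_raid_disks = 6;  /* larger superblock value after a grow */
+          unsigned int i, bound;
+          struct raid_dev_stub *dev = calloc(raid_disks, sizeof(*dev));
+
+          if (!dev)
+                  return 1;
+
+          /* Iterate with the smaller count so we never read past the
+           * calloc'd array, even while a layout change is requested. */
+          bound = raid_disks < md_raid_disks ? raid_disks : md_raid_disks;
+          for (i = 0; i < bound; i++)
+                  printf("dev[%u].in_sync=%d\n", i, dev[i].in_sync);
+
+          free(dev);
+          return 0;
+  }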
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-raid.c | 34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -998,12 +998,13 @@ static int validate_region_size(struct r
+ static int validate_raid_redundancy(struct raid_set *rs)
+ {
+ unsigned int i, rebuild_cnt = 0;
+- unsigned int rebuilds_per_group = 0, copies;
++ unsigned int rebuilds_per_group = 0, copies, raid_disks;
+ unsigned int group_size, last_group_start;
+
+- for (i = 0; i < rs->md.raid_disks; i++)
+- if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+- !rs->dev[i].rdev.sb_page)
++ for (i = 0; i < rs->raid_disks; i++)
++ if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
++ ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
++ !rs->dev[i].rdev.sb_page)))
+ rebuild_cnt++;
+
+ switch (rs->md.level) {
+@@ -1043,8 +1044,9 @@ static int validate_raid_redundancy(stru
+ * A A B B C
+ * C D D E E
+ */
++ raid_disks = min(rs->raid_disks, rs->md.raid_disks);
+ if (__is_raid10_near(rs->md.new_layout)) {
+- for (i = 0; i < rs->md.raid_disks; i++) {
++ for (i = 0; i < raid_disks; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
+ if ((!rs->dev[i].rdev.sb_page ||
+@@ -1067,10 +1069,10 @@ static int validate_raid_redundancy(stru
+ * results in the need to treat the last (potentially larger)
+ * set differently.
+ */
+- group_size = (rs->md.raid_disks / copies);
+- last_group_start = (rs->md.raid_disks / group_size) - 1;
++ group_size = (raid_disks / copies);
++ last_group_start = (raid_disks / group_size) - 1;
+ last_group_start *= group_size;
+- for (i = 0; i < rs->md.raid_disks; i++) {
++ for (i = 0; i < raid_disks; i++) {
+ if (!(i % copies) && !(i > last_group_start))
+ rebuilds_per_group = 0;
+ if ((!rs->dev[i].rdev.sb_page ||
+@@ -1585,7 +1587,7 @@ static sector_t __rdev_sectors(struct ra
+ {
+ int i;
+
+- for (i = 0; i < rs->md.raid_disks; i++) {
++ for (i = 0; i < rs->raid_disks; i++) {
+ struct md_rdev *rdev = &rs->dev[i].rdev;
+
+ if (!test_bit(Journal, &rdev->flags) &&
+@@ -3751,13 +3753,13 @@ static int raid_iterate_devices(struct d
+ unsigned int i;
+ int r = 0;
+
+- for (i = 0; !r && i < rs->md.raid_disks; i++)
+- if (rs->dev[i].data_dev)
+- r = fn(ti,
+- rs->dev[i].data_dev,
+- 0, /* No offset on data devs */
+- rs->md.dev_sectors,
+- data);
++ for (i = 0; !r && i < rs->raid_disks; i++) {
++ if (rs->dev[i].data_dev) {
++ r = fn(ti, rs->dev[i].data_dev,
++ 0, /* No offset on data devs */
++ rs->md.dev_sectors, data);
++ }
++ }
+
+ return r;
+ }
--- /dev/null
+From 617b365872a247480e9dcd50a32c8d1806b21861 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 29 Jun 2022 13:40:57 -0400
+Subject: dm raid: fix KASAN warning in raid5_add_disk
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 617b365872a247480e9dcd50a32c8d1806b21861 upstream.
+
+There's a KASAN warning in raid5_add_disk() when running the LVM
+testsuite, in the test
+lvconvert-raid-reshape-linear_to_raid6-single-type.sh. Fix the warning
+by verifying that rdev->saved_raid_disk is within the [first, last]
+slot range before it is used to index conf->disks[].
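+
+A minimal userspace C sketch of the guard pattern (stub names, not the
+kernel function): an index remembered in metadata must be checked
+against both ends of the valid slot range before it indexes the disks
+array:
+
+  #include <stdio.h>
+
+  #define NDISKS 8
+
+  static void *disks[NDISKS];  /* stand-in for conf->disks[].rdev */
+
+  static int pick_slot(int saved, int first, int last)
+  {
+          /* reuse the remembered slot only if it lies within
+           * [first, last] and is free; otherwise fall back to 'first' */
+          if (saved >= first && saved <= last && !disks[saved])
+                  return saved;
+          return first;
+  }
+
+  int main(void)
+  {
+          printf("%d\n", pick_slot(9, 0, NDISKS - 1));  /* out of range -> 0 */
+          printf("%d\n", pick_slot(3, 0, NDISKS - 1));  /* in range, free -> 3 */
+          return 0;
+  }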
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid5.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -7716,6 +7716,7 @@ static int raid5_add_disk(struct mddev *
+ */
+ if (rdev->saved_raid_disk >= 0 &&
+ rdev->saved_raid_disk >= first &&
++ rdev->saved_raid_disk <= last &&
+ conf->disks[rdev->saved_raid_disk].rdev == NULL)
+ first = rdev->saved_raid_disk;
+
--- /dev/null
+From ef9102004a87cb3f8b26e000a095a261fc0467d3 Mon Sep 17 00:00:00 2001
+From: Chris Ye <chris.ye@intel.com>
+Date: Tue, 31 May 2022 17:09:54 -0700
+Subject: nvdimm: Fix badblocks clear off-by-one error
+
+From: Chris Ye <chris.ye@intel.com>
+
+commit ef9102004a87cb3f8b26e000a095a261fc0467d3 upstream.
+
+nvdimm_clear_badblocks_region() validates badblocks clearing requests
+against the span of the region, however it compares the exclusive end
+of the request range (ctx->phys + ctx->cleared) to the inclusive end
+of the region (ndr_end). Fix up the off-by-one error.
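+
+A small userspace C illustration of the fixed comparison (invented
+helper and values, not the driver code): a request starting at 'phys'
+and covering 'len' bytes last touches byte phys + len - 1, and that is
+what must be compared against the inclusive region end:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static bool in_region(unsigned long long phys, unsigned long long len,
+                        unsigned long long start, unsigned long long size)
+  {
+          unsigned long long end = start + size - 1;  /* inclusive end */
+
+          /* the buggy check used 'phys + len > end' and so rejected
+           * requests ending exactly on the region's final byte */
+          return phys >= start && phys + len - 1 <= end;
+  }
+
+  int main(void)
+  {
+          /* clearing the last 4096 bytes of an 8192-byte region is valid */
+          printf("%d\n", in_region(4096, 4096, 0, 8192));  /* prints 1 */
+          return 0;
+  }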
+
+Fixes: 23f498448362 ("libnvdimm: rework region badblocks clearing")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chris Ye <chris.ye@intel.com>
+Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
+Link: https://lore.kernel.org/r/165404219489.2445897.9792886413715690399.stgit@dwillia2-xfh
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvdimm/bus.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -196,8 +196,8 @@ static int nvdimm_clear_badblocks_region
+ ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
+
+ /* make sure we are in the region */
+- if (ctx->phys < nd_region->ndr_start
+- || (ctx->phys + ctx->cleared) > ndr_end)
++ if (ctx->phys < nd_region->ndr_start ||
++ (ctx->phys + ctx->cleared - 1) > ndr_end)
+ return 0;
+
+ sector = (ctx->phys - nd_region->ndr_start) / 512;