--- /dev/null
+From f99a8e4373eeacb279bc9696937a55adbff7a28a Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Wed, 21 Apr 2021 23:32:36 +0200
+Subject: dm raid: fix inconclusive reshape layout on fast raid4/5/6 table reload sequences
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit f99a8e4373eeacb279bc9696937a55adbff7a28a upstream.
+
+If fast table reloads occur during an ongoing reshape of raid4/5/6
+devices the target may race reading a superblock vs the MD resync
+thread; causing an inconclusive reshape state to be read in its
+constructor.
+
+lvm2 test lvconvert-raid-reshape-stripes-load-reload.sh can cause
+BUG_ON() to trigger in md_run(), e.g.:
+"kernel BUG at drivers/md/raid5.c:7567!".
+
+Scenario triggering the bug:
+
+1. the MD sync thread calls end_reshape() from raid5_sync_request()
+ when done reshaping. However end_reshape() _only_ updates the
+ reshape position to MaxSector keeping the changed layout
+ configuration though (i.e. any delta disks, chunk sector or RAID
+ algorithm changes). That inconclusive configuration is stored in
+ the superblock.
+
+2. dm-raid constructs a mapping, loading named inconsistent superblock
+ as of step 1 before step 3 is able to finish resetting the reshape
+ state completely, and calls md_run() which leads to mentioned bug
+ in raid5.c.
+
+3. the MD RAID personality's finish_reshape() is called; which resets
+ the reshape information on chunk sectors, delta disks, etc. This
+ explains why the bug is rarely seen on multi-core machines, as MD's
+ finish_reshape() superblock update races with the dm-raid
+ constructor's superblock load in step 2.
+
+Fix identifies inconclusive superblock content in the dm-raid
+constructor and resets it before calling md_run(), factoring out
+identifying checks into rs_is_layout_change() to share in existing
+rs_reshape_requested() and new rs_reset_inconclusive_reshape(). Also
+enhance a comment and remove an empty line.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-raid.c | 34 ++++++++++++++++++++++++++++------
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1869,6 +1869,14 @@ static bool rs_takeover_requested(struct
+ return rs->md.new_level != rs->md.level;
+ }
+
++/* True if layout is set to reshape. */
++static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
++{
++ return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
++ rs->md.new_layout != rs->md.layout ||
++ rs->md.new_chunk_sectors != rs->md.chunk_sectors;
++}
++
+ /* True if @rs is requested to reshape by ctr */
+ static bool rs_reshape_requested(struct raid_set *rs)
+ {
+@@ -1881,9 +1889,7 @@ static bool rs_reshape_requested(struct
+ if (rs_is_raid0(rs))
+ return false;
+
+- change = mddev->new_layout != mddev->layout ||
+- mddev->new_chunk_sectors != mddev->chunk_sectors ||
+- rs->delta_disks;
++ change = rs_is_layout_change(rs, false);
+
+ /* Historical case to support raid1 reshape without delta disks */
+ if (rs_is_raid1(rs)) {
+@@ -2818,7 +2824,7 @@ static sector_t _get_reshape_sectors(str
+ }
+
+ /*
+- *
++ * Reshape:
+ * - change raid layout
+ * - change chunk size
+ * - add disks
+@@ -2928,6 +2934,20 @@ static int rs_setup_reshape(struct raid_
+ }
+
+ /*
++ * If the md resync thread has updated superblock with max reshape position
++ * at the end of a reshape but not (yet) reset the layout configuration
++ * changes -> reset the latter.
++ */
++static void rs_reset_inconclusive_reshape(struct raid_set *rs)
++{
++ if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
++ rs_set_cur(rs);
++ rs->md.delta_disks = 0;
++ rs->md.reshape_backwards = 0;
++ }
++}
++
++/*
+ * Enable/disable discard support on RAID set depending on
+ * RAID level and discard properties of underlying RAID members.
+ */
+@@ -3213,11 +3233,14 @@ size_check:
+ if (r)
+ goto bad;
+
++ /* Catch any inconclusive reshape superblock content. */
++ rs_reset_inconclusive_reshape(rs);
++
+ /* Start raid set read-only and assumed clean to change in raid_resume() */
+ rs->md.ro = 1;
+ rs->md.in_sync = 1;
+
+- /* Keep array frozen */
++ /* Keep array frozen until resume. */
+ set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
+
+ /* Has to be held on running the array */
+@@ -3231,7 +3254,6 @@ size_check:
+ }
+
+ r = md_start(&rs->md);
+-
+ if (r) {
+ ti->error = "Failed to start raid array";
+ mddev_unlock(&rs->md);
--- /dev/null
+From 77edfc6e51055b61cae2f54c8e6c3bb7c762e4fe Mon Sep 17 00:00:00 2001
+From: Hyeongseok Kim <hyeongseok@gmail.com>
+Date: Thu, 4 Mar 2021 09:15:34 +0900
+Subject: exfat: fix erroneous discard when clear cluster bit
+
+From: Hyeongseok Kim <hyeongseok@gmail.com>
+
+commit 77edfc6e51055b61cae2f54c8e6c3bb7c762e4fe upstream.
+
+If mounted with discard option, exFAT issues discard command when clear
+cluster bit to remove file. But the input parameter of cluster-to-sector
+calculation is abnormally added by reserved cluster size which is 2,
+leading to discard unrelated sectors included in target+2 cluster.
+With fixing this, remove the wrong comments in set/clear/find bitmap
+functions.
+
+Fixes: 1e49a94cf707 ("exfat: add bitmap operations")
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Hyeongseok Kim <hyeongseok@gmail.com>
+Acked-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Namjae Jeon <namjae.jeon@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/exfat/balloc.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_i
+ kfree(sbi->vol_amap);
+ }
+
+-/*
+- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
+- * the cluster heap.
+- */
+ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+ {
+ int i, b;
+@@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode
+ return 0;
+ }
+
+-/*
+- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
+- * the cluster heap.
+- */
+ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
+ {
+ int i, b;
+@@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *in
+ int ret_discard;
+
+ ret_discard = sb_issue_discard(sb,
+- exfat_cluster_to_sector(sbi, clu +
+- EXFAT_RESERVED_CLUSTERS),
++ exfat_cluster_to_sector(sbi, clu),
+ (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
+
+ if (ret_discard == -EOPNOTSUPP) {
--- /dev/null
+From 4f06dd92b5d0a6f8eec6a34b8d6ef3e1f4ac1e10 Mon Sep 17 00:00:00 2001
+From: Vivek Goyal <vgoyal@redhat.com>
+Date: Wed, 21 Oct 2020 16:12:49 -0400
+Subject: fuse: fix write deadlock
+
+From: Vivek Goyal <vgoyal@redhat.com>
+
+commit 4f06dd92b5d0a6f8eec6a34b8d6ef3e1f4ac1e10 upstream.
+
+There are two modes for write(2) and friends in fuse:
+
+a) write through (update page cache, send sync WRITE request to userspace)
+
+b) buffered write (update page cache, async writeout later)
+
+The write through method kept all the page cache pages locked that were
+used for the request. Keeping more than one page locked is deadlock prone
+and Qian Cai demonstrated this with trinity fuzzing.
+
+The reason for keeping the pages locked is that concurrent mapped reads
+shouldn't try to pull possibly stale data into the page cache.
+
+For full page writes, the easy way to fix this is to make the cached page
+be the authoritative source by marking the page PG_uptodate immediately.
+After this the page can be safely unlocked, since mapped/cached reads will
+take the written data from the cache.
+
+Concurrent mapped writes will now cause data in the original WRITE request
+to be updated; this however doesn't cause any data inconsistency and this
+scenario should be exceedingly rare anyway.
+
+If the WRITE request returns with an error in the above case, currently the
+page is not marked uptodate; this means that a concurrent read will always
+read consistent data. After this patch the page is uptodate between
+writing to the cache and receiving the error: there's a window where a cached
+read will read the wrong data. While theoretically this could be a
+regression, it is unlikely to be one in practice, since this is normal for
+buffered writes.
+
+In case of a partial page write to an already uptodate page the locking is
+also unnecessary, with the above caveats.
+
+Partial write of a not uptodate page still needs to be handled. One way
+would be to read the complete page before doing the write. This is not
+possible, since it might break filesystems that don't expect any READ
+requests when the file was opened O_WRONLY.
+
+The other solution is to serialize the synchronous write with reads from
+the partial pages. The easiest way to do this is to keep the partial pages
+locked. The problem is that a write() may involve two such pages (one head
+and one tail). This patch fixes it by only locking the partial tail page.
+If there's a partial head page as well, then split that off as a separate
+WRITE request.
+
+Reported-by: Qian Cai <cai@lca.pw>
+Link: https://lore.kernel.org/linux-fsdevel/4794a3fa3742a5e84fb0f934944204b55730829b.camel@lca.pw/
+Fixes: ea9b9907b82a ("fuse: implement perform_write")
+Cc: <stable@vger.kernel.org> # v2.6.26
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c | 41 +++++++++++++++++++++++++++++------------
+ fs/fuse/fuse_i.h | 1 +
+ 2 files changed, 30 insertions(+), 12 deletions(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1093,6 +1093,7 @@ static ssize_t fuse_send_write_pages(str
+ struct fuse_file *ff = file->private_data;
+ struct fuse_mount *fm = ff->fm;
+ unsigned int offset, i;
++ bool short_write;
+ int err;
+
+ for (i = 0; i < ap->num_pages; i++)
+@@ -1105,32 +1106,38 @@ static ssize_t fuse_send_write_pages(str
+ if (!err && ia->write.out.size > count)
+ err = -EIO;
+
++ short_write = ia->write.out.size < count;
+ offset = ap->descs[0].offset;
+ count = ia->write.out.size;
+ for (i = 0; i < ap->num_pages; i++) {
+ struct page *page = ap->pages[i];
+
+- if (!err && !offset && count >= PAGE_SIZE)
+- SetPageUptodate(page);
+-
+- if (count > PAGE_SIZE - offset)
+- count -= PAGE_SIZE - offset;
+- else
+- count = 0;
+- offset = 0;
+-
+- unlock_page(page);
++ if (err) {
++ ClearPageUptodate(page);
++ } else {
++ if (count >= PAGE_SIZE - offset)
++ count -= PAGE_SIZE - offset;
++ else {
++ if (short_write)
++ ClearPageUptodate(page);
++ count = 0;
++ }
++ offset = 0;
++ }
++ if (ia->write.page_locked && (i == ap->num_pages - 1))
++ unlock_page(page);
+ put_page(page);
+ }
+
+ return err;
+ }
+
+-static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
++static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
+ struct address_space *mapping,
+ struct iov_iter *ii, loff_t pos,
+ unsigned int max_pages)
+ {
++ struct fuse_args_pages *ap = &ia->ap;
+ struct fuse_conn *fc = get_fuse_conn(mapping->host);
+ unsigned offset = pos & (PAGE_SIZE - 1);
+ size_t count = 0;
+@@ -1183,6 +1190,16 @@ static ssize_t fuse_fill_write_pages(str
+ if (offset == PAGE_SIZE)
+ offset = 0;
+
++ /* If we copied full page, mark it uptodate */
++ if (tmp == PAGE_SIZE)
++ SetPageUptodate(page);
++
++ if (PageUptodate(page)) {
++ unlock_page(page);
++ } else {
++ ia->write.page_locked = true;
++ break;
++ }
+ if (!fc->big_writes)
+ break;
+ } while (iov_iter_count(ii) && count < fc->max_write &&
+@@ -1226,7 +1243,7 @@ static ssize_t fuse_perform_write(struct
+ break;
+ }
+
+- count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
++ count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
+ if (count <= 0) {
+ err = count;
+ } else {
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -911,6 +911,7 @@ struct fuse_io_args {
+ struct {
+ struct fuse_write_in in;
+ struct fuse_write_out out;
++ bool page_locked;
+ } write;
+ };
+ struct fuse_args_pages ap;
--- /dev/null
+From 2417b9869b81882ab90fd5ed1081a1cb2d4db1dd Mon Sep 17 00:00:00 2001
+From: Paul Clements <paul.clements@us.sios.com>
+Date: Thu, 15 Apr 2021 17:17:57 -0400
+Subject: md/raid1: properly indicate failure when ending a failed write request
+
+From: Paul Clements <paul.clements@us.sios.com>
+
+commit 2417b9869b81882ab90fd5ed1081a1cb2d4db1dd upstream.
+
+This patch addresses a data corruption bug in raid1 arrays using bitmaps.
+Without this fix, the bitmap bits for the failed I/O end up being cleared.
+
+Since we are in the failure leg of raid1_end_write_request, the request
+either needs to be retried (R1BIO_WriteError) or failed (R1BIO_Degraded).
+
+Fixes: eeba6809d8d5 ("md/raid1: end bio when the device faulty")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Paul Clements <paul.clements@us.sios.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid1.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -478,6 +478,8 @@ static void raid1_end_write_request(stru
+ if (!test_bit(Faulty, &rdev->flags))
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
++ /* Fail the request */
++ set_bit(R1BIO_Degraded, &r1_bio->state);
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
tpm-vtpm_proxy-avoid-reading-host-log-when-using-a-virtual-device.patch
crypto-arm-curve25519-move-.fpu-after-.arch.patch
crypto-rng-fix-crypto_rng_reset-refcounting-when-crypto_stats.patch
+md-raid1-properly-indicate-failure-when-ending-a-failed-write-request.patch
+dm-raid-fix-inconclusive-reshape-layout-on-fast-raid4-5-6-table-reload-sequences.patch
+fuse-fix-write-deadlock.patch
+exfat-fix-erroneous-discard-when-clear-cluster-bit.patch
+sfc-farch-fix-tx-queue-lookup-in-tx-flush-done-handling.patch
+sfc-farch-fix-tx-queue-lookup-in-tx-event-handling.patch
--- /dev/null
+From 83b09a1807415608b387c7bc748d329fefc5617e Mon Sep 17 00:00:00 2001
+From: Edward Cree <ecree.xilinx@gmail.com>
+Date: Tue, 20 Apr 2021 13:28:28 +0100
+Subject: sfc: farch: fix TX queue lookup in TX event handling
+
+From: Edward Cree <ecree.xilinx@gmail.com>
+
+commit 83b09a1807415608b387c7bc748d329fefc5617e upstream.
+
+We're starting from a TXQ label, not a TXQ type, so
+ efx_channel_get_tx_queue() is inappropriate (and could return NULL,
+ leading to panics).
+
+Fixes: 12804793b17c ("sfc: decouple TXQ type from label")
+Cc: stable@vger.kernel.org
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/farch.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/farch.c
++++ b/drivers/net/ethernet/sfc/farch.c
+@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_cha
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+- tx_queue = efx_channel_get_tx_queue(
+- channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++ tx_queue = channel->tx_queue +
++ (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+- tx_queue = efx_channel_get_tx_queue(
+- channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++ tx_queue = channel->tx_queue +
++ (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+
+ netif_tx_lock(efx->net_dev);
+ efx_farch_notify_tx_desc(tx_queue);
--- /dev/null
+From 5b1faa92289b53cad654123ed2bc8e10f6ddd4ac Mon Sep 17 00:00:00 2001
+From: Edward Cree <ecree.xilinx@gmail.com>
+Date: Tue, 20 Apr 2021 13:27:22 +0100
+Subject: sfc: farch: fix TX queue lookup in TX flush done handling
+
+From: Edward Cree <ecree.xilinx@gmail.com>
+
+commit 5b1faa92289b53cad654123ed2bc8e10f6ddd4ac upstream.
+
+We're starting from a TXQ instance number ('qid'), not a TXQ type, so
+ efx_get_tx_queue() is inappropriate (and could return NULL, leading
+ to panics).
+
+Fixes: 12804793b17c ("sfc: decouple TXQ type from label")
+Reported-by: Trevor Hemsley <themsley@voiceflex.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/farch.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/farch.c
++++ b/drivers/net/ethernet/sfc/farch.c
+@@ -1081,16 +1081,16 @@ static void
+ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+ {
+ struct efx_tx_queue *tx_queue;
++ struct efx_channel *channel;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
+- tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
+- qid % EFX_MAX_TXQ_PER_CHANNEL);
+- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
++ channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
++ tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
++ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
+ efx_farch_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+- }
+ }
+ }
+