git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Mar 2021 13:55:29 +0000 (14:55 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Mar 2021 13:55:29 +0000 (14:55 +0100)
added patches:
cpufreq-intel_pstate-get-per-cpu-max-freq-via-msr_hwp_capabilities-if-available.patch
dm-era-fix-bitset-memory-leaks.patch
dm-era-only-resize-metadata-in-preresume.patch
dm-era-recover-committed-writeset-after-crash.patch
dm-era-reinitialize-bitset-cache-before-digesting-a-new-writeset.patch
dm-era-use-correct-value-size-in-equality-function-of-writeset-tree.patch
dm-era-verify-the-data-block-size-hasn-t-changed.patch
dm-fix-deadlock-when-swapping-to-encrypted-device.patch
dm-writecache-fix-writing-beyond-end-of-underlying-device-when-shrinking.patch
f2fs-fix-out-of-repair-__setattr_copy.patch
gfs2-don-t-skip-dlm-unlock-if-glock-has-an-lvb.patch
gfs2-recursive-gfs2_quota_hold-in-gfs2_iomap_end.patch
s390-vtime-fix-inline-assembly-clobber-list.patch
sparc32-fix-a-user-triggerable-oops-in-clear_user.patch
spi-spi-synquacer-fix-set_cs-handling.patch
um-mm-check-more-comprehensively-for-stub-changes.patch
virtio-s390-implement-virtio-ccw-revision-2-correctly.patch

18 files changed:
queue-5.4/cpufreq-intel_pstate-get-per-cpu-max-freq-via-msr_hwp_capabilities-if-available.patch [new file with mode: 0644]
queue-5.4/dm-era-fix-bitset-memory-leaks.patch [new file with mode: 0644]
queue-5.4/dm-era-only-resize-metadata-in-preresume.patch [new file with mode: 0644]
queue-5.4/dm-era-recover-committed-writeset-after-crash.patch [new file with mode: 0644]
queue-5.4/dm-era-reinitialize-bitset-cache-before-digesting-a-new-writeset.patch [new file with mode: 0644]
queue-5.4/dm-era-use-correct-value-size-in-equality-function-of-writeset-tree.patch [new file with mode: 0644]
queue-5.4/dm-era-verify-the-data-block-size-hasn-t-changed.patch [new file with mode: 0644]
queue-5.4/dm-fix-deadlock-when-swapping-to-encrypted-device.patch [new file with mode: 0644]
queue-5.4/dm-writecache-fix-writing-beyond-end-of-underlying-device-when-shrinking.patch [new file with mode: 0644]
queue-5.4/f2fs-fix-out-of-repair-__setattr_copy.patch [new file with mode: 0644]
queue-5.4/gfs2-don-t-skip-dlm-unlock-if-glock-has-an-lvb.patch [new file with mode: 0644]
queue-5.4/gfs2-recursive-gfs2_quota_hold-in-gfs2_iomap_end.patch [new file with mode: 0644]
queue-5.4/s390-vtime-fix-inline-assembly-clobber-list.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/sparc32-fix-a-user-triggerable-oops-in-clear_user.patch [new file with mode: 0644]
queue-5.4/spi-spi-synquacer-fix-set_cs-handling.patch [new file with mode: 0644]
queue-5.4/um-mm-check-more-comprehensively-for-stub-changes.patch [new file with mode: 0644]
queue-5.4/virtio-s390-implement-virtio-ccw-revision-2-correctly.patch [new file with mode: 0644]

diff --git a/queue-5.4/cpufreq-intel_pstate-get-per-cpu-max-freq-via-msr_hwp_capabilities-if-available.patch b/queue-5.4/cpufreq-intel_pstate-get-per-cpu-max-freq-via-msr_hwp_capabilities-if-available.patch
new file mode 100644 (file)
index 0000000..d20f183
--- /dev/null
@@ -0,0 +1,58 @@
+From 6f67e060083a84a4cc364eab6ae40c717165fb0c Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Tue, 12 Jan 2021 13:21:27 +0800
+Subject: cpufreq: intel_pstate: Get per-CPU max freq via MSR_HWP_CAPABILITIES if available
+
+From: Chen Yu <yu.c.chen@intel.com>
+
+commit 6f67e060083a84a4cc364eab6ae40c717165fb0c upstream.
+
+Currently, when turbo is disabled (either by BIOS or by the user),
+the intel_pstate driver reads the max non-turbo frequency from the
+package-wide MSR_PLATFORM_INFO(0xce) register.
+
+However, on asymmetric platforms it is possible in theory that small
+and big core with HWP enabled might have different max non-turbo CPU
+frequency, because MSR_HWP_CAPABILITIES is per-CPU scope according
+to Intel Software Developer Manual.
+
+The turbo max freq is already per-CPU in current code, so make
+similar change to the max non-turbo frequency as well.
+
+Reported-by: Wendy Wang <wendy.wang@intel.com>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+[ rjw: Subject and changelog edits ]
+Cc: 4.18+ <stable@vger.kernel.org> # 4.18+: a45ee4d4e13b: cpufreq: intel_pstate: Change intel_pstate_get_hwp_max() argument
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1566,11 +1566,9 @@ static void intel_pstate_max_within_limi
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+       cpu->pstate.min_pstate = pstate_funcs.get_min();
+-      cpu->pstate.max_pstate = pstate_funcs.get_max();
+       cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
+       cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+       cpu->pstate.scaling = pstate_funcs.get_scaling();
+-      cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+       if (hwp_active && !hwp_mode_bdw) {
+               unsigned int phy_max, current_max;
+@@ -1578,9 +1576,12 @@ static void intel_pstate_get_cpu_pstates
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+               cpu->pstate.turbo_pstate = phy_max;
++              cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++              cpu->pstate.max_pstate = pstate_funcs.get_max();
+       }
++      cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+       if (pstate_funcs.get_aperf_mperf_shift)
+               cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
diff --git a/queue-5.4/dm-era-fix-bitset-memory-leaks.patch b/queue-5.4/dm-era-fix-bitset-memory-leaks.patch
new file mode 100644 (file)
index 0000000..97602b4
--- /dev/null
@@ -0,0 +1,58 @@
+From 904e6b266619c2da5c58b5dce14ae30629e39645 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 22 Jan 2021 17:25:54 +0200
+Subject: dm era: Fix bitset memory leaks
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 904e6b266619c2da5c58b5dce14ae30629e39645 upstream.
+
+Deallocate the memory allocated for the in-core bitsets when destroying
+the target and in error paths.
+
+Fixes: eec40579d84873 ("dm: add era target")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Reviewed-by: Ming-Hung Tsai <mtsai@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-era-target.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -47,6 +47,7 @@ struct writeset {
+ static void writeset_free(struct writeset *ws)
+ {
+       vfree(ws->bits);
++      ws->bits = NULL;
+ }
+ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+@@ -811,6 +812,8 @@ static struct era_metadata *metadata_ope
+ static void metadata_close(struct era_metadata *md)
+ {
++      writeset_free(&md->writesets[0]);
++      writeset_free(&md->writesets[1]);
+       destroy_persistent_data_objects(md);
+       kfree(md);
+ }
+@@ -848,6 +851,7 @@ static int metadata_resize(struct era_me
+       r = writeset_alloc(&md->writesets[1], *new_size);
+       if (r) {
+               DMERR("%s: writeset_alloc failed for writeset 1", __func__);
++              writeset_free(&md->writesets[0]);
+               return r;
+       }
+@@ -858,6 +862,8 @@ static int metadata_resize(struct era_me
+                           &value, &md->era_array_root);
+       if (r) {
+               DMERR("%s: dm_array_resize failed", __func__);
++              writeset_free(&md->writesets[0]);
++              writeset_free(&md->writesets[1]);
+               return r;
+       }
diff --git a/queue-5.4/dm-era-only-resize-metadata-in-preresume.patch b/queue-5.4/dm-era-only-resize-metadata-in-preresume.patch
new file mode 100644 (file)
index 0000000..db7b521
--- /dev/null
@@ -0,0 +1,80 @@
+From cca2c6aebe86f68103a8615074b3578e854b5016 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Thu, 11 Feb 2021 16:22:43 +0200
+Subject: dm era: only resize metadata in preresume
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit cca2c6aebe86f68103a8615074b3578e854b5016 upstream.
+
+Metadata resize shouldn't happen in the ctr. The ctr loads a temporary
+(inactive) table that will only become active upon resume. That is why
+resize should always be done in terms of resume. Otherwise a load (ctr)
+whose inactive table never becomes active will incorrectly resize the
+metadata.
+
+Also, perform the resize directly in preresume, instead of using the
+worker to do it.
+
+The worker might run other metadata operations, e.g., it could start
+digestion, before resizing the metadata. These operations will end up
+using the old size.
+
+This could lead to errors, like:
+
+  device-mapper: era: metadata_digest_transcribe_writeset: dm_array_set_value failed
+  device-mapper: era: process_old_eras: digest step failed, stopping digestion
+
+The reason of the above error is that the worker started the digestion
+of the archived writeset using the old, larger size.
+
+As a result, metadata_digest_transcribe_writeset tried to write beyond
+the end of the era array.
+
+Fixes: eec40579d84873 ("dm: add era target")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-era-target.c |   21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -1501,15 +1501,6 @@ static int era_ctr(struct dm_target *ti,
+       }
+       era->md = md;
+-      era->nr_blocks = calc_nr_blocks(era);
+-
+-      r = metadata_resize(era->md, &era->nr_blocks);
+-      if (r) {
+-              ti->error = "couldn't resize metadata";
+-              era_destroy(era);
+-              return -ENOMEM;
+-      }
+-
+       era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+       if (!era->wq) {
+               ti->error = "could not create workqueue for metadata object";
+@@ -1586,9 +1577,17 @@ static int era_preresume(struct dm_targe
+       dm_block_t new_size = calc_nr_blocks(era);
+       if (era->nr_blocks != new_size) {
+-              r = in_worker1(era, metadata_resize, &new_size);
+-              if (r)
++              r = metadata_resize(era->md, &new_size);
++              if (r) {
++                      DMERR("%s: metadata_resize failed", __func__);
++                      return r;
++              }
++
++              r = metadata_commit(era->md);
++              if (r) {
++                      DMERR("%s: metadata_commit failed", __func__);
+                       return r;
++              }
+               era->nr_blocks = new_size;
+       }
diff --git a/queue-5.4/dm-era-recover-committed-writeset-after-crash.patch b/queue-5.4/dm-era-recover-committed-writeset-after-crash.patch
new file mode 100644 (file)
index 0000000..687594d
--- /dev/null
@@ -0,0 +1,125 @@
+From de89afc1e40fdfa5f8b666e5d07c43d21a1d3be0 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 22 Jan 2021 17:19:30 +0200
+Subject: dm era: Recover committed writeset after crash
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit de89afc1e40fdfa5f8b666e5d07c43d21a1d3be0 upstream.
+
+Following a system crash, dm-era fails to recover the committed writeset
+for the current era, leading to lost writes. That is, we lose the
+information about what blocks were written during the affected era.
+
+dm-era assumes that the writeset of the current era is archived when the
+device is suspended. So, when resuming the device, it just moves on to
+the next era, ignoring the committed writeset.
+
+This assumption holds when the device is properly shut down. But, when
+the system crashes, the code that suspends the target never runs, so the
+writeset for the current era is not archived.
+
+There are three issues that cause the committed writeset to get lost:
+
+1. dm-era doesn't load the committed writeset when opening the metadata
+2. The code that resizes the metadata wipes the information about the
+   committed writeset (assuming it was loaded at step 1)
+3. era_preresume() starts a new era, without taking into account that
+   the current era might not have been archived, due to a system crash.
+
+To fix this:
+
+1. Load the committed writeset when opening the metadata
+2. Fix the code that resizes the metadata to make sure it doesn't wipe
+   the loaded writeset
+3. Fix era_preresume() to check for a loaded writeset and archive it,
+   before starting a new era.
+
+Fixes: eec40579d84873 ("dm: add era target")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-era-target.c |   17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -71,8 +71,6 @@ static size_t bitset_size(unsigned nr_bi
+  */
+ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ {
+-      ws->md.nr_bits = nr_blocks;
+-      ws->md.root = INVALID_WRITESET_ROOT;
+       ws->bits = vzalloc(bitset_size(nr_blocks));
+       if (!ws->bits) {
+               DMERR("%s: couldn't allocate in memory bitset", __func__);
+@@ -85,12 +83,14 @@ static int writeset_alloc(struct writese
+ /*
+  * Wipes the in-core bitset, and creates a new on disk bitset.
+  */
+-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
++static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
++                       dm_block_t nr_blocks)
+ {
+       int r;
+-      memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
++      memset(ws->bits, 0, bitset_size(nr_blocks));
++      ws->md.nr_bits = nr_blocks;
+       r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
+       if (r) {
+               DMERR("%s: setup_on_disk_bitset failed", __func__);
+@@ -579,6 +579,7 @@ static int open_metadata(struct era_meta
+       md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+       md->current_era = le32_to_cpu(disk->current_era);
++      ws_unpack(&disk->current_writeset, &md->current_writeset->md);
+       md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
+       md->era_array_root = le64_to_cpu(disk->era_array_root);
+       md->metadata_snap = le64_to_cpu(disk->metadata_snap);
+@@ -870,7 +871,6 @@ static int metadata_era_archive(struct e
+       }
+       ws_pack(&md->current_writeset->md, &value);
+-      md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+       keys[0] = md->current_era;
+       __dm_bless_for_disk(&value);
+@@ -882,6 +882,7 @@ static int metadata_era_archive(struct e
+               return r;
+       }
++      md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+       md->archived_writesets = true;
+       return 0;
+@@ -898,7 +899,7 @@ static int metadata_new_era(struct era_m
+       int r;
+       struct writeset *new_writeset = next_writeset(md);
+-      r = writeset_init(&md->bitset_info, new_writeset);
++      r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
+       if (r) {
+               DMERR("%s: writeset_init failed", __func__);
+               return r;
+@@ -951,7 +952,7 @@ static int metadata_commit(struct era_me
+       int r;
+       struct dm_block *sblock;
+-      if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
++      if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
+               r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+                                   &md->current_writeset->md.root);
+               if (r) {
+@@ -1580,7 +1581,7 @@ static int era_preresume(struct dm_targe
+       start_worker(era);
+-      r = in_worker0(era, metadata_new_era);
++      r = in_worker0(era, metadata_era_rollover);
+       if (r) {
+               DMERR("%s: metadata_era_rollover failed", __func__);
+               return r;
diff --git a/queue-5.4/dm-era-reinitialize-bitset-cache-before-digesting-a-new-writeset.patch b/queue-5.4/dm-era-reinitialize-bitset-cache-before-digesting-a-new-writeset.patch
new file mode 100644 (file)
index 0000000..dfbdda4
--- /dev/null
@@ -0,0 +1,80 @@
+From 2524933307fd0036d5c32357c693c021ab09a0b0 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 22 Jan 2021 17:22:04 +0200
+Subject: dm era: Reinitialize bitset cache before digesting a new writeset
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 2524933307fd0036d5c32357c693c021ab09a0b0 upstream.
+
+In case of devices with at most 64 blocks, the digestion of consecutive
+eras uses the writeset of the first era as the writeset of all eras to
+digest, leading to lost writes. That is, we lose the information about
+what blocks were written during the affected eras.
+
+The digestion code uses a dm_disk_bitset object to access the archived
+writesets. This structure includes a one word (64-bit) cache to reduce
+the number of array lookups.
+
+This structure is initialized only once, in metadata_digest_start(),
+when we kick off digestion.
+
+But, when we insert a new writeset into the writeset tree, before the
+digestion of the previous writeset is done, or equivalently when there
+are multiple writesets in the writeset tree to digest, then all these
+writesets are digested using the same cache and the cache is not
+re-initialized when moving from one writeset to the next.
+
+For devices with more than 64 blocks, i.e., the size of the cache, the
+cache is indirectly invalidated when we move to a next set of blocks, so
+we avoid the bug.
+
+But for devices with at most 64 blocks we end up using the same cached
+data for digesting all archived writesets, i.e., the cache is loaded
+when digesting the first writeset and it never gets reloaded, until the
+digestion is done.
+
+As a result, the writeset of the first era to digest is used as the
+writeset of all the following archived eras, leading to lost writes.
+
+Fix this by reinitializing the dm_disk_bitset structure, and thus
+invalidating the cache, every time the digestion code starts digesting a
+new writeset.
+
+Fixes: eec40579d84873 ("dm: add era target")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-era-target.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -756,6 +756,12 @@ static int metadata_digest_lookup_writes
+       ws_unpack(&disk, &d->writeset);
+       d->value = cpu_to_le32(key);
++      /*
++       * We initialise another bitset info to avoid any caching side effects
++       * with the previous one.
++       */
++      dm_disk_bitset_init(md->tm, &d->info);
++
+       d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
+       d->current_bit = 0;
+       d->step = metadata_digest_transcribe_writeset;
+@@ -769,12 +775,6 @@ static int metadata_digest_start(struct
+               return 0;
+       memset(d, 0, sizeof(*d));
+-
+-      /*
+-       * We initialise another bitset info to avoid any caching side
+-       * effects with the previous one.
+-       */
+-      dm_disk_bitset_init(md->tm, &d->info);
+       d->step = metadata_digest_lookup_writeset;
+       return 0;
diff --git a/queue-5.4/dm-era-use-correct-value-size-in-equality-function-of-writeset-tree.patch b/queue-5.4/dm-era-use-correct-value-size-in-equality-function-of-writeset-tree.patch
new file mode 100644 (file)
index 0000000..12ec7d9
--- /dev/null
@@ -0,0 +1,33 @@
+From 64f2d15afe7b336aafebdcd14cc835ecf856df4b Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 22 Jan 2021 17:25:55 +0200
+Subject: dm era: Use correct value size in equality function of writeset tree
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 64f2d15afe7b336aafebdcd14cc835ecf856df4b upstream.
+
+Fix the writeset tree equality test function to use the right value size
+when comparing two btree values.
+
+Fixes: eec40579d84873 ("dm: add era target")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Reviewed-by: Ming-Hung Tsai <mtsai@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-era-target.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -389,7 +389,7 @@ static void ws_dec(void *context, const
+ static int ws_eq(void *context, const void *value1, const void *value2)
+ {
+-      return !memcmp(value1, value2, sizeof(struct writeset_metadata));
++      return !memcmp(value1, value2, sizeof(struct writeset_disk));
+ }
+ /*----------------------------------------------------------------*/
diff --git a/queue-5.4/dm-era-verify-the-data-block-size-hasn-t-changed.patch b/queue-5.4/dm-era-verify-the-data-block-size-hasn-t-changed.patch
new file mode 100644 (file)
index 0000000..a086475
--- /dev/null
@@ -0,0 +1,49 @@
+From c8e846ff93d5eaa5384f6f325a1687ac5921aade Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 22 Jan 2021 17:25:53 +0200
+Subject: dm era: Verify the data block size hasn't changed
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit c8e846ff93d5eaa5384f6f325a1687ac5921aade upstream.
+
+dm-era doesn't support changing the data block size of existing devices,
+so check explicitly that the requested block size for a new target
+matches the one stored in the metadata.
+
+Fixes: eec40579d84873 ("dm: add era target")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Reviewed-by: Ming-Hung Tsai <mtsai@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-era-target.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -564,6 +564,15 @@ static int open_metadata(struct era_meta
+       }
+       disk = dm_block_data(sblock);
++
++      /* Verify the data block size hasn't changed */
++      if (le32_to_cpu(disk->data_block_size) != md->block_size) {
++              DMERR("changing the data block size (from %u to %llu) is not supported",
++                    le32_to_cpu(disk->data_block_size), md->block_size);
++              r = -EINVAL;
++              goto bad;
++      }
++
+       r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
+                              disk->metadata_space_map_root,
+                              sizeof(disk->metadata_space_map_root),
+@@ -575,7 +584,6 @@ static int open_metadata(struct era_meta
+       setup_infos(md);
+-      md->block_size = le32_to_cpu(disk->data_block_size);
+       md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+       md->current_era = le32_to_cpu(disk->current_era);
diff --git a/queue-5.4/dm-fix-deadlock-when-swapping-to-encrypted-device.patch b/queue-5.4/dm-fix-deadlock-when-swapping-to-encrypted-device.patch
new file mode 100644 (file)
index 0000000..b8a8a08
--- /dev/null
@@ -0,0 +1,201 @@
+From a666e5c05e7c4aaabb2c5d58117b0946803d03d2 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 10 Feb 2021 15:26:23 -0500
+Subject: dm: fix deadlock when swapping to encrypted device
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit a666e5c05e7c4aaabb2c5d58117b0946803d03d2 upstream.
+
+The system would deadlock when swapping to a dm-crypt device. The reason
+is that for each incoming write bio, dm-crypt allocates memory that holds
+encrypted data. These excessive allocations exhaust all the memory and the
+result is either deadlock or OOM trigger.
+
+This patch limits the number of in-flight swap bios, so that the memory
+consumed by dm-crypt is limited. The limit is enforced if the target set
+the "limit_swap_bios" variable and if the bio has REQ_SWAP set.
+
+Non-swap bios are not affected becuase taking the semaphore would cause
+performance degradation.
+
+This is similar to request-based drivers - they will also block when the
+number of requests is over the limit.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-core.h          |    4 ++
+ drivers/md/dm-crypt.c         |    1 
+ drivers/md/dm.c               |   60 ++++++++++++++++++++++++++++++++++++++++++
+ include/linux/device-mapper.h |    5 +++
+ 4 files changed, 70 insertions(+)
+
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -106,6 +106,10 @@ struct mapped_device {
+       struct block_device *bdev;
++      int swap_bios;
++      struct semaphore swap_bios_semaphore;
++      struct mutex swap_bios_lock;
++
+       struct dm_stats stats;
+       /* for blk-mq request-based DM support */
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2737,6 +2737,7 @@ static int crypt_ctr(struct dm_target *t
+       wake_up_process(cc->write_thread);
+       ti->num_flush_bios = 1;
++      ti->limit_swap_bios = true;
+       return 0;
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -146,6 +146,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_
+ #define DM_NUMA_NODE NUMA_NO_NODE
+ static int dm_numa_node = DM_NUMA_NODE;
++#define DEFAULT_SWAP_BIOS     (8 * 1048576 / PAGE_SIZE)
++static int swap_bios = DEFAULT_SWAP_BIOS;
++static int get_swap_bios(void)
++{
++      int latch = READ_ONCE(swap_bios);
++      if (unlikely(latch <= 0))
++              latch = DEFAULT_SWAP_BIOS;
++      return latch;
++}
++
+ /*
+  * For mempools pre-allocation at the table loading time.
+  */
+@@ -972,6 +982,11 @@ void disable_write_zeroes(struct mapped_
+       limits->max_write_zeroes_sectors = 0;
+ }
++static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
++{
++      return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
++}
++
+ static void clone_endio(struct bio *bio)
+ {
+       blk_status_t error = bio->bi_status;
+@@ -1009,6 +1024,11 @@ static void clone_endio(struct bio *bio)
+               }
+       }
++      if (unlikely(swap_bios_limit(tio->ti, bio))) {
++              struct mapped_device *md = io->md;
++              up(&md->swap_bios_semaphore);
++      }
++
+       free_tio(tio);
+       dec_pending(io, error);
+ }
+@@ -1263,6 +1283,22 @@ void dm_remap_zone_report(struct dm_targ
+ }
+ EXPORT_SYMBOL_GPL(dm_remap_zone_report);
++static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
++{
++      mutex_lock(&md->swap_bios_lock);
++      while (latch < md->swap_bios) {
++              cond_resched();
++              down(&md->swap_bios_semaphore);
++              md->swap_bios--;
++      }
++      while (latch > md->swap_bios) {
++              cond_resched();
++              up(&md->swap_bios_semaphore);
++              md->swap_bios++;
++      }
++      mutex_unlock(&md->swap_bios_lock);
++}
++
+ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ {
+       int r;
+@@ -1283,6 +1319,14 @@ static blk_qc_t __map_bio(struct dm_targ
+       atomic_inc(&io->io_count);
+       sector = clone->bi_iter.bi_sector;
++      if (unlikely(swap_bios_limit(ti, clone))) {
++              struct mapped_device *md = io->md;
++              int latch = get_swap_bios();
++              if (unlikely(latch != md->swap_bios))
++                      __set_swap_bios_limit(md, latch);
++              down(&md->swap_bios_semaphore);
++      }
++
+       r = ti->type->map(ti, clone);
+       switch (r) {
+       case DM_MAPIO_SUBMITTED:
+@@ -1297,10 +1341,18 @@ static blk_qc_t __map_bio(struct dm_targ
+                       ret = generic_make_request(clone);
+               break;
+       case DM_MAPIO_KILL:
++              if (unlikely(swap_bios_limit(ti, clone))) {
++                      struct mapped_device *md = io->md;
++                      up(&md->swap_bios_semaphore);
++              }
+               free_tio(tio);
+               dec_pending(io, BLK_STS_IOERR);
+               break;
+       case DM_MAPIO_REQUEUE:
++              if (unlikely(swap_bios_limit(ti, clone))) {
++                      struct mapped_device *md = io->md;
++                      up(&md->swap_bios_semaphore);
++              }
+               free_tio(tio);
+               dec_pending(io, BLK_STS_DM_REQUEUE);
+               break;
+@@ -1894,6 +1946,7 @@ static void cleanup_mapped_device(struct
+       mutex_destroy(&md->suspend_lock);
+       mutex_destroy(&md->type_lock);
+       mutex_destroy(&md->table_devices_lock);
++      mutex_destroy(&md->swap_bios_lock);
+       dm_mq_cleanup_mapped_device(md);
+ }
+@@ -1963,6 +2016,10 @@ static struct mapped_device *alloc_dev(i
+       init_waitqueue_head(&md->eventq);
+       init_completion(&md->kobj_holder.completion);
++      md->swap_bios = get_swap_bios();
++      sema_init(&md->swap_bios_semaphore, md->swap_bios);
++      mutex_init(&md->swap_bios_lock);
++
+       md->disk->major = _major;
+       md->disk->first_minor = minor;
+       md->disk->fops = &dm_blk_dops;
+@@ -3245,6 +3302,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios,
+ module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
++module_param(swap_bios, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
++
+ MODULE_DESCRIPTION(DM_NAME " driver");
+ MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+ MODULE_LICENSE("GPL");
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -316,6 +316,11 @@ struct dm_target {
+        * whether or not its underlying devices have support.
+        */
+       bool discards_supported:1;
++
++      /*
++       * Set if we need to limit the number of in-flight bios when swapping.
++       */
++      bool limit_swap_bios:1;
+ };
+ /* Each target can link one of these into the table */
diff --git a/queue-5.4/dm-writecache-fix-writing-beyond-end-of-underlying-device-when-shrinking.patch b/queue-5.4/dm-writecache-fix-writing-beyond-end-of-underlying-device-when-shrinking.patch
new file mode 100644 (file)
index 0000000..08f3f8d
--- /dev/null
@@ -0,0 +1,78 @@
+From 4134455f2aafdfeab50cabb4cccb35e916034b93 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 9 Feb 2021 10:56:20 -0500
+Subject: dm writecache: fix writing beyond end of underlying device when shrinking
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 4134455f2aafdfeab50cabb4cccb35e916034b93 upstream.
+
+Do not attempt to write any data beyond the end of the underlying data
+device while shrinking it.
+
+The DM writecache device must be suspended when the underlying data
+device is shrunk.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-writecache.c |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -142,6 +142,7 @@ struct dm_writecache {
+       size_t metadata_sectors;
+       size_t n_blocks;
+       uint64_t seq_count;
++      sector_t data_device_sectors;
+       void *block_start;
+       struct wc_entry *entries;
+       unsigned block_size;
+@@ -918,6 +919,8 @@ static void writecache_resume(struct dm_
+       wc_lock(wc);
++      wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
++
+       if (WC_MODE_PMEM(wc)) {
+               persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+       } else {
+@@ -1488,6 +1491,10 @@ static bool wc_add_block(struct writebac
+       void *address = memory_data(wc, e);
+       persistent_memory_flush_cache(address, block_size);
++
++      if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
++              return true;
++
+       return bio_add_page(&wb->bio, persistent_memory_page(address),
+                           block_size, persistent_memory_page_offset(address)) != 0;
+ }
+@@ -1559,6 +1566,9 @@ static void __writecache_writeback_pmem(
+               if (writecache_has_error(wc)) {
+                       bio->bi_status = BLK_STS_IOERR;
+                       bio_endio(bio);
++              } else if (unlikely(!bio_sectors(bio))) {
++                      bio->bi_status = BLK_STS_OK;
++                      bio_endio(bio);
+               } else {
+                       submit_bio(bio);
+               }
+@@ -1602,6 +1612,14 @@ static void __writecache_writeback_ssd(s
+                       e = f;
+               }
++              if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
++                      if (to.sector >= wc->data_device_sectors) {
++                              writecache_copy_endio(0, 0, c);
++                              continue;
++                      }
++                      from.count = to.count = wc->data_device_sectors - to.sector;
++              }
++
+               dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
+               __writeback_throttle(wc, wbl);
diff --git a/queue-5.4/f2fs-fix-out-of-repair-__setattr_copy.patch b/queue-5.4/f2fs-fix-out-of-repair-__setattr_copy.patch
new file mode 100644 (file)
index 0000000..9efa939
--- /dev/null
@@ -0,0 +1,36 @@
+From 2562515f0ad7342bde6456602c491b64c63fe950 Mon Sep 17 00:00:00 2001
+From: Chao Yu <yuchao0@huawei.com>
+Date: Wed, 16 Dec 2020 17:15:23 +0800
+Subject: f2fs: fix out-of-repair __setattr_copy()
+
+From: Chao Yu <yuchao0@huawei.com>
+
+commit 2562515f0ad7342bde6456602c491b64c63fe950 upstream.
+
+__setattr_copy() was copied from setattr_copy() in fs/attr.c, there is
+two missing patches doesn't cover this inner function, fix it.
+
+Commit 7fa294c8991c ("userns: Allow chown and setgid preservation")
+Commit 23adbe12ef7d ("fs,userns: Change inode_capable to capable_wrt_inode_uidgid")
+
+Fixes: fbfa2cc58d53 ("f2fs: add file operations")
+Cc: stable@vger.kernel.org
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/file.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -765,7 +765,8 @@ static void __setattr_copy(struct inode
+       if (ia_valid & ATTR_MODE) {
+               umode_t mode = attr->ia_mode;
+-              if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
++              if (!in_group_p(inode->i_gid) &&
++                      !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+                       mode &= ~S_ISGID;
+               set_acl_inode(inode, mode);
+       }
diff --git a/queue-5.4/gfs2-don-t-skip-dlm-unlock-if-glock-has-an-lvb.patch b/queue-5.4/gfs2-don-t-skip-dlm-unlock-if-glock-has-an-lvb.patch
new file mode 100644 (file)
index 0000000..94536c3
--- /dev/null
@@ -0,0 +1,65 @@
+From 78178ca844f0eb88f21f31c7fde969384be4c901 Mon Sep 17 00:00:00 2001
+From: Bob Peterson <rpeterso@redhat.com>
+Date: Fri, 5 Feb 2021 13:50:41 -0500
+Subject: gfs2: Don't skip dlm unlock if glock has an lvb
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+commit 78178ca844f0eb88f21f31c7fde969384be4c901 upstream.
+
+Patch fb6791d100d1 was designed to allow gfs2 to unmount quicker by
+skipping the step where it tells dlm to unlock glocks in EX with lvbs.
+This was done because when gfs2 unmounts a file system, it destroys the
+dlm lockspace shortly after it destroys the glocks so it doesn't need to
+unlock them all: the unlock is implied when the lockspace is destroyed
+by dlm.
+
+However, that patch introduced a use-after-free in dlm: as part of its
+normal dlm_recoverd process, it can call ls_recovery to recover dead
+locks. In so doing, it can call recover_rsbs which calls recover_lvb for
+any mastered rsbs. Func recover_lvb runs through the list of lkbs queued
+to the given rsb (if the glock is cached but unlocked, it will still be
+queued to the lkb, but in NL--Unlocked--mode) and if it has an lvb,
+copies it to the rsb, thus trying to preserve the lkb. However, when
+gfs2 skips the dlm unlock step, it frees the glock and its lvb, which
+means dlm's function recover_lvb references the now freed lvb pointer,
+copying the freed lvb memory to the rsb.
+
+This patch changes the check in gdlm_put_lock so that it calls
+dlm_unlock for all glocks that contain an lvb pointer.
+
+Fixes: fb6791d100d1 ("GFS2: skip dlm_unlock calls in unmount")
+Cc: stable@vger.kernel.org # v3.8+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/lock_dlm.c |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -280,7 +280,6 @@ static void gdlm_put_lock(struct gfs2_gl
+ {
+       struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+       struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+-      int lvb_needs_unlock = 0;
+       int error;
+       if (gl->gl_lksb.sb_lkid == 0) {
+@@ -293,13 +292,10 @@ static void gdlm_put_lock(struct gfs2_gl
+       gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
+       gfs2_update_request_times(gl);
+-      /* don't want to skip dlm_unlock writing the lvb when lock is ex */
+-
+-      if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
+-              lvb_needs_unlock = 1;
++      /* don't want to skip dlm_unlock writing the lvb when lock has one */
+       if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+-          !lvb_needs_unlock) {
++          !gl->gl_lksb.sb_lvbptr) {
+               gfs2_glock_free(gl);
+               return;
+       }
diff --git a/queue-5.4/gfs2-recursive-gfs2_quota_hold-in-gfs2_iomap_end.patch b/queue-5.4/gfs2-recursive-gfs2_quota_hold-in-gfs2_iomap_end.patch
new file mode 100644 (file)
index 0000000..a8b5f3b
--- /dev/null
@@ -0,0 +1,46 @@
+From 7009fa9cd9a5262944b30eb7efb1f0561d074b68 Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Tue, 9 Feb 2021 18:32:32 +0100
+Subject: gfs2: Recursive gfs2_quota_hold in gfs2_iomap_end
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit 7009fa9cd9a5262944b30eb7efb1f0561d074b68 upstream.
+
+When starting an iomap write, gfs2_quota_lock_check -> gfs2_quota_lock
+-> gfs2_quota_hold is called from gfs2_iomap_begin.  At the end of the
+write, before unlocking the quotas, punch_hole -> gfs2_quota_hold can be
+called again in gfs2_iomap_end, which is incorrect and leads to a failed
+assertion.  Instead, move the call to gfs2_quota_unlock before the call
+to punch_hole to fix that.
+
+Fixes: 64bc06bb32ee ("gfs2: iomap buffered write support")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/bmap.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1228,6 +1228,9 @@ static int gfs2_iomap_end(struct inode *
+       gfs2_inplace_release(ip);
++      if (ip->i_qadata && ip->i_qadata->qa_qd_num)
++              gfs2_quota_unlock(ip);
++
+       if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+               /* Deallocate blocks that were just allocated. */
+               loff_t blockmask = i_blocksize(inode) - 1;
+@@ -1240,9 +1243,6 @@ static int gfs2_iomap_end(struct inode *
+               }
+       }
+-      if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+-              gfs2_quota_unlock(ip);
+-
+       if (unlikely(!written))
+               goto out_unlock;
diff --git a/queue-5.4/s390-vtime-fix-inline-assembly-clobber-list.patch b/queue-5.4/s390-vtime-fix-inline-assembly-clobber-list.patch
new file mode 100644 (file)
index 0000000..c68e57b
--- /dev/null
@@ -0,0 +1,38 @@
+From b29c5093820d333eef22f58cd04ec0d089059c39 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Tue, 2 Feb 2021 16:45:37 +0100
+Subject: s390/vtime: fix inline assembly clobber list
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit b29c5093820d333eef22f58cd04ec0d089059c39 upstream.
+
+The stck/stckf instruction used within the inline assembly within
+do_account_vtime() changes the condition code. This is not reflected
+with the clobber list, and therefore might result in incorrect code
+generation.
+
+It seems unlikely that the compiler could generate incorrect code
+considering the surrounding C code, but it must still be fixed.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/vtime.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -136,7 +136,8 @@ static int do_account_vtime(struct task_
+               "       stck    %1"     /* Store current tod clock value */
+ #endif
+               : "=Q" (S390_lowcore.last_update_timer),
+-                "=Q" (S390_lowcore.last_update_clock));
++                "=Q" (S390_lowcore.last_update_clock)
++              : : "cc");
+       clock = S390_lowcore.last_update_clock - clock;
+       timer -= S390_lowcore.last_update_timer;
index 0d18fbafa61af28aa37ab51fd66429c76f7f70aa..237a4fcbc949fcf4deccea45734cef8efc3c3d7b 100644 (file)
@@ -310,3 +310,20 @@ powerpc-32s-add-missing-call-to-kuep_lock-on-syscall-entry.patch
 spmi-spmi-pmic-arb-fix-hw_irq-overflow.patch
 gpio-pcf857x-fix-missing-first-interrupt.patch
 printk-fix-deadlock-when-kernel-panic.patch
+cpufreq-intel_pstate-get-per-cpu-max-freq-via-msr_hwp_capabilities-if-available.patch
+s390-vtime-fix-inline-assembly-clobber-list.patch
+virtio-s390-implement-virtio-ccw-revision-2-correctly.patch
+um-mm-check-more-comprehensively-for-stub-changes.patch
+f2fs-fix-out-of-repair-__setattr_copy.patch
+sparc32-fix-a-user-triggerable-oops-in-clear_user.patch
+spi-spi-synquacer-fix-set_cs-handling.patch
+gfs2-don-t-skip-dlm-unlock-if-glock-has-an-lvb.patch
+gfs2-recursive-gfs2_quota_hold-in-gfs2_iomap_end.patch
+dm-fix-deadlock-when-swapping-to-encrypted-device.patch
+dm-writecache-fix-writing-beyond-end-of-underlying-device-when-shrinking.patch
+dm-era-recover-committed-writeset-after-crash.patch
+dm-era-verify-the-data-block-size-hasn-t-changed.patch
+dm-era-fix-bitset-memory-leaks.patch
+dm-era-use-correct-value-size-in-equality-function-of-writeset-tree.patch
+dm-era-reinitialize-bitset-cache-before-digesting-a-new-writeset.patch
+dm-era-only-resize-metadata-in-preresume.patch
diff --git a/queue-5.4/sparc32-fix-a-user-triggerable-oops-in-clear_user.patch b/queue-5.4/sparc32-fix-a-user-triggerable-oops-in-clear_user.patch
new file mode 100644 (file)
index 0000000..780001e
--- /dev/null
@@ -0,0 +1,50 @@
+From 7780918b36489f0b2f9a3749d7be00c2ceaec513 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Mon, 20 Jul 2020 02:21:51 +0100
+Subject: sparc32: fix a user-triggerable oops in clear_user()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 7780918b36489f0b2f9a3749d7be00c2ceaec513 upstream.
+
+Back in 2.1.29 the clear_user() guts (__bzero()) had been merged
+with memset().  Unfortunately, while all exception handlers had been
+copied, one of the exception table entries got lost.  As the result,
+clear_user() starting at 128*n bytes before the end of page and
+spanning between 8 and 127 bytes into the next page would oops when
+the second page is unmapped.  It's trivial to reproduce - all
+it takes is
+
+main()
+{
+       int fd = open("/dev/zero", O_RDONLY);
+       char *p = mmap(NULL, 16384, PROT_READ|PROT_WRITE,
+                       MAP_PRIVATE|MAP_ANON, -1, 0);
+       munmap(p + 8192, 8192);
+       read(fd, p + 8192 - 128, 192);
+}
+
+which had been oopsing since March 1997.  Says something about
+the quality of test coverage... ;-/  And while today sparc32 port
+is nearly dead, back in '97 it had been very much alive; in fact,
+sparc64 had only been in mainline for 3 months by that point...
+
+Cc: stable@kernel.org
+Fixes: v2.1.29
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/lib/memset.S |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -142,6 +142,7 @@ __bzero:
+       ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
+       ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
+ 13:
++      EXT(12b, 13b, 21f)
+       be      8f
+        andcc  %o1, 4, %g0
diff --git a/queue-5.4/spi-spi-synquacer-fix-set_cs-handling.patch b/queue-5.4/spi-spi-synquacer-fix-set_cs-handling.patch
new file mode 100644 (file)
index 0000000..5b22130
--- /dev/null
@@ -0,0 +1,36 @@
+From 1c9f1750f0305bf605ff22686fc0ac89c06deb28 Mon Sep 17 00:00:00 2001
+From: Masahisa Kojima <masahisa.kojima@linaro.org>
+Date: Mon, 1 Feb 2021 01:31:09 -0600
+Subject: spi: spi-synquacer: fix set_cs handling
+
+From: Masahisa Kojima <masahisa.kojima@linaro.org>
+
+commit 1c9f1750f0305bf605ff22686fc0ac89c06deb28 upstream.
+
+When the slave chip select is deasserted, DMSTOP bit
+must be set.
+
+Fixes: b0823ee35cf9 ("spi: Add spi driver for Socionext SynQuacer platform")
+Signed-off-by: Masahisa Kojima <masahisa.kojima@linaro.org>
+Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210201073109.9036-1-jassisinghbrar@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-synquacer.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/spi/spi-synquacer.c
++++ b/drivers/spi/spi-synquacer.c
+@@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct
+       val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
+                SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
+       val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
++
++      if (!enable)
++              val |= SYNQUACER_HSSPI_DMSTOP_STOP;
++
+       writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ }
diff --git a/queue-5.4/um-mm-check-more-comprehensively-for-stub-changes.patch b/queue-5.4/um-mm-check-more-comprehensively-for-stub-changes.patch
new file mode 100644 (file)
index 0000000..018f95d
--- /dev/null
@@ -0,0 +1,71 @@
+From 47da29763ec9a153b9b685bff9db659e4e09e494 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Wed, 13 Jan 2021 22:08:02 +0100
+Subject: um: mm: check more comprehensively for stub changes
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 47da29763ec9a153b9b685bff9db659e4e09e494 upstream.
+
+If userspace tries to change the stub, we need to kill it,
+because otherwise it can escape the virtual machine. In a
+few cases the stub checks weren't good, e.g. if userspace
+just tries to
+
+       mmap(0x100000 - 0x1000, 0x3000, ...)
+
+it could succeed to get a new private/anonymous mapping
+replacing the stubs. Fix this by checking everywhere, and
+checking for _overlap_, not just direct changes.
+
+Cc: stable@vger.kernel.org
+Fixes: 3963333fe676 ("uml: cover stubs with a VMA")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/kernel/tlb.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/um/kernel/tlb.c
++++ b/arch/um/kernel/tlb.c
+@@ -126,6 +126,9 @@ static int add_mmap(unsigned long virt,
+       struct host_vm_op *last;
+       int fd = -1, ret = 0;
++      if (virt + len > STUB_START && virt < STUB_END)
++              return -EINVAL;
++
+       if (hvc->userspace)
+               fd = phys_mapping(phys, &offset);
+       else
+@@ -163,7 +166,7 @@ static int add_munmap(unsigned long addr
+       struct host_vm_op *last;
+       int ret = 0;
+-      if ((addr >= STUB_START) && (addr < STUB_END))
++      if (addr + len > STUB_START && addr < STUB_END)
+               return -EINVAL;
+       if (hvc->index != 0) {
+@@ -193,6 +196,9 @@ static int add_mprotect(unsigned long ad
+       struct host_vm_op *last;
+       int ret = 0;
++      if (addr + len > STUB_START && addr < STUB_END)
++              return -EINVAL;
++
+       if (hvc->index != 0) {
+               last = &hvc->ops[hvc->index - 1];
+               if ((last->type == MPROTECT) &&
+@@ -433,6 +439,10 @@ void flush_tlb_page(struct vm_area_struc
+       struct mm_id *mm_id;
+       address &= PAGE_MASK;
++
++      if (address >= STUB_START && address < STUB_END)
++              goto kill;
++
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
+               goto kill;
diff --git a/queue-5.4/virtio-s390-implement-virtio-ccw-revision-2-correctly.patch b/queue-5.4/virtio-s390-implement-virtio-ccw-revision-2-correctly.patch
new file mode 100644 (file)
index 0000000..d2c2e21
--- /dev/null
@@ -0,0 +1,59 @@
+From 182f709c5cff683e6732d04c78e328de0532284f Mon Sep 17 00:00:00 2001
+From: Cornelia Huck <cohuck@redhat.com>
+Date: Tue, 16 Feb 2021 12:06:45 +0100
+Subject: virtio/s390: implement virtio-ccw revision 2 correctly
+
+From: Cornelia Huck <cohuck@redhat.com>
+
+commit 182f709c5cff683e6732d04c78e328de0532284f upstream.
+
+CCW_CMD_READ_STATUS was introduced with revision 2 of virtio-ccw,
+and drivers should only rely on it being implemented when they
+negotiated at least that revision with the device.
+
+However, virtio_ccw_get_status() issued READ_STATUS for any
+device operating at least at revision 1. If the device accepts
+READ_STATUS regardless of the negotiated revision (which some
+implementations like QEMU do, even though the spec currently does
+not allow it), everything works as intended. While a device
+rejecting the command should also be handled gracefully, we will
+not be able to see any changes the device makes to the status,
+such as setting NEEDS_RESET or setting the status to zero after
+a completed reset.
+
+We negotiated the revision to at most 1, as we never bumped the
+maximum revision; let's do that now and properly send READ_STATUS
+only if we are operating at least at revision 2.
+
+Cc: stable@vger.kernel.org
+Fixes: 7d3ce5ab9430 ("virtio/s390: support READ_STATUS command for virtio-ccw")
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Link: https://lore.kernel.org/r/20210216110645.1087321-1-cohuck@redhat.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/virtio/virtio_ccw.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -117,7 +117,7 @@ struct virtio_rev_info {
+ };
+ /* the highest virtio-ccw revision we support */
+-#define VIRTIO_CCW_REV_MAX 1
++#define VIRTIO_CCW_REV_MAX 2
+ struct virtio_ccw_vq_info {
+       struct virtqueue *vq;
+@@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct v
+       u8 old_status = vcdev->dma_area->status;
+       struct ccw1 *ccw;
+-      if (vcdev->revision < 1)
++      if (vcdev->revision < 2)
+               return vcdev->dma_area->status;
+       ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));