5.10-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Tue, 1 Aug 2023 06:27:36 +0000 (08:27 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Tue, 1 Aug 2023 06:27:36 +0000 (08:27 +0200)
added patches:
ceph-never-send-metrics-if-disable_send_metrics-is-set.patch
dm-cache-policy-smq-ensure-io-doesn-t-prevent-cleaner-policy-progress.patch

queue-5.10/ceph-never-send-metrics-if-disable_send_metrics-is-set.patch [new file with mode: 0644]
queue-5.10/dm-cache-policy-smq-ensure-io-doesn-t-prevent-cleaner-policy-progress.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/ceph-never-send-metrics-if-disable_send_metrics-is-set.patch b/queue-5.10/ceph-never-send-metrics-if-disable_send_metrics-is-set.patch
new file mode 100644 (file)
index 0000000..9f9d1af
--- /dev/null
@@ -0,0 +1,34 @@
+From 50164507f6b7b7ed85d8c3ac0266849fbd908db7 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Thu, 20 Jul 2023 11:33:55 +0800
+Subject: ceph: never send metrics if disable_send_metrics is set
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit 50164507f6b7b7ed85d8c3ac0266849fbd908db7 upstream.
+
+Even when 'disable_send_metrics' is true, opening a session will
+always trigger sending the metrics for the first time.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Venky Shankar <vshankar@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/metric.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ceph/metric.c
++++ b/fs/ceph/metric.c
+@@ -130,7 +130,7 @@ static void metric_delayed_work(struct w
+       struct ceph_mds_client *mdsc =
+               container_of(m, struct ceph_mds_client, metric);
+-      if (mdsc->stopping)
++      if (mdsc->stopping || disable_send_metrics)
+               return;
+       if (!m->session || !check_session_state(m->session)) {
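
[Editor's note: for quick reference, the effect of this one-liner can be modeled in plain C. The following is a minimal userspace sketch, not the kernel code: disable_send_metrics mirrors the real ceph module parameter, while should_send_metrics() and its argument are simplified stand-ins for the checks inside metric_delayed_work().]

/* Userspace model of the fixed gate in metric_delayed_work().
 * Compile with any C compiler; types and names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static bool disable_send_metrics;	/* models the ceph module parameter */

static bool should_send_metrics(bool stopping)
{
	/* The one-line fix: bail out when metrics are disabled,
	 * not only when the client is stopping. */
	return !(stopping || disable_send_metrics);
}

int main(void)
{
	disable_send_metrics = true;
	/* Session just opened, client not stopping: before the fix
	 * this first send still happened; now it is suppressed. */
	printf("send first metrics? %d\n", should_send_metrics(false)); /* 0 */
	return 0;
}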
diff --git a/queue-5.10/dm-cache-policy-smq-ensure-io-doesn-t-prevent-cleaner-policy-progress.patch b/queue-5.10/dm-cache-policy-smq-ensure-io-doesn-t-prevent-cleaner-policy-progress.patch
new file mode 100644 (file)
index 0000000..702c16f
--- /dev/null
@@ -0,0 +1,106 @@
+From 1e4ab7b4c881cf26c1c72b3f56519e03475486fb Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Tue, 25 Jul 2023 11:44:41 -0400
+Subject: dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 1e4ab7b4c881cf26c1c72b3f56519e03475486fb upstream.
+
+When using the cleaner policy to decommission the cache, there is
+never any writeback started from the cache, as it is constantly
+delayed by normal I/O keeping the device busy; this meant @idle=false
+was always being passed to clean_target_met().
+
+Fix this by adding a specific 'cleaner' flag that is set when the
+cleaner policy is configured. This flag serves to always allow the
+cleaner's writeback work to be queued until the cache is
+decommissioned (even if the cache isn't idle).
+
+Reported-by: David Jeffery <djeffery@redhat.com>
+Fixes: b29d4986d0da ("dm cache: significant rework to leverage dm-bio-prison-v2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-cache-policy-smq.c |   28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/drivers/md/dm-cache-policy-smq.c
++++ b/drivers/md/dm-cache-policy-smq.c
+@@ -854,7 +854,13 @@ struct smq_policy {
+       struct background_tracker *bg_work;
+-      bool migrations_allowed;
++      bool migrations_allowed:1;
++
++      /*
++       * If this is set the policy will try and clean the whole cache
++       * even if the device is not idle.
++       */
++      bool cleaner:1;
+ };
+ /*----------------------------------------------------------------*/
+@@ -1133,7 +1139,7 @@ static bool clean_target_met(struct smq_
+        * Cache entries may not be populated.  So we cannot rely on the
+        * size of the clean queue.
+        */
+-      if (idle) {
++      if (idle || mq->cleaner) {
+               /*
+                * We'd like to clean everything.
+                */
+@@ -1716,11 +1722,9 @@ static void calc_hotspot_params(sector_t
+               *hotspot_block_size /= 2u;
+ }
+-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+-                                          sector_t origin_size,
+-                                          sector_t cache_block_size,
+-                                          bool mimic_mq,
+-                                          bool migrations_allowed)
++static struct dm_cache_policy *
++__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
++           bool mimic_mq, bool migrations_allowed, bool cleaner)
+ {
+       unsigned i;
+       unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+@@ -1807,6 +1811,7 @@ static struct dm_cache_policy *__smq_cre
+               goto bad_btracker;
+       mq->migrations_allowed = migrations_allowed;
++      mq->cleaner = cleaner;
+       return &mq->policy;
+@@ -1830,21 +1835,24 @@ static struct dm_cache_policy *smq_creat
+                                         sector_t origin_size,
+                                         sector_t cache_block_size)
+ {
+-      return __smq_create(cache_size, origin_size, cache_block_size, false, true);
++      return __smq_create(cache_size, origin_size, cache_block_size,
++                          false, true, false);
+ }
+ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+                                        sector_t origin_size,
+                                        sector_t cache_block_size)
+ {
+-      return __smq_create(cache_size, origin_size, cache_block_size, true, true);
++      return __smq_create(cache_size, origin_size, cache_block_size,
++                          true, true, false);
+ }
+ static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
+                                             sector_t origin_size,
+                                             sector_t cache_block_size)
+ {
+-      return __smq_create(cache_size, origin_size, cache_block_size, false, false);
++      return __smq_create(cache_size, origin_size, cache_block_size,
++                          false, false, true);
+ }
+ /*----------------------------------------------------------------*/
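
[Editor's note: the behavioral change boils down to one predicate: a policy created with the new cleaner flag now counts as idle for the purpose of the clean target. Below is a minimal userspace sketch of that predicate; struct smq_policy_model and wants_full_clean() are hypothetical stand-ins for the kernel's struct smq_policy and clean_target_met().]

/* Userspace model of the fixed decision, compilable with any C
 * compiler; only the two flags from the patch are modeled. */
#include <stdbool.h>
#include <stdio.h>

struct smq_policy_model {
	bool migrations_allowed;
	bool cleaner;	/* set only by cleaner_create() */
};

/* Mirrors the fixed test in clean_target_met(): the cleaner policy
 * always targets a fully clean cache, even when ongoing I/O keeps
 * idle == false. */
static bool wants_full_clean(const struct smq_policy_model *mq, bool idle)
{
	return idle || mq->cleaner;
}

int main(void)
{
	struct smq_policy_model cleaner = { .migrations_allowed = false, .cleaner = true };
	struct smq_policy_model smq     = { .migrations_allowed = true,  .cleaner = false };

	/* Busy device (idle == false): before the fix both printed 0,
	 * so the cleaner's writeback was starved indefinitely. */
	printf("cleaner: %d\n", wants_full_clean(&cleaner, false)); /* 1 */
	printf("smq:     %d\n", wants_full_clean(&smq, false));     /* 0 */
	return 0;
}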
diff --git a/queue-5.10/series b/queue-5.10/series
index 2515d3058a714e5fab3b88b995b4f1e99deb2cf8..9a74107e1ce5d9f4a6f7732a3defe6812a51d43d 100644 (file)
@@ -101,3 +101,5 @@ btrfs-check-if-the-transaction-was-aborted-at-btrfs_.patch
 virtio-net-fix-race-between-set-queues-and-probe.patch
 s390-dasd-fix-hanging-device-after-quiesce-resume.patch
 asoc-wm8904-fill-the-cache-for-wm8904_adc_test_0-register.patch
+ceph-never-send-metrics-if-disable_send_metrics-is-set.patch
+dm-cache-policy-smq-ensure-io-doesn-t-prevent-cleaner-policy-progress.patch