git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop a bunch of 6.1 block patches that were causing problems.
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 17 Jul 2023 18:46:02 +0000 (20:46 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 17 Jul 2023 18:46:02 +0000 (20:46 +0200)
queue-6.1/blk-cgroup-don-t-update-io-stat-for-root-cgroup.patch [deleted file]
queue-6.1/blk-cgroup-flush-stats-before-releasing-blkcg_gq.patch [deleted file]
queue-6.1/blk-cgroup-optimize-blkcg_rstat_flush.patch [deleted file]
queue-6.1/blk-cgroup-reinit-blkg_iostat_set-after-clearing-in-blkcg_reset_stats.patch [deleted file]
queue-6.1/blk-throttle-fix-io-statistics-for-cgroup-v1.patch [deleted file]
queue-6.1/block-make-sure-local-irq-is-disabled-when-calling-__blkcg_rstat_flush.patch [deleted file]
queue-6.1/series

diff --git a/queue-6.1/blk-cgroup-don-t-update-io-stat-for-root-cgroup.patch b/queue-6.1/blk-cgroup-don-t-update-io-stat-for-root-cgroup.patch
deleted file mode 100644
index f0b9b50..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-From 527c91479bffb990d54c25cab35fdc3629d3f774 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 2 Feb 2023 10:18:04 +0800
-Subject: blk-cgroup: don't update io stat for root cgroup
-
-From: Ming Lei <ming.lei@redhat.com>
-
-[ Upstream commit 0416f3be58c6b1ea066cd52e354b857693feb01e ]
-
-We source root cgroup stats from the system-wide stats, see blkcg_print_stat
-and blkcg_rstat_flush, so don't update io stats for the root (sketch below).
-
-This fixes a blkg leak introduced in commit 3b8cc6298724 ("blk-cgroup:
-Optimize blkcg_rstat_flush()"): it grabs a blkg reference when adding
-an iostat_cpu to the percpu blkcg list, but for the root cgroup that
-stat is never consumed by blkcg_rstat_flush(), which drops the reference.
-
-Tested-by: Bart Van Assche <bvanassche@acm.org>
-Reported-by: Bart Van Assche <bvanassche@acm.org>
-Fixes: 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()")
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Waiman Long <longman@redhat.com>
-Signed-off-by: Ming Lei <ming.lei@redhat.com>
-Link: https://lore.kernel.org/r/20230202021804.278582-1-ming.lei@redhat.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: ad7c3b41e86b ("blk-throttle: Fix io statistics for cgroup v1")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-cgroup.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
-index 2c7be256ff879..5ee0ae8ddbf6f 100644
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -2008,6 +2008,10 @@ void blk_cgroup_bio_start(struct bio *bio)
-       struct blkg_iostat_set *bis;
-       unsigned long flags;
-+      /* Root-level stats are sourced from system-wide IO stats */
-+      if (!cgroup_parent(blkcg->css.cgroup))
-+              return;
-+
-       cpu = get_cpu();
-       bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
-       flags = u64_stats_update_begin_irqsave(&bis->sync);
--- 
-2.39.2
-
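
A minimal userspace sketch of the split described above: per-cgroup
counters are maintained for children only, while the root's view is
simply the system-wide total, so per-bio accounting can skip the root
entirely. All names here (cg, account_io, read_ios, system_wide_ios)
are illustrative stand-ins, not the kernel's API.

#include <stdio.h>

struct cg {
	struct cg *parent;            /* NULL for the root */
	unsigned long ios;            /* per-cgroup accounting */
};

static unsigned long system_wide_ios; /* what the root reports instead */

static void account_io(struct cg *cg)
{
	system_wide_ios++;            /* always maintained globally */
	if (!cg->parent)              /* root: no per-cgroup update */
		return;
	cg->ios++;
}

static unsigned long read_ios(const struct cg *cg)
{
	return cg->parent ? cg->ios : system_wide_ios;
}

int main(void)
{
	struct cg root = { 0 };
	struct cg child = { .parent = &root };

	account_io(&root);
	account_io(&child);
	printf("root=%lu child=%lu\n", read_ios(&root), read_ios(&child));
	return 0;
}

This is why blk_cgroup_bio_start() can return early for the root:
nothing the root reports depends on its per-cgroup counters.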
diff --git a/queue-6.1/blk-cgroup-flush-stats-before-releasing-blkcg_gq.patch b/queue-6.1/blk-cgroup-flush-stats-before-releasing-blkcg_gq.patch
deleted file mode 100644
index caba5c4..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-From 20cb1c2fb7568a6054c55defe044311397e01ddb Mon Sep 17 00:00:00 2001
-From: Ming Lei <ming.lei@redhat.com>
-Date: Sat, 10 Jun 2023 07:42:49 +0800
-Subject: blk-cgroup: Flush stats before releasing blkcg_gq
-
-From: Ming Lei <ming.lei@redhat.com>
-
-commit 20cb1c2fb7568a6054c55defe044311397e01ddb upstream.
-
-As noted by Michal, the blkg_iostat_set's in the lockless list hold
-references to blkg's to protect against their removal. Those blkg's in
-turn hold a reference to the blkcg. When a cgroup is being destroyed,
-cgroup_rstat_flush() is only called from css_release_work_fn(), which
-runs when the blkcg reference count reaches 0. This circular dependency
-prevents the blkcg and some blkgs from being freed after they are made
-offline.
-
-It is less of a problem if the cgroup being destroyed also has other
-controllers, like memory, that will call cgroup_rstat_flush() and clean
-up the reference counts. But if block is the only controller that uses
-rstat, these offline blkcg's and blkgs may never be freed, leaking more
-and more memory over time.
-
-To prevent this potential memory leak:
-
-- flush the blkcg per-cpu stats lists in __blkg_release(), when no new
-stats can be added (this ordering is sketched after this patch)
-
-- add a global blkg_stat_lock to cover concurrent parent blkg stat
-updates
-
-- don't grab a bio->bi_blkg reference when adding stats to the blkcg's
-per-cpu stat list, since all stats are guaranteed to be consumed before
-the blkg instance is released, and grabbing a blkg reference for stats
-was the most fragile part of the original patch
-
-Based on Waiman's patch:
-
-https://lore.kernel.org/linux-block/20221215033132.230023-3-longman@redhat.com/
-
-Fixes: 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()")
-Cc: stable@vger.kernel.org
-Reported-by: Jay Shin <jaeshin@redhat.com>
-Acked-by: Tejun Heo <tj@kernel.org>
-Cc: Waiman Long <longman@redhat.com>
-Cc: mkoutny@suse.com
-Cc: Yosry Ahmed <yosryahmed@google.com>
-Signed-off-by: Ming Lei <ming.lei@redhat.com>
-Link: https://lore.kernel.org/r/20230609234249.1412858-1-ming.lei@redhat.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- block/blk-cgroup.c |   41 ++++++++++++++++++++++++++++++++---------
- 1 file changed, 32 insertions(+), 9 deletions(-)
-
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -35,6 +35,8 @@
- #include "blk-throttle.h"
- #include "blk-rq-qos.h"
-+static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);
-+
- /*
-  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
-  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
-@@ -58,6 +60,8 @@ static LIST_HEAD(all_blkcgs);                /* protec
- bool blkcg_debug_stats = false;
- static struct workqueue_struct *blkcg_punt_bio_wq;
-+static DEFINE_RAW_SPINLOCK(blkg_stat_lock);
-+
- #define BLKG_DESTROY_BATCH_SIZE  64
- /*
-@@ -153,9 +157,20 @@ static void blkg_free(struct blkcg_gq *b
- static void __blkg_release(struct rcu_head *rcu)
- {
-       struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
-+      struct blkcg *blkcg = blkg->blkcg;
-+      int cpu;
-       WARN_ON(!bio_list_empty(&blkg->async_bios));
-+      /*
-+       * Flush all the non-empty percpu lockless lists before releasing
-+       * us, given these stats belong to us.
-+       *
-+       * blkg_stat_lock serializes blkg stat updates.
-+       */
-+      for_each_possible_cpu(cpu)
-+              __blkcg_rstat_flush(blkcg, cpu);
-+
-       /* release the blkcg and parent blkg refs this blkg has been holding */
-       css_put(&blkg->blkcg->css);
-       if (blkg->parent)
-@@ -866,17 +881,12 @@ static void blkcg_iostat_update(struct b
-       u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
- }
--static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
-+static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
- {
--      struct blkcg *blkcg = css_to_blkcg(css);
-       struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
-       struct llist_node *lnode;
-       struct blkg_iostat_set *bisc, *next_bisc;
--      /* Root-level stats are sourced from system-wide IO stats */
--      if (!cgroup_parent(css->cgroup))
--              return;
--
-       rcu_read_lock();
-       lnode = llist_del_all(lhead);
-@@ -884,6 +894,14 @@ static void blkcg_rstat_flush(struct cgr
-               goto out;
-       /*
-+       * Covers concurrent parent blkg updates from blkg_release().
-+       *
-+       * When flushing from cgroup, cgroup_rstat_lock is always held, so
-+       * this lock won't cause contention most of the time.
-+       */
-+      raw_spin_lock(&blkg_stat_lock);
-+
-+      /*
-        * Iterate only the iostat_cpu's queued in the lockless list.
-        */
-       llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
-@@ -906,13 +924,19 @@ static void blkcg_rstat_flush(struct cgr
-               if (parent && parent->parent)
-                       blkcg_iostat_update(parent, &blkg->iostat.cur,
-                                           &blkg->iostat.last);
--              percpu_ref_put(&blkg->refcnt);
-       }
--
-+      raw_spin_unlock(&blkg_stat_lock);
- out:
-       rcu_read_unlock();
- }
-+static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
-+{
-+      /* Root-level stats are sourced from system-wide IO stats */
-+      if (cgroup_parent(css->cgroup))
-+              __blkcg_rstat_flush(css_to_blkcg(css), cpu);
-+}
-+
- /*
-  * We source root cgroup stats from the system-wide stats to avoid
-  * tracking the same information twice and incurring overhead when no
-@@ -2043,7 +2067,6 @@ void blk_cgroup_bio_start(struct bio *bi
-               llist_add(&bis->lnode, lhead);
-               WRITE_ONCE(bis->lqueued, true);
--              percpu_ref_get(&bis->blkg->refcnt);
-       }
-       u64_stats_update_end_irqrestore(&bis->sync, flags);
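
A small userspace sketch of the release-time ordering introduced above,
under the assumption that draining the queued stats is all that used to
pin the objects; blkcg_sk, blkg_sk and queued_stats are illustrative
stand-ins, while the real __blkg_release() drains the percpu llists via
__blkcg_rstat_flush() before dropping its css reference.

#include <stdio.h>

struct blkcg_sk {
	int refs;
};

struct blkg_sk {
	struct blkcg_sk *blkcg;
	unsigned long queued_stats;   /* stand-in for the percpu llists */
};

static void blkcg_put(struct blkcg_sk *cg)
{
	if (--cg->refs == 0)
		puts("blkcg freed");
}

static void blkg_release(struct blkg_sk *blkg)
{
	/* Flush what we own first, so no queued stat needs to hold its
	 * own blkg reference (the cycle the old code created). */
	if (blkg->queued_stats) {
		printf("flushed %lu queued stats\n", blkg->queued_stats);
		blkg->queued_stats = 0;
	}
	blkcg_put(blkg->blkcg);       /* now safe: nothing pins us */
	puts("blkg freed");
}

int main(void)
{
	struct blkcg_sk cg = { .refs = 1 };
	struct blkg_sk g = { .blkcg = &cg, .queued_stats = 3 };

	blkg_release(&g);
	return 0;
}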
diff --git a/queue-6.1/blk-cgroup-optimize-blkcg_rstat_flush.patch b/queue-6.1/blk-cgroup-optimize-blkcg_rstat_flush.patch
deleted file mode 100644
index 88612e2..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-From a7c7dbac32b9fc615ba132dbac34a467192964dc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 4 Nov 2022 20:59:01 -0400
-Subject: blk-cgroup: Optimize blkcg_rstat_flush()
-
-From: Waiman Long <longman@redhat.com>
-
-[ Upstream commit 3b8cc6298724021da845f2f9fd7dd4b6829a6817 ]
-
-For a system with many CPUs and block devices, the time to do
-blkcg_rstat_flush() from cgroup_rstat_flush() can be rather long. It
-is especially problematic as interrupts are disabled during the flush.
-It was reported that it might take seconds to complete in some extreme
-cases, leading to hard-lockup messages.
-
-As it is likely that not all the percpu blkg_iostat_set's have been
-updated since the last flush, those stale blkg_iostat_set's don't need
-to be flushed in this case. This patch optimizes blkcg_rstat_flush()
-by keeping a lockless list of recently updated blkg_iostat_set's in a
-newly added percpu blkcg->lhead pointer (sketched after this patch).
-
-The blkg_iostat_set is added to a lockless list on the update side
-in blk_cgroup_bio_start(). It is removed from the lockless list when
-flushed in blkcg_rstat_flush(). Due to racing, it is possible that
-blkg_iostat_set's in the lockless list may have no new IO stats to be
-flushed, but that is OK.
-
-To protect against destruction of the blkg, a percpu reference is taken
-when it is put onto the lockless list and dropped when it is removed.
-
-When booting up an instrumented test kernel with this patch on a
-2-socket 96-thread system with cgroup v2, 1788 of the 2051 calls to
-cgroup_rstat_flush() after bootup returned immediately because the
-lockless list was empty. After an all-CPU kernel build, the ratio
-became 6295424/6340513, i.e. more than 99%.
-
-Signed-off-by: Waiman Long <longman@redhat.com>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20221105005902.407297-3-longman@redhat.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: ad7c3b41e86b ("blk-throttle: Fix io statistics for cgroup v1")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-cgroup.c | 76 ++++++++++++++++++++++++++++++++++++++++++----
- block/blk-cgroup.h | 10 ++++++
- 2 files changed, 80 insertions(+), 6 deletions(-)
-
-diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
-index 60f366f98fa2b..2c7be256ff879 100644
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -60,6 +60,37 @@ static struct workqueue_struct *blkcg_punt_bio_wq;
- #define BLKG_DESTROY_BATCH_SIZE  64
-+/*
-+ * Lockless lists for tracking IO stats update
-+ *
-+ * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
-+ * There are multiple blkg's (one for each block device) attached to each
-+ * blkcg. The rstat code keeps track of which cpu has IO stats updated,
-+ * but it doesn't know which blkg has the updated stats. If there are many
-+ * block devices in a system, the cost of iterating all the blkg's to flush
-+ * out the IO stats can be high. To reduce such overhead, a set of percpu
-+ * lockless lists (lhead) per blkcg are used to track the set of recently
-+ * updated iostat_cpu's since the last flush. An iostat_cpu will be put
-+ * onto the lockless list on the update side [blk_cgroup_bio_start()] if
-+ * not there yet and then removed when being flushed [blkcg_rstat_flush()].
-+ * References to blkg are gotten and then put back in the process to
-+ * protect against blkg removal.
-+ *
-+ * Return: 0 if successful or -ENOMEM if allocation fails.
-+ */
-+static int init_blkcg_llists(struct blkcg *blkcg)
-+{
-+      int cpu;
-+
-+      blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
-+      if (!blkcg->lhead)
-+              return -ENOMEM;
-+
-+      for_each_possible_cpu(cpu)
-+              init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
-+      return 0;
-+}
-+
- /**
-  * blkcg_css - find the current css
-  *
-@@ -237,8 +268,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
-       blkg->blkcg = blkcg;
-       u64_stats_init(&blkg->iostat.sync);
--      for_each_possible_cpu(cpu)
-+      for_each_possible_cpu(cpu) {
-               u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
-+              per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
-+      }
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
-@@ -831,7 +864,9 @@ static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
- static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
- {
-       struct blkcg *blkcg = css_to_blkcg(css);
--      struct blkcg_gq *blkg;
-+      struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
-+      struct llist_node *lnode;
-+      struct blkg_iostat_set *bisc, *next_bisc;
-       /* Root-level stats are sourced from system-wide IO stats */
-       if (!cgroup_parent(css->cgroup))
-@@ -839,12 +874,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
-       rcu_read_lock();
--      hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-+      lnode = llist_del_all(lhead);
-+      if (!lnode)
-+              goto out;
-+
-+      /*
-+       * Iterate only the iostat_cpu's queued in the lockless list.
-+       */
-+      llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
-+              struct blkcg_gq *blkg = bisc->blkg;
-               struct blkcg_gq *parent = blkg->parent;
--              struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
-               struct blkg_iostat cur;
-               unsigned int seq;
-+              WRITE_ONCE(bisc->lqueued, false);
-+
-               /* fetch the current per-cpu values */
-               do {
-                       seq = u64_stats_fetch_begin(&bisc->sync);
-@@ -857,8 +901,10 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
-               if (parent && parent->parent)
-                       blkcg_iostat_update(parent, &blkg->iostat.cur,
-                                           &blkg->iostat.last);
-+              percpu_ref_put(&blkg->refcnt);
-       }
-+out:
-       rcu_read_unlock();
- }
-@@ -1136,6 +1182,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
-       mutex_unlock(&blkcg_pol_mutex);
-+      free_percpu(blkcg->lhead);
-       kfree(blkcg);
- }
-@@ -1158,6 +1205,9 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
-               }
-       }
-+      if (init_blkcg_llists(blkcg))
-+              goto free_blkcg;
-+
-       for (i = 0; i < BLKCG_MAX_POLS ; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
-               struct blkcg_policy_data *cpd;
-@@ -1199,7 +1249,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
-       for (i--; i >= 0; i--)
-               if (blkcg->cpd[i])
-                       blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
--
-+      free_percpu(blkcg->lhead);
-+free_blkcg:
-       if (blkcg != &blkcg_root)
-               kfree(blkcg);
- unlock:
-@@ -1952,6 +2003,7 @@ static int blk_cgroup_io_type(struct bio *bio)
- void blk_cgroup_bio_start(struct bio *bio)
- {
-+      struct blkcg *blkcg = bio->bi_blkg->blkcg;
-       int rwd = blk_cgroup_io_type(bio), cpu;
-       struct blkg_iostat_set *bis;
-       unsigned long flags;
-@@ -1970,9 +2022,21 @@ void blk_cgroup_bio_start(struct bio *bio)
-       }
-       bis->cur.ios[rwd]++;
-+      /*
-+       * If the iostat_cpu isn't in a lockless list, put it into the
-+       * list to indicate that a stat update is pending.
-+       */
-+      if (!READ_ONCE(bis->lqueued)) {
-+              struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);
-+
-+              llist_add(&bis->lnode, lhead);
-+              WRITE_ONCE(bis->lqueued, true);
-+              percpu_ref_get(&bis->blkg->refcnt);
-+      }
-+
-       u64_stats_update_end_irqrestore(&bis->sync, flags);
-       if (cgroup_subsys_on_dfl(io_cgrp_subsys))
--              cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
-+              cgroup_rstat_updated(blkcg->css.cgroup, cpu);
-       put_cpu();
- }
-diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
-index aa2b286bc825f..1e94e404eaa80 100644
---- a/block/blk-cgroup.h
-+++ b/block/blk-cgroup.h
-@@ -18,6 +18,7 @@
- #include <linux/cgroup.h>
- #include <linux/kthread.h>
- #include <linux/blk-mq.h>
-+#include <linux/llist.h>
- struct blkcg_gq;
- struct blkg_policy_data;
-@@ -43,6 +44,9 @@ struct blkg_iostat {
- struct blkg_iostat_set {
-       struct u64_stats_sync           sync;
-+      struct blkcg_gq                *blkg;
-+      struct llist_node               lnode;
-+      int                             lqueued;        /* queued in llist */
-       struct blkg_iostat              cur;
-       struct blkg_iostat              last;
- };
-@@ -97,6 +101,12 @@ struct blkcg {
-       struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];
-       struct list_head                all_blkcgs_node;
-+
-+      /*
-+       * List of updated percpu blkg_iostat_set's since the last flush.
-+       */
-+      struct llist_head __percpu      *lhead;
-+
- #ifdef CONFIG_BLK_CGROUP_FC_APPID
-       char                            fc_app_id[FC_APPID_LEN];
- #endif
--- 
-2.39.2
-
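
The dirty-list pattern this message describes can be sketched in plain
C11. The kernel version uses llist_add()/llist_del_all() from
<linux/llist.h>, runs the update side in percpu context, and guards
lqueued with READ_ONCE()/WRITE_ONCE(), so the explicit atomics below
(and the names stat_set and dirty_head) are illustrative only.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct stat_set {
	struct stat_set *next;          /* llist_node analogue */
	atomic_bool lqueued;            /* already on the dirty list? */
	unsigned long ios;              /* the counter being accumulated */
};

static _Atomic(struct stat_set *) dirty_head;

/* Update side (blk_cgroup_bio_start() analogue): bump the counter and
 * queue this set only if it is not queued yet. */
static void stat_update(struct stat_set *s)
{
	s->ios++;
	if (!atomic_load(&s->lqueued) &&
	    !atomic_exchange(&s->lqueued, true)) {
		struct stat_set *old = atomic_load(&dirty_head);

		do {
			s->next = old;
		} while (!atomic_compare_exchange_weak(&dirty_head, &old, s));
	}
}

/* Flush side (blkcg_rstat_flush() analogue): detach the whole list in
 * one shot, like llist_del_all(), and visit only what changed. */
static void stat_flush(void)
{
	struct stat_set *s = atomic_exchange(&dirty_head, NULL);

	while (s) {
		struct stat_set *next = s->next; /* save before clearing */

		atomic_store(&s->lqueued, false);
		printf("flushing a set with %lu ios\n", s->ios);
		s = next;
	}
}

int main(void)
{
	struct stat_set a = {0}, b = {0};

	stat_update(&a);
	stat_update(&a);        /* already queued: no second push */
	stat_update(&b);
	stat_flush();           /* visits a and b only, not every set */
	return 0;
}

The lqueued flag is what keeps the list short: a set that is already
queued is never pushed twice, so the flusher's work is proportional to
the number of recently updated sets, not to the number of blkgs.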
diff --git a/queue-6.1/blk-cgroup-reinit-blkg_iostat_set-after-clearing-in-blkcg_reset_stats.patch b/queue-6.1/blk-cgroup-reinit-blkg_iostat_set-after-clearing-in-blkcg_reset_stats.patch
deleted file mode 100644
index ba9c0c8..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-From 3d2af77e31ade05ff7ccc3658c3635ec1bea0979 Mon Sep 17 00:00:00 2001
-From: Waiman Long <longman@redhat.com>
-Date: Tue, 6 Jun 2023 14:07:24 -0400
-Subject: blk-cgroup: Reinit blkg_iostat_set after clearing in blkcg_reset_stats()
-
-From: Waiman Long <longman@redhat.com>
-
-commit 3d2af77e31ade05ff7ccc3658c3635ec1bea0979 upstream.
-
-When blkg_alloc() is called to allocate a blkcg_gq structure with the
-associated blkg_iostat_set's, there are two fields within
-blkg_iostat_set that require proper initialization: blkg and sync.
-The former field was introduced by commit 3b8cc6298724 ("blk-cgroup:
-Optimize blkcg_rstat_flush()") while the latter was introduced by
-commit f73316482977 ("blk-cgroup: reimplement basic IO stats using
-cgroup rstat").
-
-Unfortunately those fields are not properly re-initialized when the
-blkg_iostat_set's are cleared in v1's blkcg_reset_stats(). This can
-lead to a kernel panic due to a NULL pointer dereference through the
-blkg pointer. The missing initialization of sync is less severe, but
-it can still trip a debug kernel due to missing lockdep initialization.
-
-Fix these problems by re-initializing them after the memset() (sketch below).
-
-Fixes: 3b8cc6298724 ("blk-cgroup: Optimize blkcg_rstat_flush()")
-Fixes: f73316482977 ("blk-cgroup: reimplement basic IO stats using cgroup rstat")
-Signed-off-by: Waiman Long <longman@redhat.com>
-Reviewed-by: Ming Lei <ming.lei@redhat.com>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230606180724.2455066-1-longman@redhat.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- block/blk-cgroup.c |    5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -544,8 +544,13 @@ static int blkcg_reset_stats(struct cgro
-                       struct blkg_iostat_set *bis =
-                               per_cpu_ptr(blkg->iostat_cpu, cpu);
-                       memset(bis, 0, sizeof(*bis));
-+
-+                      /* Re-initialize the cleared blkg_iostat_set */
-+                      u64_stats_init(&bis->sync);
-+                      bis->blkg = blkg;
-               }
-               memset(&blkg->iostat, 0, sizeof(blkg->iostat));
-+              u64_stats_init(&blkg->iostat.sync);
-               for (i = 0; i < BLKCG_MAX_POLS; i++) {
-                       struct blkcg_policy *pol = blkcg_policy[i];
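
A userspace sketch of the bug class fixed here, with loosely mirrored
names (blkg_sk, iostat_set_sk) that are not the kernel's types: a
wholesale memset() wipes non-stat fields along with the counters, so
the back-pointer and sync state must be re-established afterwards.

#include <stdio.h>
#include <string.h>

struct blkg_sk;

struct iostat_set_sk {
	struct blkg_sk *blkg;   /* back-pointer, not a statistic */
	unsigned int sync;      /* stand-in for u64_stats_sync state */
	unsigned long cur[3];   /* the counters we actually want zeroed */
};

struct blkg_sk {
	struct iostat_set_sk iostat;
};

static void reset_stats(struct blkg_sk *blkg)
{
	struct iostat_set_sk *bis = &blkg->iostat;

	memset(bis, 0, sizeof(*bis));   /* zeroes everything, bug-prone */

	/* Re-initialize the cleared non-stat fields, as the patch does
	 * with u64_stats_init() and the bis->blkg assignment. */
	bis->blkg = blkg;
	bis->sync = 0;
}

int main(void)
{
	struct blkg_sk g;

	reset_stats(&g);
	printf("back-pointer intact: %s\n",
	       g.iostat.blkg == &g ? "yes" : "no");
	return 0;
}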
diff --git a/queue-6.1/blk-throttle-fix-io-statistics-for-cgroup-v1.patch b/queue-6.1/blk-throttle-fix-io-statistics-for-cgroup-v1.patch
deleted file mode 100644
index 6d9c2c9..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-From a664cdf1cba90269e240aeb587e2c528a51090cd Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 8 May 2023 01:06:31 +0800
-Subject: blk-throttle: Fix io statistics for cgroup v1
-
-From: Jinke Han <hanjinke.666@bytedance.com>
-
-[ Upstream commit ad7c3b41e86b59943a903d23c7b037d820e6270c ]
-
-After commit f382fb0bcef4 ("block: remove legacy IO schedulers"),
-blkio.throttle.io_serviced and blkio.throttle.io_service_bytes became
-the only stable IO stats interface of cgroup v1, and these statistics
-are collected in the blk-throttle code. But the current code only
-counts bios that are actually throttled; when the user sets no
-throttle limit, the cgroup v1 IO stats show nothing. Fix this by
-following the v2 accounting method so all IOs are counted (sketch below).
-
-Fixes: a7b36ee6ba29 ("block: move blk-throtl fast path inline")
-Tested-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Jinke Han <hanjinke.666@bytedance.com>
-Acked-by: Muchun Song <songmuchun@bytedance.com>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230507170631.89607-1-hanjinke.666@bytedance.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-cgroup.c   | 6 ++++--
- block/blk-throttle.c | 6 ------
- block/blk-throttle.h | 9 +++++++++
- 3 files changed, 13 insertions(+), 8 deletions(-)
-
-diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
-index 5ee0ae8ddbf6f..e812e48d5bb8a 100644
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -2008,6 +2008,9 @@ void blk_cgroup_bio_start(struct bio *bio)
-       struct blkg_iostat_set *bis;
-       unsigned long flags;
-+      if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
-+              return;
-+
-       /* Root-level stats are sourced from system-wide IO stats */
-       if (!cgroup_parent(blkcg->css.cgroup))
-               return;
-@@ -2039,8 +2042,7 @@ void blk_cgroup_bio_start(struct bio *bio)
-       }
-       u64_stats_update_end_irqrestore(&bis->sync, flags);
--      if (cgroup_subsys_on_dfl(io_cgrp_subsys))
--              cgroup_rstat_updated(blkcg->css.cgroup, cpu);
-+      cgroup_rstat_updated(blkcg->css.cgroup, cpu);
-       put_cpu();
- }
-diff --git a/block/blk-throttle.c b/block/blk-throttle.c
-index f1bc600c4ded6..d88147d1358fc 100644
---- a/block/blk-throttle.c
-+++ b/block/blk-throttle.c
-@@ -2167,12 +2167,6 @@ bool __blk_throtl_bio(struct bio *bio)
-       rcu_read_lock();
--      if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
--              blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
--                              bio->bi_iter.bi_size);
--              blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
--      }
--
-       spin_lock_irq(&q->queue_lock);
-       throtl_update_latency_buckets(td);
-diff --git a/block/blk-throttle.h b/block/blk-throttle.h
-index ef4b7a4de987d..d1ccbfe9f7978 100644
---- a/block/blk-throttle.h
-+++ b/block/blk-throttle.h
-@@ -185,6 +185,15 @@ static inline bool blk_should_throtl(struct bio *bio)
-       struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
-       int rw = bio_data_dir(bio);
-+      if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
-+              if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
-+                      bio_set_flag(bio, BIO_CGROUP_ACCT);
-+                      blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
-+                                      bio->bi_iter.bi_size);
-+              }
-+              blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
-+      }
-+
-       /* iops limit is always counted */
-       if (tg->has_rules_iops[rw])
-               return true;
--- 
-2.39.2
-
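
The v1 accounting rule the patch adds to blk_should_throtl() can be
sketched as follows; bio_sk and v1_account are illustrative names, with
the boolean flag playing the role of BIO_CGROUP_ACCT so that a split or
resubmitted bio charges its bytes only once while every submission
still counts as an IO.

#include <stdbool.h>
#include <stdio.h>

struct bio_sk {
	bool cgroup_acct;        /* analogue of BIO_CGROUP_ACCT */
	unsigned long bi_size;
};

static unsigned long stat_bytes, stat_ios;

static void v1_account(struct bio_sk *bio)
{
	if (!bio->cgroup_acct) {
		bio->cgroup_acct = true;
		stat_bytes += bio->bi_size;  /* first sighting only */
	}
	stat_ios++;                          /* every submission */
}

int main(void)
{
	struct bio_sk bio = { .bi_size = 4096 };

	v1_account(&bio);   /* initial submission */
	v1_account(&bio);   /* resubmission: ios++, bytes unchanged */
	printf("bytes=%lu ios=%lu\n", stat_bytes, stat_ios);
	return 0;
}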
diff --git a/queue-6.1/block-make-sure-local-irq-is-disabled-when-calling-__blkcg_rstat_flush.patch b/queue-6.1/block-make-sure-local-irq-is-disabled-when-calling-__blkcg_rstat_flush.patch
deleted file mode 100644
index 61d6f91..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-From 9c39b7a905d84b7da5f59d80f2e455853fea7217 Mon Sep 17 00:00:00 2001
-From: Ming Lei <ming.lei@redhat.com>
-Date: Thu, 22 Jun 2023 16:42:49 +0800
-Subject: block: make sure local irq is disabled when calling __blkcg_rstat_flush
-
-From: Ming Lei <ming.lei@redhat.com>
-
-commit 9c39b7a905d84b7da5f59d80f2e455853fea7217 upstream.
-
-When __blkcg_rstat_flush() is called from the cgroup_rstat_flush*()
-code path, interrupts are always disabled.
-
-Since commit 20cb1c2fb756 ("blk-cgroup: Flush stats before releasing
-blkcg_gq") started flushing the blkcg per-cpu stats lists from
-__blkg_release(), local irqs are not necessarily disabled there, and
-a lockdep warning may be triggered because the dependent cgroup locks
-may also be acquired from (soft)irq handlers.
-
-Fix the issue by always disabling local irqs (sketch follows the patch).
-
-Fixes: 20cb1c2fb756 ("blk-cgroup: Flush stats before releasing blkcg_gq")
-Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
-Closes: https://lore.kernel.org/linux-block/pz2wzwnmn5tk3pwpskmjhli6g3qly7eoknilb26of376c7kwxy@qydzpvt6zpis/T/#u
-Cc: stable@vger.kernel.org
-Cc: Jay Shin <jaeshin@redhat.com>
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Waiman Long <longman@redhat.com>
-Signed-off-by: Ming Lei <ming.lei@redhat.com>
-Reviewed-by: Waiman Long <longman@redhat.com>
-Link: https://lore.kernel.org/r/20230622084249.1208005-1-ming.lei@redhat.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- block/blk-cgroup.c |    5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -886,6 +886,7 @@ static void __blkcg_rstat_flush(struct b
-       struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
-       struct llist_node *lnode;
-       struct blkg_iostat_set *bisc, *next_bisc;
-+      unsigned long flags;
-       rcu_read_lock();
-@@ -899,7 +900,7 @@ static void __blkcg_rstat_flush(struct b
-        * When flushing from cgroup, cgroup_rstat_lock is always held, so
-        * this lock won't cause contention most of time.
-        * this lock won't cause contention most of the time.
--      raw_spin_lock(&blkg_stat_lock);
-+      raw_spin_lock_irqsave(&blkg_stat_lock, flags);
-       /*
-        * Iterate only the iostat_cpu's queued in the lockless list.
-@@ -925,7 +926,7 @@ static void __blkcg_rstat_flush(struct b
-                       blkcg_iostat_update(parent, &blkg->iostat.cur,
-                                           &blkg->iostat.last);
-       }
--      raw_spin_unlock(&blkg_stat_lock);
-+      raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
- out:
-       rcu_read_unlock();
- }
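
A userspace analogue of the raw_spin_lock_irqsave() shape adopted here,
with a blocked signal standing in for the (soft)irq; the helper names
stat_lock_irqsave/stat_unlock_irqrestore are illustrative, not a real
API. A lock that can also be taken from interrupt context must be
acquired with interrupts masked and the previous mask restored on
unlock, or the handler can deadlock against its own interrupted thread.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_spinlock_t stat_lock;

/* Mask "interrupts" (signals) first, then take the lock, remembering
 * the previous mask, like raw_spin_lock_irqsave(). */
static void stat_lock_irqsave(sigset_t *saved)
{
	sigset_t all;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, saved);
	pthread_spin_lock(&stat_lock);
}

/* Drop the lock, then restore whatever mask the caller had, like
 * raw_spin_unlock_irqrestore(). */
static void stat_unlock_irqrestore(const sigset_t *saved)
{
	pthread_spin_unlock(&stat_lock);
	pthread_sigmask(SIG_SETMASK, saved, NULL);
}

int main(void)
{
	sigset_t saved;

	pthread_spin_init(&stat_lock, PTHREAD_PROCESS_PRIVATE);
	stat_lock_irqsave(&saved);
	puts("critical section with 'interrupts' masked");
	stat_unlock_irqrestore(&saved);
	pthread_spin_destroy(&stat_lock);
	return 0;
}

Masking first guarantees the handler can never run while the lock is
held on this CPU, which is exactly the property lockdep was warning
about in the __blkg_release() path.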
diff --git a/queue-6.1/series b/queue-6.1/series
index 2674cfb16bd0143b4947aa9b4e6c416473c6201e..4dca943aaa26d9835ad05aba3df949ffc38ba11d 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -459,9 +459,6 @@ pwm-sysfs-do-not-apply-state-to-already-disabled-pwm.patch
 pwm-ab8500-fix-error-code-in-probe.patch
 pwm-mtk_disp-fix-the-disable-flow-of-disp_pwm.patch
 md-raid10-fix-the-condition-to-call-bio_end_io_acct.patch
-blk-cgroup-optimize-blkcg_rstat_flush.patch
-blk-cgroup-don-t-update-io-stat-for-root-cgroup.patch
-blk-throttle-fix-io-statistics-for-cgroup-v1.patch
 rtc-st-lpc-release-some-resources-in-st_rtc_probe-in.patch
 drm-i915-psr-use-hw.adjusted-mode-when-calculating-i.patch
 drm-i915-guc-slpc-apply-min-softlimit-correctly.patch
@@ -579,10 +576,7 @@ drm-i915-tc-fix-tc-port-link-ref-init-for-dp-mst-during-hw-readout.patch
 drm-i915-tc-fix-system-resume-mst-mode-restore-for-dp-alt-sinks.patch
 mtd-parsers-refer-to-arch_bcmbca-instead-of-arch_bcm4908.patch
 netfilter-nf_tables-unbind-non-anonymous-set-if-rule-construction-fails.patch
-blk-cgroup-reinit-blkg_iostat_set-after-clearing-in-blkcg_reset_stats.patch
-blk-cgroup-flush-stats-before-releasing-blkcg_gq.patch
 mips-dts-ci20-raise-vddcore-voltage-to-1.125-volts.patch
-block-make-sure-local-irq-is-disabled-when-calling-__blkcg_rstat_flush.patch
 netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch
 netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch
 netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch