git.ipfire.org Git - thirdparty/linux.git/commitdiff
blk-throttle: remove blk_throtl_drain
author: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Fri, 8 May 2020 22:00:12 +0000 (00:00 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 29 May 2020 22:30:39 +0000 (16:30 -0600)
After the commit 5addeae1bedc4 ("blk-cgroup: remove blkcg_drain_queue"),
there is no caller of blk_throtl_drain, so let's remove it.

Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-throttle.c
block/blk.h

index 98233c9c65a8d18adc7b76d88f466cf6c1e34a67..0b2ce7fb77a770f0b683de0e36f80376bd4dc55e 100644 (file)
@@ -2380,47 +2380,6 @@ static void tg_drain_bios(struct throtl_service_queue *parent_sq)
        }
 }
 
-/**
- * blk_throtl_drain - drain throttled bios
- * @q: request_queue to drain throttled bios for
- *
- * Dispatch all currently throttled bios on @q through ->make_request_fn().
- */
-void blk_throtl_drain(struct request_queue *q)
-       __releases(&q->queue_lock) __acquires(&q->queue_lock)
-{
-       struct throtl_data *td = q->td;
-       struct blkcg_gq *blkg;
-       struct cgroup_subsys_state *pos_css;
-       struct bio *bio;
-       int rw;
-
-       rcu_read_lock();
-
-       /*
-        * Drain each tg while doing post-order walk on the blkg tree, so
-        * that all bios are propagated to td->service_queue.  It'd be
-        * better to walk service_queue tree directly but blkg walk is
-        * easier.
-        */
-       blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
-               tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
-
-       /* finally, transfer bios from top-level tg's into the td */
-       tg_drain_bios(&td->service_queue);
-
-       rcu_read_unlock();
-       spin_unlock_irq(&q->queue_lock);
-
-       /* all bios now should be in td->service_queue, issue them */
-       for (rw = READ; rw <= WRITE; rw++)
-               while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
-                                               NULL)))
-                       generic_make_request(bio);
-
-       spin_lock_irq(&q->queue_lock);
-}
-
 int blk_throtl_init(struct request_queue *q)
 {
        struct throtl_data *td;
index 428f7e5d70a86c06d73613aaa4d9fee019ff4d42..aa16e524dc35e010d55ad0e62b12ae386345407b 100644 (file)
@@ -296,12 +296,10 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
  * Internal throttling interface
  */
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern void blk_throtl_register_queue(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }