git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
fixes for 5.0
author	Sasha Levin <sashal@kernel.org>
Sat, 11 May 2019 01:45:49 +0000 (21:45 -0400)
committer	Sasha Levin <sashal@kernel.org>
Sat, 11 May 2019 01:45:49 +0000 (21:45 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.0/bfq-update-internal-depth-state-when-queue-depth-cha.patch [new file with mode: 0644]
queue-5.0/series [new file with mode: 0644]

diff --git a/queue-5.0/bfq-update-internal-depth-state-when-queue-depth-cha.patch b/queue-5.0/bfq-update-internal-depth-state-when-queue-depth-cha.patch
new file mode 100644 (file)
index 0000000..4d02fd2
--- /dev/null
@@ -0,0 +1,87 @@
+From 8094863d86dc9666b4b285fd5f30bfce2ed4ba80 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Fri, 18 Jan 2019 10:34:16 -0700
+Subject: bfq: update internal depth state when queue depth changes
+
+[ Upstream commit 77f1e0a52d26242b6c2dba019f6ebebfb9ff701e ]
+
+A previous commit moved the shallow depth and BFQ depth map calculations
+to be done at init time, moving them outside of the hotter IO path. This
+potentially causes hangs if the user changes the depth of the scheduler
+map by writing to the 'nr_requests' sysfs file for that device.
+
+Add a blk-mq-sched hook that allows blk-mq to inform the scheduler if
+the depth changes, so that the scheduler can update its internal state.
+
+Tested-by: Kai Krakow <kai@kaishome.de>
+Reported-by: Paolo Valente <paolo.valente@linaro.org>
+Fixes: f0635b8a416e ("bfq: calculate shallow depths at init time")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c      | 8 +++++++-
+ block/blk-mq.c           | 2 ++
+ include/linux/elevator.h | 1 +
+ 3 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 72510c4700014..356620414cf94 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5353,7 +5353,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
+       return min_shallow;
+ }
+ 
+-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
++static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
+ {
+       struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+       struct blk_mq_tags *tags = hctx->sched_tags;
+@@ -5361,6 +5361,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+ 
+       min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
+       sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
++}
++
++static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
++{
++      bfq_depth_updated(hctx);
+       return 0;
+ }
+ 
+@@ -5783,6 +5788,7 @@ static struct elevator_type iosched_bfq_mq = {
+               .requests_merged        = bfq_requests_merged,
+               .request_merged         = bfq_request_merged,
+               .has_work               = bfq_has_work,
++              .depth_updated          = bfq_depth_updated,
+               .init_hctx              = bfq_init_hctx,
+               .init_sched             = bfq_init_queue,
+               .exit_sched             = bfq_exit_queue,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 6930c82ab75fc..5b920a82bfe60 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3131,6 +3131,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+               }
+               if (ret)
+                       break;
++              if (q->elevator && q->elevator->type->ops.depth_updated)
++                      q->elevator->type->ops.depth_updated(hctx);
+       }
+ 
+       if (!ret)
+diff --git a/include/linux/elevator.h b/include/linux/elevator.h
+index 2e9e2763bf47d..6e8bc53740f05 100644
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -31,6 +31,7 @@ struct elevator_mq_ops {
+       void (*exit_sched)(struct elevator_queue *);
+       int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+       void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
++      void (*depth_updated)(struct blk_mq_hw_ctx *);
+ 
+       bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
+-- 
+2.20.1
+
diff --git a/queue-5.0/series b/queue-5.0/series
new file mode 100644 (file)
index 0000000..29682e0
--- /dev/null
@@ -0,0 +1 @@
+bfq-update-internal-depth-state-when-queue-depth-cha.patch
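
As a rough illustration of the mechanism this backport wires up (blk_mq_update_nr_requests() invoking an optional depth_updated hook on the attached scheduler), here is a minimal stand-alone C sketch of that optional-callback pattern. All demo_* names below are invented for the example only and are not kernel identifiers.

/*
 * Stand-alone sketch of the optional "depth_updated" callback pattern.
 * Every demo_* name is invented for illustration; none of this is the
 * real blk-mq/elevator API.  Build with: cc -Wall demo.c
 */
#include <stdio.h>

struct demo_hw_ctx {
	unsigned int sched_depth;	/* stand-in for the scheduler tag depth */
};

/* Scheduler ops table; depth_updated is optional, like in elevator_mq_ops. */
struct demo_sched_ops {
	void (*depth_updated)(struct demo_hw_ctx *hctx);
};

struct demo_queue {
	unsigned int nr_requests;
	struct demo_hw_ctx hctx;
	const struct demo_sched_ops *ops;	/* NULL when no scheduler is attached */
};

/* A BFQ-like scheduler would recompute its shallow-depth state here. */
static void demo_bfq_depth_updated(struct demo_hw_ctx *hctx)
{
	printf("scheduler recomputed internal depths for depth %u\n",
	       hctx->sched_depth);
}

static const struct demo_sched_ops demo_bfq_ops = {
	.depth_updated = demo_bfq_depth_updated,
};

/*
 * Analogue of the hunk in blk_mq_update_nr_requests(): after the tag
 * depth is resized, notify the scheduler only if it registered the hook.
 */
static void demo_update_nr_requests(struct demo_queue *q, unsigned int nr)
{
	q->nr_requests = nr;
	q->hctx.sched_depth = nr;

	if (q->ops && q->ops->depth_updated)
		q->ops->depth_updated(&q->hctx);
}

int main(void)
{
	struct demo_queue q = {
		.nr_requests = 64,
		.hctx = { .sched_depth = 64 },
		.ops = &demo_bfq_ops,
	};

	/* Roughly what a write to the nr_requests sysfs file triggers. */
	demo_update_nr_requests(&q, 32);
	return 0;
}

Keeping the hook optional (the NULL check before the call) matches the backported change: schedulers that do not track depth simply leave the new elevator_mq_ops member unset and see no behavior change.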