git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.5
author    Sasha Levin <sashal@kernel.org>
          Wed, 18 Mar 2020 23:55:01 +0000 (19:55 -0400)
committer Sasha Levin <sashal@kernel.org>
          Wed, 18 Mar 2020 23:55:01 +0000 (19:55 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.5/blk-mq-insert-flush-request-to-the-front-of-dispatch.patch [new file with mode: 0644]
queue-5.5/locks-fix-a-potential-use-after-free-problem-when-wa.patch [new file with mode: 0644]
queue-5.5/series

diff --git a/queue-5.5/blk-mq-insert-flush-request-to-the-front-of-dispatch.patch b/queue-5.5/blk-mq-insert-flush-request-to-the-front-of-dispatch.patch
new file mode 100644 (file)
index 0000000..756cded
--- /dev/null
@@ -0,0 +1,73 @@
+From 329dd25f256dee299bf281ec00ed25f67edce49c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Mar 2020 17:15:48 +0800
+Subject: blk-mq: insert flush request to the front of dispatch queue
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit cc3200eac4c5eb11c3f34848a014d1f286316310 ]
+
+commit 01e99aeca397 ("blk-mq: insert passthrough request into
+hctx->dispatch directly") may add a flush request to the tail of
+the dispatch queue, depending on the 'at_head' parameter of
+blk_mq_sched_insert_request.
+
+It turns out that this causes a performance regression on NCQ
+controllers: a flush is a non-NCQ command, so it cannot be queued
+while any NCQ command is in flight. Adding the flush rq to the
+front of hctx->dispatch tends to add a little extra latency to
+the flush rq, because of S_SCHED_RESTART, compared with adding it
+to the tail; that delay increases the chance of flush merging, so
+fewer flush requests are issued to the controller.
+
+So always insert the flush request at the front of the dispatch
+queue, just as before commit 01e99aeca397 ("blk-mq: insert
+passthrough request into hctx->dispatch directly") was applied.
+
+Cc: Damien Le Moal <Damien.LeMoal@wdc.com>
+Cc: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Fixes: 01e99aeca397 ("blk-mq: insert passthrough request into hctx->dispatch directly")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 856356b1619e8..74cedea560348 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+       WARN_ON(e && (rq->tag != -1));
+
+       if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
++              /*
++               * Normal IO requests go to the scheduler queue or the sw
++               * queue, while flush requests are added directly to the
++               * dispatch queue (hctx->dispatch). There is at most one
++               * in-flight flush request per hw queue, so correctness does
++               * not depend on whether the flush rq sits at the head or
++               * the tail of the dispatch queue.
++               *
++               * However, with NCQ a flush is a non-NCQ command, so it
++               * cannot be queued while any normal IO request (NCQ command)
++               * is in flight. Adding the flush rq to the front of
++               * hctx->dispatch tends to add a little extra latency to it,
++               * because of S_SCHED_RESTART, compared with adding it to the
++               * tail; that delay increases the chance of flush merging, so
++               * fewer flush requests are issued to the controller. In
++               * blktests block/004 on a disk behind an AHCI/NCQ controller,
++               * this saves ~10% of runtime.
++               *
++               * So simply queue the flush rq at the front of hctx->dispatch
++               * so that flush-intensive workloads benefit on NCQ hardware.
++               */
++              at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
+               blk_mq_request_bypass_insert(rq, at_head, false);
+               goto run;
+       }
+-- 
+2.20.1
+
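The policy the patch above implements can be seen in isolation in the
following userspace sketch. It is a minimal model, not kernel code: the
types and helpers (struct dispatch_list, insert_request) are invented
for illustration, with only RQF_FLUSH_SEQ and the forced at_head
override taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define RQF_FLUSH_SEQ (1u << 0)         /* stand-in for the kernel flag */

struct request {
        unsigned int rq_flags;
        const char *name;
        struct request *next;
};

struct dispatch_list {                  /* models hctx->dispatch */
        struct request *head;
        struct request *tail;
};

static void insert_request(struct dispatch_list *dl, struct request *rq,
                           bool at_head)
{
        /* The fix: a flush request always goes to the front. */
        if (rq->rq_flags & RQF_FLUSH_SEQ)
                at_head = true;

        if (at_head || !dl->head) {     /* push front (or empty list) */
                rq->next = dl->head;
                dl->head = rq;
                if (!dl->tail)
                        dl->tail = rq;
        } else {                        /* append at the tail */
                rq->next = NULL;
                dl->tail->next = rq;
                dl->tail = rq;
        }
}

int main(void)
{
        struct dispatch_list dl = { 0 };
        struct request io = { .rq_flags = 0, .name = "normal-io" };
        struct request flush = { .rq_flags = RQF_FLUSH_SEQ, .name = "flush" };

        insert_request(&dl, &io, false);    /* ordinary request: tail */
        insert_request(&dl, &flush, false); /* forced to the front anyway */

        for (struct request *rq = dl.head; rq; rq = rq->next)
                printf("%s\n", rq->name);   /* prints: flush, normal-io */
        return 0;
}

Dispatching from the head then services the flush first; in the real
driver stack, per the commit message, the S_SCHED_RESTART-induced delay
is what lets subsequent flushes merge.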
diff --git a/queue-5.5/locks-fix-a-potential-use-after-free-problem-when-wa.patch b/queue-5.5/locks-fix-a-potential-use-after-free-problem-when-wa.patch
new file mode 100644 (file)
index 0000000..2b3203d
--- /dev/null
@@ -0,0 +1,81 @@
+From 4cddc5e10cc4b3105fc8e2d13b1896d5c000958e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2020 15:25:56 +0800
+Subject: locks: fix a potential use-after-free problem when wakeup a waiter
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: yangerkun <yangerkun@huawei.com>
+
+[ Upstream commit 6d390e4b5d48ec03bb87e63cf0a2bff5f4e116da ]
+
+Commit 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+added logic to check waiter->fl_blocker without holding
+blocked_lock_lock, which can trigger a use-after-free when waking up
+a waiter. Thread 1 has created a write flock a on a file; now thread
+2 tries to unlock and delete flock a while thread 3 tries to add
+flock b on the same file:
+
+Thread 2                        Thread 3
+                                flock syscall (create flock b)
+                                ...flock_lock_inode_wait
+                                    flock_lock_inode (inserts b's
+                                    fl_blocked_member into flock a's
+                                    fl_blocked_requests list)
+                                    sleep
+flock syscall (unlock)
+...flock_lock_inode_wait
+    locks_delete_lock_ctx
+    ...__locks_wake_up_blocks
+        __locks_delete_block(
+            b->fl_blocker = NULL)
+        ...
+                                    interrupted by a signal
+                                locks_delete_block
+                                    b->fl_blocker == NULL &&
+                                    list_empty(&b->fl_blocked_requests)
+                                    both succeed, return directly
+                                locks_free_lock b
+        wake_up(&b->fl_wait)
+        trigger UAF
+
+Fix it by removing this logic; this patch may also fix CVE-2019-19769.
+
+Cc: stable@vger.kernel.org
+Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+Signed-off-by: yangerkun <yangerkun@huawei.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/locks.c | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/fs/locks.c b/fs/locks.c
+index 44b6da0328426..426b55d333d5b 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
+ {
+       int status = -ENOENT;
+
+-      /*
+-       * If fl_blocker is NULL, it won't be set again as this thread
+-       * "owns" the lock and is the only one that might try to claim
+-       * the lock.  So it is safe to test fl_blocker locklessly.
+-       * Also if fl_blocker is NULL, this waiter is not listed on
+-       * fl_blocked_requests for some lock, so no other request can
+-       * be added to the list of fl_blocked_requests for this
+-       * request.  So if fl_blocker is NULL, it is safe to
+-       * locklessly check if fl_blocked_requests is empty.  If both
+-       * of these checks succeed, there is no need to take the lock.
+-       */
+-      if (waiter->fl_blocker == NULL &&
+-          list_empty(&waiter->fl_blocked_requests))
+-              return status;
+       spin_lock(&blocked_lock_lock);
+       if (waiter->fl_blocker)
+               status = 0;
+-- 
+2.20.1
+
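The fix above boils down to: never inspect the waiter's state outside
blocked_lock_lock. Below is a small, self-contained userspace model of
the fixed function, assuming pthreads; struct waiter, blocked_lock_lock
and delete_block are invented names for illustration, not the
fs/locks.c API.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t blocked_lock_lock = PTHREAD_MUTEX_INITIALIZER;

struct waiter {
        struct waiter *blocker;  /* models fl_blocker */
        bool on_blocked_list;    /* models !list_empty(&fl_blocked_requests) */
};

/*
 * Before the fix, a lockless fast path read w->blocker first:
 *
 *     if (w->blocker == NULL && !w->on_blocked_list)
 *             return -ENOENT;
 *
 * A waker holding the lock clears w->blocker and unlinks w, then wakes
 * the sleeper a moment later. The fast path never takes the lock, so it
 * can observe the cleared state, let the caller free w, and leave the
 * waker signalling freed memory.
 */
static int delete_block(struct waiter *w)
{
        int status = -2;                /* stand-in for -ENOENT */

        pthread_mutex_lock(&blocked_lock_lock);
        if (w->blocker)
                status = 0;
        /* ...unlink w from the blocker's list here, still locked... */
        w->blocker = NULL;
        w->on_blocked_list = false;
        pthread_mutex_unlock(&blocked_lock_lock);
        return status;
}

int main(void)
{
        struct waiter w = { 0 };
        return delete_block(&w) == -2 ? 0 : 1; /* nobody blocks w */
}

With every reader forced through the mutex, a waker that has already
cleared w->blocker finishes its wake-up before delete_block can return,
so the caller cannot free w out from under it.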
diff --git a/queue-5.5/series b/queue-5.5/series
index ed2fc8c5db6ffde54d903cbf15e1476195845dc1..badb62803286fcde0c6b8d6f7a702aa2b9935070 100644 (file)
--- a/queue-5.5/series
@@ -55,3 +55,5 @@ sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch
 mlxsw-pci-wait-longer-before-accessing-the-device-af.patch
 net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch
 jbd2-fix-data-races-at-struct-journal_head.patch
+locks-fix-a-potential-use-after-free-problem-when-wa.patch
+blk-mq-insert-flush-request-to-the-front-of-dispatch.patch