--- /dev/null
+From 44ea5fe2f7bce3f55a833d13643fdb9c46ac7959 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Mar 2020 17:15:48 +0800
+Subject: blk-mq: insert flush request to the front of dispatch queue
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit cc3200eac4c5eb11c3f34848a014d1f286316310 ]
+
+commit 01e99aeca397 ("blk-mq: insert passthrough request into
+hctx->dispatch directly") may add a flush request to the tail of the
+dispatch queue, depending on the 'at_head' parameter of
+blk_mq_sched_insert_request().
+
+It turns out that this causes a performance regression on NCQ
+controllers, because a flush is a non-NCQ command and cannot be queued
+while any NCQ command is in flight. Adding the flush rq to the front of
+hctx->dispatch tends to add extra latency to the flush rq (because of
+S_SCHED_RESTART) compared with adding it to the tail of the dispatch
+queue; this increases the chance of flush merging, so fewer flush
+requests are issued to the controller.
+
+So always insert flush requests at the front of the dispatch queue,
+just as before commit 01e99aeca397 ("blk-mq: insert passthrough request
+into hctx->dispatch directly") was applied.
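+
+For reference, the resulting bypass path boils down to the following
+sketch (simplified from the hunk below; the surrounding function and
+the 'run' label target are omitted):
+
+  if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+          /* RQF_FLUSH_SEQ marks a request in a flush sequence:
+           * force it to the front of hctx->dispatch. */
+          if (rq->rq_flags & RQF_FLUSH_SEQ)
+                  at_head = true;
+          blk_mq_request_bypass_insert(rq, at_head, false);
+          goto run;
+  }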
+
+Cc: Damien Le Moal <Damien.LeMoal@wdc.com>
+Cc: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Fixes: 01e99aeca397 ("blk-mq: insert passthrough request into hctx->dispatch directly")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 856356b1619e8..74cedea560348 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+ WARN_ON(e && (rq->tag != -1));
+
+ if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
++ /*
++ * Normal IO requests are inserted into the scheduler queue or the
++ * sw queue, while a flush request is added directly to the dispatch
++ * queue (hctx->dispatch). There is at most one in-flight flush
++ * request per hw queue, so correctness does not depend on whether
++ * the flush request is added to the front or the tail of the
++ * dispatch queue.
++ *
++ * However, on NCQ hardware a flush is a non-NCQ command and cannot
++ * be queued while any normal IO request (NCQ command) is in flight.
++ * Adding the flush rq to the front of hctx->dispatch tends to add
++ * extra latency to the flush rq (because of S_SCHED_RESTART)
++ * compared with adding it to the tail; this increases the chance of
++ * flush merging, so fewer flush requests are issued to the
++ * controller. About 10% of the run time of blktests block/004 is
++ * saved on a disk attached to an AHCI/NCQ controller when the flush
++ * rq is added to the front of hctx->dispatch.
++ *
++ * So simply queue the flush rq to the front of hctx->dispatch so
++ * that flush-intensive workloads benefit on NCQ hardware.
++ */
++ at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
+ blk_mq_request_bypass_insert(rq, at_head, false);
+ goto run;
+ }
+--
+2.20.1
+
--- /dev/null
+From 68edfb98d4afd5c15eaacf0c62d5ad0991c02e1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2020 15:25:56 +0800
+Subject: locks: fix a potential use-after-free problem when waking up a waiter
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: yangerkun <yangerkun@huawei.com>
+
+[ Upstream commit 6d390e4b5d48ec03bb87e63cf0a2bff5f4e116da ]
+
+Commit 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+added logic to check waiter->fl_blocker without holding
+blocked_lock_lock. This can trigger a use-after-free when we try to
+wake up a waiter:
+
+Thread 1 has created a write flock a on a file; now thread 2 tries to
+unlock and delete flock a while thread 3 tries to add flock b on the
+same file.
+
+Thread2                            Thread3
+                                   flock syscall (create flock b)
+                                   ...flock_lock_inode_wait
+                                       flock_lock_inode (inserts our
+                                       fl_blocked_member entry into
+                                       flock a's fl_blocked_requests)
+                                       sleep
+flock syscall (unlock)
+...flock_lock_inode_wait
+    locks_delete_lock_ctx
+    ...__locks_wake_up_blocks
+        __locks_delete_block(
+            b->fl_blocker = NULL)
+        ...
+                                       break by a signal
+                                       locks_delete_block
+                                           b->fl_blocker == NULL &&
+                                           list_empty(&b->fl_blocked_requests)
+                                           success, return directly
+                                   locks_free_lock b
+    wake_up(&b->fl_wait)
+    trigger UAF
+
+Fix it by removing this logic; this patch may also fix CVE-2019-19769.
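+
+Below is a hypothetical userspace sketch that drives the code paths in
+the diagram above: a waiter blocked in flock() is interrupted by a
+signal while the lock holder unlocks concurrently. The file path,
+timing, and signal choice are illustrative; this exercises the same
+paths but does not deterministically reproduce the race:
+
+  #include <fcntl.h>
+  #include <pthread.h>
+  #include <signal.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+  #include <sys/file.h>
+  #include <unistd.h>
+
+  #define LOCKFILE "/tmp/flock-uaf-test"  /* illustrative path */
+
+  static void on_sigusr1(int sig)
+  {
+          (void)sig;  /* no SA_RESTART: a blocked flock() returns EINTR */
+  }
+
+  /* Plays thread 3 in the diagram: blocks trying to add flock b.
+   * A separate open() is needed because flock() locks belong to the
+   * open file description, not the process. */
+  static void *waiter(void *arg)
+  {
+          int fd = open(LOCKFILE, O_RDWR);
+
+          (void)arg;
+          if (fd == -1 || flock(fd, LOCK_EX) == -1)
+                  perror("waiter");  /* expect EINTR after the signal */
+          return NULL;
+  }
+
+  int main(void)
+  {
+          struct sigaction sa;
+          pthread_t t;
+          int fd;
+
+          memset(&sa, 0, sizeof(sa));
+          sa.sa_handler = on_sigusr1;
+          sigaction(SIGUSR1, &sa, NULL);
+
+          fd = open(LOCKFILE, O_CREAT | O_RDWR, 0600);
+          if (fd == -1 || flock(fd, LOCK_EX) == -1)  /* flock a */
+                  exit(1);
+
+          pthread_create(&t, NULL, waiter, NULL);
+          sleep(1);  /* let the waiter block in the kernel */
+
+          /* "break by a signal" racing with the unlock (thread 2) */
+          pthread_kill(t, SIGUSR1);
+          flock(fd, LOCK_UN);
+
+          pthread_join(t, NULL);
+          return 0;
+  }
+
+Build with 'cc -pthread'. The sketch only exercises the
+interrupted-wait path; hitting the actual use-after-free window also
+requires the precise timing shown in the diagram.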
+
+Cc: stable@vger.kernel.org
+Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+Signed-off-by: yangerkun <yangerkun@huawei.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/locks.c | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/fs/locks.c b/fs/locks.c
+index 44b6da0328426..426b55d333d5b 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
+ {
+ int status = -ENOENT;
+
+- /*
+- * If fl_blocker is NULL, it won't be set again as this thread
+- * "owns" the lock and is the only one that might try to claim
+- * the lock. So it is safe to test fl_blocker locklessly.
+- * Also if fl_blocker is NULL, this waiter is not listed on
+- * fl_blocked_requests for some lock, so no other request can
+- * be added to the list of fl_blocked_requests for this
+- * request. So if fl_blocker is NULL, it is safe to
+- * locklessly check if fl_blocked_requests is empty. If both
+- * of these checks succeed, there is no need to take the lock.
+- */
+- if (waiter->fl_blocker == NULL &&
+- list_empty(&waiter->fl_blocked_requests))
+- return status;
+ spin_lock(&blocked_lock_lock);
+ if (waiter->fl_blocker)
+ status = 0;
+--
+2.20.1
+