--- /dev/null
+From 5cc35e1a006e822e8ed36431ec257870347d52cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2020 15:25:56 +0800
+Subject: locks: fix a potential use-after-free problem when wakeup a waiter
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: yangerkun <yangerkun@huawei.com>
+
+[ Upstream commit 6d390e4b5d48ec03bb87e63cf0a2bff5f4e116da ]
+
+Commit 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+added logic to check waiter->fl_blocker without holding
+blocked_lock_lock, which can trigger a use-after-free when we wake up a
+waiter:
+
+Thread 1 has created a write flock, a, on a file; thread 2 now tries to
+unlock and delete flock a while thread 3 tries to add flock b on the
+same file.
+
+Thread 2                                Thread 3
+                                        flock syscall (create flock b)
+                                        ...flock_lock_inode_wait
+                                           flock_lock_inode (inserts b's
+                                           fl_blocked_member into flock
+                                           a's fl_blocked_requests list)
+                                           sleep
+flock syscall (unlock)
+...flock_lock_inode_wait
+   locks_delete_lock_ctx
+   ...__locks_wake_up_blocks
+      __locks_delete_block(
+         b->fl_blocker = NULL)
+      ...
+                                           interrupted by a signal
+                                           locks_delete_block
+                                              b->fl_blocker == NULL &&
+                                              list_empty(&b->fl_blocked_requests),
+                                              so return directly
+                                           locks_free_lock b
+      wake_up(&b->fl_wait)
+      use-after-free
+
+Fix it by removing this logic; this patch may also fix CVE-2019-19769.
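+
+A minimal userspace sketch of the two racing syscalls is below
+(illustrative only, not from the original report: the path, timing and
+signal choice are arbitrary, and actually hitting the freed-memory
+window depends on scheduling):
+
+    #include <fcntl.h>
+    #include <pthread.h>
+    #include <signal.h>
+    #include <stdio.h>
+    #include <sys/file.h>
+    #include <unistd.h>
+
+    static const char path[] = "/tmp/lockfile";
+
+    static void on_alarm(int sig) { (void)sig; }  /* just interrupts the wait */
+
+    static void *blocker(void *arg)
+    {
+            /* A second open file description, so this flock really blocks. */
+            int fd2 = open(path, O_RDWR);
+
+            (void)arg;
+            /* flock b: sleeps on flock a's fl_blocked_requests list. */
+            if (flock(fd2, LOCK_EX) == -1)
+                    perror("flock(LOCK_EX)");     /* EINTR if the signal lands */
+            close(fd2);
+            return NULL;
+    }
+
+    int main(void)
+    {
+            struct sigaction sa = { .sa_handler = on_alarm };  /* no SA_RESTART */
+            pthread_t t;
+            int fd = open(path, O_CREAT | O_RDWR, 0600);
+
+            sigemptyset(&sa.sa_mask);
+            sigaction(SIGALRM, &sa, NULL);
+            flock(fd, LOCK_EX);               /* flock a */
+            pthread_create(&t, NULL, blocker, NULL);
+            usleep(10000);                    /* let the blocker go to sleep */
+            pthread_kill(t, SIGALRM);         /* "interrupted by a signal" */
+            flock(fd, LOCK_UN);               /* races with locks_delete_block */
+            pthread_join(t, NULL);
+            return 0;
+    }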
+
+Cc: stable@vger.kernel.org
+Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+Signed-off-by: yangerkun <yangerkun@huawei.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/locks.c | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/fs/locks.c b/fs/locks.c
+index 44b6da0328426..426b55d333d5b 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
+ {
+ int status = -ENOENT;
+
+- /*
+- * If fl_blocker is NULL, it won't be set again as this thread
+- * "owns" the lock and is the only one that might try to claim
+- * the lock. So it is safe to test fl_blocker locklessly.
+- * Also if fl_blocker is NULL, this waiter is not listed on
+- * fl_blocked_requests for some lock, so no other request can
+- * be added to the list of fl_blocked_requests for this
+- * request. So if fl_blocker is NULL, it is safe to
+- * locklessly check if fl_blocked_requests is empty. If both
+- * of these checks succeed, there is no need to take the lock.
+- */
+- if (waiter->fl_blocker == NULL &&
+- list_empty(&waiter->fl_blocked_requests))
+- return status;
+ spin_lock(&blocked_lock_lock);
+ if (waiter->fl_blocker)
+ status = 0;
+--
+2.20.1
+
--- /dev/null
+From 4686a391129948a39c12d01f2510deed85fbe4c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2020 07:52:21 -0400
+Subject: locks: reinstate locks_delete_block optimization
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit dcf23ac3e846ca0cf626c155a0e3fcbbcf4fae8a ]
+
+There is a measurable performance impact in some synthetic tests due to
+commit 6d390e4b5d48 ("locks: fix a potential use-after-free problem
+when wakeup a waiter"), which removed the lockless fast path from
+locks_delete_block(). Instead of removing the optimization, fix the
+underlying race by clearing the fl_blocker pointer only after the
+wake_up, using explicit acquire/release semantics.
+
+This does mean that we can no longer use the clearing of fl_blocker as
+the wait condition: the wakeup now arrives before the pointer is
+cleared, so a woken waiter could re-check a still-set fl_blocker and go
+back to sleep with no wakeup to follow. Switch the waiters over to
+checking whether the fl_blocked_member list_head is empty; the waker
+removes the waiter from the blocked list before issuing the wakeup.
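+
+For reference, a minimal userspace sketch of that handoff follows (an
+illustration using C11 atomics in place of the kernel's
+smp_load_acquire()/smp_store_release(); the struct and helper names are
+made up for the example):
+
+    #include <stdatomic.h>
+    #include <stdbool.h>
+    #include <stddef.h>
+
+    struct waiter {
+            _Atomic(void *) blocker;  /* stands in for fl_blocker */
+            atomic_bool on_list;      /* stands in for fl_blocked_member */
+    };
+
+    /* Waker side, mirroring __locks_wake_up_blocks(): after the release
+     * store of NULL, the waker must not touch *w again. */
+    static void wake_one(struct waiter *w)
+    {
+            atomic_store_explicit(&w->on_list, false,
+                                  memory_order_relaxed); /* list_del_init() */
+            /* wake_up(&w->fl_wait) would go here */
+            atomic_store_explicit(&w->blocker, NULL, memory_order_release);
+    }
+
+    /* New wait condition, mirroring the wait_event_interruptible()
+     * callers: sleep until we are off the blocked list, not until
+     * blocker is NULL, since the wakeup arrives before the clearing. */
+    static bool done_waiting(struct waiter *w)
+    {
+            return !atomic_load_explicit(&w->on_list, memory_order_relaxed);
+    }
+
+    /* Cleanup fast path, mirroring locks_delete_block(): the acquire
+     * load pairs with the release store in wake_one(), so observing
+     * NULL means the waker is done with *w and it may be freed without
+     * taking blocked_lock_lock.  (The kernel also checks the waiter's
+     * own fl_blocked_requests list under the same guarantee.) */
+    static bool fast_delete(struct waiter *w)
+    {
+            return atomic_load_explicit(&w->blocker,
+                                        memory_order_acquire) == NULL;
+    }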
+
+Reviewed-by: yangerkun <yangerkun@huawei.com>
+Reviewed-by: NeilBrown <neilb@suse.de>
+Fixes: 6d390e4b5d48 ("locks: fix a potential use-after-free problem when wakeup a waiter")
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/file.c | 3 ++-
+ fs/locks.c | 54 ++++++++++++++++++++++++++++++++++++++++++++------
+ 2 files changed, 50 insertions(+), 7 deletions(-)
+
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0dbe47e897208..35c55cf38a358 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1173,7 +1173,8 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
+ rc = posix_lock_file(file, flock, NULL);
+ up_write(&cinode->lock_sem);
+ if (rc == FILE_LOCK_DEFERRED) {
+- rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
++ rc = wait_event_interruptible(flock->fl_wait,
++ list_empty(&flock->fl_blocked_member));
+ if (!rc)
+ goto try_again;
+ locks_delete_block(flock);
+diff --git a/fs/locks.c b/fs/locks.c
+index 426b55d333d5b..b8a31c1c4fff3 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -725,7 +725,6 @@ static void __locks_delete_block(struct file_lock *waiter)
+ {
+ locks_delete_global_blocked(waiter);
+ list_del_init(&waiter->fl_blocked_member);
+- waiter->fl_blocker = NULL;
+ }
+
+ static void __locks_wake_up_blocks(struct file_lock *blocker)
+@@ -740,6 +739,13 @@ static void __locks_wake_up_blocks(struct file_lock *blocker)
+ waiter->fl_lmops->lm_notify(waiter);
+ else
+ wake_up(&waiter->fl_wait);
++
++ /*
++ * The setting of fl_blocker to NULL marks the "done"
++ * point in deleting a block. Paired with acquire at the top
++ * of locks_delete_block().
++ */
++ smp_store_release(&waiter->fl_blocker, NULL);
+ }
+ }
+
+@@ -753,11 +759,42 @@ int locks_delete_block(struct file_lock *waiter)
+ {
+ int status = -ENOENT;
+
++ /*
++ * If fl_blocker is NULL, it won't be set again as this thread "owns"
++ * the lock and is the only one that might try to claim the lock.
++ *
++ * We use acquire/release to manage fl_blocker so that we can
++ * optimize away taking the blocked_lock_lock in many cases.
++ *
++ * The smp_load_acquire guarantees two things:
++ *
++ * 1/ that fl_blocked_requests can be tested locklessly. If something
++ * was recently added to that list it must have been in a locked region
++ * *before* the locked region when fl_blocker was set to NULL.
++ *
++ * 2/ that no other thread is accessing 'waiter', so it is safe to free
++ * it. __locks_wake_up_blocks is careful not to touch waiter after
++ * fl_blocker is released.
++ *
++ * If a lockless check of fl_blocker shows it to be NULL, we know that
++ * no new locks can be inserted into its fl_blocked_requests list, and
++ * can avoid doing anything further if the list is empty.
++ */
++ if (!smp_load_acquire(&waiter->fl_blocker) &&
++ list_empty(&waiter->fl_blocked_requests))
++ return status;
++
+ spin_lock(&blocked_lock_lock);
+ if (waiter->fl_blocker)
+ status = 0;
+ __locks_wake_up_blocks(waiter);
+ __locks_delete_block(waiter);
++
++ /*
++ * The setting of fl_blocker to NULL marks the "done" point in deleting
++ * a block. Paired with acquire at the top of this function.
++ */
++ smp_store_release(&waiter->fl_blocker, NULL);
+ spin_unlock(&blocked_lock_lock);
+ return status;
+ }
+@@ -1350,7 +1387,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ error = posix_lock_inode(inode, fl, NULL);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+- error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
++ error = wait_event_interruptible(fl->fl_wait,
++ list_empty(&fl->fl_blocked_member));
+ if (error)
+ break;
+ }
+@@ -1435,7 +1473,8 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
+ error = posix_lock_inode(inode, &fl, NULL);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+- error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
++ error = wait_event_interruptible(fl.fl_wait,
++ list_empty(&fl.fl_blocked_member));
+ if (!error) {
+ /*
+ * If we've been sleeping someone might have
+@@ -1638,7 +1677,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+
+ locks_dispose_list(&dispose);
+ error = wait_event_interruptible_timeout(new_fl->fl_wait,
+- !new_fl->fl_blocker, break_time);
++ list_empty(&new_fl->fl_blocked_member),
++ break_time);
+
+ percpu_down_read(&file_rwsem);
+ spin_lock(&ctx->flc_lock);
+@@ -2122,7 +2162,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ error = flock_lock_inode(inode, fl);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+- error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
++ error = wait_event_interruptible(fl->fl_wait,
++ list_empty(&fl->fl_blocked_member));
+ if (error)
+ break;
+ }
+@@ -2399,7 +2440,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
+ error = vfs_lock_file(filp, cmd, fl, NULL);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+- error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
++ error = wait_event_interruptible(fl->fl_wait,
++ list_empty(&fl->fl_blocked_member));
+ if (error)
+ break;
+ }
+--
+2.20.1
+