+++ /dev/null
-From 68edfb98d4afd5c15eaacf0c62d5ad0991c02e1b Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 4 Mar 2020 15:25:56 +0800
-Subject: locks: fix a potential use-after-free problem when wakeup a waiter
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: yangerkun <yangerkun@huawei.com>
-
-[ Upstream commit 6d390e4b5d48ec03bb87e63cf0a2bff5f4e116da ]
-
-Commit 16306a61d3b7 ("fs/locks: always delete_block after waiting.") added
-logic to check waiter->fl_blocker without holding blocked_lock_lock. This
-can trigger a use-after-free (UAF) when we try to wake up a waiter:
-
-Thread 1 has created a write flock a on a file; now thread 2 tries to
-unlock and delete flock a, while thread 3 tries to add flock b on the
-same file.
-
-Thread 2                        Thread 3
-                                flock syscall (create flock b)
-                                ...flock_lock_inode_wait
-                                   flock_lock_inode (inserts our
-                                   fl_blocked_member into flock a's
-                                   fl_blocked_requests)
-                                   sleep
-flock syscall (unlock)
-...flock_lock_inode_wait
-   locks_delete_lock_ctx
-   ...__locks_wake_up_blocks
-      __locks_delete_block(
-          b->fl_blocker = NULL)
-   ...
-                                interrupted by a signal
-                                locks_delete_block
-                                   b->fl_blocker == NULL &&
-                                   list_empty(&b->fl_blocked_requests)
-                                   success, return directly
-                                locks_free_lock(b)
-   wake_up(&b->fl_wait)
-   trigger UAF
-
-Fix it by removing this logic; this patch may also fix CVE-2019-19769
-(see the simplified sketch after this patch).
-
-Cc: stable@vger.kernel.org
-Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
-Signed-off-by: yangerkun <yangerkun@huawei.com>
-Signed-off-by: Jeff Layton <jlayton@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/locks.c | 14 --------------
- 1 file changed, 14 deletions(-)
-
-diff --git a/fs/locks.c b/fs/locks.c
-index 44b6da0328426..426b55d333d5b 100644
---- a/fs/locks.c
-+++ b/fs/locks.c
-@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
- {
- int status = -ENOENT;
-
-- /*
-- * If fl_blocker is NULL, it won't be set again as this thread
-- * "owns" the lock and is the only one that might try to claim
-- * the lock. So it is safe to test fl_blocker locklessly.
-- * Also if fl_blocker is NULL, this waiter is not listed on
-- * fl_blocked_requests for some lock, so no other request can
-- * be added to the list of fl_blocked_requests for this
-- * request. So if fl_blocker is NULL, it is safe to
-- * locklessly check if fl_blocked_requests is empty. If both
-- * of these checks succeed, there is no need to take the lock.
-- */
-- if (waiter->fl_blocker == NULL &&
-- list_empty(&waiter->fl_blocked_requests))
-- return status;
- spin_lock(&blocked_lock_lock);
- if (waiter->fl_blocker)
- status = 0;
---
-2.20.1
-
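The race above comes down to the waiter's lockless fast path observing the
fl_blocker = NULL store that __locks_wake_up_blocks() makes before its
wake_up(), and freeing the request inside that window. Below is a minimal
user-space sketch of why the fix (always taking blocked_lock_lock in
locks_delete_block()) closes the window. It is not the kernel code: the
pthread mutex and condition variable stand in for blocked_lock_lock and
wake_up(), the singly linked fl_next pointer replaces the kernel's list_head,
and delete_block()/wake_up_blocks() are simplified stand-ins for
locks_delete_block() and __locks_wake_up_blocks().

/* uaf_sketch.c -- illustrative only; names loosely mirror fs/locks.c. */
#include <pthread.h>
#include <stdlib.h>

struct file_lock {
	struct file_lock *fl_blocker;          /* lock we are blocked on, or NULL */
	struct file_lock *fl_blocked_requests; /* requests blocked on us          */
	struct file_lock *fl_next;             /* linkage on the blocker's list   */
	pthread_cond_t fl_wait;                /* stand-in for wake_up(&fl_wait)  */
};

static pthread_mutex_t blocked_lock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Waker side, modelled on __locks_wake_up_blocks(): each waiter is unlinked
 * and signalled inside one blocked_lock_lock critical section. */
static void wake_up_blocks(struct file_lock *blocker)
{
	pthread_mutex_lock(&blocked_lock_lock);
	while (blocker->fl_blocked_requests) {
		struct file_lock *waiter = blocker->fl_blocked_requests;

		blocker->fl_blocked_requests = waiter->fl_next;
		waiter->fl_blocker = NULL;              /* __locks_delete_block() */
		pthread_cond_signal(&waiter->fl_wait);  /* wake_up(&fl_wait)      */
	}
	pthread_mutex_unlock(&blocked_lock_lock);
}

/* Waiter side as left by the fix: no lockless fast path, always take
 * blocked_lock_lock.  Whoever gets the lock first wins; either way the
 * caller may free the waiter as soon as this returns. */
static int delete_block(struct file_lock *waiter)
{
	int status = -1;                            /* -ENOENT in the kernel */

	pthread_mutex_lock(&blocked_lock_lock);
	if (waiter->fl_blocker) {
		struct file_lock **p = &waiter->fl_blocker->fl_blocked_requests;

		while (*p && *p != waiter)              /* unlink ourselves */
			p = &(*p)->fl_next;
		if (*p)
			*p = waiter->fl_next;
		status = 0;
	}
	waiter->fl_blocker = NULL;
	pthread_mutex_unlock(&blocked_lock_lock);
	return status;
}

static void *unlock_thread(void *arg)
{
	wake_up_blocks(arg);                        /* thread 2: flock (unlock) */
	return NULL;
}

int main(void)
{
	static struct file_lock flock_a;            /* the lock being released */
	struct file_lock *flock_b = calloc(1, sizeof(*flock_b));
	pthread_t t;

	if (!flock_b)
		return 1;
	pthread_cond_init(&flock_b->fl_wait, NULL);
	flock_b->fl_blocker = &flock_a;             /* b blocks on a */
	flock_a.fl_blocked_requests = flock_b;

	pthread_create(&t, NULL, unlock_thread, &flock_a);

	/* Thread 3, interrupted by a signal: give up waiting and free b.
	 * The unconditional lock in delete_block() is what makes this free
	 * safe against the pthread_cond_signal() in wake_up_blocks(). */
	delete_block(flock_b);
	pthread_cond_destroy(&flock_b->fl_wait);
	free(flock_b);

	pthread_join(t, NULL);
	return 0;
}

Either the waiter unlinks itself first, so the waker never finds it, or the
waker finishes its unlink-and-signal critical section first, so the free
cannot start until the waker is done with the request. The removed fast path
let the free slip in between the NULL store and the signal, which is exactly
the UAF in the timeline above.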
net-rmnet-fix-packet-forwarding-in-rmnet-bridge-mode.patch
sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch
jbd2-fix-data-races-at-struct-journal_head.patch
-locks-fix-a-potential-use-after-free-problem-when-wa.patch
blk-mq-insert-flush-request-to-the-front-of-dispatch.patch
+++ /dev/null
-From 4cddc5e10cc4b3105fc8e2d13b1896d5c000958e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 4 Mar 2020 15:25:56 +0800
-Subject: locks: fix a potential use-after-free problem when wakeup a waiter
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: yangerkun <yangerkun@huawei.com>
-
-[ Upstream commit 6d390e4b5d48ec03bb87e63cf0a2bff5f4e116da ]
-
-Commit 16306a61d3b7 ("fs/locks: always delete_block after waiting.") added
-logic to check waiter->fl_blocker without holding blocked_lock_lock. This
-can trigger a use-after-free (UAF) when we try to wake up a waiter:
-
-Thread 1 has created a write flock a on a file; now thread 2 tries to
-unlock and delete flock a, while thread 3 tries to add flock b on the
-same file.
-
-Thread 2                        Thread 3
-                                flock syscall (create flock b)
-                                ...flock_lock_inode_wait
-                                   flock_lock_inode (inserts our
-                                   fl_blocked_member into flock a's
-                                   fl_blocked_requests)
-                                   sleep
-flock syscall (unlock)
-...flock_lock_inode_wait
-   locks_delete_lock_ctx
-   ...__locks_wake_up_blocks
-      __locks_delete_block(
-          b->fl_blocker = NULL)
-   ...
-                                interrupted by a signal
-                                locks_delete_block
-                                   b->fl_blocker == NULL &&
-                                   list_empty(&b->fl_blocked_requests)
-                                   success, return directly
-                                locks_free_lock(b)
-   wake_up(&b->fl_wait)
-   trigger UAF
-
-Fix it by removing this logic; this patch may also fix CVE-2019-19769.
-
-Cc: stable@vger.kernel.org
-Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
-Signed-off-by: yangerkun <yangerkun@huawei.com>
-Signed-off-by: Jeff Layton <jlayton@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- fs/locks.c | 14 --------------
- 1 file changed, 14 deletions(-)
-
-diff --git a/fs/locks.c b/fs/locks.c
-index 44b6da0328426..426b55d333d5b 100644
---- a/fs/locks.c
-+++ b/fs/locks.c
-@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
- {
- int status = -ENOENT;
-
-- /*
-- * If fl_blocker is NULL, it won't be set again as this thread
-- * "owns" the lock and is the only one that might try to claim
-- * the lock. So it is safe to test fl_blocker locklessly.
-- * Also if fl_blocker is NULL, this waiter is not listed on
-- * fl_blocked_requests for some lock, so no other request can
-- * be added to the list of fl_blocked_requests for this
-- * request. So if fl_blocker is NULL, it is safe to
-- * locklessly check if fl_blocked_requests is empty. If both
-- * of these checks succeed, there is no need to take the lock.
-- */
-- if (waiter->fl_blocker == NULL &&
-- list_empty(&waiter->fl_blocked_requests))
-- return status;
- spin_lock(&blocked_lock_lock);
- if (waiter->fl_blocker)
- status = 0;
---
-2.20.1
-
mlxsw-pci-wait-longer-before-accessing-the-device-af.patch
net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch
jbd2-fix-data-races-at-struct-journal_head.patch
-locks-fix-a-potential-use-after-free-problem-when-wa.patch
blk-mq-insert-flush-request-to-the-front-of-dispatch.patch