rbd: retrieve and check lock owner twice before blocklisting
author     Ilya Dryomov <idryomov@gmail.com>
           Sat, 22 Jul 2023 18:28:08 +0000 (20:28 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 30 Aug 2023 14:23:12 +0000 (16:23 +0200)
[ Upstream commit 588159009d5b7a09c3e5904cffddbe4a4e170301 ]

An attempt to acquire the exclusive lock can race with the current lock
owner closing the image:

1. lock is held by client123, rbd_lock() returns -EBUSY
2. get_lock_owner_info() returns client123 instance details
3. client123 closes the image, lock is released
4. find_watcher() returns 0 as there is no matching watcher anymore
5. client123 instance gets erroneously blocklisted

Particularly impacted is the mirror snapshot scheduler in snapshot-based
mirroring, since it happens to open and close images a lot (images are
opened only for as long as it takes to create the next mirror snapshot,
and the same client instance is used for all images).

To reduce the potential for erroneous blocklisting, retrieve the lock
owner again after find_watcher() returns 0.  If it's still there, make
sure it matches the previously detected lock owner.

Cc: stable@vger.kernel.org # f38cb9d9c204: rbd: make get_lock_owner_info() return a single locker or NULL
Cc: stable@vger.kernel.org # 8ff2c64c9765: rbd: harden get_lock_owner_info() a bit
Cc: stable@vger.kernel.org
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
Signed-off-by: Sasha Levin <sashal@kernel.org>
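
For illustration only, here is a small userspace sketch of the double-check
described above.  Every name in it (struct owner, fetch_lock_owner(),
has_matching_watcher(), owners_equal(), should_blocklist(), and the toy
globals) is a hypothetical stand-in, not a kernel API; the real helpers are
rbd_lock(), get_lock_owner_info(), find_watcher() and the new locker_equal()
in the diff below.  The sketch simulates the race from the commit message and
shows why the second lookup prevents the erroneous blocklist.

/*
 * Hypothetical userspace sketch, not kernel code.
 * Build and run: cc -Wall -o recheck recheck.c && ./recheck
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct owner {
	char name[32];    /* entity name, e.g. "client123" */
	char cookie[32];  /* lock cookie */
};

/* Toy cluster state standing in for the OSD lock/watch metadata. */
static struct owner current_owner = { "client123", "auto 1" };
static bool lock_held = true;
static bool watch_present = false;  /* watch already gone when we look */

/* ~ get_lock_owner_info(): returns false if the lock is not held. */
static bool fetch_lock_owner(struct owner *out)
{
	if (!lock_held)
		return false;
	*out = current_owner;
	return true;
}

/* ~ find_watcher(): is the owner's watch still registered? */
static bool has_matching_watcher(const struct owner *o)
{
	(void)o;
	return watch_present;
}

/* ~ locker_equal(): same entity and same cookie? */
static bool owners_equal(const struct owner *a, const struct owner *b)
{
	return !strcmp(a->name, b->name) && !strcmp(a->cookie, b->cookie);
}

/*
 * Decide whether the apparently dead owner may be blocklisted.
 * Returns false when the caller should retry instead.
 */
static bool should_blocklist(void)
{
	struct owner owner, refreshed;

	if (!fetch_lock_owner(&owner))
		return false;                /* lock is free, just retry */
	if (has_matching_watcher(&owner))
		return false;                /* owner is alive, request the lock */

	/* Simulate the race: the owner closes the image right here. */
	lock_held = false;

	/*
	 * The fix: fetch the owner again and require an exact match with
	 * the one seen before, otherwise retry rather than blocklist a
	 * client that released the lock cleanly.
	 */
	if (!fetch_lock_owner(&refreshed) || !owners_equal(&owner, &refreshed))
		return false;

	return true;
}

int main(void)
{
	printf("blocklist stale owner: %s\n",
	       should_blocklist() ? "yes" : "no");
	return 0;
}

With the race simulated, the program prints "blocklist stale owner: no":
the second lookup turns the would-be blocklist into a retry, which is the
behaviour the patch below adds to rbd_try_lock().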
drivers/block/rbd.c

index dcb43c633c5e7772fd8d157e53adccbd1ce79a30..60d3a143ff4504581a53810faca7e4b6002930ca 100644
@@ -3914,6 +3914,15 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
        list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
 }
 
+static bool locker_equal(const struct ceph_locker *lhs,
+                        const struct ceph_locker *rhs)
+{
+       return lhs->id.name.type == rhs->id.name.type &&
+              lhs->id.name.num == rhs->id.name.num &&
+              !strcmp(lhs->id.cookie, rhs->id.cookie) &&
+              ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
+}
+
 static void free_locker(struct ceph_locker *locker)
 {
        if (locker)
@@ -4025,11 +4034,11 @@ out:
 static int rbd_try_lock(struct rbd_device *rbd_dev)
 {
        struct ceph_client *client = rbd_dev->rbd_client->client;
-       struct ceph_locker *locker;
+       struct ceph_locker *locker, *refreshed_locker;
        int ret;
 
        for (;;) {
-               locker = NULL;
+               locker = refreshed_locker = NULL;
 
                ret = rbd_lock(rbd_dev);
                if (ret != -EBUSY)
@@ -4049,6 +4058,16 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
                if (ret)
                        goto out; /* request lock or error */
 
+               refreshed_locker = get_lock_owner_info(rbd_dev);
+               if (IS_ERR(refreshed_locker)) {
+                       ret = PTR_ERR(refreshed_locker);
+                       refreshed_locker = NULL;
+                       goto out;
+               }
+               if (!refreshed_locker ||
+                   !locker_equal(locker, refreshed_locker))
+                       goto again;
+
                rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
                         ENTITY_NAME(locker->id.name));
 
@@ -4070,10 +4089,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
                }
 
 again:
+               free_locker(refreshed_locker);
                free_locker(locker);
        }
 
 out:
+       free_locker(refreshed_locker);
        free_locker(locker);
        return ret;
 }