RDMA/mlx5: Fix counter update on MR cache mkey creation
author Michael Guralnik <michaelgur@nvidia.com>
Tue, 3 Sep 2024 11:24:48 +0000 (14:24 +0300)
committer Leon Romanovsky <leon@kernel.org>
Mon, 9 Sep 2024 18:17:09 +0000 (21:17 +0300)
After an mkey is created, update the counter for pending mkeys before
rescheduling the work that is filling the cache.

Rescheduling the work with a full MR cache entry and a wrong 'pending'
counter will cause us to miss disabling the fill_to_high_water flag,
leaving the cache full but with an indication that it still needs to be
filled up to its full size (2 * limit).
The next time an mkey is taken from the cache, we'll unnecessarily
continue the process of filling the cache to its full size.
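
To illustrate the ordering, here is a minimal self-contained sketch
(hypothetical names and a simplified watermark check, not the driver
code) of why the pending counter has to be updated before the cache
state is re-evaluated:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of a cache entry; 2 * limit is the full size. */
    struct ent {
            unsigned int queued;   /* mkeys already in the cache */
            unsigned int pending;  /* mkey creations still in flight */
            unsigned int limit;
            bool fill_to_high_water;
    };

    /* Simplified stand-in for the cache adjustment logic. */
    static void adjust(struct ent *e)
    {
            /* Stop filling once the cache has reached its full size. */
            if (e->queued + e->pending == 2 * e->limit)
                    e->fill_to_high_water = false;
    }

    /* Completion of one async mkey creation (cf. create_mkey_callback()). */
    static void creation_done(struct ent *e)
    {
            e->queued++;   /* the new mkey was pushed into the cache */
            e->pending--;  /* must happen before adjust(), otherwise the
                              finished mkey is counted both as queued and
                              as pending and the check above never fires */
            adjust(e);
    }

    int main(void)
    {
            struct ent e = { .queued = 7, .pending = 1, .limit = 4,
                             .fill_to_high_water = true };

            creation_done(&e);
            printf("fill_to_high_water = %d\n", e.fill_to_high_water); /* 0 */
            return 0;
    }

In the driver both updates happen under the same mkeys_queue lock, so
doing the decrement first simply lets the adjustment see a consistent
count of queued plus in-flight mkeys.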

Fixes: 57e7071683ef ("RDMA/mlx5: Implement mkeys management via LIFO queue")
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/0f44f462ba22e45f72cb3d0ec6a748634086b8d0.1725362530.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mlx5/mr.c

index 511d5049135226bfede877e05d5a8ee4942b9511..0f90086327fc2d73177de001d5e08183f1ccbecf 100644
@@ -214,9 +214,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 
        spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
        push_mkey_locked(ent, mkey_out->mkey);
+       ent->pending--;
        /* If we are doing fill_to_high_water then keep going. */
        queue_adjust_cache_locked(ent);
-       ent->pending--;
        spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
        kfree(mkey_out);
 }