RDMA/mlx5: Fix counter update on MR cache mkey creation
author    Michael Guralnik <michaelgur@nvidia.com>
          Tue, 3 Sep 2024 11:24:48 +0000 (14:24 +0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 4 Oct 2024 14:33:10 +0000 (16:33 +0200)
[ Upstream commit 6f5cd6ac9a4201e4ba6f10b76a9da8044d6e38b0 ]

After an mkey is created, update the counter for pending mkeys before
rescheduling the work that is filling the cache.

Rescheduling the work with a full MR cache entry and a wrong 'pending'
counter will cause us to miss disabling the fill_to_high_water flag,
leaving the cache full but with an indication that it still needs to
be filled up to its full size (2 * limit).
The next time an mkey is taken from the cache, we will unnecessarily
continue the process of filling the cache to its full size (see the
simplified sketch after the diff below).

Fixes: 57e7071683ef ("RDMA/mlx5: Implement mkeys management via LIFO queue")
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/0f44f462ba22e45f72cb3d0ec6a748634086b8d0.1725362530.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/infiniband/hw/mlx5/mr.c

index d3c1f63791a2b6354ded7589733df36591fc69cb..a03557c8416e81f241d45541686515fd1f2f6bb9 100644
@@ -211,9 +211,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 
        spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
        push_mkey_locked(ent, mkey_out->mkey);
+       ent->pending--;
        /* If we are doing fill_to_high_water then keep going. */
        queue_adjust_cache_locked(ent);
-       ent->pending--;
        spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
        kfree(mkey_out);
 }
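
For illustration, below is a minimal user-space sketch of why the ordering
matters. It is not the mlx5 driver code: cache_ent, queue_adjust(),
cache_work() and mkey_done() are simplified stand-ins for mlx5_cache_ent,
queue_adjust_cache_locked(), the cache fill work and create_mkey_callback(),
and the fill logic is reduced to the "fill to 2 * limit" decision. With the
original ordering the adjustment logic sees a stale 'pending' count on the
last completions, so the fill stalls one mkey short of the high water mark
and fill_to_high_water is never cleared; with the patched ordering the cache
reaches 2 * limit and the flag is disabled.

/*
 * Simplified model of the 'pending' counter ordering issue.
 * NOT the mlx5 driver code; names and logic are reduced stand-ins.
 * Build with: gcc -std=c99 -o model model.c
 */
#include <stdbool.h>
#include <stdio.h>

struct cache_ent {
	int ci;                  /* mkeys currently stored in the cache */
	int pending;             /* async mkey creations still in flight */
	int limit;               /* low-water mark; high water is 2 * limit */
	bool fill_to_high_water; /* keep filling until 2 * limit is reached */
	bool work_queued;        /* the fill work is scheduled to run */
};

/* Simplified stand-in for queue_adjust_cache_locked(). */
static void queue_adjust(struct cache_ent *ent)
{
	if (ent->ci < ent->limit) {
		ent->fill_to_high_water = true;
		ent->work_queued = true;
	} else if (ent->fill_to_high_water &&
		   ent->ci + ent->pending < 2 * ent->limit) {
		ent->work_queued = true;          /* keep filling */
	} else if (ent->ci == 2 * ent->limit) {
		ent->fill_to_high_water = false;  /* high water reached */
	}
}

/* Simplified stand-in for the cache fill work: issue one async create. */
static void cache_work(struct cache_ent *ent)
{
	ent->work_queued = false;
	if (ent->fill_to_high_water &&
	    ent->ci + ent->pending < 2 * ent->limit)
		ent->pending++;
}

/* Completion of one async mkey creation (create_mkey_callback()). */
static void mkey_done(struct cache_ent *ent, bool fixed)
{
	ent->ci++;                                /* push_mkey_locked() */
	if (fixed)
		ent->pending--;                   /* patched order */
	queue_adjust(ent);
	if (!fixed)
		ent->pending--;                   /* original order: too late */
}

static void run(bool fixed)
{
	struct cache_ent ent = { .ci = 3, .limit = 4 };

	queue_adjust(&ent);                       /* below limit: start filling */
	while (ent.work_queued || ent.pending) {
		if (ent.work_queued)
			cache_work(&ent);
		if (ent.pending)
			mkey_done(&ent, fixed);
	}
	printf("%-12s ci=%d fill_to_high_water=%d\n",
	       fixed ? "fixed order:" : "old order:", ent.ci,
	       ent.fill_to_high_water);
}

int main(void)
{
	run(false);  /* old order: fill stalls, flag stays set */
	run(true);   /* fixed order: cache reaches 2 * limit, flag cleared */
	return 0;
}

Expected output of this sketch:

old order:   ci=7 fill_to_high_water=1
fixed order: ci=8 fill_to_high_water=0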