NFS: return delegations from the end of an LRU when over the watermark
author    Christoph Hellwig <hch@lst.de>
Wed, 7 Jan 2026 07:27:14 +0000 (08:27 +0100)
committer Anna Schumaker <anna.schumaker@oracle.com>
Tue, 20 Jan 2026 19:49:47 +0000 (14:49 -0500)
Directly returning delegations on close when over the watermark is
rather suboptimal, as these delegations are much more likely to be reused
than those that have been unused for a long time.  Instead, switch to
returning unused delegations from a new LRU list when we are above the
threshold and there are reclaimable delegations.

Pass over referenced delegations during the first pass to give delegations
that aren't in active use but are frequently used for stat() or similar
another chance to avoid being instantly reclaimed.  This scheme works the
same way as the referenced flags in the VFS inode and dentry caches.
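
For illustration only, here is a minimal userspace sketch of the two-pass
second-chance scan described above.  The entry structure, array, counters
and watermark value are made up for the example; the real code operates on
struct nfs_delegation entries linked into server->delegations_lru and
checks the NFS_DELEGATION_REFERENCED bit.

/*
 * Toy model of the reclaim scan: pass 0 skips recently referenced
 * entries, pass 1 takes them too if we are still over the watermark.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES	8
#define WATERMARK	2

struct entry {
	bool referenced;	/* touched since being put on the LRU */
	bool reclaimed;		/* already moved to the return list */
};

static int nr_active = NR_ENTRIES;

static void scan_lru(struct entry *lru, int n)
{
	for (int pass = 0; pass < 2 && nr_active > WATERMARK; pass++) {
		for (int i = 0; i < n && nr_active > WATERMARK; i++) {
			if (lru[i].reclaimed)
				continue;
			/* first pass: give referenced entries a second chance */
			if (pass == 0 && lru[i].referenced)
				continue;
			lru[i].reclaimed = true;
			nr_active--;
			printf("pass %d: reclaimed entry %d\n", pass, i);
		}
	}
}

int main(void)
{
	struct entry lru[NR_ENTRIES] = {
		/* a few entries were recently used for stat() and the like */
		[0] = { .referenced = true },
		[2] = { .referenced = true },
		[5] = { .referenced = true },
	};

	scan_lru(lru, NR_ENTRIES);
	return 0;
}

Running it shows the unreferenced entries being reclaimed first; the
referenced ones are only taken in the second pass, when reclaiming the
unreferenced entries alone was not enough to get below the watermark.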

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
fs/nfs/client.c
fs/nfs/delegation.c
include/linux/nfs_fs_sb.h

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 65b3de91b441f6a6c61fd1974b339ed036f6554e..62aece00f810e0ca98caa0e378c8f3d3cd978d60 100644
@@ -1062,6 +1062,7 @@ struct nfs_server *nfs_alloc_server(void)
        INIT_LIST_HEAD(&server->delegations);
        spin_lock_init(&server->delegations_lock);
        INIT_LIST_HEAD(&server->delegations_return);
+       INIT_LIST_HEAD(&server->delegations_lru);
        INIT_LIST_HEAD(&server->layouts);
        INIT_LIST_HEAD(&server->state_owners_lru);
        INIT_LIST_HEAD(&server->ss_copies);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index d2d2dd7454660dcb412e053fe884ae435f414b9c..848cb55073fce532758a99eb882fe78516ab7b49 100644
@@ -660,6 +660,60 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
        return err;
 }
 
+static inline bool nfs_delegations_over_limit(struct nfs_server *server)
+{
+       return !list_empty_careful(&server->delegations_lru) &&
+               atomic_long_read(&server->nr_active_delegations) >
+               nfs_delegation_watermark;
+}
+
+static void nfs_delegations_return_from_lru(struct nfs_server *server)
+{
+       struct nfs_delegation *d, *n;
+       unsigned int pass = 0;
+       bool moved = false;
+
+retry:
+       spin_lock(&server->delegations_lock);
+       list_for_each_entry_safe(d, n, &server->delegations_lru, entry) {
+               if (!nfs_delegations_over_limit(server))
+                       break;
+               if (pass == 0 && test_bit(NFS_DELEGATION_REFERENCED, &d->flags))
+                       continue;
+               list_move_tail(&d->entry, &server->delegations_return);
+               moved = true;
+       }
+       spin_unlock(&server->delegations_lock);
+
+       /*
+        * If we are still over the limit, try to reclaim referenced delegations
+        * as well.
+        */
+       if (pass == 0 && nfs_delegations_over_limit(server)) {
+               pass++;
+               goto retry;
+       }
+
+       if (moved) {
+               set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+               nfs4_schedule_state_manager(server->nfs_client);
+       }
+}
+
+static void nfs_delegation_add_lru(struct nfs_server *server,
+               struct nfs_delegation *delegation)
+{
+       spin_lock(&server->delegations_lock);
+       if (list_empty(&delegation->entry)) {
+               list_add_tail(&delegation->entry, &server->delegations_lru);
+               refcount_inc(&delegation->refcount);
+       }
+       spin_unlock(&server->delegations_lock);
+
+       if (nfs_delegations_over_limit(server))
+               nfs_delegations_return_from_lru(server);
+}
+
 static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
 {
        struct nfs_delegation *d;
@@ -825,6 +879,7 @@ out_unlock:
  */
 void nfs4_inode_return_delegation_on_close(struct inode *inode)
 {
+       struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_delegation *delegation;
        bool return_now = false;
 
@@ -832,9 +887,7 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
        if (!delegation)
                return;
 
-       if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
-           atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
-           nfs_delegation_watermark) {
+       if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
                spin_lock(&delegation->lock);
                if (delegation->inode &&
                    list_empty(&NFS_I(inode)->open_files) &&
@@ -848,6 +901,8 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
        if (return_now) {
                nfs_clear_verifier_delegated(inode);
                nfs_end_delegation_return(inode, delegation, 0);
+       } else {
+               nfs_delegation_add_lru(server, delegation);
        }
        nfs_put_delegation(delegation);
 }
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e377b8c7086e932fb8dfbfd46757f9653cbb0cd2..bb13a294b69e686868e139fd2f3d113c0aa4985b 100644
@@ -261,6 +261,7 @@ struct nfs_server {
        struct list_head        delegations;
        spinlock_t              delegations_lock;
        struct list_head        delegations_return;
+       struct list_head        delegations_lru;
        atomic_long_t           nr_active_delegations;
        unsigned int            delegation_hash_mask;
        struct hlist_head       *delegation_hash_table;