]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm/vmalloc.c: optimize code in decay_va_pool_node() a little bit
authorBaoquan He <bhe@redhat.com>
Fri, 18 Apr 2025 22:36:51 +0000 (06:36 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 13 May 2025 06:50:32 +0000 (23:50 -0700)
When purging lazily freed vmap areas, VAs stored in vn->pool[] will also be
moved into the free vmap tree, partially or completely as appropriate; that
is done in decay_va_pool_node().  When doing that, for each pool of a node,
the whole list is detached from the pool for handling.  At this time, the
pool is empty.  It's not necessary to update the pool size each time one
VA is removed and added into the free vmap tree.

Here change the code to update the pool size only once, when attaching the pool back.

Link: https://lkml.kernel.org/r/20250418223653.243436-4-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Shivank Garg <shivankg@amd.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 26d86ab5e59e1198ec8f4a458192eb0a9f65aa7c..e58c2ed4bcba7b40724bb3a70e54168864c3c8cc 100644 (file)
@@ -2149,7 +2149,7 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
        LIST_HEAD(decay_list);
        struct rb_root decay_root = RB_ROOT;
        struct vmap_area *va, *nva;
-       unsigned long n_decay;
+       unsigned long n_decay, pool_len;
        int i;
 
        for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
@@ -2163,22 +2163,20 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
                list_replace_init(&vn->pool[i].head, &tmp_list);
                spin_unlock(&vn->pool_lock);
 
-               if (full_decay)
-                       WRITE_ONCE(vn->pool[i].len, 0);
+               pool_len = n_decay = vn->pool[i].len;
+               WRITE_ONCE(vn->pool[i].len, 0);
 
                /* Decay a pool by ~25% out of left objects. */
-               n_decay = vn->pool[i].len >> 2;
+               if (!full_decay)
+                       n_decay >>= 2;
+               pool_len -= n_decay;
 
                list_for_each_entry_safe(va, nva, &tmp_list, list) {
+                       if (!n_decay--)
+                               break;
+
                        list_del_init(&va->list);
                        merge_or_add_vmap_area(va, &decay_root, &decay_list);
-
-                       if (!full_decay) {
-                               WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
-
-                               if (!--n_decay)
-                                       break;
-                       }
                }
 
                /*
@@ -2187,9 +2185,10 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
                 * can populate the pool therefore a simple list replace
                 * operation takes place here.
                 */
-               if (!full_decay && !list_empty(&tmp_list)) {
+               if (!list_empty(&tmp_list)) {
                        spin_lock(&vn->pool_lock);
                        list_replace_init(&tmp_list, &vn->pool[i].head);
+                       WRITE_ONCE(vn->pool[i].len, pool_len);
                        spin_unlock(&vn->pool_lock);
                }
        }