af_unix: Bulk update unix_tot_inflight/unix_inflight when queuing skb.
author Kuniyuki Iwashima <kuniyu@amazon.com>
Wed, 21 May 2025 15:27:10 +0000 (16:27 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 4 Jun 2025 12:40:23 +0000 (14:40 +0200)
commit 22c3c0c52d32f41cc38cd936ea0c93f22ced3315 upstream.

Currently, we track the number of inflight sockets in two variables.
unix_tot_inflight is the total number of inflight AF_UNIX sockets on
the host, and user->unix_inflight is the number of inflight fds per
user.
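
For reference, the two counters look roughly like this (trimmed
sketch; the user_struct excerpt is paraphrased from
include/linux/sched/user.h and omits unrelated fields):

    /* net/unix/garbage.c: host-wide count of inflight AF_UNIX
     * sockets; read locklessly by wait_for_unix_gc(), hence the
     * READ_ONCE()/WRITE_ONCE() pairing.
     */
    unsigned int unix_tot_inflight;

    /* include/linux/sched/user.h: per-user count of fds in flight
     * over AF_UNIX sockets.
     */
    struct user_struct {
            unsigned long unix_inflight;
            /* other fields omitted */
    };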

We update them one by one in unix_inflight(), although the whole
batch could be accounted for at once.  Also, sendmsg() can fail even
after unix_inflight(), in which case we must acquire unix_gc_lock
again just to decrement the counters back.
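
Concretely, the old pattern on the send side was, in effect, the
following (simplified sketch, not the literal call sites; queue_skb()
is a hypothetical stand-in for the actual queueing step):

    int i, err;

    /* One unix_gc_lock round-trip per fd: each call bumps both
     * unix_tot_inflight and user->unix_inflight by one.
     */
    for (i = 0; i < fpl->count; i++)
            unix_inflight(fpl->user, fpl->fp[i]);

    err = queue_skb(sk, skb);       /* hypothetical stand-in */
    if (err) {
            /* Failure after unix_inflight(): take unix_gc_lock
             * again for every fd just to undo the accounting.
             */
            for (i = 0; i < fpl->count; i++)
                    unix_notinflight(fpl->user, fpl->fp[i]);
    }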

Let's bulk update the counters in unix_add_edges() and unix_del_edges(),
which are called only for successfully passed fds.
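
After the change, the accounting piggybacks on the edge bookkeeping
that unix_add_edges() already performs under unix_gc_lock (condensed
sketch of the hunks below; note that unix_tot_inflight moves by
fpl->count_unix, AF_UNIX sockets only, while the per-user counter
moves by fpl->count, all passed fds):

    spin_lock(&unix_gc_lock);

    /* ... allocate vertices and link edges for the fds ... */

    /* One bulk update instead of fpl->count lock round-trips. */
    WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
    WRITE_ONCE(fpl->user->unix_inflight,
               fpl->user->unix_inflight + fpl->count);

    spin_unlock(&unix_gc_lock);

unix_del_edges() performs the mirror-image subtraction, so a sendmsg()
that fails before queueing never touches the counters at all.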

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20240325202425.60930-5-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Lee Jones <lee@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
net/unix/garbage.c

index b5b4a200dbf3b3c52ba7afdba56394b8822face5..f7041fc230008c49617781563797acc2f2b47a2c 100644
@@ -144,6 +144,7 @@ static void unix_free_vertices(struct scm_fp_list *fpl)
 }
 
 DEFINE_SPINLOCK(unix_gc_lock);
+unsigned int unix_tot_inflight;
 
 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 {
@@ -168,7 +169,10 @@ void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
                unix_add_edge(fpl, edge);
        } while (i < fpl->count_unix);
 
+       WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
 out:
+       WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
+
        spin_unlock(&unix_gc_lock);
 
        fpl->inflight = true;
@@ -191,7 +195,10 @@ void unix_del_edges(struct scm_fp_list *fpl)
                unix_del_edge(fpl, edge);
        } while (i < fpl->count_unix);
 
+       WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
 out:
+       WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
+
        spin_unlock(&unix_gc_lock);
 
        fpl->inflight = false;
@@ -234,7 +241,6 @@ void unix_destroy_fpl(struct scm_fp_list *fpl)
        unix_free_vertices(fpl);
 }
 
-unsigned int unix_tot_inflight;
 static LIST_HEAD(gc_candidates);
 static LIST_HEAD(gc_inflight_list);
 
@@ -255,13 +261,8 @@ void unix_inflight(struct user_struct *user, struct file *filp)
                        WARN_ON_ONCE(list_empty(&u->link));
                }
                u->inflight++;
-
-               /* Paired with READ_ONCE() in wait_for_unix_gc() */
-               WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
        }
 
-       WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
-
        spin_unlock(&unix_gc_lock);
 }
 
@@ -278,13 +279,8 @@ void unix_notinflight(struct user_struct *user, struct file *filp)
                u->inflight--;
                if (!u->inflight)
                        list_del_init(&u->link);
-
-               /* Paired with READ_ONCE() in wait_for_unix_gc() */
-               WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
        }
 
-       WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
-
        spin_unlock(&unix_gc_lock);
 }
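
For context on the remaining WRITE_ONCE() annotations:
wait_for_unix_gc() still reads both counters without taking
unix_gc_lock, roughly as below (paraphrased sketch; the
UNIX_INFLIGHT_* thresholds and exact checks may differ slightly in
this stable tree):

    void wait_for_unix_gc(struct scm_fp_list *fpl)
    {
            /* Lockless reads, paired with the WRITE_ONCE() above. */
            if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
                !READ_ONCE(gc_in_progress))
                    unix_gc();

            if (!fpl)
                    return;

            /* Penalise a user with too many not-yet-received fds in
             * flight by making their sendmsg() wait for the GC.
             */
            if (READ_ONCE(fpl->user->unix_inflight) > UNIX_INFLIGHT_SANE_USER &&
                READ_ONCE(gc_in_progress))
                    flush_work(&unix_gc_work);
    }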