git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
af_unix: Consolidate unix_schedule_gc() and wait_for_unix_gc().
author: Kuniyuki Iwashima <kuniyu@google.com>
Sat, 15 Nov 2025 02:08:38 +0000 (02:08 +0000)
committer: Jakub Kicinski <kuba@kernel.org>
Wed, 19 Nov 2025 03:19:32 +0000 (19:19 -0800)
unix_schedule_gc() and wait_for_unix_gc() share some code.

Let's consolidate the two.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20251115020935.2643121-8-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/unix/af_unix.c
net/unix/af_unix.h
net/unix/garbage.c

index 34952242bd81a988d41b79ca355d88f404ad61e9..e518116f8171904955df8569e1e952b877a06dbb 100644 (file)
@@ -733,7 +733,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 
        /* ---- Socket is dead now and most probably destroyed ---- */
 
-       unix_schedule_gc();
+       unix_schedule_gc(NULL);
 }
 
 struct unix_peercred {
index 2f1bfe3217c1a6585995c46edb0732b89f9c2308..c4f1b2da363def13a5aeb117198a127f002dfe80 100644 (file)
@@ -29,7 +29,7 @@ void unix_del_edges(struct scm_fp_list *fpl);
 void unix_update_edges(struct unix_sock *receiver);
 int unix_prepare_fpl(struct scm_fp_list *fpl);
 void unix_destroy_fpl(struct scm_fp_list *fpl);
-void unix_schedule_gc(void);
+void unix_schedule_gc(struct user_struct *user);
 
 /* SOCK_DIAG */
 long unix_inq_len(struct sock *sk);
index fe1f74345b6696bc334fe3dc8a28d0d994192424..78323d43e63ed1ecabdb36ed10b2e2700b95c399 100644 (file)
@@ -279,8 +279,6 @@ void unix_update_edges(struct unix_sock *receiver)
        }
 }
 
-static void wait_for_unix_gc(struct scm_fp_list *fpl);
-
 int unix_prepare_fpl(struct scm_fp_list *fpl)
 {
        struct unix_vertex *vertex;
@@ -302,7 +300,7 @@ int unix_prepare_fpl(struct scm_fp_list *fpl)
        if (!fpl->edges)
                goto err;
 
-       wait_for_unix_gc(fpl);
+       unix_schedule_gc(fpl->user);
 
        return 0;
 
@@ -614,21 +612,9 @@ skip_gc:
 
 static DECLARE_WORK(unix_gc_work, unix_gc);
 
-void unix_schedule_gc(void)
-{
-       if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
-               return;
-
-       if (READ_ONCE(gc_in_progress))
-               return;
-
-       WRITE_ONCE(gc_in_progress, true);
-       queue_work(system_dfl_wq, &unix_gc_work);
-}
-
 #define UNIX_INFLIGHT_SANE_USER                (SCM_MAX_FD * 8)
 
-static void wait_for_unix_gc(struct scm_fp_list *fpl)
+void unix_schedule_gc(struct user_struct *user)
 {
        if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
                return;
@@ -636,11 +622,15 @@ static void wait_for_unix_gc(struct scm_fp_list *fpl)
        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
-       if (READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+       if (user &&
+           READ_ONCE(user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;
 
-       unix_schedule_gc();
+       if (!READ_ONCE(gc_in_progress)) {
+               WRITE_ONCE(gc_in_progress, true);
+               queue_work(system_dfl_wq, &unix_gc_work);
+       }
 
-       if (READ_ONCE(unix_graph_cyclic_sccs))
+       if (user && READ_ONCE(unix_graph_cyclic_sccs))
                flush_work(&unix_gc_work);
 }