git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
can: gw: use call_rcu() instead of costly synchronize_rcu()
author    Eric Dumazet <edumazet@google.com>    Mon, 7 Feb 2022 19:07:06 +0000 (11:07 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Wed, 4 Jun 2025 12:36:53 +0000 (14:36 +0200)
[ Upstream commit 181d4447905d551cc664f1e7e796b482c1eec992 ]

Commit fb8696ab14ad ("can: gw: synchronize rcu operations
before removing gw job entry") added three synchronize_rcu() calls
to make sure one RCU grace period was observed before freeing
a "struct cgw_job" (which is a tiny object).

Convert these to call_rcu() to avoid adding delays
during device / network dismantle.

Use the rcu_head that was already present, but so far unused,
in struct cgw_job.
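
In short, the removal paths stop blocking the caller for a full grace
period and instead defer the free. The following is a condensed sketch of
the pattern, abridged from the diff below (surrounding code omitted):

	/* before: each removal path waits inline for a grace period */
	hlist_del(&gwj->list);
	cgw_unregister_filter(net, gwj);
	synchronize_rcu();			/* blocks the caller */
	kmem_cache_free(cgw_cache, gwj);

	/* after: the free is deferred via the rcu_head embedded in struct cgw_job */
	static void cgw_job_free_rcu(struct rcu_head *rcu_head)
	{
		struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);

		kmem_cache_free(cgw_cache, gwj);
	}

	...
	hlist_del(&gwj->list);
	cgw_unregister_filter(net, gwj);
	call_rcu(&gwj->rcu, cgw_job_free_rcu);	/* returns immediately */

call_rcu() queues the callback and returns; cgw_job_free_rcu() then runs
(typically from softirq context) only after a grace period has elapsed, so
readers traversing the list under rcu_read_lock() can never see a freed
cgw_job.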

Link: https://lore.kernel.org/all/20220207190706.1499190-1-eric.dumazet@gmail.com
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Oliver Hartkopp <socketcan@hartkopp.net>
Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
Stable-dep-of: 511e64e13d8c ("can: gw: fix RCU/BH usage in cgw_create_job()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
net/can/gw.c

index cbb46d3aa963442e0b4319de0bf7e61b4e8c77de..59ce23996c6e087f393359abc279c4c003173c35 100644
@@ -515,6 +515,13 @@ static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
                          gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
 }
 
+static void cgw_job_free_rcu(struct rcu_head *rcu_head)
+{
+       struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
+
+       kmem_cache_free(cgw_cache, gwj);
+}
+
 static int cgw_notifier(struct notifier_block *nb,
                        unsigned long msg, void *ptr)
 {
@@ -534,8 +541,7 @@ static int cgw_notifier(struct notifier_block *nb,
                        if (gwj->src.dev == dev || gwj->dst.dev == dev) {
                                hlist_del(&gwj->list);
                                cgw_unregister_filter(net, gwj);
-                               synchronize_rcu();
-                               kmem_cache_free(cgw_cache, gwj);
+                               call_rcu(&gwj->rcu, cgw_job_free_rcu);
                        }
                }
        }
@@ -1093,8 +1099,7 @@ static void cgw_remove_all_jobs(struct net *net)
        hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
                hlist_del(&gwj->list);
                cgw_unregister_filter(net, gwj);
-               synchronize_rcu();
-               kmem_cache_free(cgw_cache, gwj);
+               call_rcu(&gwj->rcu, cgw_job_free_rcu);
        }
 }
 
@@ -1162,8 +1167,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
 
                hlist_del(&gwj->list);
                cgw_unregister_filter(net, gwj);
-               synchronize_rcu();
-               kmem_cache_free(cgw_cache, gwj);
+               call_rcu(&gwj->rcu, cgw_job_free_rcu);
                err = 0;
                break;
        }