udp_tunnel: use static call for GRO hooks when possible
author    Paolo Abeni <pabeni@redhat.com>
          Tue, 11 Mar 2025 20:42:29 +0000 (21:42 +0100)
committer Paolo Abeni <pabeni@redhat.com>
          Tue, 18 Mar 2025 10:40:30 +0000 (11:40 +0100)
It's quite common to have a single UDP tunnel type active in the
whole system. In such a case we can replace the indirect call for
the UDP tunnel GRO callback with a static call.

Add the related accounting in the control path and switch to the
static call when possible. To keep the code simple, use a static
array for the registered tunnel types, and size that array based on
the kernel config.

Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/6fd1f9c7651151493ecab174e7b8386a1534170d.1741718157.git.pabeni@redhat.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
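
For context, the static call machinery this patch builds on works
roughly as sketched below; the names (my_hook, my_default, retarget)
are illustrative only and not part of this patch:

#include <linux/static_call.h>

static int my_default(int x)
{
        return x;
}

/* Emits a patchable call site named "my_hook", initially dispatching
 * to my_default().
 */
DEFINE_STATIC_CALL(my_hook, my_default);

static int caller(int x)
{
        /* Compiles to a direct (patched) call on architectures with
         * HAVE_STATIC_CALL, and to a regular indirect call elsewhere.
         */
        return static_call(my_hook)(x);
}

static void retarget(int (*fn)(int))
{
        /* Rewrite the call site(s) at runtime to dispatch to fn --
         * what the patch does when a single tunnel type remains
         * registered.
         */
        static_call_update(my_hook, fn);
}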
include/net/udp_tunnel.h
net/ipv4/udp_offload.c
net/ipv4/udp_tunnel_core.c

diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index eda0f3e2f65fa7625d3f4f941569d12ed57460e0..a7b230867eb148fa23b859493ea89c777668b1e8 100644 (file)
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -205,9 +205,11 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
 
 #if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
 void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
+void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
 #else
 static inline void udp_tunnel_update_gro_lookup(struct net *net,
                                                struct sock *sk, bool add) {}
+static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
 #endif
 
 static inline void udp_tunnel_cleanup_gro(struct sock *sk)
@@ -215,6 +217,8 @@ static inline void udp_tunnel_cleanup_gro(struct sock *sk)
        struct udp_sock *up = udp_sk(sk);
        struct net *net = sock_net(sk);
 
+       udp_tunnel_update_gro_rcv(sk, false);
+
        if (!up->tunnel_list.pprev)
                return;
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index e36d8a234848ffcfabcf29c77777ca31d0a2305a..088aa8cb8ac0cd397378b1f46309aab1f38c4779 100644 (file)
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
 #include <net/udp_tunnel.h>
 
 #if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+
+/*
+ * Dummy GRO tunnel callback; it exists mainly to avoid dangling/NULL
+ * values for the udp tunnel static call.
+ */
+static struct sk_buff *dummy_gro_rcv(struct sock *sk,
+                                    struct list_head *head,
+                                    struct sk_buff *skb)
+{
+       NAPI_GRO_CB(skb)->flush = 1;
+       return NULL;
+}
+
+typedef struct sk_buff *(*udp_tunnel_gro_rcv_t)(struct sock *sk,
+                                               struct list_head *head,
+                                               struct sk_buff *skb);
+
+struct udp_tunnel_type_entry {
+       udp_tunnel_gro_rcv_t gro_receive;
+       refcount_t count;
+};
+
+#define UDP_MAX_TUNNEL_TYPES (IS_ENABLED(CONFIG_GENEVE) + \
+                             IS_ENABLED(CONFIG_VXLAN) * 2 + \
+                             IS_ENABLED(CONFIG_NET_FOU) * 2)
+
+DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
+static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
+static struct mutex udp_tunnel_gro_type_lock;
+static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
+static unsigned int udp_tunnel_gro_type_nr;
 static DEFINE_SPINLOCK(udp_tunnel_gro_lock);
 
 void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
@@ -43,6 +74,101 @@ void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
        spin_unlock(&udp_tunnel_gro_lock);
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_lookup);
+
+void udp_tunnel_update_gro_rcv(struct sock *sk, bool add)
+{
+       struct udp_tunnel_type_entry *cur = NULL;
+       struct udp_sock *up = udp_sk(sk);
+       int i, old_gro_type_nr;
+
+       if (!up->gro_receive)
+               return;
+
+       mutex_lock(&udp_tunnel_gro_type_lock);
+       for (i = 0; i < udp_tunnel_gro_type_nr; i++)
+               if (udp_tunnel_gro_types[i].gro_receive == up->gro_receive)
+                       cur = &udp_tunnel_gro_types[i];
+
+       old_gro_type_nr = udp_tunnel_gro_type_nr;
+       if (add) {
+               /*
+                * Update the matching entry, if found, or add a new one
+                * if needed
+                */
+               if (cur) {
+                       refcount_inc(&cur->count);
+                       goto out;
+               }
+
+               if (unlikely(udp_tunnel_gro_type_nr == UDP_MAX_TUNNEL_TYPES)) {
+                       pr_err_once("Too many UDP tunnel types, please increase UDP_MAX_TUNNEL_TYPES\n");
+                       /* Ensure static call will never be enabled */
+                       udp_tunnel_gro_type_nr = UDP_MAX_TUNNEL_TYPES + 2;
+                       goto out;
+               }
+
+               cur = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
+               refcount_set(&cur->count, 1);
+               cur->gro_receive = up->gro_receive;
+       } else {
+               /*
+                * The stack only cleans up tunnels that were successfully
+                * added, so the lookup on removal should never fail.
+                */
+               if (WARN_ON_ONCE(!cur))
+                       goto out;
+
+               if (!refcount_dec_and_test(&cur->count))
+                       goto out;
+
+               /* avoid gaps, so that the enabled tunnel always has id 0 */
+               *cur = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
+       }
+
+       if (udp_tunnel_gro_type_nr == 1) {
+               static_call_update(udp_tunnel_gro_rcv,
+                                  udp_tunnel_gro_types[0].gro_receive);
+               static_branch_enable(&udp_tunnel_static_call);
+       } else if (old_gro_type_nr == 1) {
+               static_branch_disable(&udp_tunnel_static_call);
+               static_call_update(udp_tunnel_gro_rcv, dummy_gro_rcv);
+       }
+
+out:
+       mutex_unlock(&udp_tunnel_gro_type_lock);
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);
+
+static void udp_tunnel_gro_init(void)
+{
+       mutex_init(&udp_tunnel_gro_type_lock);
+}
+
+static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
+                                         struct list_head *head,
+                                         struct sk_buff *skb)
+{
+       if (static_branch_likely(&udp_tunnel_static_call)) {
+               if (unlikely(gro_recursion_inc_test(skb))) {
+                       NAPI_GRO_CB(skb)->flush |= 1;
+                       return NULL;
+               }
+               return static_call(udp_tunnel_gro_rcv)(sk, head, skb);
+       }
+       return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+}
+
+#else
+
+static void udp_tunnel_gro_init(void) {}
+
+static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
+                                         struct list_head *head,
+                                         struct sk_buff *skb)
+{
+       return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+}
+
 #endif
 
 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
@@ -654,7 +780,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-       pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+       pp = udp_tunnel_gro_rcv(sk, head, skb);
 
 out:
        skb_gro_flush_final(skb, pp, flush);
@@ -804,5 +930,7 @@ int __init udpv4_offload_init(void)
                        .gro_complete = udp4_gro_complete,
                },
        };
+
+       udp_tunnel_gro_init();
        return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
 }
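
The enable/disable transitions implemented above can be checked with a
small standalone model (plain userspace C, not kernel code) that
mirrors the refcount-plus-array logic of udp_tunnel_update_gro_rcv(),
assuming balanced add/remove calls and ignoring the overflow
poisoning:

/* Standalone model of the accounting: the static call is enabled
 * exactly while a single tunnel type is registered, regardless of
 * how many sockets (refcounts) use it. Names and types simplified.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TYPES 5

struct entry {
        void *hook;     /* stands in for the gro_receive pointer */
        int count;      /* stands in for refcount_t */
};

static struct entry types[MAX_TYPES];
static int nr;
static bool static_call_enabled;

static void update(void *hook, bool add)
{
        struct entry *cur = NULL;
        int old_nr = nr, i;

        for (i = 0; i < nr; i++)
                if (types[i].hook == hook)
                        cur = &types[i];

        if (add) {
                if (cur) {
                        cur->count++;   /* same type, another socket */
                        return;
                }
                types[nr].hook = hook;  /* register a new type */
                types[nr++].count = 1;
        } else {
                if (--cur->count)       /* assumes balanced add/remove */
                        return;
                *cur = types[--nr];     /* keep the array gap-free */
        }

        if (nr == 1)
                static_call_enabled = true;
        else if (old_nr == 1)
                static_call_enabled = false;
}

int main(void)
{
        int a, b;       /* addresses used as distinct hook cookies */

        update(&a, true);  printf("one type:        %d\n", static_call_enabled);
        update(&a, true);  printf("same type twice: %d\n", static_call_enabled);
        update(&b, true);  printf("two types:       %d\n", static_call_enabled);
        update(&b, false); printf("back to one:     %d\n", static_call_enabled);
        return 0;
}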
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index b5695826e57ad43f8958f8d5a86dd8c04e32adaf..c49fceea83139320d265ee1b2e713578fc2184d6 100644 (file)
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -90,6 +90,8 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 
        udp_tunnel_encap_enable(sk);
 
+       udp_tunnel_update_gro_rcv(sock->sk, true);
+
        if (!sk->sk_dport && !sk->sk_bound_dev_if && sk_saddr_any(sock->sk))
                udp_tunnel_update_gro_lookup(net, sock->sk, true);
 }
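
As a usage illustration: after this change, any tunnel driver that
fills in gro_receive in its udp_tunnel_sock_cfg participates in the
type accounting automatically. A minimal, hypothetical open path
(my_gro_receive and my_tunnel_open are made up for the example):

#include <net/gro.h>
#include <net/udp_tunnel.h>

static struct sk_buff *my_gro_receive(struct sock *sk,
                                      struct list_head *head,
                                      struct sk_buff *skb)
{
        /* Tunnel-specific aggregation would go here; flush as a
         * placeholder.
         */
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}

static void my_tunnel_open(struct net *net, struct socket *sock)
{
        struct udp_tunnel_sock_cfg cfg = {
                .encap_type  = 1,               /* an UDP_ENCAP_* value */
                .gro_receive = my_gro_receive,  /* counted per type */
        };

        /* setup_udp_tunnel_sock() now also calls
         * udp_tunnel_update_gro_rcv(sock->sk, true); the matching
         * release happens via udp_tunnel_cleanup_gro() at teardown.
         */
        setup_udp_tunnel_sock(net, sock, &cfg);
}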