Revert "net: group sk_backlog and sk_receive_queue"
author     Eric Dumazet <edumazet@google.com>
           Mon, 29 Sep 2025 18:21:12 +0000 (18:21 +0000)
committer  Jakub Kicinski <kuba@kernel.org>
           Tue, 30 Sep 2025 01:30:32 +0000 (18:30 -0700)
This reverts commit 4effb335b5dab08cb6e2c38d038910f8b527cfc9.

This was a benefit for the UDP flood case, which was later greatly
improved by commits 6471658dc66c ("udp: use skb_attempt_defer_free()")
and b650bf0977d3 ("udp: remove busylock and add per NUMA queues").

Apparently the blamed commit added a regression for RAW sockets,
possibly because they do not use the dual RX queue strategy that UDP
has.
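
For context beyond the commit message: UDP's "dual RX queue" splits
reception between sk_receive_queue, filled from softirq context, and a
second reader_queue that recvmsg() drains privately, so producer and
consumer contend on the shared queue lock only when the reader queue
runs dry. Below is a minimal userspace sketch of that splice-on-empty
pattern; the names (pkt, rxq, two_queue_rx) are illustrative, not
kernel APIs.

#include <pthread.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

struct rxq {
	pthread_mutex_t lock;
	struct pkt *head, *tail;
};

struct two_queue_rx {
	struct rxq input;	/* shared: written by the producer side */
	struct rxq reader;	/* private to the consumer */
};

static struct two_queue_rx rx = {
	.input.lock  = PTHREAD_MUTEX_INITIALIZER,
	.reader.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Producer: only ever takes the input queue lock. */
static void rxq_push(struct rxq *q, struct pkt *p)
{
	pthread_mutex_lock(&q->lock);
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	pthread_mutex_unlock(&q->lock);
}

/* Consumer: pop from the private reader queue; splice the whole input
 * queue over only when the reader queue is empty, so the shared lock
 * (and its cache line) is touched once per batch, not once per packet. */
static struct pkt *two_queue_pop(struct two_queue_rx *r)
{
	struct pkt *p = r->reader.head;

	if (!p) {
		pthread_mutex_lock(&r->input.lock);
		r->reader.head = r->input.head;
		r->reader.tail = r->input.tail;
		r->input.head = r->input.tail = NULL;
		pthread_mutex_unlock(&r->input.lock);
		p = r->reader.head;
	}
	if (p) {
		r->reader.head = p->next;
		if (!r->reader.head)
			r->reader.tail = NULL;
	}
	return p;
}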

sock_queue_rcv_skb_reason() and RAW recvmsg() compete for
sk_receive_queue and sk_rmem_alloc changes, and having them in the
same cache line reduces performance.
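
The effect is easy to reproduce in userspace. The sketch below (C11,
pthreads; illustrative, not kernel code) has two threads hammering two
fields whose names loosely mirror sk_rmem_alloc and the receive queue:
when the fields share a cache line the threads slow each other down,
and building with -DPADDED pushes the second field onto its own line
and removes the contention.

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define ITERS 100000000UL

struct shared {
	_Alignas(64) atomic_long rmem_alloc;	/* producer-side counter */
#ifdef PADDED
	_Alignas(64)				/* own cache line */
#endif
	atomic_long queue_len;			/* consumer-side counter */
};

static struct shared s;

static void *producer(void *arg)
{
	(void)arg;
	for (unsigned long i = 0; i < ITERS; i++)
		atomic_fetch_add_explicit(&s.rmem_alloc, 1,
					  memory_order_relaxed);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	for (unsigned long i = 0; i < ITERS; i++)
		atomic_fetch_add_explicit(&s.queue_len, 1,
					  memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	/* gcc -O2 -pthread false_sharing.c; rerun with -DPADDED and
	 * compare wall-clock time. */
	printf("%ld %ld\n", atomic_load(&s.rmem_alloc),
	       atomic_load(&s.queue_len));
	return 0;
}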

Fixes: 4effb335b5da ("net: group sk_backlog and sk_receive_queue")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202509281326.f605b4eb-lkp@intel.com
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250929182112.824154-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/sock.h

index 8c5b64f41ab72d2a28c066c2a5698eaff7973918..60bcb13f045c3144609908a36960528b33e4f71c 100644
@@ -395,6 +395,7 @@ struct sock {
 
        atomic_t                sk_drops;
        __s32                   sk_peek_off;
+       struct sk_buff_head     sk_error_queue;
        struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
@@ -412,7 +413,6 @@ struct sock {
        } sk_backlog;
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 
-       struct sk_buff_head     sk_error_queue;
        __cacheline_group_end(sock_write_rx);
 
        __cacheline_group_begin(sock_read_rx);
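
For reference, the __cacheline_group_begin()/__cacheline_group_end()
markers in the hunks above delimit a run of fields with zero-sized
arrays, so the group's extent can be checked with offsetof() at build
time. A simplified sketch of the technique (GCC/Clang zero-length
arrays; not the kernel's actual include/linux/cache.h definitions)
follows.

#include <stddef.h>

#define GROUP_BEGIN(g)	unsigned char __group_begin__##g[0]
#define GROUP_END(g)	unsigned char __group_end__##g[0]

struct sock_like {
	GROUP_BEGIN(write_rx);
	long sk_drops;
	int  sk_peek_off;
	void *sk_error_queue;		/* placeholder field types */
	void *sk_receive_queue;
	GROUP_END(write_rx);
};

/* Build-time check that the whole write_rx group fits in one 64-byte
 * cache line; the kernel does the analogous checks with
 * CACHELINE_ASSERT_GROUP_MEMBER()/CACHELINE_ASSERT_GROUP_SIZE(). */
_Static_assert(offsetof(struct sock_like, __group_end__write_rx) -
	       offsetof(struct sock_like, __group_begin__write_rx) <= 64,
	       "write_rx group spills past one cache line");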