git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: add sk_drops_read(), sk_drops_inc() and sk_drops_reset() helpers
Author: Eric Dumazet <edumazet@google.com>
Date:   Tue, 26 Aug 2025 12:50:27 +0000 (12:50 +0000)
Committer: Paolo Abeni <pabeni@redhat.com>
Commit date: Thu, 28 Aug 2025 11:14:50 +0000 (13:14 +0200)
We want to split sk->sk_drops in the future to reduce
potential contention on this field.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250826125031.1578842-2-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
17 files changed:
include/net/sock.h
include/net/tcp.h
net/core/datagram.c
net/core/sock.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/udp.c
net/ipv6/datagram.c
net/ipv6/raw.c
net/ipv6/udp.c
net/iucv/af_iucv.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/phonet/pep.c
net/phonet/socket.c
net/sctp/diag.c
net/tipc/socket.c

index 63a6a48afb48ad31abf05f5108886bac9831842a..34d7029eb622773e40e7c4ebd422d33b1c0a7836 100644 (file)
@@ -2682,11 +2682,26 @@ struct sock_skb_cb {
 #define sock_skb_cb_check_size(size) \
        BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
 
+static inline void sk_drops_inc(struct sock *sk)
+{
+       atomic_inc(&sk->sk_drops);
+}
+
+static inline int sk_drops_read(const struct sock *sk)
+{
+       return atomic_read(&sk->sk_drops);
+}
+
+static inline void sk_drops_reset(struct sock *sk)
+{
+       atomic_set(&sk->sk_drops, 0);
+}
+
 static inline void
 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
 {
        SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
-                                               atomic_read(&sk->sk_drops) : 0;
+                                               sk_drops_read(sk) : 0;
 }
 
 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
index 2936b8175950faa777f81f3c6b7230bcc375d772..16dc9cebb9d25832eac7a6ad590a9e9e47e85142 100644 (file)
@@ -2612,7 +2612,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
  */
 static inline void tcp_listendrop(const struct sock *sk)
 {
-       atomic_inc(&((struct sock *)sk)->sk_drops);
+       sk_drops_inc((struct sock *)sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 
index 94cc4705e91da6ba6629ae469ae6507e9c6fdae9..ba8253aa6e07c2b0db361c9dfdaf66243dc1024c 100644 (file)
@@ -345,7 +345,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
                spin_unlock_bh(&sk_queue->lock);
        }
 
-       atomic_inc(&sk->sk_drops);
+       sk_drops_inc(sk);
        return err;
 }
 EXPORT_SYMBOL(__sk_queue_drop_skb);
index 8002ac6293dcac694962be139eadfa6346b72d5b..75368823969a7992a55a6f40d87ffb8886de2f39 100644 (file)
@@ -491,13 +491,13 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
        if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
        }
 
        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                return -ENOBUFS;
        }
 
@@ -562,7 +562,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
        skb->dev = NULL;
 
        if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
                goto discard_and_relse;
        }
@@ -585,7 +585,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
                        reason = SKB_DROP_REASON_PFMEMALLOC;
                if (err == -ENOBUFS)
                        reason = SKB_DROP_REASON_SOCKET_BACKLOG;
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                goto discard_and_relse;
        }
 
@@ -2505,7 +2505,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
        newsk->sk_wmem_queued   = 0;
        newsk->sk_forward_alloc = 0;
        newsk->sk_reserved_mem  = 0;
-       atomic_set(&newsk->sk_drops, 0);
+       sk_drops_reset(newsk);
        newsk->sk_send_head     = NULL;
        newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
        atomic_set(&newsk->sk_zckey, 0);
@@ -3713,7 +3713,7 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
         */
        smp_wmb();
        refcount_set(&sk->sk_refcnt, 1);
-       atomic_set(&sk->sk_drops, 0);
+       sk_drops_reset(sk);
 }
 EXPORT_SYMBOL(sock_init_data_uid);
 
@@ -3973,7 +3973,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
        mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
        mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
        mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
-       mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+       mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
 }
 
 #ifdef CONFIG_PROC_FS
index 031df4c19fcc5ca18137695c78358c3ad96a2c4a..f119da68fc301be00719213ad33615b6754e6272 100644 (file)
@@ -1119,7 +1119,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
                from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
                0, sock_i_ino(sp),
                refcount_read(&sp->sk_refcnt), sp,
-               atomic_read(&sp->sk_drops));
+               sk_drops_read(sp));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)
index 1d2c89d63cc71f39d742c8156879847fc4e53c71..0f9f02f6146eef6df3f5bbb4f564e16fbabd1ba2 100644 (file)
@@ -178,7 +178,7 @@ static int raw_v4_input(struct net *net, struct sk_buff *skb,
 
                if (atomic_read(&sk->sk_rmem_alloc) >=
                    READ_ONCE(sk->sk_rcvbuf)) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        continue;
                }
 
@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 int raw_rcv(struct sock *sk, struct sk_buff *skb)
 {
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
                return NET_RX_DROP;
        }
@@ -1045,7 +1045,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
                0, sock_i_ino(sp),
-               refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+               refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)
index cc3ce0f762ec211a963464c2dd7ac329a6be1ffd..732bdad43626948168bdb9e40c151787f047bbfd 100644 (file)
@@ -1787,7 +1787,7 @@ uncharge_drop:
        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 
 drop:
-       atomic_inc(&sk->sk_drops);
+       sk_drops_inc(sk);
        busylock_release(busy);
        return err;
 }
@@ -1852,7 +1852,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
                                        IS_UDPLITE(sk));
                        __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
                                        IS_UDPLITE(sk));
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        __skb_unlink(skb, rcvq);
                        *total += skb->truesize;
                        kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
@@ -2008,7 +2008,7 @@ try_again:
 
                __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
                __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
                goto try_again;
        }
@@ -2078,7 +2078,7 @@ try_again:
 
        if (unlikely(err)) {
                if (!peeking) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_INERRORS, is_udplite);
                }
@@ -2449,7 +2449,7 @@ csum_error:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-       atomic_inc(&sk->sk_drops);
+       sk_drops_inc(sk);
        sk_skb_reason_drop(sk, skb, drop_reason);
        return -1;
 }
@@ -2534,7 +2534,7 @@ start_lookup:
                nskb = skb_clone(skb, GFP_ATOMIC);
 
                if (unlikely(!nskb)) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        __UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                                        IS_UDPLITE(sk));
                        __UDP_INC_STATS(net, UDP_MIB_INERRORS,
@@ -3386,7 +3386,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
                0, sock_i_ino(sp),
                refcount_read(&sp->sk_refcnt), sp,
-               atomic_read(&sp->sk_drops));
+               sk_drops_read(sp));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)
index 972bf0426d599af43bfd2d0e4236592f34ec7866..33ebe93d80e3cb6d897a3c7f714f94c395856023 100644 (file)
@@ -1068,5 +1068,5 @@ void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
                   0,
                   sock_i_ino(sp),
                   refcount_read(&sp->sk_refcnt), sp,
-                  atomic_read(&sp->sk_drops));
+                  sk_drops_read(sp));
 }
index 4c3f8245c40f155f3efde0d7b8af50e0bef431c7..4026192143ec9f1b071f43874185bc367c950c67 100644 (file)
@@ -163,7 +163,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 
                if (atomic_read(&sk->sk_rmem_alloc) >=
                    READ_ONCE(sk->sk_rcvbuf)) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        continue;
                }
 
@@ -361,7 +361,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
            skb_checksum_complete(skb)) {
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
                return NET_RX_DROP;
        }
@@ -389,7 +389,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
        struct raw6_sock *rp = raw6_sk(sk);
 
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
                return NET_RX_DROP;
        }
@@ -414,7 +414,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (inet_test_bit(HDRINCL, sk)) {
                if (skb_checksum_complete(skb)) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
                        return NET_RX_DROP;
                }
index 6a68f77da44b55baed42b44c936902f865754140..a35ee6d693a8080b9009f61d23fafd2465b8c625 100644 (file)
@@ -524,7 +524,7 @@ try_again:
        }
        if (unlikely(err)) {
                if (!peeking) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
                }
                kfree_skb(skb);
@@ -908,7 +908,7 @@ csum_error:
        __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-       atomic_inc(&sk->sk_drops);
+       sk_drops_inc(sk);
        sk_skb_reason_drop(sk, skb, drop_reason);
        return -1;
 }
@@ -1013,7 +1013,7 @@ start_lookup:
                }
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (unlikely(!nskb)) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
                                         IS_UDPLITE(sk));
                        __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
index cc2b3c44bc05a629d455e99369491b28b4b93884..6c717a7ef292831b49c1dca22ecc2bb7a7179b0f 100644 (file)
@@ -1187,7 +1187,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
-               atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
+               sk_drops_inc(sk);       /* skb rejected by filter */
                kfree_skb(skb);
                return;
        }
@@ -2011,7 +2011,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
        skb_reset_network_header(skb);
        IUCV_SKB_CB(skb)->offset = 0;
        if (sk_filter(sk, skb)) {
-               atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
+               sk_drops_inc(sk);       /* skb rejected by filter */
                kfree_skb(skb);
                return NET_RX_SUCCESS;
        }
index e2f7080dd5d7cd52722248b719c294cdccf70328..2b46c0cd752a313ad95cf17c46237883d6b85293 100644 (file)
@@ -356,7 +356,7 @@ static void netlink_overrun(struct sock *sk)
                        sk_error_report(sk);
                }
        }
-       atomic_inc(&sk->sk_drops);
+       sk_drops_inc(sk);
 }
 
 static void netlink_rcv_wake(struct sock *sk)
@@ -2711,7 +2711,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v)
                           sk_wmem_alloc_get(s),
                           READ_ONCE(nlk->cb_running),
                           refcount_read(&s->sk_refcnt),
-                          atomic_read(&s->sk_drops),
+                          sk_drops_read(s),
                           sock_i_ino(s)
                        );
 
index a7017d7f09272058106181e95367080dc821da69..9d42c4bd6e390c7212fc0a8dde5cc14ba7a00d53 100644 (file)
@@ -2265,7 +2265,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 drop_n_acct:
        atomic_inc(&po->tp_drops);
-       atomic_inc(&sk->sk_drops);
+       sk_drops_inc(sk);
        drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
 
 drop_n_restore:
index 62527e1ebb883d2854bcdc5256cd48e85e5c5dbc..4db564d9d522b639e9527d48eaa42a1cd9fbfba7 100644 (file)
@@ -376,7 +376,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        case PNS_PEP_CTRL_REQ:
                if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        break;
                }
                __skb_pull(skb, 4);
@@ -397,7 +397,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                }
 
                if (pn->rx_credits == 0) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        err = -ENOBUFS;
                        break;
                }
@@ -567,7 +567,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
                }
 
                if (pn->rx_credits == 0) {
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        err = NET_RX_DROP;
                        break;
                }
index 2b61a40b568e91e340130a9b589e2b7a9346643f..db2d552e9b32e384c332774b99199108abd464f2 100644 (file)
@@ -587,7 +587,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
                        from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
                        sock_i_ino(sk),
                        refcount_read(&sk->sk_refcnt), sk,
-                       atomic_read(&sk->sk_drops));
+                       sk_drops_read(sk));
        }
        seq_pad(seq, '\n');
        return 0;
index 23359e522273f0377080007c75eb2c276945f781..996c2018f0e611bd0da2df2f73e90e2f94c463d9 100644 (file)
@@ -173,7 +173,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
                mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
                mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
                mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
-               mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+               mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
 
                if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
                        goto errout;
index e028bf6584992c5ab7307d81082fbe4582e78068..1574a83384f88533cfab330c559512d5878bf0aa 100644 (file)
@@ -2366,7 +2366,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
                else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
                        trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
                                           "err_overload2!");
-                       atomic_inc(&sk->sk_drops);
+                       sk_drops_inc(sk);
                        err = TIPC_ERR_OVERLOAD;
                }
 
@@ -2458,7 +2458,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
                /* Overload => reject message back to sender */
                onode = tipc_own_addr(sock_net(sk));
-               atomic_inc(&sk->sk_drops);
+               sk_drops_inc(sk);
                if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
                        trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
                                              "@sk_enqueue!");
@@ -3657,7 +3657,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
            nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
                        skb_queue_len(&sk->sk_write_queue)) ||
            nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
-                       atomic_read(&sk->sk_drops)))
+                       sk_drops_read(sk)))
                goto stat_msg_cancel;
 
        if (tsk->cong_link_cnt &&