typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;
+struct socket_drop_counters {
+ atomic_t drops0 ____cacheline_aligned_in_smp;
+ atomic_t drops1 ____cacheline_aligned_in_smp;
+};
+
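For context, a sketch of how a protocol could opt in to the new counters; the example_* names below are illustrative and not part of this patch:

/* Hypothetical usage: a protocol embeds the counters in its own socket
 * structure and points sk->sk_drop_counters at them once, at socket
 * initialization time.
 */
struct example_proto_sock {
	struct inet_sock	inet;
	/* ... protocol-private fields ... */
	struct socket_drop_counters drop_counters;
};

static void example_proto_init_sock(struct sock *sk)
{
	struct example_proto_sock *esk = (struct example_proto_sock *)sk;

	sk->sk_drop_counters = &esk->drop_counters;
}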
/**
* struct sock_common - minimal network layer representation of sockets
* @skc_daddr: Foreign IPv4 addr
* @sk_err_soft: errors that don't cause failure but are the cause of a
* persistent failure not just 'timed out'
* @sk_drops: raw/udp drops counter
+ * @sk_drop_counters: optional pointer to socket_drop_counters
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
+ struct socket_drop_counters *sk_drop_counters;
__cacheline_group_end(sock_read_rxtx);
__cacheline_group_begin(sock_write_rxtx);
static inline void sk_drops_add(struct sock *sk, int segs)
{
- atomic_add(segs, &sk->sk_drops);
+ struct socket_drop_counters *sdc = sk->sk_drop_counters;
+
+ if (sdc) {
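+ /* Two cache-line-aligned buckets, selected by NUMA node parity,
+  * so CPUs on different NUMA nodes do not keep bouncing a single
+  * cache line while accounting drops.
+  */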
+ int n = numa_node_id() % 2;
+
+ if (n)
+ atomic_add(segs, &sdc->drops1);
+ else
+ atomic_add(segs, &sdc->drops0);
+ } else {
+ atomic_add(segs, &sk->sk_drops);
+ }
}
static inline void sk_drops_inc(struct sock *sk)
{
	sk_drops_add(sk, 1);
}
static inline int sk_drops_read(const struct sock *sk)
{
+ const struct socket_drop_counters *sdc = sk->sk_drop_counters;
+
+ if (sdc) {
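+ /* When per-node counters are attached, sk->sk_drops is expected to stay at zero. */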
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&sk->sk_drops));
+ return atomic_read(&sdc->drops0) + atomic_read(&sdc->drops1);
+ }
return atomic_read(&sk->sk_drops);
}
static inline void sk_drops_reset(struct sock *sk)
{
+ struct socket_drop_counters *sdc = sk->sk_drop_counters;
+
+ if (sdc) {
+ atomic_set(&sdc->drops0, 0);
+ atomic_set(&sdc->drops1, 0);
+ }
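+ /* sk->sk_drops is cleared unconditionally; with per-node counters in use it should already be zero. */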
atomic_set(&sk->sk_drops, 0);
}
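A minimal sketch of the intended calling pattern for these helpers; apart from the sk_drops_* calls, the function names are illustrative:

/* Receive-path failure: account the drop, then free the skb. */
static void example_rx_drop(struct sock *sk, struct sk_buff *skb, int segs)
{
	sk_drops_add(sk, segs);		/* or sk_drops_inc(sk) for one packet */
	kfree_skb(skb);
}

/* Reporting (e.g. a diag or procfs handler) sums whichever counters are in use. */
static int example_report_drops(const struct sock *sk)
{
	return sk_drops_read(sk);
}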
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
newsk->sk_reserved_mem = 0;
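+ /* sock_copy() duplicated the parent's fields earlier in sk_clone_lock(); sockets cloned here are not expected to have per-node drop counters attached. */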
+ DEBUG_NET_WARN_ON_ONCE(newsk->sk_drop_counters);
sk_drops_reset(newsk);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
#ifdef CONFIG_MEMCG
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
#endif
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_drop_counters);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);