--- a/include/net/tcp.h
+++ b/include/net/tcp.h
	return __skb_checksum_complete(skb);
}
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
-		     enum skb_drop_reason *reason);
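+/* Returns SKB_NOT_DROPPED_YET (0) when the skb was queued or coalesced;
+ * any other value names the reason the caller must drop the skb.
+ */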
+enum skb_drop_reason tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
			     enum skb_drop_reason *reason)
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
-		     enum skb_drop_reason *reason)
+enum skb_drop_reason tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 tail_gso_size, tail_gso_segs;
	struct skb_shared_info *shinfo;
	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		trace_tcp_bad_csum(skb);
-		*reason = SKB_DROP_REASON_TCP_CSUM;
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
-		return true;
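+		/* invalid checksum: return the reason so the caller drops the skb */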
+		return SKB_DROP_REASON_TCP_CSUM;
	}
	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
-		return false;
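+		/* skb was merged into the backlog tail: consumed, not dropped */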
+		return SKB_NOT_DROPPED_YET;
	}
	__skb_push(skb, hdrlen);
	if (unlikely(err)) {
		bh_unlock_sock(sk);
		if (err == -ENOMEM) {
-			*reason = SKB_DROP_REASON_PFMEMALLOC;
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
-		} else {
-			*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+			return SKB_DROP_REASON_PFMEMALLOC;
		}
-		return true;
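+		/* any error other than -ENOMEM means the socket backlog is full */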
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+		return SKB_DROP_REASON_SOCKET_BACKLOG;
	}
-	return false;
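+	/* skb now sits on the backlog; it is processed at release_sock() time */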
+	return SKB_NOT_DROPPED_YET;
}
static void tcp_v4_restore_cb(struct sk_buff *skb)
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
-		if (tcp_add_backlog(sk, skb, &drop_reason))
+		drop_reason = tcp_add_backlog(sk, skb);
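+		/* SKB_NOT_DROPPED_YET is 0: any non-zero reason means drop */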
+		if (drop_reason)
			goto discard_and_relse;
	}
	bh_unlock_sock(sk);