}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
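+/* Initialize qdisc_skb_cb(skb)->pkt_len and ->pkt_segs from the skb and
+ * its GSO metadata, and validate that the advertised transport headers
+ * are actually present. Returns SKB_NOT_DROPPED_YET on success, or a
+ * drop reason when the GSO metadata is inconsistent with the packet.
+ */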
-static void qdisc_pkt_len_segs_init(struct sk_buff *skb)
+static enum skb_drop_reason qdisc_pkt_len_segs_init(struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
- unsigned int hdr_len;
+ unsigned int hdr_len, tlen;
u16 gso_segs;
qdisc_skb_cb(skb)->pkt_len = skb->len;
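+ /* Non-GSO packets are a single segment and need no header accounting. */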
if (!shinfo->gso_size) {
qdisc_skb_cb(skb)->pkt_segs = 1;
- return;
+ return SKB_NOT_DROPPED_YET;
}
qdisc_skb_cb(skb)->pkt_segs = gso_segs = shinfo->gso_segs;
/* To get a more precise estimate of bytes sent on the wire,
* we add to pkt_len the header size of all segments
*/
- if (unlikely(!skb_transport_header_was_set(skb)))
- return;
/* mac layer + network layer */
- if (!skb->encapsulation)
+ if (!skb->encapsulation) {
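+ /* Without a transport header we cannot size the duplicated
+ * headers; keep the unadjusted pkt_len instead of dropping.
+ */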
+ if (unlikely(!skb_transport_header_was_set(skb)))
+ return SKB_NOT_DROPPED_YET;
hdr_len = skb_transport_offset(skb);
- else
+ } else {
hdr_len = skb_inner_transport_offset(skb);
-
+ }
/* + transport layer */
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
const struct tcphdr *th;
- struct tcphdr _tcphdr;
- th = skb_header_pointer(skb, hdr_len,
- sizeof(_tcphdr), &_tcphdr);
- if (likely(th))
- hdr_len += __tcp_hdrlen(th);
- } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
- struct udphdr _udphdr;
- if (skb_header_pointer(skb, hdr_len,
- sizeof(_udphdr), &_udphdr))
- hdr_len += sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_len + sizeof(struct tcphdr)))
+ return SKB_DROP_REASON_SKB_BAD_GSO;
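+ /* The fixed TCP header is now linear; doff gives the full
+ * header length, options included.
+ */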
+ th = (const struct tcphdr *)(skb->data + hdr_len);
+ tlen = __tcp_hdrlen(th);
+ if (tlen < sizeof(*th))
+ return SKB_DROP_REASON_SKB_BAD_GSO;
+ hdr_len += tlen;
+ if (!pskb_may_pull(skb, hdr_len))
+ return SKB_DROP_REASON_SKB_BAD_GSO;
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
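+ /* UDP has a fixed-size header that each segment repeats verbatim. */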
+ if (!pskb_may_pull(skb, hdr_len + sizeof(struct udphdr)))
+ return SKB_DROP_REASON_SKB_BAD_GSO;
+ hdr_len += sizeof(struct udphdr);
}
+ /* A prior pskb_may_pull() might have reallocated skb->head; reload shinfo. */
+ shinfo = skb_shinfo(skb);
if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
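+ /* SKB_GSO_DODGY: gso_segs comes from an untrusted source
+ * (e.g. a virtual device), so recompute it from the payload.
+ */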
int payload = skb->len - hdr_len;
/* Malicious packet. */
if (payload <= 0)
- return;
+ return SKB_DROP_REASON_SKB_BAD_GSO;
gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
shinfo->gso_segs = gso_segs;
qdisc_skb_cb(skb)->pkt_segs = gso_segs;
}
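+ /* Every segment after the first repeats the headers; account
+ * for those extra wire bytes on top of skb->len.
+ */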
qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
+ return SKB_NOT_DROPPED_YET;
}
static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
(SKBTX_SCHED_TSTAMP | SKBTX_BPF)))
__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
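+ /* Fill qdisc pkt_len/pkt_segs up front and drop packets whose
+ * GSO metadata is inconsistent before any further processing.
+ */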
+ reason = qdisc_pkt_len_segs_init(skb);
+ if (unlikely(reason)) {
+ dev_core_stats_tx_dropped_inc(dev);
+ kfree_skb_reason(skb, reason);
+ return -EINVAL;
+ }
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
skb_update_prio(skb);
- qdisc_pkt_len_segs_init(skb);
tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {