}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
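The first hunk below renames qdisc_pkt_len_init() to qdisc_pkt_len_segs_init() and has it cache the wire-level segment count next to pkt_len, so qdiscs no longer need to re-derive it from skb_shinfo(skb)->gso_segs. The header-side change is not part of this excerpt, but the accesses imply a new pkt_segs field in struct qdisc_skb_cb; a sketch of the assumed layout (the field's placement and exact position are guesses, only the name and u16 width follow from the hunk):

	/* Sketch only: implied include/net/sch_generic.h change, not shown
	 * in this excerpt. Field placement is an assumption.
	 */
	struct qdisc_skb_cb {
		struct {
			unsigned int	pkt_len;
			u16		slave_dev_queue_mapping;
			u16		tc_classid;
			u16		pkt_segs;	/* assumed: 1 for non-GSO, else segment count */
		};
	#define QDISC_CB_PRIV_LEN 20
		unsigned char		data[QDISC_CB_PRIV_LEN];
	};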
-static void qdisc_pkt_len_init(struct sk_buff *skb)
+static void qdisc_pkt_len_segs_init(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	u16 gso_segs;

	qdisc_skb_cb(skb)->pkt_len = skb->len;

+	if (!shinfo->gso_size) {
+		qdisc_skb_cb(skb)->pkt_segs = 1;
+		return;
+	}
+
+	qdisc_skb_cb(skb)->pkt_segs = gso_segs = shinfo->gso_segs;
+
	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
-	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
-		u16 gso_segs = shinfo->gso_segs;
+	if (skb_transport_header_was_set(skb)) {
		unsigned int hdr_len;

		/* mac layer + network layer */
		if (!skb->encapsulation)
			hdr_len = skb_transport_offset(skb);
		else
			hdr_len = skb_inner_transport_offset(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, hdr_len, sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
			struct udphdr _udphdr;

			if (!skb_header_pointer(skb, hdr_len, sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
			int payload = skb->len - hdr_len;

			/* Malicious packet. */
			if (payload <= 0)
				return;
			gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
			shinfo->gso_segs = gso_segs;
+			qdisc_skb_cb(skb)->pkt_segs = gso_segs;
		}
		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
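On the consumer side, a qdisc can now read the cached count instead of recomputing it per packet. A minimal sketch under stated assumptions: example_enqueue() and example_charge_rate() are hypothetical names, and only qdisc_skb_cb(skb)->pkt_segs itself comes from the patch; the other helpers are existing sch_generic.h APIs.

	/* Hypothetical consumer, not part of the patch: use the cached
	 * segment count for wire-level packet accounting.
	 */
	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				   struct sk_buff **to_free)
	{
		u16 segs = qdisc_skb_cb(skb)->pkt_segs;	/* 1 for non-GSO skbs */

		if (unlikely(sch->q.qlen >= READ_ONCE(sch->limit)))
			return qdisc_drop(skb, sch, to_free);

		/* qdisc_pkt_len() already includes the per-segment header
		 * overhead added by qdisc_pkt_len_segs_init(), so bytes and
		 * segments can both be charged without touching skb_shinfo().
		 */
		example_charge_rate(sch, qdisc_pkt_len(skb), segs);

		return qdisc_enqueue_tail(skb, sch);
	}

The next hunk updates the caller in __dev_queue_xmit() to match the rename: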
	skb_update_prio(skb);

-	qdisc_pkt_len_init(skb);
+	qdisc_pkt_len_segs_init(skb);
	tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
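The final hunk touches sch_cake's cake_overhead(), which carries a borrowed copy of this header-size logic, updating its comment to reference the new function name: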
	if (!shinfo->gso_size)
		return cake_calc_overhead(q, len, off);

-	/* borrowed from qdisc_pkt_len_init() */
+	/* borrowed from qdisc_pkt_len_segs_init() */
	if (!skb->encapsulation)
		hdr_len = skb_transport_offset(skb);
	else