git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
netfilter: flowtable: fix inline pppoe encapsulation in xmit path
author: Pablo Neira Ayuso <pablo@netfilter.org>
Thu, 30 Apr 2026 14:49:53 +0000 (16:49 +0200)
committer: Pablo Neira Ayuso <pablo@netfilter.org>
Thu, 30 Apr 2026 23:24:01 +0000 (01:24 +0200)
Address two issues in the inline pppoe encapsulation:

- Add needs_gso_segment flag to segment PPPoE packets in software
  given that there is no GSO support for this.

- Use FLOW_OFFLOAD_XMIT_DIRECT since neighbour cache is not available
  in point-to-point device, use the hardware address that is obtained
  via flowtable path discovery (ie. fill_forward_path).

Fixes: 18d27bed0880 ("netfilter: flowtable: inline pppoe encapsulation in xmit path")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
include/net/netfilter/nf_flow_table.h
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_flow_table_path.c

index b09c11c048d519d3254a80fbc0186c0049ad61f0..7b23b245a5a86ae4a9fe874b1c65c0d5b7846cf5 100644 (file)
@@ -148,9 +148,10 @@ struct flow_offload_tuple {
        /* All members above are keys for lookups, see flow_offload_hash(). */
        struct { }                      __hash;
 
-       u8                              dir:2,
+       u16                             dir:2,
                                        xmit_type:3,
                                        encap_num:2,
+                                       needs_gso_segment:1,
                                        tun_num:2,
                                        in_vlan_ingress:2;
        u16                             mtu;
@@ -232,6 +233,7 @@ struct nf_flow_route {
                        u32                     hw_ifindex;
                        u8                      h_source[ETH_ALEN];
                        u8                      h_dest[ETH_ALEN];
+                       u8                      needs_gso_segment:1;
                } out;
                enum flow_offload_xmit_type     xmit_type;
        } tuple[FLOW_OFFLOAD_DIR_MAX];
index 2c4140e6f53c51b6da4ea266e77aa8c2e7191120..785d8c244a77154833334fd9c7ea963cd2a50b2e 100644 (file)
@@ -122,6 +122,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
 
        flow_tuple->tun = route->tuple[dir].in.tun;
        flow_tuple->encap_num = route->tuple[dir].in.num_encaps;
+       flow_tuple->needs_gso_segment = route->tuple[dir].out.needs_gso_segment;
        flow_tuple->tun_num = route->tuple[dir].in.num_tuns;
 
        switch (route->tuple[dir].xmit_type) {
index 0ce3c209050ce3e6f45dfdf1d1c02f63771c6b8f..2eba64eb393a2e8c405bc958009e16a8df2b973d 100644 (file)
@@ -553,7 +553,8 @@ static int nf_flow_vlan_push(struct sk_buff *skb, __be16 proto, u16 id,
        return 0;
 }
 
-static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id)
+static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id,
+                             u32 needed_headroom)
 {
        int data_len = skb->len + sizeof(__be16);
        struct ppp_hdr {
@@ -562,7 +563,7 @@ static int nf_flow_pppoe_push(struct sk_buff *skb, u16 id)
        } *ph;
        __be16 proto;
 
-       if (skb_cow_head(skb, PPPOE_SES_HLEN))
+       if (skb_cow_head(skb, needed_headroom + PPPOE_SES_HLEN))
                return -1;
 
        switch (skb->protocol) {
@@ -755,7 +756,8 @@ static int nf_flow_encap_push(struct sk_buff *skb,
                                return -1;
                        break;
                case htons(ETH_P_PPP_SES):
-                       if (nf_flow_pppoe_push(skb, tuple->encap[i].id) < 0)
+                       if (nf_flow_pppoe_push(skb, tuple->encap[i].id,
+                                              needed_headroom) < 0)
                                return -1;
                        break;
                }
@@ -769,6 +771,7 @@ struct nf_flow_xmit {
        const void              *source;
        struct net_device       *outdev;
        struct flow_offload_tuple *tuple;
+       bool                    needs_gso_segment;
 };
 
 static void __nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
@@ -789,10 +792,41 @@ static void __nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
        dev_queue_xmit(skb);
 }
 
+static unsigned int nf_flow_encap_gso_xmit(struct net *net, struct sk_buff *skb,
+                                          struct nf_flow_xmit *xmit)
+{
+       struct sk_buff *segs, *nskb;
+
+       segs = skb_gso_segment(skb, 0);
+       if (IS_ERR(segs))
+               return NF_DROP;
+
+       if (segs)
+               consume_skb(skb);
+       else
+               segs = skb;
+
+       skb_list_walk_safe(segs, segs, nskb) {
+               skb_mark_not_on_list(segs);
+
+               if (nf_flow_encap_push(segs, xmit->tuple, xmit->outdev) < 0) {
+                       kfree_skb(segs);
+                       kfree_skb_list(nskb);
+                       return NF_STOLEN;
+               }
+               __nf_flow_queue_xmit(net, segs, xmit);
+       }
+
+       return NF_STOLEN;
+}
+
 static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
                                       struct nf_flow_xmit *xmit)
 {
        if (xmit->tuple->encap_num) {
+               if (skb_is_gso(skb) && xmit->needs_gso_segment)
+                       return nf_flow_encap_gso_xmit(net, skb, xmit);
+
                if (nf_flow_encap_push(skb, xmit->tuple, xmit->outdev) < 0)
                        return NF_DROP;
        }
@@ -876,6 +910,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
                return NF_DROP;
        }
        xmit.tuple = other_tuple;
+       xmit.needs_gso_segment = tuplehash->tuple.needs_gso_segment;
 
        return nf_flow_queue_xmit(state->net, skb, &xmit);
 }
@@ -1196,6 +1231,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
                return NF_DROP;
        }
        xmit.tuple = other_tuple;
+       xmit.needs_gso_segment = tuplehash->tuple.needs_gso_segment;
 
        return nf_flow_queue_xmit(state->net, skb, &xmit);
 }
index 6bb9579dcc2abb06b7d89649aa2d19143f59f467..9e88ea6a2eef78077182d473c4d68709942bc0cc 100644 (file)
@@ -86,6 +86,7 @@ struct nft_forward_info {
        u8 ingress_vlans;
        u8 h_source[ETH_ALEN];
        u8 h_dest[ETH_ALEN];
+       bool needs_gso_segment;
        enum flow_offload_xmit_type xmit_type;
 };
 
@@ -138,8 +139,11 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
                                        path->encap.proto;
                                info->num_encaps++;
                        }
-                       if (path->type == DEV_PATH_PPPOE)
+                       if (path->type == DEV_PATH_PPPOE) {
                                memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
+                               info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+                               info->needs_gso_segment = 1;
+                       }
                        break;
                case DEV_PATH_BRIDGE:
                        if (is_zero_ether_addr(info->h_source))
@@ -279,6 +283,7 @@ static void nft_dev_forward_path(const struct nft_pktinfo *pkt,
                memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
                route->tuple[dir].xmit_type = info.xmit_type;
        }
+       route->tuple[dir].out.needs_gso_segment = info.needs_gso_segment;
 }
 
 int nft_flow_route(const struct nft_pktinfo *pkt, const struct nf_conn *ct,