}
EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
+/* ndo_fill_forward_path() handler for ip6 tunnel devices: look up the
+ * route towards the tunnel remote endpoint and, on success, describe
+ * this hop as a DEV_PATH_TUN entry so the forward-path walk (used by
+ * the flowtable offload fastpath) can continue on the underlay device.
+ *
+ * Returns 0 on success or the negative route-lookup error.
+ */
+static int ip6_tnl_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct ip6_tnl *t = netdev_priv(ctx->dev);
+ struct flowi6 fl6 = {
+ .daddr = t->parms.raddr,
+ };
+ struct dst_entry *dst;
+ int err;
+
+ /* ip6_route_output() does not return NULL; lookup failures are
+  * reported through dst->error instead.
+  */
+ dst = ip6_route_output(dev_net(ctx->dev), NULL, &fl6);
+ if (!dst->error) {
+ path->type = DEV_PATH_TUN;
+ /* tunnel endpoints taken from the configured local/remote addrs */
+ path->tun.src_v6 = t->parms.laddr;
+ path->tun.dst_v6 = t->parms.raddr;
+ path->tun.l3_proto = IPPROTO_IPV6;
+ /* this hop is the tunnel netdev itself ... */
+ path->dev = ctx->dev;
+ /* ... and path resolution continues on the underlay device */
+ ctx->dev = dst->dev;
+ }
+
+ /* propagate the lookup result even on failure, then drop the ref */
+ err = dst->error;
+ dst_release(dst);
+
+ return err;
+}
+
static const struct net_device_ops ip6_tnl_netdev_ops = {
 .ndo_init = ip6_tnl_dev_init,
 .ndo_uninit = ip6_tnl_dev_uninit,
 .ndo_change_mtu = ip6_tnl_change_mtu,
 .ndo_get_stats64 = dev_get_tstats64,
 .ndo_get_iflink = ip6_tnl_get_iflink,
+ /* lets the flowtable fastpath resolve paths through this tunnel */
+ .ndo_fill_forward_path = ip6_tnl_fill_forward_path,
};
#define IPXIPX_FEATURES (NETIF_F_SG | \
} tun;
};
-static void nf_flow_tuple_encap(struct sk_buff *skb,
+static void nf_flow_tuple_encap(struct nf_flowtable_ctx *ctx,
+ struct sk_buff *skb,
struct flow_offload_tuple *tuple)
{
__be16 inner_proto = skb->protocol;
struct vlan_ethhdr *veth;
struct pppoe_hdr *phdr;
+ struct ipv6hdr *ip6h;
struct iphdr *iph;
u16 offset = 0;
int i = 0;
break;
}
- if (inner_proto == htons(ETH_P_IP)) {
+ switch (inner_proto) {
+ case htons(ETH_P_IP):
iph = (struct iphdr *)(skb_network_header(skb) + offset);
- if (iph->protocol == IPPROTO_IPIP) {
+ if (ctx->tun.proto == IPPROTO_IPIP) {
tuple->tun.dst_v4.s_addr = iph->daddr;
tuple->tun.src_v4.s_addr = iph->saddr;
tuple->tun.l3_proto = IPPROTO_IPIP;
}
+ break;
+ case htons(ETH_P_IPV6):
+ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
+ if (ctx->tun.proto == IPPROTO_IPV6) {
+ tuple->tun.dst_v6 = ip6h->daddr;
+ tuple->tun.src_v6 = ip6h->saddr;
+ tuple->tun.l3_proto = IPPROTO_IPV6;
+ }
+ break;
+ default:
+ break;
}
}
tuple->l3proto = AF_INET;
tuple->l4proto = ipproto;
tuple->iifidx = ctx->in->ifindex;
- nf_flow_tuple_encap(skb, tuple);
+ nf_flow_tuple_encap(ctx, skb, tuple);
return 0;
}
return true;
}
-static void nf_flow_ip4_tunnel_pop(struct nf_flowtable_ctx *ctx,
- struct sk_buff *skb)
+/* Detect an IP6IP6 tunnel header starting at ctx->offset.  On a match,
+ * record the tunnel protocol and header size in ctx->tun and advance
+ * ctx->offset past the outer IPv6 header (including extension headers).
+ *
+ * Returns false when the outer header cannot be read, the hop limit is
+ * about to expire, or extension-header parsing fails; true otherwise —
+ * including when no tunnel was detected (ctx->tun is then untouched).
+ */
+static bool nf_flow_ip6_tunnel_proto(struct nf_flowtable_ctx *ctx,
+ struct sk_buff *skb)
{
- if (ctx->tun.proto != IPPROTO_IPIP)
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h, _ip6h;
+ __be16 frag_off;
+ u8 nexthdr;
+ int hdrlen;
+
+ /* linearize-safe read of the outer IPv6 header at ctx->offset */
+ ip6h = skb_header_pointer(skb, ctx->offset, sizeof(*ip6h), &_ip6h);
+ if (!ip6h)
+ return false;
+
+ /* hop limit would expire; leave the packet to the slow path */
+ if (ip6h->hop_limit <= 1)
+ return false;
+
+ nexthdr = ip6h->nexthdr;
+ /* NOTE(review): ipv6_skip_exthdr() returns an offset measured from
+  * the start of the skb data (its 'start' argument already includes
+  * ctx->offset + sizeof(*ip6h)), so hdrlen below also covers the
+  * outer ctx->offset bytes — confirm this matches what the pop path
+  * later does with skb_pull(skb, ctx->tun.hdr_size).
+  */
+ hdrlen = ipv6_skip_exthdr(skb, sizeof(*ip6h) + ctx->offset, &nexthdr,
+ &frag_off);
+ if (hdrlen < 0)
+ return false;
+
+ if (nexthdr == IPPROTO_IPV6) {
+ ctx->tun.hdr_size = hdrlen;
+ ctx->tun.proto = IPPROTO_IPV6;
+ }
+ /* NOTE(review): executed even when no tunnel matched above — relies
+  * on ctx->tun.hdr_size being zero in that case; verify ctx->tun is
+  * zero-initialized by all callers.
+  */
+ ctx->offset += ctx->tun.hdr_size;
+
+ return true;
+#else
+ /* IPv6 support compiled out: never report a tunnel */
+ return false;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
+static void nf_flow_ip_tunnel_pop(struct nf_flowtable_ctx *ctx,
+ struct sk_buff *skb)
+{
+ if (ctx->tun.proto != IPPROTO_IPIP &&
+ ctx->tun.proto != IPPROTO_IPV6)
return;
skb_pull(skb, ctx->tun.hdr_size);
break;
}
- if (inner_proto == htons(ETH_P_IP))
+ switch (inner_proto) {
+ case htons(ETH_P_IP):
ret = nf_flow_ip4_tunnel_proto(ctx, skb);
+ break;
+ case htons(ETH_P_IPV6):
+ ret = nf_flow_ip6_tunnel_proto(ctx, skb);
+ break;
+ default:
+ break;
+ }
return ret;
}
}
}
- if (skb->protocol == htons(ETH_P_IP))
- nf_flow_ip4_tunnel_pop(ctx, skb);
+ if (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))
+ nf_flow_ip_tunnel_pop(ctx, skb);
}
struct nf_flow_xmit {
tuple->l3proto = AF_INET6;
tuple->l4proto = nexthdr;
tuple->iifidx = ctx->in->ifindex;
- nf_flow_tuple_encap(skb, tuple);
+ nf_flow_tuple_encap(ctx, skb, tuple);
return 0;
}
{
struct flow_offload_tuple tuple = {};
- if (skb->protocol != htons(ETH_P_IPV6) &&
- !nf_flow_skb_encap_protocol(ctx, skb, htons(ETH_P_IPV6)))
+ if (!nf_flow_skb_encap_protocol(ctx, skb, htons(ETH_P_IPV6)))
return NULL;
if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)