// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h> /* for ipv4 options. */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>

struct nft_flow_offload {
        struct nft_flowtable *flowtable;
};

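/* Use the xfrm transmit path if the route has a transform attached,
 * otherwise resolve the next hop via the neighbour layer.
 */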
static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
{
        if (dst_xfrm(dst))
                return FLOW_OFFLOAD_XMIT_XFRM;

        return FLOW_OFFLOAD_XMIT_NEIGH;
}

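/* Record dst_cache as the output route for @dir and its device as the
 * expected ingress interface for the opposite direction.
 */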
static void nft_default_forward_path(struct nf_flow_route *route,
                                     struct dst_entry *dst_cache,
                                     enum ip_conntrack_dir dir)
{
        route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
        route->tuple[dir].dst = dst_cache;
        route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
}

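/* Only real Ethernet devices with a valid unicast address qualify for the
 * direct transmit path; loopback and non-ARPHRD_ETHER devices do not.
 */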
static bool nft_is_valid_ether_device(const struct net_device *dev)
{
        if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
            dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
                return false;

        return true;
}

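/* Look up the destination hardware address in the neighbour cache (skipped
 * for non-Ethernet devices) and walk the lower device stack via
 * dev_fill_forward_path(). Returns a negative value if the neighbour entry
 * is missing or not yet valid.
 */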
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
                                     const struct dst_entry *dst_cache,
                                     const struct nf_conn *ct,
                                     enum ip_conntrack_dir dir, u8 *ha,
                                     struct net_device_path_stack *stack)
{
        const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
        struct net_device *dev = dst_cache->dev;
        struct neighbour *n;
        u8 nud_state;

        if (!nft_is_valid_ether_device(dev))
                goto out;

        n = dst_neigh_lookup(dst_cache, daddr);
        if (!n)
                return -1;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(ha, n->ha);
        read_unlock_bh(&n->lock);
        neigh_release(n);

        if (!(nud_state & NUD_VALID))
                return -1;

out:
        return dev_fill_forward_path(dev, ha, stack);
}

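/* Forwarding state collected while walking the device path: ingress and
 * egress devices, VLAN/PPPoE encapsulation, Ethernet addresses and the
 * transmit type to use for the offloaded flow.
 */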
struct nft_forward_info {
        const struct net_device *indev;
        const struct net_device *outdev;
        const struct net_device *hw_outdev;
        struct id {
                __u16 id;
                __be16 proto;
        } encap[NF_FLOW_TABLE_ENCAP_MAX];
        u8 num_encaps;
        u8 ingress_vlans;
        u8 h_source[ETH_ALEN];
        u8 h_dest[ETH_ALEN];
        enum flow_offload_xmit_type xmit_type;
};

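/* Flatten the device path returned by dev_fill_forward_path() into
 * nft_forward_info: track the real ingress device, collect up to
 * NF_FLOW_TABLE_ENCAP_MAX VLAN/PPPoE encapsulations, account for bridge
 * VLAN tagging/untagging and pick the direct transmit path where possible.
 */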
static void nft_dev_path_info(const struct net_device_path_stack *stack,
                              struct nft_forward_info *info,
                              unsigned char *ha, struct nf_flowtable *flowtable)
{
        const struct net_device_path *path;
        int i;

        memcpy(info->h_dest, ha, ETH_ALEN);

        for (i = 0; i < stack->num_paths; i++) {
                path = &stack->path[i];
                switch (path->type) {
                case DEV_PATH_ETHERNET:
                case DEV_PATH_DSA:
                case DEV_PATH_VLAN:
                case DEV_PATH_PPPOE:
                        info->indev = path->dev;
                        if (is_zero_ether_addr(info->h_source))
                                memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

                        if (path->type == DEV_PATH_ETHERNET)
                                break;
                        if (path->type == DEV_PATH_DSA) {
                                i = stack->num_paths;
                                break;
                        }

                        /* DEV_PATH_VLAN and DEV_PATH_PPPOE */
                        if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
                                info->indev = NULL;
                                break;
                        }
                        if (!info->outdev)
                                info->outdev = path->dev;
                        info->encap[info->num_encaps].id = path->encap.id;
                        info->encap[info->num_encaps].proto = path->encap.proto;
                        info->num_encaps++;
                        if (path->type == DEV_PATH_PPPOE)
                                memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
                        break;
                case DEV_PATH_BRIDGE:
                        if (is_zero_ether_addr(info->h_source))
                                memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

                        switch (path->bridge.vlan_mode) {
                        case DEV_PATH_BR_VLAN_UNTAG_HW:
                                info->ingress_vlans |= BIT(info->num_encaps - 1);
                                break;
                        case DEV_PATH_BR_VLAN_TAG:
                                info->encap[info->num_encaps].id = path->bridge.vlan_id;
                                info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
                                info->num_encaps++;
                                break;
                        case DEV_PATH_BR_VLAN_UNTAG:
                                info->num_encaps--;
                                break;
                        case DEV_PATH_BR_VLAN_KEEP:
                                break;
                        }
                        info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
                        break;
                default:
                        info->indev = NULL;
                        break;
                }
        }
        if (!info->outdev)
                info->outdev = info->indev;

        info->hw_outdev = info->indev;

        if (nf_flowtable_hw_offload(flowtable) &&
            nft_is_valid_ether_device(info->indev))
                info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}

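/* Check whether the given device is attached to this flowtable. */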
static bool nft_flowtable_find_dev(const struct net_device *dev,
                                   struct nft_flowtable *ft)
{
        struct nft_hook *hook;
        bool found = false;

        list_for_each_entry_rcu(hook, &ft->hook_list, list) {
                if (hook->ops.dev != dev)
                        continue;

                found = true;
                break;
        }

        return found;
}

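/* Refine the default forwarding path with the information gathered from the
 * device path walk: real ingress ifindex, encapsulation and, for the direct
 * transmit path, the Ethernet addresses and egress devices.
 */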
static void nft_dev_forward_path(struct nf_flow_route *route,
                                 const struct nf_conn *ct,
                                 enum ip_conntrack_dir dir,
                                 struct nft_flowtable *ft)
{
        const struct dst_entry *dst = route->tuple[dir].dst;
        struct net_device_path_stack stack;
        struct nft_forward_info info = {};
        unsigned char ha[ETH_ALEN];
        int i;

        if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
                nft_dev_path_info(&stack, &info, ha, &ft->data);

        if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
                return;

        route->tuple[!dir].in.ifindex = info.indev->ifindex;
        for (i = 0; i < info.num_encaps; i++) {
                route->tuple[!dir].in.encap[i].id = info.encap[i].id;
                route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
        }
        route->tuple[!dir].in.num_encaps = info.num_encaps;
        route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;

        if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
                memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
                memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
                route->tuple[dir].out.ifindex = info.outdev->ifindex;
                route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
                route->tuple[dir].xmit_type = info.xmit_type;
        }
}

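/* Build the route for both directions of the flow: take a reference on the
 * packet's own dst for @dir and look up the reply direction with nf_route().
 * If both directions use the neighbour transmit path, try to switch them to
 * the direct device path.
 */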
static int nft_flow_route(const struct nft_pktinfo *pkt,
                          const struct nf_conn *ct,
                          struct nf_flow_route *route,
                          enum ip_conntrack_dir dir,
                          struct nft_flowtable *ft)
{
        struct dst_entry *this_dst = skb_dst(pkt->skb);
        struct dst_entry *other_dst = NULL;
        struct flowi fl;

        memset(&fl, 0, sizeof(fl));
        switch (nft_pf(pkt)) {
        case NFPROTO_IPV4:
                fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
                fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
                fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
                fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
                fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
                fl.u.ip4.flowi4_mark = pkt->skb->mark;
                fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
                break;
        case NFPROTO_IPV6:
                fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
                fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.src.u3.in6;
                fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
                fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
                fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
                fl.u.ip6.flowi6_mark = pkt->skb->mark;
                fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
                break;
        }

        if (!dst_hold_safe(this_dst))
                return -ENOENT;

        nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
        if (!other_dst) {
                dst_release(this_dst);
                return -ENOENT;
        }

        nft_default_forward_path(route, this_dst, dir);
        nft_default_forward_path(route, other_dst, !dir);

        if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
            route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
                nft_dev_forward_path(route, ct, dir, ft);
                nft_dev_forward_path(route, ct, !dir, ft);
        }

        return 0;
}

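/* Do not offload packets with an IPsec secpath or with IPv4 options. */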
static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
{
        if (skb_sec_path(skb))
                return true;

        if (family == NFPROTO_IPV4) {
                const struct ip_options *opt;

                opt = &(IPCB(skb)->opt);

                if (unlikely(opt->optlen))
                        return true;
        }

        return false;
}

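/* Expression evaluation: offload confirmed, helper-free TCP (established),
 * UDP and keyless, un-NATed GRE conntrack entries to the flowtable. On any
 * failure or unsupported packet the verdict is set to NFT_BREAK.
 */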
static void nft_flow_offload_eval(const struct nft_expr *expr,
                                  struct nft_regs *regs,
                                  const struct nft_pktinfo *pkt)
{
        struct nft_flow_offload *priv = nft_expr_priv(expr);
        struct nf_flowtable *flowtable = &priv->flowtable->data;
        struct tcphdr _tcph, *tcph = NULL;
        struct nf_flow_route route = {};
        enum ip_conntrack_info ctinfo;
        struct flow_offload *flow;
        enum ip_conntrack_dir dir;
        struct nf_conn *ct;
        int ret;

        if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
                goto out;

        ct = nf_ct_get(pkt->skb, &ctinfo);
        if (!ct)
                goto out;

        switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
        case IPPROTO_TCP:
                tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
                                          sizeof(_tcph), &_tcph);
                if (unlikely(!tcph || tcph->fin || tcph->rst ||
                             !nf_conntrack_tcp_established(ct)))
                        goto out;
                break;
        case IPPROTO_UDP:
                break;
#ifdef CONFIG_NF_CT_PROTO_GRE
        case IPPROTO_GRE: {
                struct nf_conntrack_tuple *tuple;

                if (ct->status & IPS_NAT_MASK)
                        goto out;
                tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
                /* No support for GRE v1 */
                if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
                        goto out;
                break;
        }
#endif
        default:
                goto out;
        }

        if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
            ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
                goto out;

        if (!nf_ct_is_confirmed(ct))
                goto out;

        if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
                goto out;

        dir = CTINFO2DIR(ctinfo);
        if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
                goto err_flow_route;

        flow = flow_offload_alloc(ct);
        if (!flow)
                goto err_flow_alloc;

        flow_offload_route_init(flow, &route);

        if (tcph) {
                ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
                ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
        }

        ret = flow_offload_add(flowtable, flow);
        if (ret < 0)
                goto err_flow_add;

        return;

err_flow_add:
        flow_offload_free(flow);
err_flow_alloc:
        dst_release(route.tuple[dir].dst);
        dst_release(route.tuple[!dir].dst);
err_flow_route:
        clear_bit(IPS_OFFLOAD_BIT, &ct->status);
out:
        regs->verdict.code = NFT_BREAK;
}

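/* The flow_offload expression is only valid in chains hooked at forward. */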
static int nft_flow_offload_validate(const struct nft_ctx *ctx,
                                     const struct nft_expr *expr,
                                     const struct nft_data **data)
{
        unsigned int hook_mask = (1 << NF_INET_FORWARD);

        return nft_chain_validate_hooks(ctx->chain, hook_mask);
}

static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
        [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
                                   .len = NFT_NAME_MAXLEN - 1 },
};

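/* Look up the flowtable referenced by NFTA_FLOW_TABLE_NAME, bump its use
 * counter and take a conntrack netns reference.
 */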
static int nft_flow_offload_init(const struct nft_ctx *ctx,
                                 const struct nft_expr *expr,
                                 const struct nlattr * const tb[])
{
        struct nft_flow_offload *priv = nft_expr_priv(expr);
        u8 genmask = nft_genmask_next(ctx->net);
        struct nft_flowtable *flowtable;

        if (!tb[NFTA_FLOW_TABLE_NAME])
                return -EINVAL;

        flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
                                         genmask);
        if (IS_ERR(flowtable))
                return PTR_ERR(flowtable);

        if (!nft_use_inc(&flowtable->use))
                return -EMFILE;

        priv->flowtable = flowtable;

        return nf_ct_netns_get(ctx->net, ctx->family);
}

static void nft_flow_offload_deactivate(const struct nft_ctx *ctx,
                                        const struct nft_expr *expr,
                                        enum nft_trans_phase phase)
{
        struct nft_flow_offload *priv = nft_expr_priv(expr);

        nf_tables_deactivate_flowtable(ctx, priv->flowtable, phase);
}

static void nft_flow_offload_activate(const struct nft_ctx *ctx,
                                      const struct nft_expr *expr)
{
        struct nft_flow_offload *priv = nft_expr_priv(expr);

        nft_use_inc_restore(&priv->flowtable->use);
}

static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
                                     const struct nft_expr *expr)
{
        nf_ct_netns_put(ctx->net, ctx->family);
}

static int nft_flow_offload_dump(struct sk_buff *skb,
                                 const struct nft_expr *expr, bool reset)
{
        struct nft_flow_offload *priv = nft_expr_priv(expr);

        if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -1;
}

static struct nft_expr_type nft_flow_offload_type;
static const struct nft_expr_ops nft_flow_offload_ops = {
        .type = &nft_flow_offload_type,
        .size = NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
        .eval = nft_flow_offload_eval,
        .init = nft_flow_offload_init,
        .activate = nft_flow_offload_activate,
        .deactivate = nft_flow_offload_deactivate,
        .destroy = nft_flow_offload_destroy,
        .validate = nft_flow_offload_validate,
        .dump = nft_flow_offload_dump,
        .reduce = NFT_REDUCE_READONLY,
};

static struct nft_expr_type nft_flow_offload_type __read_mostly = {
        .name = "flow_offload",
        .ops = &nft_flow_offload_ops,
        .policy = nft_flow_offload_policy,
        .maxattr = NFTA_FLOW_MAX,
        .owner = THIS_MODULE,
};

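/* Flush flowtable entries that use a device which is going down. */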
static int flow_offload_netdev_event(struct notifier_block *this,
                                     unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event != NETDEV_DOWN)
                return NOTIFY_DONE;

        nf_flow_table_cleanup(dev);

        return NOTIFY_DONE;
}

static struct notifier_block flow_offload_netdev_notifier = {
        .notifier_call = flow_offload_netdev_event,
};

static int __init nft_flow_offload_module_init(void)
{
        int err;

        err = register_netdevice_notifier(&flow_offload_netdev_notifier);
        if (err)
                goto err;

        err = nft_register_expr(&nft_flow_offload_type);
        if (err < 0)
                goto register_expr;

        return 0;

register_expr:
        unregister_netdevice_notifier(&flow_offload_netdev_notifier);
err:
        return err;
}

static void __exit nft_flow_offload_module_exit(void)
{
        nft_unregister_expr(&nft_flow_offload_type);
        unregister_netdevice_notifier(&flow_offload_netdev_notifier);
}

module_init(nft_flow_offload_module_init);
module_exit(nft_flow_offload_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
MODULE_DESCRIPTION("nftables hardware flow offload module");