// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

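/* Populate one direction of the flow tuple from the corresponding
 * conntrack tuple: L3 addresses, L3/L4 protocol numbers and ports.
 * The port fields live in a union, so the u.tcp.port access also
 * covers UDP.
 */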
static void
flow_offload_fill_dir(struct flow_offload *flow,
                      enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
        struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

        ft->dir = dir;

        switch (ctt->src.l3num) {
        case NFPROTO_IPV4:
                ft->src_v4 = ctt->src.u3.in;
                ft->dst_v4 = ctt->dst.u3.in;
                break;
        case NFPROTO_IPV6:
                ft->src_v6 = ctt->src.u3.in6;
                ft->dst_v6 = ctt->dst.u3.in6;
                break;
        }

        ft->l3proto = ctt->src.l3num;
        ft->l4proto = ctt->dst.protonum;
        ft->src_port = ctt->src.u.tcp.port;
        ft->dst_port = ctt->dst.u.tcp.port;
}

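/* Allocate a flow entry for @ct: take a conntrack reference (refusing
 * a dying conntrack), fill both tuple directions and propagate the
 * conntrack's SNAT/DNAT status into the flow flags.
 */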
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
        struct flow_offload *flow;

        if (unlikely(nf_ct_is_dying(ct) ||
            !atomic_inc_not_zero(&ct->ct_general.use)))
                return NULL;

        flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
        if (!flow)
                goto err_ct_refcnt;

        flow->ct = ct;

        flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

        if (ct->status & IPS_SRC_NAT)
                __set_bit(NF_FLOW_SNAT, &flow->flags);
        if (ct->status & IPS_DST_NAT)
                __set_bit(NF_FLOW_DNAT, &flow->flags);

        return flow;

err_ct_refcnt:
        nf_ct_put(ct);

        return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

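/* Cache the route for one direction: hold a reference on the dst,
 * record the path MTU and take the input interface index from the dst
 * of the opposite direction (the device packets arrive on).
 */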
static int flow_offload_fill_route(struct flow_offload *flow,
                                   const struct nf_flow_route *route,
                                   enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
        struct dst_entry *other_dst = route->tuple[!dir].dst;
        struct dst_entry *dst = route->tuple[dir].dst;

        if (!dst_hold_safe(route->tuple[dir].dst))
                return -1;

        switch (flow_tuple->l3proto) {
        case NFPROTO_IPV4:
                flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
                break;
        case NFPROTO_IPV6:
                flow_tuple->mtu = ip6_dst_mtu_forward(dst);
                break;
        }

        flow_tuple->iifidx = other_dst->dev->ifindex;
        flow_tuple->dst_cache = dst;

        return 0;
}

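/* Set up cached routes for both directions and mark the flow as a
 * route-type offload; on failure of the reply direction, release the
 * dst reference already taken for the original direction.
 */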
int flow_offload_route_init(struct flow_offload *flow,
                            const struct nf_flow_route *route)
{
        int err;

        err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
        if (err < 0)
                return err;

        err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
        if (err < 0)
                goto err_route_reply;

        flow->type = NF_FLOW_OFFLOAD_ROUTE;

        return 0;

err_route_reply:
        dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);

        return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

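/* While a flow is offloaded, conntrack no longer sees its packets, so
 * the conntrack entry goes stale. When the flow returns to the slow
 * path, force the TCP state back to ESTABLISHED with td_maxwin = 0 so
 * that window tracking re-learns from the next packet, and grant the
 * conntrack entry a fresh pickup timeout.
 */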
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
        tcp->state = TCP_CONNTRACK_ESTABLISHED;
        tcp->seen[0].td_maxwin = 0;
        tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
        const struct nf_conntrack_l4proto *l4proto;
        int l4num = nf_ct_protonum(ct);
        unsigned int timeout;

        l4proto = nf_ct_l4proto_find(l4num);
        if (!l4proto)
                return;

        if (l4num == IPPROTO_TCP)
                timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
        else if (l4num == IPPROTO_UDP)
                timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
        else
                return;

        if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
                ct->timeout = nfct_time_stamp + timeout;
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
        if (nf_ct_protonum(ct) == IPPROTO_TCP)
                flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
        flow_offload_fixup_ct_state(ct);
        flow_offload_fixup_ct_timeout(ct);
}

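/* Release a flow: drop the cached dst references for route-type flows,
 * put the conntrack reference and free the entry via RCU.
 */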
static void flow_offload_route_release(struct flow_offload *flow)
{
        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
}

void flow_offload_free(struct flow_offload *flow)
{
        switch (flow->type) {
        case NF_FLOW_OFFLOAD_ROUTE:
                flow_offload_route_release(flow);
                break;
        default:
                break;
        }
        nf_ct_put(flow->ct);
        kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

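/* rhashtable glue: hash and compare only the lookup key part of the
 * tuple, i.e. everything laid out before the 'dir' member, hence the
 * offsetof(struct flow_offload_tuple, dir) length.
 */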
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple *tuple = data;

        return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple_rhash *tuplehash = data;

        return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
                                 const void *ptr)
{
        const struct flow_offload_tuple *tuple = arg->key;
        const struct flow_offload_tuple_rhash *x = ptr;

        if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
                return 1;

        return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
        .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
        .hashfn                 = flow_offload_hash,
        .obj_hashfn             = flow_offload_hash_obj,
        .obj_cmpfn              = flow_offload_hash_cmp,
        .automatic_shrinking    = true,
};

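/* Insert a flow into the table. Each flow is hashed twice, once per
 * direction, so packets from either side find the same entry. If the
 * flowtable supports hardware offload, the flow is also pushed to the
 * hardware.
 */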
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
        int err;

        flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[1].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0) {
                rhashtable_remove_fast(&flow_table->rhashtable,
                                       &flow->tuplehash[0].node,
                                       nf_flow_offload_rhash_params);
                return err;
        }

        if (nf_flowtable_hw_offload(flow_table)) {
                __set_bit(NF_FLOW_HW, &flow->flags);
                nf_flow_offload_add(flow_table, flow);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

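/* Packet path refresh: extend the flow timeout and, if the flow has
 * been flagged with NF_FLOW_HW_REFRESH, retry the hardware offload.
 */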
void flow_offload_refresh(struct nf_flowtable *flow_table,
                          struct flow_offload *flow)
{
        flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

        if (likely(!nf_flowtable_hw_offload(flow_table) ||
                   !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
                return;

        nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
        return nf_flow_timeout_delta(flow->timeout) <= 0;
}

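/* Unlink a flow from both hash directions and hand the connection back
 * to the slow path: clear IPS_OFFLOAD, repair the conntrack
 * state/timeout, then free the flow.
 */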
static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
{
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);

        clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

        if (nf_flow_has_expired(flow))
                flow_offload_fixup_ct(flow->ct);
        else
                flow_offload_fixup_ct_timeout(flow->ct);

        flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
        set_bit(NF_FLOW_TEARDOWN, &flow->flags);

        flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

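/* Fast path lookup. A flow that is being torn down or whose conntrack
 * is dying is treated as a miss, so those packets fall back to the
 * classic forwarding path.
 */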
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
                    struct flow_offload_tuple *tuple)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload *flow;
        int dir;

        tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
                                      nf_flow_offload_rhash_params);
        if (!tuplehash)
                return NULL;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
                return NULL;

        if (unlikely(nf_ct_is_dying(flow->ct)))
                return NULL;

        return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

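/* Walk all flows in the table. Entries with dir != 0 are skipped so
 * each flow is visited exactly once despite being hashed twice;
 * -EAGAIN from the walker (concurrent resize) is not fatal and the
 * walk simply continues.
 */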
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
                      void (*iter)(struct flow_offload *flow, void *data),
                      void *data)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
        int err = 0;

        rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);

        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
                        if (PTR_ERR(tuplehash) != -EAGAIN) {
                                err = PTR_ERR(tuplehash);
                                break;
                        }
                        continue;
                }
                if (tuplehash->tuple.dir)
                        continue;

                flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

                iter(flow, data);
        }
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        return err;
}

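/* GC step, run for every flow roughly once per second: flows that have
 * expired or whose conntrack is dying are marked for teardown, and
 * torn-down flows are removed. Hardware-offloaded flows go in two
 * phases: first the driver is asked to delete the entry
 * (NF_FLOW_HW_DYING), then the software state is freed once the
 * hardware removal has completed (NF_FLOW_HW_DEAD). Live hardware
 * flows have their stats synced instead.
 */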
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
        struct nf_flowtable *flow_table = data;

        if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
                set_bit(NF_FLOW_TEARDOWN, &flow->flags);

        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
                if (test_bit(NF_FLOW_HW, &flow->flags)) {
                        if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
                                nf_flow_offload_del(flow_table, flow);
                        else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
                                flow_offload_del(flow_table, flow);
                } else {
                        flow_offload_del(flow_table, flow);
                }
        } else if (test_bit(NF_FLOW_HW, &flow->flags)) {
                nf_flow_offload_stats(flow_table, flow);
        }
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
        struct nf_flowtable *flow_table;

        flow_table = container_of(work, struct nf_flowtable, gc_work.work);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

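/* Register/unregister an offload callback on the flowtable's
 * flow_block; registering the same callback twice yields -EEXIST.
 */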
int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
                                 flow_setup_cb_t *cb, void *cb_priv)
{
        struct flow_block *block = &flow_table->flow_block;
        struct flow_block_cb *block_cb;
        int err = 0;

        down_write(&flow_table->flow_block_lock);
        block_cb = flow_block_cb_lookup(block, cb, cb_priv);
        if (block_cb) {
                err = -EEXIST;
                goto unlock;
        }

        block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
        if (IS_ERR(block_cb)) {
                err = PTR_ERR(block_cb);
                goto unlock;
        }

        list_add_tail(&block_cb->list, &block->cb_list);

unlock:
        up_write(&flow_table->flow_block_lock);
        return err;
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);

void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
                                  flow_setup_cb_t *cb, void *cb_priv)
{
        struct flow_block *block = &flow_table->flow_block;
        struct flow_block_cb *block_cb;

        down_write(&flow_table->flow_block_lock);
        block_cb = flow_block_cb_lookup(block, cb, cb_priv);
        if (block_cb) {
                list_del(&block_cb->list);
                flow_block_cb_free(block_cb);
        } else {
                WARN_ON(true);
        }
        up_write(&flow_table->flow_block_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);

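/* Checksum fixups for port rewriting. TCP always carries a checksum;
 * a UDP checksum of zero means "no checksum" on IPv4, so it is only
 * updated if already set (or being computed by hardware), and a result
 * of zero is folded to CSUM_MANGLED_0.
 */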
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
{
        struct tcphdr *tcph;

        if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
            skb_try_make_writable(skb, thoff + sizeof(*tcph)))
                return -1;

        tcph = (void *)(skb_network_header(skb) + thoff);
        inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);

        return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
{
        struct udphdr *udph;

        if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
            skb_try_make_writable(skb, thoff + sizeof(*udph)))
                return -1;

        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace2(&udph->check, skb, port,
                                         new_port, true);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
                            u8 protocol, __be16 port, __be16 new_port)
{
        switch (protocol) {
        case IPPROTO_TCP:
                if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
                        return NF_DROP;
                break;
        case IPPROTO_UDP:
                if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
                        return NF_DROP;
                break;
        }

        return 0;
}

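/* Source port rewrite (SNAT): in the original direction the new source
 * port comes from the reply tuple's destination port; in the reply
 * direction the destination port is mapped back to the original
 * tuple's source port.
 */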
int nf_flow_snat_port(const struct flow_offload *flow,
                      struct sk_buff *skb, unsigned int thoff,
                      u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
            skb_try_make_writable(skb, thoff + sizeof(*hdr)))
                return -1;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
                hdr->source = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
                hdr->dest = new_port;
                break;
        default:
                return -1;
        }

        return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

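/* Destination port rewrite (DNAT), the mirror image of
 * nf_flow_snat_port.
 */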
int nf_flow_dnat_port(const struct flow_offload *flow,
                      struct sk_buff *skb, unsigned int thoff,
                      u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
            skb_try_make_writable(skb, thoff + sizeof(*hdr)))
                return -1;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
                hdr->dest = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
                hdr->source = new_port;
                break;
        default:
                return -1;
        }

        return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

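/* Initialize a flowtable: set up the rhashtable and the flow_block for
 * offload callbacks, start the periodic garbage collector (every HZ
 * jiffies) and link the table into the global flowtables list.
 */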
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
        int err;

        INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
        flow_block_init(&flowtable->flow_block);
        init_rwsem(&flowtable->flow_block_lock);

        err = rhashtable_init(&flowtable->rhashtable,
                              &nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        queue_delayed_work(system_power_efficient_wq,
                           &flowtable->gc_work, HZ);

        mutex_lock(&flowtable_lock);
        list_add(&flowtable->list, &flowtables);
        mutex_unlock(&flowtable_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

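/* Teardown helper for cleanup iteration: with a NULL device all flows
 * are torn down (table destruction); otherwise only flows that use the
 * given device as input interface in either direction.
 */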
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
        struct net_device *dev = data;

        if (!dev) {
                flow_offload_teardown(flow);
                return;
        }

        if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                flow_offload_teardown(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
                                          struct net_device *dev)
{
        nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
        flush_delayed_work(&flowtable->gc_work);
        nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
        struct nf_flowtable *flowtable;

        mutex_lock(&flowtable_lock);
        list_for_each_entry(flowtable, &flowtables, list)
                nf_flow_table_iterate_cleanup(flowtable, dev);
        mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

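/* Destroy a flowtable: unlink it, stop the garbage collector, tear
 * down and reap all remaining flows (an extra GC pass lets pending
 * hardware removals complete), then destroy the hash table.
 */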
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
        mutex_lock(&flowtable_lock);
        list_del(&flow_table->list);
        mutex_unlock(&flowtable_lock);

        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        nf_flow_table_offload_flush(flow_table);
        if (nf_flowtable_hw_offload(flow_table))
                nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
                                      flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
        return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
        nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");