// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

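/* Per-zone flow table: act_ct instances that share a conntrack zone also
 * share one nf_flowtable, found by zone id in the zones_ht rhashtable and
 * kept alive by the refcount below.
 */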
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different from the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

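/* Fill in the conntrack metadata (mark, labels and a restore cookie) that
 * gets attached to offloaded packets; the cookie packs the ct pointer and
 * ctinfo the same way nf_ct_set() stores them in skb->_nfct.
 */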
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

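/* Callback used by the nf_flow_table offload infrastructure to translate an
 * offloaded flow into flow_action entries: NAT mangles first, then the
 * conntrack metadata entry. On failure, any entries added here are wiped so
 * the rule is left exactly as it was passed in.
 */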
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

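/* Look up (or create) the flow table for params->zone under zones_mutex.
 * An existing table is reused only if its refcount can still be raised;
 * otherwise a new one is allocated, inserted into zones_ht and initialized
 * with hardware offload enabled.
 */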
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

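/* Drop one reference; the last put unlinks the table from zones_ht and
 * defers the actual teardown to the workqueue after an RCU grace period,
 * since the datapath may still be walking the flow table.
 */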
static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

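/* Offload one established connection into the zone's flow table. The
 * IPS_OFFLOAD bit guards against double insertion; for TCP the window
 * tracking is relaxed (BE_LIBERAL) so packets are not marked invalid once
 * the state machine no longer sees every packet of the flow.
 */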
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

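/* Parse the packet into a flow_offload_tuple for the fast-path lookup.
 * Only non-fragmented TCP/UDP with a plain 20-byte IPv4 header (or a plain
 * IPv6 header) qualifies; *tcph is set for TCP so the caller can check
 * FIN/RST. Headers are re-read after each pskb_network_may_pull() since
 * pulling may reallocate the header area.
 */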
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

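/* Fast path: try to match the packet against an offloaded flow before
 * running full conntrack. On a hit the flow is refreshed and the skb gets
 * its conntrack reference set as if nf_conntrack_in() had run; TCP FIN/RST
 * tears the flow down and falls back to the slow path.
 */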
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

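/* Reassemble IP fragments in place so conntrack sees complete packets.
 * Returns -EINPROGRESS while the defrag engine has stolen the skb (more
 * fragments are needed); the caller must then return TC_ACT_STOLEN.
 */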
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			goto out_free;
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

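/* Pick the NAT manip direction for this packet and apply it. Established
 * or related connections replay the original mapping (reversed for the
 * reply direction); new connections are set up from the configured range.
 * When both SNAT and DNAT are active, the second translation runs as well.
 */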
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP; /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

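/* Main datapath entry point. Order of operations: clear (if requested),
 * defragment, trim padding, try the offloaded-flow fast path, otherwise
 * run nf_conntrack_in(), then NAT, and finally commit (mark/labels/confirm)
 * or hand the connection to the flow table for offload.
 */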
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

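/* Parse the NAT-related netlink attributes into an nf_nat_range2. A missing
 * MAX attribute collapses the range to the single MIN value; SNAT and DNAT
 * remain mutually exclusive on a single action instance.
 */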
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels aren't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones aren't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

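/* Control path: create or replace an act_ct instance. New parameters are
 * built off to the side and swapped in under tcf_lock with RCU, so the
 * datapath never sees a half-initialized tcf_ct_params.
 */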
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
	if (res == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, false, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.walk = tcf_ct_walker,
	.lookup = tcf_ct_search,
	.stats_update = tcf_stats_update,
	.size = sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

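/* Called from offload code to restore the conntrack state on an skb from
 * the FLOW_ACTION_CT_METADATA cookie: the NFCT_INFOMASK bits carry ctinfo
 * and the NFCT_PTRMASK bits carry the struct nf_conn pointer.
 */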
void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
{
	enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
	struct nf_conn *ct;

	ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
}
EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb);

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");