1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32 #include <linux/siphash.h>
34 #include <linux/netfilter.h>
35 #include <net/netlink.h>
37 #include <net/netfilter/nf_conntrack.h>
38 #include <net/netfilter/nf_conntrack_core.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_seqadj.h>
42 #include <net/netfilter/nf_conntrack_l4proto.h>
43 #include <net/netfilter/nf_conntrack_tuple.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_labels.h>
48 #include <net/netfilter/nf_conntrack_synproxy.h>
49 #if IS_ENABLED(CONFIG_NF_NAT)
50 #include <net/netfilter/nf_nat.h>
51 #include <net/netfilter/nf_nat_helper.h>
54 #include <linux/netfilter/nfnetlink.h>
55 #include <linux/netfilter/nfnetlink_conntrack.h>
57 #include "nf_internals.h"
59 MODULE_LICENSE("GPL");
61 static int ctnetlink_dump_tuples_proto(struct sk_buff
*skb
,
62 const struct nf_conntrack_tuple
*tuple
,
63 const struct nf_conntrack_l4proto
*l4proto
)
66 struct nlattr
*nest_parms
;
68 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_PROTO
);
71 if (nla_put_u8(skb
, CTA_PROTO_NUM
, tuple
->dst
.protonum
))
74 if (likely(l4proto
->tuple_to_nlattr
))
75 ret
= l4proto
->tuple_to_nlattr(skb
, tuple
);
77 nla_nest_end(skb
, nest_parms
);
85 static int ipv4_tuple_to_nlattr(struct sk_buff
*skb
,
86 const struct nf_conntrack_tuple
*tuple
)
88 if (nla_put_in_addr(skb
, CTA_IP_V4_SRC
, tuple
->src
.u3
.ip
) ||
89 nla_put_in_addr(skb
, CTA_IP_V4_DST
, tuple
->dst
.u3
.ip
))
94 static int ipv6_tuple_to_nlattr(struct sk_buff
*skb
,
95 const struct nf_conntrack_tuple
*tuple
)
97 if (nla_put_in6_addr(skb
, CTA_IP_V6_SRC
, &tuple
->src
.u3
.in6
) ||
98 nla_put_in6_addr(skb
, CTA_IP_V6_DST
, &tuple
->dst
.u3
.in6
))
103 static int ctnetlink_dump_tuples_ip(struct sk_buff
*skb
,
104 const struct nf_conntrack_tuple
*tuple
)
107 struct nlattr
*nest_parms
;
109 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_IP
);
111 goto nla_put_failure
;
113 switch (tuple
->src
.l3num
) {
115 ret
= ipv4_tuple_to_nlattr(skb
, tuple
);
118 ret
= ipv6_tuple_to_nlattr(skb
, tuple
);
122 nla_nest_end(skb
, nest_parms
);
130 static int ctnetlink_dump_tuples(struct sk_buff
*skb
,
131 const struct nf_conntrack_tuple
*tuple
)
133 const struct nf_conntrack_l4proto
*l4proto
;
137 ret
= ctnetlink_dump_tuples_ip(skb
, tuple
);
140 l4proto
= nf_ct_l4proto_find(tuple
->dst
.protonum
);
141 ret
= ctnetlink_dump_tuples_proto(skb
, tuple
, l4proto
);
147 static int ctnetlink_dump_zone_id(struct sk_buff
*skb
, int attrtype
,
148 const struct nf_conntrack_zone
*zone
, int dir
)
150 if (zone
->id
== NF_CT_DEFAULT_ZONE_ID
|| zone
->dir
!= dir
)
152 if (nla_put_be16(skb
, attrtype
, htons(zone
->id
)))
153 goto nla_put_failure
;
160 static int ctnetlink_dump_status(struct sk_buff
*skb
, const struct nf_conn
*ct
)
162 if (nla_put_be32(skb
, CTA_STATUS
, htonl(ct
->status
)))
163 goto nla_put_failure
;
170 static int ctnetlink_dump_timeout(struct sk_buff
*skb
, const struct nf_conn
*ct
,
173 long timeout
= nf_ct_expires(ct
) / HZ
;
175 if (skip_zero
&& timeout
== 0)
178 if (nla_put_be32(skb
, CTA_TIMEOUT
, htonl(timeout
)))
179 goto nla_put_failure
;
186 static int ctnetlink_dump_protoinfo(struct sk_buff
*skb
, struct nf_conn
*ct
,
189 const struct nf_conntrack_l4proto
*l4proto
;
190 struct nlattr
*nest_proto
;
193 l4proto
= nf_ct_l4proto_find(nf_ct_protonum(ct
));
194 if (!l4proto
->to_nlattr
)
197 nest_proto
= nla_nest_start(skb
, CTA_PROTOINFO
);
199 goto nla_put_failure
;
201 ret
= l4proto
->to_nlattr(skb
, nest_proto
, ct
, destroy
);
203 nla_nest_end(skb
, nest_proto
);
211 static int ctnetlink_dump_helpinfo(struct sk_buff
*skb
,
212 const struct nf_conn
*ct
)
214 struct nlattr
*nest_helper
;
215 const struct nf_conn_help
*help
= nfct_help(ct
);
216 struct nf_conntrack_helper
*helper
;
222 helper
= rcu_dereference(help
->helper
);
226 nest_helper
= nla_nest_start(skb
, CTA_HELP
);
228 goto nla_put_failure
;
229 if (nla_put_string(skb
, CTA_HELP_NAME
, helper
->name
))
230 goto nla_put_failure
;
232 if (helper
->to_nlattr
)
233 helper
->to_nlattr(skb
, ct
);
235 nla_nest_end(skb
, nest_helper
);
246 dump_counters(struct sk_buff
*skb
, struct nf_conn_acct
*acct
,
247 enum ip_conntrack_dir dir
, int type
)
249 enum ctattr_type attr
= dir
? CTA_COUNTERS_REPLY
: CTA_COUNTERS_ORIG
;
250 struct nf_conn_counter
*counter
= acct
->counter
;
251 struct nlattr
*nest_count
;
254 if (type
== IPCTNL_MSG_CT_GET_CTRZERO
) {
255 pkts
= atomic64_xchg(&counter
[dir
].packets
, 0);
256 bytes
= atomic64_xchg(&counter
[dir
].bytes
, 0);
258 pkts
= atomic64_read(&counter
[dir
].packets
);
259 bytes
= atomic64_read(&counter
[dir
].bytes
);
262 nest_count
= nla_nest_start(skb
, attr
);
264 goto nla_put_failure
;
266 if (nla_put_be64(skb
, CTA_COUNTERS_PACKETS
, cpu_to_be64(pkts
),
268 nla_put_be64(skb
, CTA_COUNTERS_BYTES
, cpu_to_be64(bytes
),
270 goto nla_put_failure
;
272 nla_nest_end(skb
, nest_count
);
281 ctnetlink_dump_acct(struct sk_buff
*skb
, const struct nf_conn
*ct
, int type
)
283 struct nf_conn_acct
*acct
= nf_conn_acct_find(ct
);
288 if (dump_counters(skb
, acct
, IP_CT_DIR_ORIGINAL
, type
) < 0)
290 if (dump_counters(skb
, acct
, IP_CT_DIR_REPLY
, type
) < 0)
297 ctnetlink_dump_timestamp(struct sk_buff
*skb
, const struct nf_conn
*ct
)
299 struct nlattr
*nest_count
;
300 const struct nf_conn_tstamp
*tstamp
;
302 tstamp
= nf_conn_tstamp_find(ct
);
306 nest_count
= nla_nest_start(skb
, CTA_TIMESTAMP
);
308 goto nla_put_failure
;
310 if (nla_put_be64(skb
, CTA_TIMESTAMP_START
, cpu_to_be64(tstamp
->start
),
311 CTA_TIMESTAMP_PAD
) ||
312 (tstamp
->stop
!= 0 && nla_put_be64(skb
, CTA_TIMESTAMP_STOP
,
313 cpu_to_be64(tstamp
->stop
),
315 goto nla_put_failure
;
316 nla_nest_end(skb
, nest_count
);
#ifdef CONFIG_NF_CONNTRACK_MARK
/* Emit CTA_MARK (the packet mark associated with this conntrack). */
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Emit the CTA_SECCTX nest holding the LSM security context string for
 * ct->secmark. The context buffer obtained from the LSM must be released
 * on every exit path.
 */
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
/* Size contribution of CTA_LABELS for nlmsg allocation, 0 without labels. */
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(sizeof(labels->bits));
}

/* Emit CTA_LABELS, but only when at least one label bit is set. */
static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int i;

	if (!labels)
		return 0;

	i = 0;
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
				       labels->bits);
		i++;
	} while (i < ARRAY_SIZE(labels->bits));

	return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a)	(0)
#endif
401 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
403 static int ctnetlink_dump_master(struct sk_buff
*skb
, const struct nf_conn
*ct
)
405 struct nlattr
*nest_parms
;
407 if (!(ct
->status
& IPS_EXPECTED
))
410 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_MASTER
);
412 goto nla_put_failure
;
413 if (ctnetlink_dump_tuples(skb
, master_tuple(ct
)) < 0)
414 goto nla_put_failure
;
415 nla_nest_end(skb
, nest_parms
);
424 dump_ct_seq_adj(struct sk_buff
*skb
, const struct nf_ct_seqadj
*seq
, int type
)
426 struct nlattr
*nest_parms
;
428 nest_parms
= nla_nest_start(skb
, type
);
430 goto nla_put_failure
;
432 if (nla_put_be32(skb
, CTA_SEQADJ_CORRECTION_POS
,
433 htonl(seq
->correction_pos
)) ||
434 nla_put_be32(skb
, CTA_SEQADJ_OFFSET_BEFORE
,
435 htonl(seq
->offset_before
)) ||
436 nla_put_be32(skb
, CTA_SEQADJ_OFFSET_AFTER
,
437 htonl(seq
->offset_after
)))
438 goto nla_put_failure
;
440 nla_nest_end(skb
, nest_parms
);
448 static int ctnetlink_dump_ct_seq_adj(struct sk_buff
*skb
, struct nf_conn
*ct
)
450 struct nf_conn_seqadj
*seqadj
= nfct_seqadj(ct
);
451 struct nf_ct_seqadj
*seq
;
453 if (!(ct
->status
& IPS_SEQ_ADJUST
) || !seqadj
)
456 spin_lock_bh(&ct
->lock
);
457 seq
= &seqadj
->seq
[IP_CT_DIR_ORIGINAL
];
458 if (dump_ct_seq_adj(skb
, seq
, CTA_SEQ_ADJ_ORIG
) == -1)
461 seq
= &seqadj
->seq
[IP_CT_DIR_REPLY
];
462 if (dump_ct_seq_adj(skb
, seq
, CTA_SEQ_ADJ_REPLY
) == -1)
465 spin_unlock_bh(&ct
->lock
);
468 spin_unlock_bh(&ct
->lock
);
472 static int ctnetlink_dump_ct_synproxy(struct sk_buff
*skb
, struct nf_conn
*ct
)
474 struct nf_conn_synproxy
*synproxy
= nfct_synproxy(ct
);
475 struct nlattr
*nest_parms
;
480 nest_parms
= nla_nest_start(skb
, CTA_SYNPROXY
);
482 goto nla_put_failure
;
484 if (nla_put_be32(skb
, CTA_SYNPROXY_ISN
, htonl(synproxy
->isn
)) ||
485 nla_put_be32(skb
, CTA_SYNPROXY_ITS
, htonl(synproxy
->its
)) ||
486 nla_put_be32(skb
, CTA_SYNPROXY_TSOFF
, htonl(synproxy
->tsoff
)))
487 goto nla_put_failure
;
489 nla_nest_end(skb
, nest_parms
);
497 static int ctnetlink_dump_id(struct sk_buff
*skb
, const struct nf_conn
*ct
)
499 __be32 id
= (__force __be32
)nf_ct_get_id(ct
);
501 if (nla_put_be32(skb
, CTA_ID
, id
))
502 goto nla_put_failure
;
509 static int ctnetlink_dump_use(struct sk_buff
*skb
, const struct nf_conn
*ct
)
511 if (nla_put_be32(skb
, CTA_USE
, htonl(refcount_read(&ct
->ct_general
.use
))))
512 goto nla_put_failure
;
519 /* all these functions access ct->ext. Caller must either hold a reference
520 * on ct or prevent its deletion by holding either the bucket spinlock or
521 * pcpu dying list lock.
523 static int ctnetlink_dump_extinfo(struct sk_buff
*skb
,
524 struct nf_conn
*ct
, u32 type
)
526 if (ctnetlink_dump_acct(skb
, ct
, type
) < 0 ||
527 ctnetlink_dump_timestamp(skb
, ct
) < 0 ||
528 ctnetlink_dump_helpinfo(skb
, ct
) < 0 ||
529 ctnetlink_dump_labels(skb
, ct
) < 0 ||
530 ctnetlink_dump_ct_seq_adj(skb
, ct
) < 0 ||
531 ctnetlink_dump_ct_synproxy(skb
, ct
) < 0)
537 static int ctnetlink_dump_info(struct sk_buff
*skb
, struct nf_conn
*ct
)
539 if (ctnetlink_dump_status(skb
, ct
) < 0 ||
540 ctnetlink_dump_mark(skb
, ct
) < 0 ||
541 ctnetlink_dump_secctx(skb
, ct
) < 0 ||
542 ctnetlink_dump_id(skb
, ct
) < 0 ||
543 ctnetlink_dump_use(skb
, ct
) < 0 ||
544 ctnetlink_dump_master(skb
, ct
) < 0)
547 if (!test_bit(IPS_OFFLOAD_BIT
, &ct
->status
) &&
548 (ctnetlink_dump_timeout(skb
, ct
, false) < 0 ||
549 ctnetlink_dump_protoinfo(skb
, ct
, false) < 0))
556 ctnetlink_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, u32 type
,
557 struct nf_conn
*ct
, bool extinfo
, unsigned int flags
)
559 const struct nf_conntrack_zone
*zone
;
560 struct nlmsghdr
*nlh
;
561 struct nlattr
*nest_parms
;
565 flags
|= NLM_F_MULTI
;
566 event
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK
, IPCTNL_MSG_CT_NEW
);
567 nlh
= nfnl_msg_put(skb
, portid
, seq
, event
, flags
, nf_ct_l3num(ct
),
572 zone
= nf_ct_zone(ct
);
574 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_ORIG
);
576 goto nla_put_failure
;
577 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_ORIGINAL
)) < 0)
578 goto nla_put_failure
;
579 if (ctnetlink_dump_zone_id(skb
, CTA_TUPLE_ZONE
, zone
,
580 NF_CT_ZONE_DIR_ORIG
) < 0)
581 goto nla_put_failure
;
582 nla_nest_end(skb
, nest_parms
);
584 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_REPLY
);
586 goto nla_put_failure
;
587 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_REPLY
)) < 0)
588 goto nla_put_failure
;
589 if (ctnetlink_dump_zone_id(skb
, CTA_TUPLE_ZONE
, zone
,
590 NF_CT_ZONE_DIR_REPL
) < 0)
591 goto nla_put_failure
;
592 nla_nest_end(skb
, nest_parms
);
594 if (ctnetlink_dump_zone_id(skb
, CTA_ZONE
, zone
,
595 NF_CT_DEFAULT_ZONE_DIR
) < 0)
596 goto nla_put_failure
;
598 if (ctnetlink_dump_info(skb
, ct
) < 0)
599 goto nla_put_failure
;
600 if (extinfo
&& ctnetlink_dump_extinfo(skb
, ct
, type
) < 0)
601 goto nla_put_failure
;
608 nlmsg_cancel(skb
, nlh
);
612 static const struct nla_policy cta_ip_nla_policy
[CTA_IP_MAX
+ 1] = {
613 [CTA_IP_V4_SRC
] = { .type
= NLA_U32
},
614 [CTA_IP_V4_DST
] = { .type
= NLA_U32
},
615 [CTA_IP_V6_SRC
] = { .len
= sizeof(__be32
) * 4 },
616 [CTA_IP_V6_DST
] = { .len
= sizeof(__be32
) * 4 },
619 #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
620 static size_t ctnetlink_proto_size(const struct nf_conn
*ct
)
622 const struct nf_conntrack_l4proto
*l4proto
;
623 size_t len
, len4
= 0;
625 len
= nla_policy_len(cta_ip_nla_policy
, CTA_IP_MAX
+ 1);
626 len
*= 3u; /* ORIG, REPLY, MASTER */
628 l4proto
= nf_ct_l4proto_find(nf_ct_protonum(ct
));
629 len
+= l4proto
->nlattr_size
;
630 if (l4proto
->nlattr_tuple_size
) {
631 len4
= l4proto
->nlattr_tuple_size();
632 len4
*= 3u; /* ORIG, REPLY, MASTER */
639 static inline size_t ctnetlink_acct_size(const struct nf_conn
*ct
)
641 if (!nf_ct_ext_exist(ct
, NF_CT_EXT_ACCT
))
643 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
644 + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
645 + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
/* Message-size contribution of CTA_SECCTX; queries the LSM for the
 * context length without copying the context itself.
 */
static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}
665 static inline size_t ctnetlink_timestamp_size(const struct nf_conn
*ct
)
667 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
668 if (!nf_ct_ext_exist(ct
, NF_CT_EXT_TSTAMP
))
670 return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
676 #ifdef CONFIG_NF_CONNTRACK_EVENTS
677 static size_t ctnetlink_nlmsg_size(const struct nf_conn
*ct
)
679 return NLMSG_ALIGN(sizeof(struct nfgenmsg
))
680 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
681 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
682 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
683 + 3 * nla_total_size(sizeof(u_int8_t
)) /* CTA_PROTO_NUM */
684 + nla_total_size(sizeof(u_int32_t
)) /* CTA_ID */
685 + nla_total_size(sizeof(u_int32_t
)) /* CTA_STATUS */
686 + ctnetlink_acct_size(ct
)
687 + ctnetlink_timestamp_size(ct
)
688 + nla_total_size(sizeof(u_int32_t
)) /* CTA_TIMEOUT */
689 + nla_total_size(0) /* CTA_PROTOINFO */
690 + nla_total_size(0) /* CTA_HELP */
691 + nla_total_size(NF_CT_HELPER_NAME_LEN
) /* CTA_HELP_NAME */
692 + ctnetlink_secctx_size(ct
)
693 #if IS_ENABLED(CONFIG_NF_NAT)
694 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
695 + 6 * nla_total_size(sizeof(u_int32_t
)) /* CTA_NAT_SEQ_OFFSET */
697 #ifdef CONFIG_NF_CONNTRACK_MARK
698 + nla_total_size(sizeof(u_int32_t
)) /* CTA_MARK */
700 #ifdef CONFIG_NF_CONNTRACK_ZONES
701 + nla_total_size(sizeof(u_int16_t
)) /* CTA_ZONE|CTA_TUPLE_ZONE */
703 + ctnetlink_proto_size(ct
)
704 + ctnetlink_label_size(ct
)
709 ctnetlink_conntrack_event(unsigned int events
, const struct nf_ct_event
*item
)
711 const struct nf_conntrack_zone
*zone
;
713 struct nlmsghdr
*nlh
;
714 struct nlattr
*nest_parms
;
715 struct nf_conn
*ct
= item
->ct
;
718 unsigned int flags
= 0, group
;
721 if (events
& (1 << IPCT_DESTROY
)) {
722 type
= IPCTNL_MSG_CT_DELETE
;
723 group
= NFNLGRP_CONNTRACK_DESTROY
;
724 } else if (events
& ((1 << IPCT_NEW
) | (1 << IPCT_RELATED
))) {
725 type
= IPCTNL_MSG_CT_NEW
;
726 flags
= NLM_F_CREATE
|NLM_F_EXCL
;
727 group
= NFNLGRP_CONNTRACK_NEW
;
729 type
= IPCTNL_MSG_CT_NEW
;
730 group
= NFNLGRP_CONNTRACK_UPDATE
;
735 if (!item
->report
&& !nfnetlink_has_listeners(net
, group
))
738 skb
= nlmsg_new(ctnetlink_nlmsg_size(ct
), GFP_ATOMIC
);
742 type
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK
, type
);
743 nlh
= nfnl_msg_put(skb
, item
->portid
, 0, type
, flags
, nf_ct_l3num(ct
),
748 zone
= nf_ct_zone(ct
);
750 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_ORIG
);
752 goto nla_put_failure
;
753 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_ORIGINAL
)) < 0)
754 goto nla_put_failure
;
755 if (ctnetlink_dump_zone_id(skb
, CTA_TUPLE_ZONE
, zone
,
756 NF_CT_ZONE_DIR_ORIG
) < 0)
757 goto nla_put_failure
;
758 nla_nest_end(skb
, nest_parms
);
760 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_REPLY
);
762 goto nla_put_failure
;
763 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_REPLY
)) < 0)
764 goto nla_put_failure
;
765 if (ctnetlink_dump_zone_id(skb
, CTA_TUPLE_ZONE
, zone
,
766 NF_CT_ZONE_DIR_REPL
) < 0)
767 goto nla_put_failure
;
768 nla_nest_end(skb
, nest_parms
);
770 if (ctnetlink_dump_zone_id(skb
, CTA_ZONE
, zone
,
771 NF_CT_DEFAULT_ZONE_DIR
) < 0)
772 goto nla_put_failure
;
774 if (ctnetlink_dump_id(skb
, ct
) < 0)
775 goto nla_put_failure
;
777 if (ctnetlink_dump_status(skb
, ct
) < 0)
778 goto nla_put_failure
;
780 if (events
& (1 << IPCT_DESTROY
)) {
781 if (ctnetlink_dump_timeout(skb
, ct
, true) < 0)
782 goto nla_put_failure
;
784 if (ctnetlink_dump_acct(skb
, ct
, type
) < 0 ||
785 ctnetlink_dump_timestamp(skb
, ct
) < 0 ||
786 ctnetlink_dump_protoinfo(skb
, ct
, true) < 0)
787 goto nla_put_failure
;
789 if (ctnetlink_dump_timeout(skb
, ct
, false) < 0)
790 goto nla_put_failure
;
792 if (events
& (1 << IPCT_PROTOINFO
) &&
793 ctnetlink_dump_protoinfo(skb
, ct
, false) < 0)
794 goto nla_put_failure
;
796 if ((events
& (1 << IPCT_HELPER
) || nfct_help(ct
))
797 && ctnetlink_dump_helpinfo(skb
, ct
) < 0)
798 goto nla_put_failure
;
800 #ifdef CONFIG_NF_CONNTRACK_SECMARK
801 if ((events
& (1 << IPCT_SECMARK
) || ct
->secmark
)
802 && ctnetlink_dump_secctx(skb
, ct
) < 0)
803 goto nla_put_failure
;
805 if (events
& (1 << IPCT_LABEL
) &&
806 ctnetlink_dump_labels(skb
, ct
) < 0)
807 goto nla_put_failure
;
809 if (events
& (1 << IPCT_RELATED
) &&
810 ctnetlink_dump_master(skb
, ct
) < 0)
811 goto nla_put_failure
;
813 if (events
& (1 << IPCT_SEQADJ
) &&
814 ctnetlink_dump_ct_seq_adj(skb
, ct
) < 0)
815 goto nla_put_failure
;
817 if (events
& (1 << IPCT_SYNPROXY
) &&
818 ctnetlink_dump_ct_synproxy(skb
, ct
) < 0)
819 goto nla_put_failure
;
822 #ifdef CONFIG_NF_CONNTRACK_MARK
823 if ((events
& (1 << IPCT_MARK
) || ct
->mark
)
824 && ctnetlink_dump_mark(skb
, ct
) < 0)
825 goto nla_put_failure
;
828 err
= nfnetlink_send(skb
, net
, item
->portid
, group
, item
->report
,
830 if (err
== -ENOBUFS
|| err
== -EAGAIN
)
836 nlmsg_cancel(skb
, nlh
);
840 if (nfnetlink_set_err(net
, 0, group
, -ENOBUFS
) > 0)
845 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
847 static int ctnetlink_done(struct netlink_callback
*cb
)
850 nf_ct_put((struct nf_conn
*)cb
->args
[1]);
855 struct ctnetlink_filter_u32
{
860 struct ctnetlink_filter
{
863 u_int32_t orig_flags
;
864 u_int32_t reply_flags
;
866 struct nf_conntrack_tuple orig
;
867 struct nf_conntrack_tuple reply
;
868 struct nf_conntrack_zone zone
;
870 struct ctnetlink_filter_u32 mark
;
871 struct ctnetlink_filter_u32 status
;
874 static const struct nla_policy cta_filter_nla_policy
[CTA_FILTER_MAX
+ 1] = {
875 [CTA_FILTER_ORIG_FLAGS
] = { .type
= NLA_U32
},
876 [CTA_FILTER_REPLY_FLAGS
] = { .type
= NLA_U32
},
879 static int ctnetlink_parse_filter(const struct nlattr
*attr
,
880 struct ctnetlink_filter
*filter
)
882 struct nlattr
*tb
[CTA_FILTER_MAX
+ 1];
885 ret
= nla_parse_nested(tb
, CTA_FILTER_MAX
, attr
, cta_filter_nla_policy
,
890 if (tb
[CTA_FILTER_ORIG_FLAGS
]) {
891 filter
->orig_flags
= nla_get_u32(tb
[CTA_FILTER_ORIG_FLAGS
]);
892 if (filter
->orig_flags
& ~CTA_FILTER_F_ALL
)
896 if (tb
[CTA_FILTER_REPLY_FLAGS
]) {
897 filter
->reply_flags
= nla_get_u32(tb
[CTA_FILTER_REPLY_FLAGS
]);
898 if (filter
->reply_flags
& ~CTA_FILTER_F_ALL
)
905 static int ctnetlink_parse_zone(const struct nlattr
*attr
,
906 struct nf_conntrack_zone
*zone
);
907 static int ctnetlink_parse_tuple_filter(const struct nlattr
* const cda
[],
908 struct nf_conntrack_tuple
*tuple
,
909 u32 type
, u_int8_t l3num
,
910 struct nf_conntrack_zone
*zone
,
/* Parse CTA_MARK/CTA_MARK_MASK into @mark. A mask without a value is
 * rejected; a value without a mask implies an exact match (mask ~0).
 */
static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
				       const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK]) {
		mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));

		if (cda[CTA_MARK_MASK])
			mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
		else
			mark->mask = 0xffffffff;
	} else if (cda[CTA_MARK_MASK]) {
		return -EINVAL;
	}
#endif
	return 0;
}
931 static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32
*status
,
932 const struct nlattr
* const cda
[])
934 if (cda
[CTA_STATUS
]) {
935 status
->val
= ntohl(nla_get_be32(cda
[CTA_STATUS
]));
936 if (cda
[CTA_STATUS_MASK
])
937 status
->mask
= ntohl(nla_get_be32(cda
[CTA_STATUS_MASK
]));
939 status
->mask
= status
->val
;
941 /* status->val == 0? always true, else always false. */
942 if (status
->mask
== 0)
944 } else if (cda
[CTA_STATUS_MASK
]) {
948 /* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
949 BUILD_BUG_ON(__IPS_MAX_BIT
>= 32);
953 static struct ctnetlink_filter
*
954 ctnetlink_alloc_filter(const struct nlattr
* const cda
[], u8 family
)
956 struct ctnetlink_filter
*filter
;
959 #ifndef CONFIG_NF_CONNTRACK_MARK
960 if (cda
[CTA_MARK
] || cda
[CTA_MARK_MASK
])
961 return ERR_PTR(-EOPNOTSUPP
);
964 filter
= kzalloc(sizeof(*filter
), GFP_KERNEL
);
966 return ERR_PTR(-ENOMEM
);
968 filter
->family
= family
;
970 err
= ctnetlink_filter_parse_mark(&filter
->mark
, cda
);
974 err
= ctnetlink_filter_parse_status(&filter
->status
, cda
);
978 if (!cda
[CTA_FILTER
])
981 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &filter
->zone
);
985 err
= ctnetlink_parse_filter(cda
[CTA_FILTER
], filter
);
989 if (filter
->orig_flags
) {
990 if (!cda
[CTA_TUPLE_ORIG
]) {
995 err
= ctnetlink_parse_tuple_filter(cda
, &filter
->orig
,
1004 if (filter
->reply_flags
) {
1005 if (!cda
[CTA_TUPLE_REPLY
]) {
1010 err
= ctnetlink_parse_tuple_filter(cda
, &filter
->reply
,
1014 filter
->reply_flags
);
1024 return ERR_PTR(err
);
1027 static bool ctnetlink_needs_filter(u8 family
, const struct nlattr
* const *cda
)
1029 return family
|| cda
[CTA_MARK
] || cda
[CTA_FILTER
] || cda
[CTA_STATUS
];
1032 static int ctnetlink_start(struct netlink_callback
*cb
)
1034 const struct nlattr
* const *cda
= cb
->data
;
1035 struct ctnetlink_filter
*filter
= NULL
;
1036 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
1037 u8 family
= nfmsg
->nfgen_family
;
1039 if (ctnetlink_needs_filter(family
, cda
)) {
1040 filter
= ctnetlink_alloc_filter(cda
, family
);
1042 return PTR_ERR(filter
);
1049 static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple
*filter_tuple
,
1050 struct nf_conntrack_tuple
*ct_tuple
,
1051 u_int32_t flags
, int family
)
1055 if ((flags
& CTA_FILTER_FLAG(CTA_IP_SRC
)) &&
1056 filter_tuple
->src
.u3
.ip
!= ct_tuple
->src
.u3
.ip
)
1059 if ((flags
& CTA_FILTER_FLAG(CTA_IP_DST
)) &&
1060 filter_tuple
->dst
.u3
.ip
!= ct_tuple
->dst
.u3
.ip
)
1064 if ((flags
& CTA_FILTER_FLAG(CTA_IP_SRC
)) &&
1065 !ipv6_addr_cmp(&filter_tuple
->src
.u3
.in6
,
1066 &ct_tuple
->src
.u3
.in6
))
1069 if ((flags
& CTA_FILTER_FLAG(CTA_IP_DST
)) &&
1070 !ipv6_addr_cmp(&filter_tuple
->dst
.u3
.in6
,
1071 &ct_tuple
->dst
.u3
.in6
))
1076 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_NUM
)) &&
1077 filter_tuple
->dst
.protonum
!= ct_tuple
->dst
.protonum
)
1080 switch (ct_tuple
->dst
.protonum
) {
1083 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT
)) &&
1084 filter_tuple
->src
.u
.tcp
.port
!= ct_tuple
->src
.u
.tcp
.port
)
1087 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_DST_PORT
)) &&
1088 filter_tuple
->dst
.u
.tcp
.port
!= ct_tuple
->dst
.u
.tcp
.port
)
1092 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE
)) &&
1093 filter_tuple
->dst
.u
.icmp
.type
!= ct_tuple
->dst
.u
.icmp
.type
)
1095 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE
)) &&
1096 filter_tuple
->dst
.u
.icmp
.code
!= ct_tuple
->dst
.u
.icmp
.code
)
1098 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID
)) &&
1099 filter_tuple
->src
.u
.icmp
.id
!= ct_tuple
->src
.u
.icmp
.id
)
1102 case IPPROTO_ICMPV6
:
1103 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE
)) &&
1104 filter_tuple
->dst
.u
.icmp
.type
!= ct_tuple
->dst
.u
.icmp
.type
)
1106 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE
)) &&
1107 filter_tuple
->dst
.u
.icmp
.code
!= ct_tuple
->dst
.u
.icmp
.code
)
1109 if ((flags
& CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID
)) &&
1110 filter_tuple
->src
.u
.icmp
.id
!= ct_tuple
->src
.u
.icmp
.id
)
1118 static int ctnetlink_filter_match(struct nf_conn
*ct
, void *data
)
1120 struct ctnetlink_filter
*filter
= data
;
1121 struct nf_conntrack_tuple
*tuple
;
1127 /* Match entries of a given L3 protocol number.
1128 * If it is not specified, ie. l3proto == 0,
1129 * then match everything.
1131 if (filter
->family
&& nf_ct_l3num(ct
) != filter
->family
)
1134 if (filter
->orig_flags
) {
1135 tuple
= nf_ct_tuple(ct
, IP_CT_DIR_ORIGINAL
);
1136 if (!ctnetlink_filter_match_tuple(&filter
->orig
, tuple
,
1142 if (filter
->reply_flags
) {
1143 tuple
= nf_ct_tuple(ct
, IP_CT_DIR_REPLY
);
1144 if (!ctnetlink_filter_match_tuple(&filter
->reply
, tuple
,
1145 filter
->reply_flags
,
1150 #ifdef CONFIG_NF_CONNTRACK_MARK
1151 if ((ct
->mark
& filter
->mark
.mask
) != filter
->mark
.val
)
1154 status
= (u32
)READ_ONCE(ct
->status
);
1155 if ((status
& filter
->status
.mask
) != filter
->status
.val
)
1166 ctnetlink_dump_table(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1168 unsigned int flags
= cb
->data
? NLM_F_DUMP_FILTERED
: 0;
1169 struct net
*net
= sock_net(skb
->sk
);
1170 struct nf_conn
*ct
, *last
;
1171 struct nf_conntrack_tuple_hash
*h
;
1172 struct hlist_nulls_node
*n
;
1173 struct nf_conn
*nf_ct_evict
[8];
1177 last
= (struct nf_conn
*)cb
->args
[1];
1181 for (; cb
->args
[0] < nf_conntrack_htable_size
; cb
->args
[0]++) {
1185 if (nf_ct_should_gc(nf_ct_evict
[i
]))
1186 nf_ct_kill(nf_ct_evict
[i
]);
1187 nf_ct_put(nf_ct_evict
[i
]);
1190 lockp
= &nf_conntrack_locks
[cb
->args
[0] % CONNTRACK_LOCKS
];
1191 nf_conntrack_lock(lockp
);
1192 if (cb
->args
[0] >= nf_conntrack_htable_size
) {
1196 hlist_nulls_for_each_entry(h
, n
, &nf_conntrack_hash
[cb
->args
[0]],
1198 if (NF_CT_DIRECTION(h
) != IP_CT_DIR_ORIGINAL
)
1200 ct
= nf_ct_tuplehash_to_ctrack(h
);
1201 if (nf_ct_is_expired(ct
)) {
1202 if (i
< ARRAY_SIZE(nf_ct_evict
) &&
1203 refcount_inc_not_zero(&ct
->ct_general
.use
))
1204 nf_ct_evict
[i
++] = ct
;
1208 if (!net_eq(net
, nf_ct_net(ct
)))
1216 if (!ctnetlink_filter_match(ct
, cb
->data
))
1220 ctnetlink_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
1222 NFNL_MSG_TYPE(cb
->nlh
->nlmsg_type
),
1225 nf_conntrack_get(&ct
->ct_general
);
1226 cb
->args
[1] = (unsigned long)ct
;
1240 /* nf ct hash resize happened, now clear the leftover. */
1241 if ((struct nf_conn
*)cb
->args
[1] == last
)
1249 if (nf_ct_should_gc(nf_ct_evict
[i
]))
1250 nf_ct_kill(nf_ct_evict
[i
]);
1251 nf_ct_put(nf_ct_evict
[i
]);
1257 static int ipv4_nlattr_to_tuple(struct nlattr
*tb
[],
1258 struct nf_conntrack_tuple
*t
,
1261 if (flags
& CTA_FILTER_FLAG(CTA_IP_SRC
)) {
1262 if (!tb
[CTA_IP_V4_SRC
])
1265 t
->src
.u3
.ip
= nla_get_in_addr(tb
[CTA_IP_V4_SRC
]);
1268 if (flags
& CTA_FILTER_FLAG(CTA_IP_DST
)) {
1269 if (!tb
[CTA_IP_V4_DST
])
1272 t
->dst
.u3
.ip
= nla_get_in_addr(tb
[CTA_IP_V4_DST
]);
1278 static int ipv6_nlattr_to_tuple(struct nlattr
*tb
[],
1279 struct nf_conntrack_tuple
*t
,
1282 if (flags
& CTA_FILTER_FLAG(CTA_IP_SRC
)) {
1283 if (!tb
[CTA_IP_V6_SRC
])
1286 t
->src
.u3
.in6
= nla_get_in6_addr(tb
[CTA_IP_V6_SRC
]);
1289 if (flags
& CTA_FILTER_FLAG(CTA_IP_DST
)) {
1290 if (!tb
[CTA_IP_V6_DST
])
1293 t
->dst
.u3
.in6
= nla_get_in6_addr(tb
[CTA_IP_V6_DST
]);
1299 static int ctnetlink_parse_tuple_ip(struct nlattr
*attr
,
1300 struct nf_conntrack_tuple
*tuple
,
1303 struct nlattr
*tb
[CTA_IP_MAX
+1];
1306 ret
= nla_parse_nested_deprecated(tb
, CTA_IP_MAX
, attr
, NULL
, NULL
);
1310 ret
= nla_validate_nested_deprecated(attr
, CTA_IP_MAX
,
1311 cta_ip_nla_policy
, NULL
);
1315 switch (tuple
->src
.l3num
) {
1317 ret
= ipv4_nlattr_to_tuple(tb
, tuple
, flags
);
1320 ret
= ipv6_nlattr_to_tuple(tb
, tuple
, flags
);
1327 static const struct nla_policy proto_nla_policy
[CTA_PROTO_MAX
+1] = {
1328 [CTA_PROTO_NUM
] = { .type
= NLA_U8
},
1331 static int ctnetlink_parse_tuple_proto(struct nlattr
*attr
,
1332 struct nf_conntrack_tuple
*tuple
,
1335 const struct nf_conntrack_l4proto
*l4proto
;
1336 struct nlattr
*tb
[CTA_PROTO_MAX
+1];
1339 ret
= nla_parse_nested_deprecated(tb
, CTA_PROTO_MAX
, attr
,
1340 proto_nla_policy
, NULL
);
1344 if (!(flags
& CTA_FILTER_FLAG(CTA_PROTO_NUM
)))
1347 if (!tb
[CTA_PROTO_NUM
])
1350 tuple
->dst
.protonum
= nla_get_u8(tb
[CTA_PROTO_NUM
]);
1353 l4proto
= nf_ct_l4proto_find(tuple
->dst
.protonum
);
1355 if (likely(l4proto
->nlattr_to_tuple
)) {
1356 ret
= nla_validate_nested_deprecated(attr
, CTA_PROTO_MAX
,
1357 l4proto
->nla_policy
,
1360 ret
= l4proto
->nlattr_to_tuple(tb
, tuple
, flags
);
1369 ctnetlink_parse_zone(const struct nlattr
*attr
,
1370 struct nf_conntrack_zone
*zone
)
1372 nf_ct_zone_init(zone
, NF_CT_DEFAULT_ZONE_ID
,
1373 NF_CT_DEFAULT_ZONE_DIR
, 0);
1374 #ifdef CONFIG_NF_CONNTRACK_ZONES
1376 zone
->id
= ntohs(nla_get_be16(attr
));
1385 ctnetlink_parse_tuple_zone(struct nlattr
*attr
, enum ctattr_type type
,
1386 struct nf_conntrack_zone
*zone
)
1390 if (zone
->id
!= NF_CT_DEFAULT_ZONE_ID
)
1393 ret
= ctnetlink_parse_zone(attr
, zone
);
1397 if (type
== CTA_TUPLE_REPLY
)
1398 zone
->dir
= NF_CT_ZONE_DIR_REPL
;
1400 zone
->dir
= NF_CT_ZONE_DIR_ORIG
;
1405 static const struct nla_policy tuple_nla_policy
[CTA_TUPLE_MAX
+1] = {
1406 [CTA_TUPLE_IP
] = { .type
= NLA_NESTED
},
1407 [CTA_TUPLE_PROTO
] = { .type
= NLA_NESTED
},
1408 [CTA_TUPLE_ZONE
] = { .type
= NLA_U16
},
1411 #define CTA_FILTER_F_ALL_CTA_PROTO \
1412 (CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
1413 CTA_FILTER_F_CTA_PROTO_DST_PORT | \
1414 CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
1415 CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
1416 CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
1417 CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
1418 CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
1419 CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)
1422 ctnetlink_parse_tuple_filter(const struct nlattr
* const cda
[],
1423 struct nf_conntrack_tuple
*tuple
, u32 type
,
1424 u_int8_t l3num
, struct nf_conntrack_zone
*zone
,
1427 struct nlattr
*tb
[CTA_TUPLE_MAX
+1];
1430 memset(tuple
, 0, sizeof(*tuple
));
1432 err
= nla_parse_nested_deprecated(tb
, CTA_TUPLE_MAX
, cda
[type
],
1433 tuple_nla_policy
, NULL
);
1437 if (l3num
!= NFPROTO_IPV4
&& l3num
!= NFPROTO_IPV6
)
1439 tuple
->src
.l3num
= l3num
;
1441 if (flags
& CTA_FILTER_FLAG(CTA_IP_DST
) ||
1442 flags
& CTA_FILTER_FLAG(CTA_IP_SRC
)) {
1443 if (!tb
[CTA_TUPLE_IP
])
1446 err
= ctnetlink_parse_tuple_ip(tb
[CTA_TUPLE_IP
], tuple
, flags
);
1451 if (flags
& CTA_FILTER_FLAG(CTA_PROTO_NUM
)) {
1452 if (!tb
[CTA_TUPLE_PROTO
])
1455 err
= ctnetlink_parse_tuple_proto(tb
[CTA_TUPLE_PROTO
], tuple
, flags
);
1458 } else if (flags
& CTA_FILTER_FLAG(ALL_CTA_PROTO
)) {
1459 /* Can't manage proto flags without a protonum */
1463 if ((flags
& CTA_FILTER_FLAG(CTA_TUPLE_ZONE
)) && tb
[CTA_TUPLE_ZONE
]) {
1467 err
= ctnetlink_parse_tuple_zone(tb
[CTA_TUPLE_ZONE
],
1473 /* orig and expect tuples get DIR_ORIGINAL */
1474 if (type
== CTA_TUPLE_REPLY
)
1475 tuple
->dst
.dir
= IP_CT_DIR_REPLY
;
1477 tuple
->dst
.dir
= IP_CT_DIR_ORIGINAL
;
1483 ctnetlink_parse_tuple(const struct nlattr
* const cda
[],
1484 struct nf_conntrack_tuple
*tuple
, u32 type
,
1485 u_int8_t l3num
, struct nf_conntrack_zone
*zone
)
1487 return ctnetlink_parse_tuple_filter(cda
, tuple
, type
, l3num
, zone
,
1488 CTA_FILTER_FLAG(ALL
));
1491 static const struct nla_policy help_nla_policy
[CTA_HELP_MAX
+1] = {
1492 [CTA_HELP_NAME
] = { .type
= NLA_NUL_STRING
,
1493 .len
= NF_CT_HELPER_NAME_LEN
- 1 },
1496 static int ctnetlink_parse_help(const struct nlattr
*attr
, char **helper_name
,
1497 struct nlattr
**helpinfo
)
1500 struct nlattr
*tb
[CTA_HELP_MAX
+1];
1502 err
= nla_parse_nested_deprecated(tb
, CTA_HELP_MAX
, attr
,
1503 help_nla_policy
, NULL
);
1507 if (!tb
[CTA_HELP_NAME
])
1510 *helper_name
= nla_data(tb
[CTA_HELP_NAME
]);
1512 if (tb
[CTA_HELP_INFO
])
1513 *helpinfo
= tb
[CTA_HELP_INFO
];
1518 static const struct nla_policy ct_nla_policy
[CTA_MAX
+1] = {
1519 [CTA_TUPLE_ORIG
] = { .type
= NLA_NESTED
},
1520 [CTA_TUPLE_REPLY
] = { .type
= NLA_NESTED
},
1521 [CTA_STATUS
] = { .type
= NLA_U32
},
1522 [CTA_PROTOINFO
] = { .type
= NLA_NESTED
},
1523 [CTA_HELP
] = { .type
= NLA_NESTED
},
1524 [CTA_NAT_SRC
] = { .type
= NLA_NESTED
},
1525 [CTA_TIMEOUT
] = { .type
= NLA_U32
},
1526 [CTA_MARK
] = { .type
= NLA_U32
},
1527 [CTA_ID
] = { .type
= NLA_U32
},
1528 [CTA_NAT_DST
] = { .type
= NLA_NESTED
},
1529 [CTA_TUPLE_MASTER
] = { .type
= NLA_NESTED
},
1530 [CTA_NAT_SEQ_ADJ_ORIG
] = { .type
= NLA_NESTED
},
1531 [CTA_NAT_SEQ_ADJ_REPLY
] = { .type
= NLA_NESTED
},
1532 [CTA_ZONE
] = { .type
= NLA_U16
},
1533 [CTA_MARK_MASK
] = { .type
= NLA_U32
},
1534 [CTA_LABELS
] = { .type
= NLA_BINARY
,
1535 .len
= NF_CT_LABELS_MAX_SIZE
},
1536 [CTA_LABELS_MASK
] = { .type
= NLA_BINARY
,
1537 .len
= NF_CT_LABELS_MAX_SIZE
},
1538 [CTA_FILTER
] = { .type
= NLA_NESTED
},
1539 [CTA_STATUS_MASK
] = { .type
= NLA_U32
},
1542 static int ctnetlink_flush_iterate(struct nf_conn
*ct
, void *data
)
1544 if (test_bit(IPS_OFFLOAD_BIT
, &ct
->status
))
1547 return ctnetlink_filter_match(ct
, data
);
1550 static int ctnetlink_flush_conntrack(struct net
*net
,
1551 const struct nlattr
* const cda
[],
1552 u32 portid
, int report
, u8 family
)
1554 struct ctnetlink_filter
*filter
= NULL
;
1556 if (ctnetlink_needs_filter(family
, cda
)) {
1557 if (cda
[CTA_FILTER
])
1560 filter
= ctnetlink_alloc_filter(cda
, family
);
1562 return PTR_ERR(filter
);
1565 nf_ct_iterate_cleanup_net(net
, ctnetlink_flush_iterate
, filter
,
1572 static int ctnetlink_del_conntrack(struct sk_buff
*skb
,
1573 const struct nfnl_info
*info
,
1574 const struct nlattr
* const cda
[])
1576 u8 family
= info
->nfmsg
->nfgen_family
;
1577 struct nf_conntrack_tuple_hash
*h
;
1578 struct nf_conntrack_tuple tuple
;
1579 struct nf_conntrack_zone zone
;
1583 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &zone
);
1587 if (cda
[CTA_TUPLE_ORIG
])
1588 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_ORIG
,
1590 else if (cda
[CTA_TUPLE_REPLY
])
1591 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_REPLY
,
1594 u_int8_t u3
= info
->nfmsg
->version
? family
: AF_UNSPEC
;
1596 return ctnetlink_flush_conntrack(info
->net
, cda
,
1597 NETLINK_CB(skb
).portid
,
1598 nlmsg_report(info
->nlh
), u3
);
1604 h
= nf_conntrack_find_get(info
->net
, &zone
, &tuple
);
1608 ct
= nf_ct_tuplehash_to_ctrack(h
);
1610 if (test_bit(IPS_OFFLOAD_BIT
, &ct
->status
)) {
1616 __be32 id
= nla_get_be32(cda
[CTA_ID
]);
1618 if (id
!= (__force __be32
)nf_ct_get_id(ct
)) {
1624 nf_ct_delete(ct
, NETLINK_CB(skb
).portid
, nlmsg_report(info
->nlh
));
1630 static int ctnetlink_get_conntrack(struct sk_buff
*skb
,
1631 const struct nfnl_info
*info
,
1632 const struct nlattr
* const cda
[])
1634 u_int8_t u3
= info
->nfmsg
->nfgen_family
;
1635 struct nf_conntrack_tuple_hash
*h
;
1636 struct nf_conntrack_tuple tuple
;
1637 struct nf_conntrack_zone zone
;
1638 struct sk_buff
*skb2
;
1642 if (info
->nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1643 struct netlink_dump_control c
= {
1644 .start
= ctnetlink_start
,
1645 .dump
= ctnetlink_dump_table
,
1646 .done
= ctnetlink_done
,
1647 .data
= (void *)cda
,
1650 return netlink_dump_start(info
->sk
, skb
, info
->nlh
, &c
);
1653 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &zone
);
1657 if (cda
[CTA_TUPLE_ORIG
])
1658 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_ORIG
,
1660 else if (cda
[CTA_TUPLE_REPLY
])
1661 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_REPLY
,
1669 h
= nf_conntrack_find_get(info
->net
, &zone
, &tuple
);
1673 ct
= nf_ct_tuplehash_to_ctrack(h
);
1675 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
1681 err
= ctnetlink_fill_info(skb2
, NETLINK_CB(skb
).portid
,
1682 info
->nlh
->nlmsg_seq
,
1683 NFNL_MSG_TYPE(info
->nlh
->nlmsg_type
), ct
,
1691 return nfnetlink_unicast(skb2
, info
->net
, NETLINK_CB(skb
).portid
);
1694 static int ctnetlink_done_list(struct netlink_callback
*cb
)
1697 nf_ct_put((struct nf_conn
*)cb
->args
[1]);
1702 ctnetlink_dump_list(struct sk_buff
*skb
, struct netlink_callback
*cb
, bool dying
)
1704 struct nf_conn
*ct
, *last
;
1705 struct nf_conntrack_tuple_hash
*h
;
1706 struct hlist_nulls_node
*n
;
1707 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
1708 u_int8_t l3proto
= nfmsg
->nfgen_family
;
1711 struct hlist_nulls_head
*list
;
1712 struct net
*net
= sock_net(skb
->sk
);
1717 last
= (struct nf_conn
*)cb
->args
[1];
1719 for (cpu
= cb
->args
[0]; cpu
< nr_cpu_ids
; cpu
++) {
1720 struct ct_pcpu
*pcpu
;
1722 if (!cpu_possible(cpu
))
1725 pcpu
= per_cpu_ptr(net
->ct
.pcpu_lists
, cpu
);
1726 spin_lock_bh(&pcpu
->lock
);
1727 list
= dying
? &pcpu
->dying
: &pcpu
->unconfirmed
;
1729 hlist_nulls_for_each_entry(h
, n
, list
, hnnode
) {
1730 ct
= nf_ct_tuplehash_to_ctrack(h
);
1731 if (l3proto
&& nf_ct_l3num(ct
) != l3proto
)
1739 /* We can't dump extension info for the unconfirmed
1740 * list because unconfirmed conntracks can have
1741 * ct->ext reallocated (and thus freed).
1743 * In the dying list case ct->ext can't be free'd
1744 * until after we drop pcpu->lock.
1746 res
= ctnetlink_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
1748 NFNL_MSG_TYPE(cb
->nlh
->nlmsg_type
),
1751 if (!refcount_inc_not_zero(&ct
->ct_general
.use
))
1754 cb
->args
[1] = (unsigned long)ct
;
1755 spin_unlock_bh(&pcpu
->lock
);
1763 spin_unlock_bh(&pcpu
->lock
);
1774 ctnetlink_dump_dying(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1776 return ctnetlink_dump_list(skb
, cb
, true);
1779 static int ctnetlink_get_ct_dying(struct sk_buff
*skb
,
1780 const struct nfnl_info
*info
,
1781 const struct nlattr
* const cda
[])
1783 if (info
->nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1784 struct netlink_dump_control c
= {
1785 .dump
= ctnetlink_dump_dying
,
1786 .done
= ctnetlink_done_list
,
1788 return netlink_dump_start(info
->sk
, skb
, info
->nlh
, &c
);
1795 ctnetlink_dump_unconfirmed(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1797 return ctnetlink_dump_list(skb
, cb
, false);
1800 static int ctnetlink_get_ct_unconfirmed(struct sk_buff
*skb
,
1801 const struct nfnl_info
*info
,
1802 const struct nlattr
* const cda
[])
1804 if (info
->nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1805 struct netlink_dump_control c
= {
1806 .dump
= ctnetlink_dump_unconfirmed
,
1807 .done
= ctnetlink_done_list
,
1809 return netlink_dump_start(info
->sk
, skb
, info
->nlh
, &c
);
1815 #if IS_ENABLED(CONFIG_NF_NAT)
1817 ctnetlink_parse_nat_setup(struct nf_conn
*ct
,
1818 enum nf_nat_manip_type manip
,
1819 const struct nlattr
*attr
)
1822 const struct nf_nat_hook
*nat_hook
;
1825 nat_hook
= rcu_dereference(nf_nat_hook
);
1827 #ifdef CONFIG_MODULES
1829 nfnl_unlock(NFNL_SUBSYS_CTNETLINK
);
1830 if (request_module("nf-nat") < 0) {
1831 nfnl_lock(NFNL_SUBSYS_CTNETLINK
);
1835 nfnl_lock(NFNL_SUBSYS_CTNETLINK
);
1837 nat_hook
= rcu_dereference(nf_nat_hook
);
1844 err
= nat_hook
->parse_nat_setup(ct
, manip
, attr
);
1845 if (err
== -EAGAIN
) {
1846 #ifdef CONFIG_MODULES
1848 nfnl_unlock(NFNL_SUBSYS_CTNETLINK
);
1849 if (request_module("nf-nat-%u", nf_ct_l3num(ct
)) < 0) {
1850 nfnl_lock(NFNL_SUBSYS_CTNETLINK
);
1854 nfnl_lock(NFNL_SUBSYS_CTNETLINK
);
1865 __ctnetlink_change_status(struct nf_conn
*ct
, unsigned long on
,
1870 /* Ignore these unchangable bits */
1871 on
&= ~IPS_UNCHANGEABLE_MASK
;
1872 off
&= ~IPS_UNCHANGEABLE_MASK
;
1874 for (bit
= 0; bit
< __IPS_MAX_BIT
; bit
++) {
1875 if (on
& (1 << bit
))
1876 set_bit(bit
, &ct
->status
);
1877 else if (off
& (1 << bit
))
1878 clear_bit(bit
, &ct
->status
);
1883 ctnetlink_change_status(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1886 unsigned int status
= ntohl(nla_get_be32(cda
[CTA_STATUS
]));
1887 d
= ct
->status
^ status
;
1889 if (d
& (IPS_EXPECTED
|IPS_CONFIRMED
|IPS_DYING
))
1893 if (d
& IPS_SEEN_REPLY
&& !(status
& IPS_SEEN_REPLY
))
1894 /* SEEN_REPLY bit can only be set */
1897 if (d
& IPS_ASSURED
&& !(status
& IPS_ASSURED
))
1898 /* ASSURED bit can only be set */
1901 __ctnetlink_change_status(ct
, status
, 0);
1906 ctnetlink_setup_nat(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1908 #if IS_ENABLED(CONFIG_NF_NAT)
1911 if (!cda
[CTA_NAT_DST
] && !cda
[CTA_NAT_SRC
])
1914 ret
= ctnetlink_parse_nat_setup(ct
, NF_NAT_MANIP_DST
,
1919 return ctnetlink_parse_nat_setup(ct
, NF_NAT_MANIP_SRC
,
1922 if (!cda
[CTA_NAT_DST
] && !cda
[CTA_NAT_SRC
])
1928 static int ctnetlink_change_helper(struct nf_conn
*ct
,
1929 const struct nlattr
* const cda
[])
1931 struct nf_conntrack_helper
*helper
;
1932 struct nf_conn_help
*help
= nfct_help(ct
);
1933 char *helpname
= NULL
;
1934 struct nlattr
*helpinfo
= NULL
;
1937 err
= ctnetlink_parse_help(cda
[CTA_HELP
], &helpname
, &helpinfo
);
1941 /* don't change helper of sibling connections */
1943 /* If we try to change the helper to the same thing twice,
1944 * treat the second attempt as a no-op instead of returning
1950 helper
= rcu_dereference(help
->helper
);
1951 if (helper
&& !strcmp(helper
->name
, helpname
))
1959 if (!strcmp(helpname
, "")) {
1960 if (help
&& help
->helper
) {
1961 /* we had a helper before ... */
1962 nf_ct_remove_expectations(ct
);
1963 RCU_INIT_POINTER(help
->helper
, NULL
);
1970 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
1971 nf_ct_protonum(ct
));
1972 if (helper
== NULL
) {
1978 if (help
->helper
== helper
) {
1979 /* update private helper data if allowed. */
1980 if (helper
->from_nlattr
)
1981 helper
->from_nlattr(helpinfo
, ct
);
1986 /* we cannot set a helper for an existing conntrack */
1994 static int ctnetlink_change_timeout(struct nf_conn
*ct
,
1995 const struct nlattr
* const cda
[])
1997 u64 timeout
= (u64
)ntohl(nla_get_be32(cda
[CTA_TIMEOUT
])) * HZ
;
1999 if (timeout
> INT_MAX
)
2001 WRITE_ONCE(ct
->timeout
, nfct_time_stamp
+ (u32
)timeout
);
2003 if (test_bit(IPS_DYING_BIT
, &ct
->status
))
#if defined(CONFIG_NF_CONNTRACK_MARK)
/* Apply CTA_MARK (with optional CTA_MARK_MASK) to ct->mark: bits covered
 * by the mask are replaced, the rest are kept.
 */
static void ctnetlink_change_mark(struct nf_conn *ct,
				  const struct nlattr * const cda[])
{
	u32 mark, newmark, mask = 0;

	if (cda[CTA_MARK_MASK])
		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));

	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
	newmark = (ct->mark & mask) ^ mark;
	if (newmark != ct->mark)
		ct->mark = newmark;
}
#endif
2025 static const struct nla_policy protoinfo_policy
[CTA_PROTOINFO_MAX
+1] = {
2026 [CTA_PROTOINFO_TCP
] = { .type
= NLA_NESTED
},
2027 [CTA_PROTOINFO_DCCP
] = { .type
= NLA_NESTED
},
2028 [CTA_PROTOINFO_SCTP
] = { .type
= NLA_NESTED
},
2031 static int ctnetlink_change_protoinfo(struct nf_conn
*ct
,
2032 const struct nlattr
* const cda
[])
2034 const struct nlattr
*attr
= cda
[CTA_PROTOINFO
];
2035 const struct nf_conntrack_l4proto
*l4proto
;
2036 struct nlattr
*tb
[CTA_PROTOINFO_MAX
+1];
2039 err
= nla_parse_nested_deprecated(tb
, CTA_PROTOINFO_MAX
, attr
,
2040 protoinfo_policy
, NULL
);
2044 l4proto
= nf_ct_l4proto_find(nf_ct_protonum(ct
));
2045 if (l4proto
->from_nlattr
)
2046 err
= l4proto
->from_nlattr(tb
, ct
);
2051 static const struct nla_policy seqadj_policy
[CTA_SEQADJ_MAX
+1] = {
2052 [CTA_SEQADJ_CORRECTION_POS
] = { .type
= NLA_U32
},
2053 [CTA_SEQADJ_OFFSET_BEFORE
] = { .type
= NLA_U32
},
2054 [CTA_SEQADJ_OFFSET_AFTER
] = { .type
= NLA_U32
},
2057 static int change_seq_adj(struct nf_ct_seqadj
*seq
,
2058 const struct nlattr
* const attr
)
2061 struct nlattr
*cda
[CTA_SEQADJ_MAX
+1];
2063 err
= nla_parse_nested_deprecated(cda
, CTA_SEQADJ_MAX
, attr
,
2064 seqadj_policy
, NULL
);
2068 if (!cda
[CTA_SEQADJ_CORRECTION_POS
])
2071 seq
->correction_pos
=
2072 ntohl(nla_get_be32(cda
[CTA_SEQADJ_CORRECTION_POS
]));
2074 if (!cda
[CTA_SEQADJ_OFFSET_BEFORE
])
2077 seq
->offset_before
=
2078 ntohl(nla_get_be32(cda
[CTA_SEQADJ_OFFSET_BEFORE
]));
2080 if (!cda
[CTA_SEQADJ_OFFSET_AFTER
])
2084 ntohl(nla_get_be32(cda
[CTA_SEQADJ_OFFSET_AFTER
]));
2090 ctnetlink_change_seq_adj(struct nf_conn
*ct
,
2091 const struct nlattr
* const cda
[])
2093 struct nf_conn_seqadj
*seqadj
= nfct_seqadj(ct
);
2099 spin_lock_bh(&ct
->lock
);
2100 if (cda
[CTA_SEQ_ADJ_ORIG
]) {
2101 ret
= change_seq_adj(&seqadj
->seq
[IP_CT_DIR_ORIGINAL
],
2102 cda
[CTA_SEQ_ADJ_ORIG
]);
2106 set_bit(IPS_SEQ_ADJUST_BIT
, &ct
->status
);
2109 if (cda
[CTA_SEQ_ADJ_REPLY
]) {
2110 ret
= change_seq_adj(&seqadj
->seq
[IP_CT_DIR_REPLY
],
2111 cda
[CTA_SEQ_ADJ_REPLY
]);
2115 set_bit(IPS_SEQ_ADJUST_BIT
, &ct
->status
);
2118 spin_unlock_bh(&ct
->lock
);
2121 spin_unlock_bh(&ct
->lock
);
2125 static const struct nla_policy synproxy_policy
[CTA_SYNPROXY_MAX
+ 1] = {
2126 [CTA_SYNPROXY_ISN
] = { .type
= NLA_U32
},
2127 [CTA_SYNPROXY_ITS
] = { .type
= NLA_U32
},
2128 [CTA_SYNPROXY_TSOFF
] = { .type
= NLA_U32
},
2131 static int ctnetlink_change_synproxy(struct nf_conn
*ct
,
2132 const struct nlattr
* const cda
[])
2134 struct nf_conn_synproxy
*synproxy
= nfct_synproxy(ct
);
2135 struct nlattr
*tb
[CTA_SYNPROXY_MAX
+ 1];
2141 err
= nla_parse_nested_deprecated(tb
, CTA_SYNPROXY_MAX
,
2142 cda
[CTA_SYNPROXY
], synproxy_policy
,
2147 if (!tb
[CTA_SYNPROXY_ISN
] ||
2148 !tb
[CTA_SYNPROXY_ITS
] ||
2149 !tb
[CTA_SYNPROXY_TSOFF
])
2152 synproxy
->isn
= ntohl(nla_get_be32(tb
[CTA_SYNPROXY_ISN
]));
2153 synproxy
->its
= ntohl(nla_get_be32(tb
[CTA_SYNPROXY_ITS
]));
2154 synproxy
->tsoff
= ntohl(nla_get_be32(tb
[CTA_SYNPROXY_TSOFF
]));
2160 ctnetlink_attach_labels(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
2162 #ifdef CONFIG_NF_CONNTRACK_LABELS
2163 size_t len
= nla_len(cda
[CTA_LABELS
]);
2164 const void *mask
= cda
[CTA_LABELS_MASK
];
2166 if (len
& (sizeof(u32
)-1)) /* must be multiple of u32 */
2170 if (nla_len(cda
[CTA_LABELS_MASK
]) == 0 ||
2171 nla_len(cda
[CTA_LABELS_MASK
]) != len
)
2173 mask
= nla_data(cda
[CTA_LABELS_MASK
]);
2178 return nf_connlabels_replace(ct
, nla_data(cda
[CTA_LABELS
]), mask
, len
);
2185 ctnetlink_change_conntrack(struct nf_conn
*ct
,
2186 const struct nlattr
* const cda
[])
2190 /* only allow NAT changes and master assignation for new conntracks */
2191 if (cda
[CTA_NAT_SRC
] || cda
[CTA_NAT_DST
] || cda
[CTA_TUPLE_MASTER
])
2194 if (cda
[CTA_HELP
]) {
2195 err
= ctnetlink_change_helper(ct
, cda
);
2200 if (cda
[CTA_TIMEOUT
]) {
2201 err
= ctnetlink_change_timeout(ct
, cda
);
2206 if (cda
[CTA_STATUS
]) {
2207 err
= ctnetlink_change_status(ct
, cda
);
2212 if (cda
[CTA_PROTOINFO
]) {
2213 err
= ctnetlink_change_protoinfo(ct
, cda
);
2218 #if defined(CONFIG_NF_CONNTRACK_MARK)
2220 ctnetlink_change_mark(ct
, cda
);
2223 if (cda
[CTA_SEQ_ADJ_ORIG
] || cda
[CTA_SEQ_ADJ_REPLY
]) {
2224 err
= ctnetlink_change_seq_adj(ct
, cda
);
2229 if (cda
[CTA_SYNPROXY
]) {
2230 err
= ctnetlink_change_synproxy(ct
, cda
);
2235 if (cda
[CTA_LABELS
]) {
2236 err
= ctnetlink_attach_labels(ct
, cda
);
2244 static struct nf_conn
*
2245 ctnetlink_create_conntrack(struct net
*net
,
2246 const struct nf_conntrack_zone
*zone
,
2247 const struct nlattr
* const cda
[],
2248 struct nf_conntrack_tuple
*otuple
,
2249 struct nf_conntrack_tuple
*rtuple
,
2254 struct nf_conntrack_helper
*helper
;
2255 struct nf_conn_tstamp
*tstamp
;
2258 ct
= nf_conntrack_alloc(net
, zone
, otuple
, rtuple
, GFP_ATOMIC
);
2260 return ERR_PTR(-ENOMEM
);
2262 if (!cda
[CTA_TIMEOUT
])
2265 timeout
= (u64
)ntohl(nla_get_be32(cda
[CTA_TIMEOUT
])) * HZ
;
2266 if (timeout
> INT_MAX
)
2268 ct
->timeout
= (u32
)timeout
+ nfct_time_stamp
;
2271 if (cda
[CTA_HELP
]) {
2272 char *helpname
= NULL
;
2273 struct nlattr
*helpinfo
= NULL
;
2275 err
= ctnetlink_parse_help(cda
[CTA_HELP
], &helpname
, &helpinfo
);
2279 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
2280 nf_ct_protonum(ct
));
2281 if (helper
== NULL
) {
2283 #ifdef CONFIG_MODULES
2284 if (request_module("nfct-helper-%s", helpname
) < 0) {
2290 helper
= __nf_conntrack_helper_find(helpname
,
2292 nf_ct_protonum(ct
));
2302 struct nf_conn_help
*help
;
2304 help
= nf_ct_helper_ext_add(ct
, GFP_ATOMIC
);
2309 /* set private helper data if allowed. */
2310 if (helper
->from_nlattr
)
2311 helper
->from_nlattr(helpinfo
, ct
);
2313 /* not in hash table yet so not strictly necessary */
2314 RCU_INIT_POINTER(help
->helper
, helper
);
2317 /* try an implicit helper assignation */
2318 err
= __nf_ct_try_assign_helper(ct
, NULL
, GFP_ATOMIC
);
2323 err
= ctnetlink_setup_nat(ct
, cda
);
2327 nf_ct_acct_ext_add(ct
, GFP_ATOMIC
);
2328 nf_ct_tstamp_ext_add(ct
, GFP_ATOMIC
);
2329 nf_ct_ecache_ext_add(ct
, 0, 0, GFP_ATOMIC
);
2330 nf_ct_labels_ext_add(ct
);
2331 nfct_seqadj_ext_add(ct
);
2332 nfct_synproxy_ext_add(ct
);
2334 /* we must add conntrack extensions before confirmation. */
2335 ct
->status
|= IPS_CONFIRMED
;
2337 if (cda
[CTA_STATUS
]) {
2338 err
= ctnetlink_change_status(ct
, cda
);
2343 if (cda
[CTA_SEQ_ADJ_ORIG
] || cda
[CTA_SEQ_ADJ_REPLY
]) {
2344 err
= ctnetlink_change_seq_adj(ct
, cda
);
2349 memset(&ct
->proto
, 0, sizeof(ct
->proto
));
2350 if (cda
[CTA_PROTOINFO
]) {
2351 err
= ctnetlink_change_protoinfo(ct
, cda
);
2356 if (cda
[CTA_SYNPROXY
]) {
2357 err
= ctnetlink_change_synproxy(ct
, cda
);
2362 #if defined(CONFIG_NF_CONNTRACK_MARK)
2364 ctnetlink_change_mark(ct
, cda
);
2367 /* setup master conntrack: this is a confirmed expectation */
2368 if (cda
[CTA_TUPLE_MASTER
]) {
2369 struct nf_conntrack_tuple master
;
2370 struct nf_conntrack_tuple_hash
*master_h
;
2371 struct nf_conn
*master_ct
;
2373 err
= ctnetlink_parse_tuple(cda
, &master
, CTA_TUPLE_MASTER
,
2378 master_h
= nf_conntrack_find_get(net
, zone
, &master
);
2379 if (master_h
== NULL
) {
2383 master_ct
= nf_ct_tuplehash_to_ctrack(master_h
);
2384 __set_bit(IPS_EXPECTED_BIT
, &ct
->status
);
2385 ct
->master
= master_ct
;
2387 tstamp
= nf_conn_tstamp_find(ct
);
2389 tstamp
->start
= ktime_get_real_ns();
2391 err
= nf_conntrack_hash_check_insert(ct
);
2402 nf_conntrack_free(ct
);
2403 return ERR_PTR(err
);
2406 static int ctnetlink_new_conntrack(struct sk_buff
*skb
,
2407 const struct nfnl_info
*info
,
2408 const struct nlattr
* const cda
[])
2410 struct nf_conntrack_tuple otuple
, rtuple
;
2411 struct nf_conntrack_tuple_hash
*h
= NULL
;
2412 u_int8_t u3
= info
->nfmsg
->nfgen_family
;
2413 struct nf_conntrack_zone zone
;
2417 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &zone
);
2421 if (cda
[CTA_TUPLE_ORIG
]) {
2422 err
= ctnetlink_parse_tuple(cda
, &otuple
, CTA_TUPLE_ORIG
,
2428 if (cda
[CTA_TUPLE_REPLY
]) {
2429 err
= ctnetlink_parse_tuple(cda
, &rtuple
, CTA_TUPLE_REPLY
,
2435 if (cda
[CTA_TUPLE_ORIG
])
2436 h
= nf_conntrack_find_get(info
->net
, &zone
, &otuple
);
2437 else if (cda
[CTA_TUPLE_REPLY
])
2438 h
= nf_conntrack_find_get(info
->net
, &zone
, &rtuple
);
2442 if (info
->nlh
->nlmsg_flags
& NLM_F_CREATE
) {
2443 enum ip_conntrack_events events
;
2445 if (!cda
[CTA_TUPLE_ORIG
] || !cda
[CTA_TUPLE_REPLY
])
2447 if (otuple
.dst
.protonum
!= rtuple
.dst
.protonum
)
2450 ct
= ctnetlink_create_conntrack(info
->net
, &zone
, cda
,
2451 &otuple
, &rtuple
, u3
);
2456 if (test_bit(IPS_EXPECTED_BIT
, &ct
->status
))
2457 events
= 1 << IPCT_RELATED
;
2459 events
= 1 << IPCT_NEW
;
2461 if (cda
[CTA_LABELS
] &&
2462 ctnetlink_attach_labels(ct
, cda
) == 0)
2463 events
|= (1 << IPCT_LABEL
);
2465 nf_conntrack_eventmask_report((1 << IPCT_REPLY
) |
2466 (1 << IPCT_ASSURED
) |
2467 (1 << IPCT_HELPER
) |
2468 (1 << IPCT_PROTOINFO
) |
2469 (1 << IPCT_SEQADJ
) |
2471 (1 << IPCT_SYNPROXY
) |
2473 ct
, NETLINK_CB(skb
).portid
,
2474 nlmsg_report(info
->nlh
));
2480 /* implicit 'else' */
2483 ct
= nf_ct_tuplehash_to_ctrack(h
);
2484 if (!(info
->nlh
->nlmsg_flags
& NLM_F_EXCL
)) {
2485 err
= ctnetlink_change_conntrack(ct
, cda
);
2487 nf_conntrack_eventmask_report((1 << IPCT_REPLY
) |
2488 (1 << IPCT_ASSURED
) |
2489 (1 << IPCT_HELPER
) |
2491 (1 << IPCT_PROTOINFO
) |
2492 (1 << IPCT_SEQADJ
) |
2494 (1 << IPCT_SYNPROXY
),
2495 ct
, NETLINK_CB(skb
).portid
,
2496 nlmsg_report(info
->nlh
));
2505 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
,
2506 __u16 cpu
, const struct ip_conntrack_stat
*st
)
2508 struct nlmsghdr
*nlh
;
2509 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
2511 event
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK
,
2512 IPCTNL_MSG_CT_GET_STATS_CPU
);
2513 nlh
= nfnl_msg_put(skb
, portid
, seq
, event
, flags
, AF_UNSPEC
,
2514 NFNETLINK_V0
, htons(cpu
));
2518 if (nla_put_be32(skb
, CTA_STATS_FOUND
, htonl(st
->found
)) ||
2519 nla_put_be32(skb
, CTA_STATS_INVALID
, htonl(st
->invalid
)) ||
2520 nla_put_be32(skb
, CTA_STATS_INSERT
, htonl(st
->insert
)) ||
2521 nla_put_be32(skb
, CTA_STATS_INSERT_FAILED
,
2522 htonl(st
->insert_failed
)) ||
2523 nla_put_be32(skb
, CTA_STATS_DROP
, htonl(st
->drop
)) ||
2524 nla_put_be32(skb
, CTA_STATS_EARLY_DROP
, htonl(st
->early_drop
)) ||
2525 nla_put_be32(skb
, CTA_STATS_ERROR
, htonl(st
->error
)) ||
2526 nla_put_be32(skb
, CTA_STATS_SEARCH_RESTART
,
2527 htonl(st
->search_restart
)) ||
2528 nla_put_be32(skb
, CTA_STATS_CLASH_RESOLVE
,
2529 htonl(st
->clash_resolve
)) ||
2530 nla_put_be32(skb
, CTA_STATS_CHAIN_TOOLONG
,
2531 htonl(st
->chaintoolong
)))
2532 goto nla_put_failure
;
2534 nlmsg_end(skb
, nlh
);
2539 nlmsg_cancel(skb
, nlh
);
2544 ctnetlink_ct_stat_cpu_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2547 struct net
*net
= sock_net(skb
->sk
);
2549 if (cb
->args
[0] == nr_cpu_ids
)
2552 for (cpu
= cb
->args
[0]; cpu
< nr_cpu_ids
; cpu
++) {
2553 const struct ip_conntrack_stat
*st
;
2555 if (!cpu_possible(cpu
))
2558 st
= per_cpu_ptr(net
->ct
.stat
, cpu
);
2559 if (ctnetlink_ct_stat_cpu_fill_info(skb
,
2560 NETLINK_CB(cb
->skb
).portid
,
2570 static int ctnetlink_stat_ct_cpu(struct sk_buff
*skb
,
2571 const struct nfnl_info
*info
,
2572 const struct nlattr
* const cda
[])
2574 if (info
->nlh
->nlmsg_flags
& NLM_F_DUMP
) {
2575 struct netlink_dump_control c
= {
2576 .dump
= ctnetlink_ct_stat_cpu_dump
,
2578 return netlink_dump_start(info
->sk
, skb
, info
->nlh
, &c
);
2585 ctnetlink_stat_ct_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, u32 type
,
2588 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
2589 unsigned int nr_conntracks
;
2590 struct nlmsghdr
*nlh
;
2592 event
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK
, IPCTNL_MSG_CT_GET_STATS
);
2593 nlh
= nfnl_msg_put(skb
, portid
, seq
, event
, flags
, AF_UNSPEC
,
2598 nr_conntracks
= nf_conntrack_count(net
);
2599 if (nla_put_be32(skb
, CTA_STATS_GLOBAL_ENTRIES
, htonl(nr_conntracks
)))
2600 goto nla_put_failure
;
2602 if (nla_put_be32(skb
, CTA_STATS_GLOBAL_MAX_ENTRIES
, htonl(nf_conntrack_max
)))
2603 goto nla_put_failure
;
2605 nlmsg_end(skb
, nlh
);
2610 nlmsg_cancel(skb
, nlh
);
2614 static int ctnetlink_stat_ct(struct sk_buff
*skb
, const struct nfnl_info
*info
,
2615 const struct nlattr
* const cda
[])
2617 struct sk_buff
*skb2
;
2620 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2624 err
= ctnetlink_stat_ct_fill_info(skb2
, NETLINK_CB(skb
).portid
,
2625 info
->nlh
->nlmsg_seq
,
2626 NFNL_MSG_TYPE(info
->nlh
->nlmsg_type
),
2633 return nfnetlink_unicast(skb2
, info
->net
, NETLINK_CB(skb
).portid
);
2636 static const struct nla_policy exp_nla_policy
[CTA_EXPECT_MAX
+1] = {
2637 [CTA_EXPECT_MASTER
] = { .type
= NLA_NESTED
},
2638 [CTA_EXPECT_TUPLE
] = { .type
= NLA_NESTED
},
2639 [CTA_EXPECT_MASK
] = { .type
= NLA_NESTED
},
2640 [CTA_EXPECT_TIMEOUT
] = { .type
= NLA_U32
},
2641 [CTA_EXPECT_ID
] = { .type
= NLA_U32
},
2642 [CTA_EXPECT_HELP_NAME
] = { .type
= NLA_NUL_STRING
,
2643 .len
= NF_CT_HELPER_NAME_LEN
- 1 },
2644 [CTA_EXPECT_ZONE
] = { .type
= NLA_U16
},
2645 [CTA_EXPECT_FLAGS
] = { .type
= NLA_U32
},
2646 [CTA_EXPECT_CLASS
] = { .type
= NLA_U32
},
2647 [CTA_EXPECT_NAT
] = { .type
= NLA_NESTED
},
2648 [CTA_EXPECT_FN
] = { .type
= NLA_NUL_STRING
},
/* Forward declaration; defined with the expectation handling code below. */
static struct nf_conntrack_expect *
ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
		       struct nf_conntrack_helper *helper,
		       struct nf_conntrack_tuple *tuple,
		       struct nf_conntrack_tuple *mask);
2657 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
2659 ctnetlink_glue_build_size(const struct nf_conn
*ct
)
2661 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2662 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2663 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2664 + 3 * nla_total_size(sizeof(u_int8_t
)) /* CTA_PROTO_NUM */
2665 + nla_total_size(sizeof(u_int32_t
)) /* CTA_ID */
2666 + nla_total_size(sizeof(u_int32_t
)) /* CTA_STATUS */
2667 + nla_total_size(sizeof(u_int32_t
)) /* CTA_TIMEOUT */
2668 + nla_total_size(0) /* CTA_PROTOINFO */
2669 + nla_total_size(0) /* CTA_HELP */
2670 + nla_total_size(NF_CT_HELPER_NAME_LEN
) /* CTA_HELP_NAME */
2671 + ctnetlink_secctx_size(ct
)
2672 + ctnetlink_acct_size(ct
)
2673 + ctnetlink_timestamp_size(ct
)
2674 #if IS_ENABLED(CONFIG_NF_NAT)
2675 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2676 + 6 * nla_total_size(sizeof(u_int32_t
)) /* CTA_NAT_SEQ_OFFSET */
2678 #ifdef CONFIG_NF_CONNTRACK_MARK
2679 + nla_total_size(sizeof(u_int32_t
)) /* CTA_MARK */
2681 #ifdef CONFIG_NF_CONNTRACK_ZONES
2682 + nla_total_size(sizeof(u_int16_t
)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2684 + ctnetlink_proto_size(ct
)
2688 static int __ctnetlink_glue_build(struct sk_buff
*skb
, struct nf_conn
*ct
)
2690 const struct nf_conntrack_zone
*zone
;
2691 struct nlattr
*nest_parms
;
2693 zone
= nf_ct_zone(ct
);
2695 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_ORIG
);
2697 goto nla_put_failure
;
2698 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_ORIGINAL
)) < 0)
2699 goto nla_put_failure
;
2700 if (ctnetlink_dump_zone_id(skb
, CTA_TUPLE_ZONE
, zone
,
2701 NF_CT_ZONE_DIR_ORIG
) < 0)
2702 goto nla_put_failure
;
2703 nla_nest_end(skb
, nest_parms
);
2705 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_REPLY
);
2707 goto nla_put_failure
;
2708 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_REPLY
)) < 0)
2709 goto nla_put_failure
;
2710 if (ctnetlink_dump_zone_id(skb
, CTA_TUPLE_ZONE
, zone
,
2711 NF_CT_ZONE_DIR_REPL
) < 0)
2712 goto nla_put_failure
;
2713 nla_nest_end(skb
, nest_parms
);
2715 if (ctnetlink_dump_zone_id(skb
, CTA_ZONE
, zone
,
2716 NF_CT_DEFAULT_ZONE_DIR
) < 0)
2717 goto nla_put_failure
;
2719 if (ctnetlink_dump_id(skb
, ct
) < 0)
2720 goto nla_put_failure
;
2722 if (ctnetlink_dump_status(skb
, ct
) < 0)
2723 goto nla_put_failure
;
2725 if (ctnetlink_dump_timeout(skb
, ct
, false) < 0)
2726 goto nla_put_failure
;
2728 if (ctnetlink_dump_protoinfo(skb
, ct
, false) < 0)
2729 goto nla_put_failure
;
2731 if (ctnetlink_dump_acct(skb
, ct
, IPCTNL_MSG_CT_GET
) < 0 ||
2732 ctnetlink_dump_timestamp(skb
, ct
) < 0)
2733 goto nla_put_failure
;
2735 if (ctnetlink_dump_helpinfo(skb
, ct
) < 0)
2736 goto nla_put_failure
;
2738 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2739 if (ct
->secmark
&& ctnetlink_dump_secctx(skb
, ct
) < 0)
2740 goto nla_put_failure
;
2742 if (ct
->master
&& ctnetlink_dump_master(skb
, ct
) < 0)
2743 goto nla_put_failure
;
2745 if ((ct
->status
& IPS_SEQ_ADJUST
) &&
2746 ctnetlink_dump_ct_seq_adj(skb
, ct
) < 0)
2747 goto nla_put_failure
;
2749 if (ctnetlink_dump_ct_synproxy(skb
, ct
) < 0)
2750 goto nla_put_failure
;
2752 #ifdef CONFIG_NF_CONNTRACK_MARK
2753 if (ct
->mark
&& ctnetlink_dump_mark(skb
, ct
) < 0)
2754 goto nla_put_failure
;
2756 if (ctnetlink_dump_labels(skb
, ct
) < 0)
2757 goto nla_put_failure
;
2765 ctnetlink_glue_build(struct sk_buff
*skb
, struct nf_conn
*ct
,
2766 enum ip_conntrack_info ctinfo
,
2767 u_int16_t ct_attr
, u_int16_t ct_info_attr
)
2769 struct nlattr
*nest_parms
;
2771 nest_parms
= nla_nest_start(skb
, ct_attr
);
2773 goto nla_put_failure
;
2775 if (__ctnetlink_glue_build(skb
, ct
) < 0)
2776 goto nla_put_failure
;
2778 nla_nest_end(skb
, nest_parms
);
2780 if (nla_put_be32(skb
, ct_info_attr
, htonl(ctinfo
)))
2781 goto nla_put_failure
;
2790 ctnetlink_update_status(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
2792 unsigned int status
= ntohl(nla_get_be32(cda
[CTA_STATUS
]));
2793 unsigned long d
= ct
->status
^ status
;
2795 if (d
& IPS_SEEN_REPLY
&& !(status
& IPS_SEEN_REPLY
))
2796 /* SEEN_REPLY bit can only be set */
2799 if (d
& IPS_ASSURED
&& !(status
& IPS_ASSURED
))
2800 /* ASSURED bit can only be set */
2803 /* This check is less strict than ctnetlink_change_status()
2804 * because callers often flip IPS_EXPECTED bits when sending
2805 * an NFQA_CT attribute to the kernel. So ignore the
2806 * unchangeable bits but do not error out. Also user programs
2807 * are allowed to clear the bits that they are allowed to change.
2809 __ctnetlink_change_status(ct
, status
, ~status
);
2814 ctnetlink_glue_parse_ct(const struct nlattr
*cda
[], struct nf_conn
*ct
)
2818 if (cda
[CTA_TIMEOUT
]) {
2819 err
= ctnetlink_change_timeout(ct
, cda
);
2823 if (cda
[CTA_STATUS
]) {
2824 err
= ctnetlink_update_status(ct
, cda
);
2828 if (cda
[CTA_HELP
]) {
2829 err
= ctnetlink_change_helper(ct
, cda
);
2833 if (cda
[CTA_LABELS
]) {
2834 err
= ctnetlink_attach_labels(ct
, cda
);
2838 #if defined(CONFIG_NF_CONNTRACK_MARK)
2839 if (cda
[CTA_MARK
]) {
2840 ctnetlink_change_mark(ct
, cda
);
2847 ctnetlink_glue_parse(const struct nlattr
*attr
, struct nf_conn
*ct
)
2849 struct nlattr
*cda
[CTA_MAX
+1];
2852 ret
= nla_parse_nested_deprecated(cda
, CTA_MAX
, attr
, ct_nla_policy
,
2857 return ctnetlink_glue_parse_ct((const struct nlattr
**)cda
, ct
);
2860 static int ctnetlink_glue_exp_parse(const struct nlattr
* const *cda
,
2861 const struct nf_conn
*ct
,
2862 struct nf_conntrack_tuple
*tuple
,
2863 struct nf_conntrack_tuple
*mask
)
2867 err
= ctnetlink_parse_tuple(cda
, tuple
, CTA_EXPECT_TUPLE
,
2868 nf_ct_l3num(ct
), NULL
);
2872 return ctnetlink_parse_tuple(cda
, mask
, CTA_EXPECT_MASK
,
2873 nf_ct_l3num(ct
), NULL
);
2877 ctnetlink_glue_attach_expect(const struct nlattr
*attr
, struct nf_conn
*ct
,
2878 u32 portid
, u32 report
)
2880 struct nlattr
*cda
[CTA_EXPECT_MAX
+1];
2881 struct nf_conntrack_tuple tuple
, mask
;
2882 struct nf_conntrack_helper
*helper
= NULL
;
2883 struct nf_conntrack_expect
*exp
;
2886 err
= nla_parse_nested_deprecated(cda
, CTA_EXPECT_MAX
, attr
,
2887 exp_nla_policy
, NULL
);
2891 err
= ctnetlink_glue_exp_parse((const struct nlattr
* const *)cda
,
2896 if (cda
[CTA_EXPECT_HELP_NAME
]) {
2897 const char *helpname
= nla_data(cda
[CTA_EXPECT_HELP_NAME
]);
2899 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
2900 nf_ct_protonum(ct
));
2905 exp
= ctnetlink_alloc_expect((const struct nlattr
* const *)cda
, ct
,
2906 helper
, &tuple
, &mask
);
2908 return PTR_ERR(exp
);
2910 err
= nf_ct_expect_related_report(exp
, portid
, report
, 0);
2911 nf_ct_expect_put(exp
);
2915 static void ctnetlink_glue_seqadj(struct sk_buff
*skb
, struct nf_conn
*ct
,
2916 enum ip_conntrack_info ctinfo
, int diff
)
2918 if (!(ct
->status
& IPS_NAT_MASK
))
2921 nf_ct_tcp_seqadj_set(skb
, ct
, ctinfo
, diff
);
2924 static const struct nfnl_ct_hook ctnetlink_glue_hook
= {
2925 .build_size
= ctnetlink_glue_build_size
,
2926 .build
= ctnetlink_glue_build
,
2927 .parse
= ctnetlink_glue_parse
,
2928 .attach_expect
= ctnetlink_glue_attach_expect
,
2929 .seq_adjust
= ctnetlink_glue_seqadj
,
2931 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
2933 /***********************************************************************
2935 ***********************************************************************/
2937 static int ctnetlink_exp_dump_tuple(struct sk_buff
*skb
,
2938 const struct nf_conntrack_tuple
*tuple
,
2941 struct nlattr
*nest_parms
;
2943 nest_parms
= nla_nest_start(skb
, type
);
2945 goto nla_put_failure
;
2946 if (ctnetlink_dump_tuples(skb
, tuple
) < 0)
2947 goto nla_put_failure
;
2948 nla_nest_end(skb
, nest_parms
);
2956 static int ctnetlink_exp_dump_mask(struct sk_buff
*skb
,
2957 const struct nf_conntrack_tuple
*tuple
,
2958 const struct nf_conntrack_tuple_mask
*mask
)
2960 const struct nf_conntrack_l4proto
*l4proto
;
2961 struct nf_conntrack_tuple m
;
2962 struct nlattr
*nest_parms
;
2965 memset(&m
, 0xFF, sizeof(m
));
2966 memcpy(&m
.src
.u3
, &mask
->src
.u3
, sizeof(m
.src
.u3
));
2967 m
.src
.u
.all
= mask
->src
.u
.all
;
2968 m
.src
.l3num
= tuple
->src
.l3num
;
2969 m
.dst
.protonum
= tuple
->dst
.protonum
;
2971 nest_parms
= nla_nest_start(skb
, CTA_EXPECT_MASK
);
2973 goto nla_put_failure
;
2976 ret
= ctnetlink_dump_tuples_ip(skb
, &m
);
2978 l4proto
= nf_ct_l4proto_find(tuple
->dst
.protonum
);
2979 ret
= ctnetlink_dump_tuples_proto(skb
, &m
, l4proto
);
2983 if (unlikely(ret
< 0))
2984 goto nla_put_failure
;
2986 nla_nest_end(skb
, nest_parms
);
2994 static const union nf_inet_addr any_addr
;
2996 static __be32
nf_expect_get_id(const struct nf_conntrack_expect
*exp
)
2998 static siphash_aligned_key_t exp_id_seed
;
2999 unsigned long a
, b
, c
, d
;
3001 net_get_random_once(&exp_id_seed
, sizeof(exp_id_seed
));
3003 a
= (unsigned long)exp
;
3004 b
= (unsigned long)exp
->helper
;
3005 c
= (unsigned long)exp
->master
;
3006 d
= (unsigned long)siphash(&exp
->tuple
, sizeof(exp
->tuple
), &exp_id_seed
);
3009 return (__force __be32
)siphash_4u64((u64
)a
, (u64
)b
, (u64
)c
, (u64
)d
, &exp_id_seed
);
3011 return (__force __be32
)siphash_4u32((u32
)a
, (u32
)b
, (u32
)c
, (u32
)d
, &exp_id_seed
);
3016 ctnetlink_exp_dump_expect(struct sk_buff
*skb
,
3017 const struct nf_conntrack_expect
*exp
)
3019 struct nf_conn
*master
= exp
->master
;
3020 long timeout
= ((long)exp
->timeout
.expires
- (long)jiffies
) / HZ
;
3021 struct nf_conn_help
*help
;
3022 #if IS_ENABLED(CONFIG_NF_NAT)
3023 struct nlattr
*nest_parms
;
3024 struct nf_conntrack_tuple nat_tuple
= {};
3026 struct nf_ct_helper_expectfn
*expfn
;
3031 if (ctnetlink_exp_dump_tuple(skb
, &exp
->tuple
, CTA_EXPECT_TUPLE
) < 0)
3032 goto nla_put_failure
;
3033 if (ctnetlink_exp_dump_mask(skb
, &exp
->tuple
, &exp
->mask
) < 0)
3034 goto nla_put_failure
;
3035 if (ctnetlink_exp_dump_tuple(skb
,
3036 &master
->tuplehash
[IP_CT_DIR_ORIGINAL
].tuple
,
3037 CTA_EXPECT_MASTER
) < 0)
3038 goto nla_put_failure
;
3040 #if IS_ENABLED(CONFIG_NF_NAT)
3041 if (!nf_inet_addr_cmp(&exp
->saved_addr
, &any_addr
) ||
3042 exp
->saved_proto
.all
) {
3043 nest_parms
= nla_nest_start(skb
, CTA_EXPECT_NAT
);
3045 goto nla_put_failure
;
3047 if (nla_put_be32(skb
, CTA_EXPECT_NAT_DIR
, htonl(exp
->dir
)))
3048 goto nla_put_failure
;
3050 nat_tuple
.src
.l3num
= nf_ct_l3num(master
);
3051 nat_tuple
.src
.u3
= exp
->saved_addr
;
3052 nat_tuple
.dst
.protonum
= nf_ct_protonum(master
);
3053 nat_tuple
.src
.u
= exp
->saved_proto
;
3055 if (ctnetlink_exp_dump_tuple(skb
, &nat_tuple
,
3056 CTA_EXPECT_NAT_TUPLE
) < 0)
3057 goto nla_put_failure
;
3058 nla_nest_end(skb
, nest_parms
);
3061 if (nla_put_be32(skb
, CTA_EXPECT_TIMEOUT
, htonl(timeout
)) ||
3062 nla_put_be32(skb
, CTA_EXPECT_ID
, nf_expect_get_id(exp
)) ||
3063 nla_put_be32(skb
, CTA_EXPECT_FLAGS
, htonl(exp
->flags
)) ||
3064 nla_put_be32(skb
, CTA_EXPECT_CLASS
, htonl(exp
->class)))
3065 goto nla_put_failure
;
3066 help
= nfct_help(master
);
3068 struct nf_conntrack_helper
*helper
;
3070 helper
= rcu_dereference(help
->helper
);
3072 nla_put_string(skb
, CTA_EXPECT_HELP_NAME
, helper
->name
))
3073 goto nla_put_failure
;
3075 expfn
= nf_ct_helper_expectfn_find_by_symbol(exp
->expectfn
);
3076 if (expfn
!= NULL
&&
3077 nla_put_string(skb
, CTA_EXPECT_FN
, expfn
->name
))
3078 goto nla_put_failure
;
3087 ctnetlink_exp_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
,
3088 int event
, const struct nf_conntrack_expect
*exp
)
3090 struct nlmsghdr
*nlh
;
3091 unsigned int flags
= portid
? NLM_F_MULTI
: 0;
3093 event
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP
, event
);
3094 nlh
= nfnl_msg_put(skb
, portid
, seq
, event
, flags
,
3095 exp
->tuple
.src
.l3num
, NFNETLINK_V0
, 0);
3099 if (ctnetlink_exp_dump_expect(skb
, exp
) < 0)
3100 goto nla_put_failure
;
3102 nlmsg_end(skb
, nlh
);
3107 nlmsg_cancel(skb
, nlh
);
3111 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3113 ctnetlink_expect_event(unsigned int events
, const struct nf_exp_event
*item
)
3115 struct nf_conntrack_expect
*exp
= item
->exp
;
3116 struct net
*net
= nf_ct_exp_net(exp
);
3117 struct nlmsghdr
*nlh
;
3118 struct sk_buff
*skb
;
3119 unsigned int type
, group
;
3122 if (events
& (1 << IPEXP_DESTROY
)) {
3123 type
= IPCTNL_MSG_EXP_DELETE
;
3124 group
= NFNLGRP_CONNTRACK_EXP_DESTROY
;
3125 } else if (events
& (1 << IPEXP_NEW
)) {
3126 type
= IPCTNL_MSG_EXP_NEW
;
3127 flags
= NLM_F_CREATE
|NLM_F_EXCL
;
3128 group
= NFNLGRP_CONNTRACK_EXP_NEW
;
3132 if (!item
->report
&& !nfnetlink_has_listeners(net
, group
))
3135 skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_ATOMIC
);
3139 type
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP
, type
);
3140 nlh
= nfnl_msg_put(skb
, item
->portid
, 0, type
, flags
,
3141 exp
->tuple
.src
.l3num
, NFNETLINK_V0
, 0);
3145 if (ctnetlink_exp_dump_expect(skb
, exp
) < 0)
3146 goto nla_put_failure
;
3148 nlmsg_end(skb
, nlh
);
3149 nfnetlink_send(skb
, net
, item
->portid
, group
, item
->report
, GFP_ATOMIC
);
3153 nlmsg_cancel(skb
, nlh
);
3157 nfnetlink_set_err(net
, 0, 0, -ENOBUFS
);
3161 static int ctnetlink_exp_done(struct netlink_callback
*cb
)
3164 nf_ct_expect_put((struct nf_conntrack_expect
*)cb
->args
[1]);
3169 ctnetlink_exp_dump_table(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3171 struct net
*net
= sock_net(skb
->sk
);
3172 struct nf_conntrack_expect
*exp
, *last
;
3173 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
3174 u_int8_t l3proto
= nfmsg
->nfgen_family
;
3177 last
= (struct nf_conntrack_expect
*)cb
->args
[1];
3178 for (; cb
->args
[0] < nf_ct_expect_hsize
; cb
->args
[0]++) {
3180 hlist_for_each_entry_rcu(exp
, &nf_ct_expect_hash
[cb
->args
[0]],
3182 if (l3proto
&& exp
->tuple
.src
.l3num
!= l3proto
)
3185 if (!net_eq(nf_ct_net(exp
->master
), net
))
3193 if (ctnetlink_exp_fill_info(skb
,
3194 NETLINK_CB(cb
->skb
).portid
,
3198 if (!refcount_inc_not_zero(&exp
->use
))
3200 cb
->args
[1] = (unsigned long)exp
;
3212 nf_ct_expect_put(last
);
3218 ctnetlink_exp_ct_dump_table(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3220 struct nf_conntrack_expect
*exp
, *last
;
3221 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
3222 struct nf_conn
*ct
= cb
->data
;
3223 struct nf_conn_help
*help
= nfct_help(ct
);
3224 u_int8_t l3proto
= nfmsg
->nfgen_family
;
3230 last
= (struct nf_conntrack_expect
*)cb
->args
[1];
3232 hlist_for_each_entry_rcu(exp
, &help
->expectations
, lnode
) {
3233 if (l3proto
&& exp
->tuple
.src
.l3num
!= l3proto
)
3240 if (ctnetlink_exp_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
3244 if (!refcount_inc_not_zero(&exp
->use
))
3246 cb
->args
[1] = (unsigned long)exp
;
3258 nf_ct_expect_put(last
);
3263 static int ctnetlink_dump_exp_ct(struct net
*net
, struct sock
*ctnl
,
3264 struct sk_buff
*skb
,
3265 const struct nlmsghdr
*nlh
,
3266 const struct nlattr
* const cda
[],
3267 struct netlink_ext_ack
*extack
)
3270 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
3271 u_int8_t u3
= nfmsg
->nfgen_family
;
3272 struct nf_conntrack_tuple tuple
;
3273 struct nf_conntrack_tuple_hash
*h
;
3275 struct nf_conntrack_zone zone
;
3276 struct netlink_dump_control c
= {
3277 .dump
= ctnetlink_exp_ct_dump_table
,
3278 .done
= ctnetlink_exp_done
,
3281 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_MASTER
,
3286 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
3290 h
= nf_conntrack_find_get(net
, &zone
, &tuple
);
3294 ct
= nf_ct_tuplehash_to_ctrack(h
);
3295 /* No expectation linked to this connection tracking. */
3296 if (!nfct_help(ct
)) {
3303 err
= netlink_dump_start(ctnl
, skb
, nlh
, &c
);
3309 static int ctnetlink_get_expect(struct sk_buff
*skb
,
3310 const struct nfnl_info
*info
,
3311 const struct nlattr
* const cda
[])
3313 u_int8_t u3
= info
->nfmsg
->nfgen_family
;
3314 struct nf_conntrack_tuple tuple
;
3315 struct nf_conntrack_expect
*exp
;
3316 struct nf_conntrack_zone zone
;
3317 struct sk_buff
*skb2
;
3320 if (info
->nlh
->nlmsg_flags
& NLM_F_DUMP
) {
3321 if (cda
[CTA_EXPECT_MASTER
])
3322 return ctnetlink_dump_exp_ct(info
->net
, info
->sk
, skb
,
3326 struct netlink_dump_control c
= {
3327 .dump
= ctnetlink_exp_dump_table
,
3328 .done
= ctnetlink_exp_done
,
3330 return netlink_dump_start(info
->sk
, skb
, info
->nlh
, &c
);
3334 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
3338 if (cda
[CTA_EXPECT_TUPLE
])
3339 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
,
3341 else if (cda
[CTA_EXPECT_MASTER
])
3342 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_MASTER
,
3350 exp
= nf_ct_expect_find_get(info
->net
, &zone
, &tuple
);
3354 if (cda
[CTA_EXPECT_ID
]) {
3355 __be32 id
= nla_get_be32(cda
[CTA_EXPECT_ID
]);
3357 if (id
!= nf_expect_get_id(exp
)) {
3358 nf_ct_expect_put(exp
);
3363 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
3365 nf_ct_expect_put(exp
);
3370 err
= ctnetlink_exp_fill_info(skb2
, NETLINK_CB(skb
).portid
,
3371 info
->nlh
->nlmsg_seq
, IPCTNL_MSG_EXP_NEW
,
3374 nf_ct_expect_put(exp
);
3380 return nfnetlink_unicast(skb2
, info
->net
, NETLINK_CB(skb
).portid
);
3383 static bool expect_iter_name(struct nf_conntrack_expect
*exp
, void *data
)
3385 const struct nf_conn_help
*m_help
;
3386 const char *name
= data
;
3388 m_help
= nfct_help(exp
->master
);
3390 return strcmp(m_help
->helper
->name
, name
) == 0;
3393 static bool expect_iter_all(struct nf_conntrack_expect
*exp
, void *data
)
3398 static int ctnetlink_del_expect(struct sk_buff
*skb
,
3399 const struct nfnl_info
*info
,
3400 const struct nlattr
* const cda
[])
3402 u_int8_t u3
= info
->nfmsg
->nfgen_family
;
3403 struct nf_conntrack_expect
*exp
;
3404 struct nf_conntrack_tuple tuple
;
3405 struct nf_conntrack_zone zone
;
3408 if (cda
[CTA_EXPECT_TUPLE
]) {
3409 /* delete a single expect by tuple */
3410 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
3414 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
,
3419 /* bump usage count to 2 */
3420 exp
= nf_ct_expect_find_get(info
->net
, &zone
, &tuple
);
3424 if (cda
[CTA_EXPECT_ID
]) {
3425 __be32 id
= nla_get_be32(cda
[CTA_EXPECT_ID
]);
3426 if (ntohl(id
) != (u32
)(unsigned long)exp
) {
3427 nf_ct_expect_put(exp
);
3432 /* after list removal, usage count == 1 */
3433 spin_lock_bh(&nf_conntrack_expect_lock
);
3434 if (del_timer(&exp
->timeout
)) {
3435 nf_ct_unlink_expect_report(exp
, NETLINK_CB(skb
).portid
,
3436 nlmsg_report(info
->nlh
));
3437 nf_ct_expect_put(exp
);
3439 spin_unlock_bh(&nf_conntrack_expect_lock
);
3440 /* have to put what we 'get' above.
3441 * after this line usage count == 0 */
3442 nf_ct_expect_put(exp
);
3443 } else if (cda
[CTA_EXPECT_HELP_NAME
]) {
3444 char *name
= nla_data(cda
[CTA_EXPECT_HELP_NAME
]);
3446 nf_ct_expect_iterate_net(info
->net
, expect_iter_name
, name
,
3447 NETLINK_CB(skb
).portid
,
3448 nlmsg_report(info
->nlh
));
3450 /* This basically means we have to flush everything*/
3451 nf_ct_expect_iterate_net(info
->net
, expect_iter_all
, NULL
,
3452 NETLINK_CB(skb
).portid
,
3453 nlmsg_report(info
->nlh
));
3459 ctnetlink_change_expect(struct nf_conntrack_expect
*x
,
3460 const struct nlattr
* const cda
[])
3462 if (cda
[CTA_EXPECT_TIMEOUT
]) {
3463 if (!del_timer(&x
->timeout
))
3466 x
->timeout
.expires
= jiffies
+
3467 ntohl(nla_get_be32(cda
[CTA_EXPECT_TIMEOUT
])) * HZ
;
3468 add_timer(&x
->timeout
);
3473 static const struct nla_policy exp_nat_nla_policy
[CTA_EXPECT_NAT_MAX
+1] = {
3474 [CTA_EXPECT_NAT_DIR
] = { .type
= NLA_U32
},
3475 [CTA_EXPECT_NAT_TUPLE
] = { .type
= NLA_NESTED
},
3479 ctnetlink_parse_expect_nat(const struct nlattr
*attr
,
3480 struct nf_conntrack_expect
*exp
,
3483 #if IS_ENABLED(CONFIG_NF_NAT)
3484 struct nlattr
*tb
[CTA_EXPECT_NAT_MAX
+1];
3485 struct nf_conntrack_tuple nat_tuple
= {};
3488 err
= nla_parse_nested_deprecated(tb
, CTA_EXPECT_NAT_MAX
, attr
,
3489 exp_nat_nla_policy
, NULL
);
3493 if (!tb
[CTA_EXPECT_NAT_DIR
] || !tb
[CTA_EXPECT_NAT_TUPLE
])
3496 err
= ctnetlink_parse_tuple((const struct nlattr
* const *)tb
,
3497 &nat_tuple
, CTA_EXPECT_NAT_TUPLE
,
3502 exp
->saved_addr
= nat_tuple
.src
.u3
;
3503 exp
->saved_proto
= nat_tuple
.src
.u
;
3504 exp
->dir
= ntohl(nla_get_be32(tb
[CTA_EXPECT_NAT_DIR
]));
3512 static struct nf_conntrack_expect
*
3513 ctnetlink_alloc_expect(const struct nlattr
* const cda
[], struct nf_conn
*ct
,
3514 struct nf_conntrack_helper
*helper
,
3515 struct nf_conntrack_tuple
*tuple
,
3516 struct nf_conntrack_tuple
*mask
)
3518 u_int32_t
class = 0;
3519 struct nf_conntrack_expect
*exp
;
3520 struct nf_conn_help
*help
;
3523 help
= nfct_help(ct
);
3525 return ERR_PTR(-EOPNOTSUPP
);
3527 if (cda
[CTA_EXPECT_CLASS
] && helper
) {
3528 class = ntohl(nla_get_be32(cda
[CTA_EXPECT_CLASS
]));
3529 if (class > helper
->expect_class_max
)
3530 return ERR_PTR(-EINVAL
);
3532 exp
= nf_ct_expect_alloc(ct
);
3534 return ERR_PTR(-ENOMEM
);
3536 if (cda
[CTA_EXPECT_FLAGS
]) {
3537 exp
->flags
= ntohl(nla_get_be32(cda
[CTA_EXPECT_FLAGS
]));
3538 exp
->flags
&= ~NF_CT_EXPECT_USERSPACE
;
3542 if (cda
[CTA_EXPECT_FN
]) {
3543 const char *name
= nla_data(cda
[CTA_EXPECT_FN
]);
3544 struct nf_ct_helper_expectfn
*expfn
;
3546 expfn
= nf_ct_helper_expectfn_find_by_name(name
);
3547 if (expfn
== NULL
) {
3551 exp
->expectfn
= expfn
->expectfn
;
3553 exp
->expectfn
= NULL
;
3557 exp
->helper
= helper
;
3558 exp
->tuple
= *tuple
;
3559 exp
->mask
.src
.u3
= mask
->src
.u3
;
3560 exp
->mask
.src
.u
.all
= mask
->src
.u
.all
;
3562 if (cda
[CTA_EXPECT_NAT
]) {
3563 err
= ctnetlink_parse_expect_nat(cda
[CTA_EXPECT_NAT
],
3564 exp
, nf_ct_l3num(ct
));
3570 nf_ct_expect_put(exp
);
3571 return ERR_PTR(err
);
3575 ctnetlink_create_expect(struct net
*net
,
3576 const struct nf_conntrack_zone
*zone
,
3577 const struct nlattr
* const cda
[],
3578 u_int8_t u3
, u32 portid
, int report
)
3580 struct nf_conntrack_tuple tuple
, mask
, master_tuple
;
3581 struct nf_conntrack_tuple_hash
*h
= NULL
;
3582 struct nf_conntrack_helper
*helper
= NULL
;
3583 struct nf_conntrack_expect
*exp
;
3587 /* caller guarantees that those three CTA_EXPECT_* exist */
3588 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
,
3592 err
= ctnetlink_parse_tuple(cda
, &mask
, CTA_EXPECT_MASK
,
3596 err
= ctnetlink_parse_tuple(cda
, &master_tuple
, CTA_EXPECT_MASTER
,
3601 /* Look for master conntrack of this expectation */
3602 h
= nf_conntrack_find_get(net
, zone
, &master_tuple
);
3605 ct
= nf_ct_tuplehash_to_ctrack(h
);
3608 if (cda
[CTA_EXPECT_HELP_NAME
]) {
3609 const char *helpname
= nla_data(cda
[CTA_EXPECT_HELP_NAME
]);
3611 helper
= __nf_conntrack_helper_find(helpname
, u3
,
3612 nf_ct_protonum(ct
));
3613 if (helper
== NULL
) {
3615 #ifdef CONFIG_MODULES
3616 if (request_module("nfct-helper-%s", helpname
) < 0) {
3621 helper
= __nf_conntrack_helper_find(helpname
, u3
,
3622 nf_ct_protonum(ct
));
3634 exp
= ctnetlink_alloc_expect(cda
, ct
, helper
, &tuple
, &mask
);
3640 err
= nf_ct_expect_related_report(exp
, portid
, report
, 0);
3641 nf_ct_expect_put(exp
);
3649 static int ctnetlink_new_expect(struct sk_buff
*skb
,
3650 const struct nfnl_info
*info
,
3651 const struct nlattr
* const cda
[])
3653 u_int8_t u3
= info
->nfmsg
->nfgen_family
;
3654 struct nf_conntrack_tuple tuple
;
3655 struct nf_conntrack_expect
*exp
;
3656 struct nf_conntrack_zone zone
;
3659 if (!cda
[CTA_EXPECT_TUPLE
]
3660 || !cda
[CTA_EXPECT_MASK
]
3661 || !cda
[CTA_EXPECT_MASTER
])
3664 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
3668 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
,
3673 spin_lock_bh(&nf_conntrack_expect_lock
);
3674 exp
= __nf_ct_expect_find(info
->net
, &zone
, &tuple
);
3676 spin_unlock_bh(&nf_conntrack_expect_lock
);
3678 if (info
->nlh
->nlmsg_flags
& NLM_F_CREATE
) {
3679 err
= ctnetlink_create_expect(info
->net
, &zone
, cda
, u3
,
3680 NETLINK_CB(skb
).portid
,
3681 nlmsg_report(info
->nlh
));
3687 if (!(info
->nlh
->nlmsg_flags
& NLM_F_EXCL
))
3688 err
= ctnetlink_change_expect(exp
, cda
);
3689 spin_unlock_bh(&nf_conntrack_expect_lock
);
3695 ctnetlink_exp_stat_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, int cpu
,
3696 const struct ip_conntrack_stat
*st
)
3698 struct nlmsghdr
*nlh
;
3699 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
3701 event
= nfnl_msg_type(NFNL_SUBSYS_CTNETLINK
,
3702 IPCTNL_MSG_EXP_GET_STATS_CPU
);
3703 nlh
= nfnl_msg_put(skb
, portid
, seq
, event
, flags
, AF_UNSPEC
,
3704 NFNETLINK_V0
, htons(cpu
));
3708 if (nla_put_be32(skb
, CTA_STATS_EXP_NEW
, htonl(st
->expect_new
)) ||
3709 nla_put_be32(skb
, CTA_STATS_EXP_CREATE
, htonl(st
->expect_create
)) ||
3710 nla_put_be32(skb
, CTA_STATS_EXP_DELETE
, htonl(st
->expect_delete
)))
3711 goto nla_put_failure
;
3713 nlmsg_end(skb
, nlh
);
3718 nlmsg_cancel(skb
, nlh
);
3723 ctnetlink_exp_stat_cpu_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3726 struct net
*net
= sock_net(skb
->sk
);
3728 if (cb
->args
[0] == nr_cpu_ids
)
3731 for (cpu
= cb
->args
[0]; cpu
< nr_cpu_ids
; cpu
++) {
3732 const struct ip_conntrack_stat
*st
;
3734 if (!cpu_possible(cpu
))
3737 st
= per_cpu_ptr(net
->ct
.stat
, cpu
);
3738 if (ctnetlink_exp_stat_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
3748 static int ctnetlink_stat_exp_cpu(struct sk_buff
*skb
,
3749 const struct nfnl_info
*info
,
3750 const struct nlattr
* const cda
[])
3752 if (info
->nlh
->nlmsg_flags
& NLM_F_DUMP
) {
3753 struct netlink_dump_control c
= {
3754 .dump
= ctnetlink_exp_stat_cpu_dump
,
3756 return netlink_dump_start(info
->sk
, skb
, info
->nlh
, &c
);
3762 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3763 static struct nf_ct_event_notifier ctnl_notifier
= {
3764 .ct_event
= ctnetlink_conntrack_event
,
3765 .exp_event
= ctnetlink_expect_event
,
3769 static const struct nfnl_callback ctnl_cb
[IPCTNL_MSG_MAX
] = {
3770 [IPCTNL_MSG_CT_NEW
] = {
3771 .call
= ctnetlink_new_conntrack
,
3772 .type
= NFNL_CB_MUTEX
,
3773 .attr_count
= CTA_MAX
,
3774 .policy
= ct_nla_policy
3776 [IPCTNL_MSG_CT_GET
] = {
3777 .call
= ctnetlink_get_conntrack
,
3778 .type
= NFNL_CB_MUTEX
,
3779 .attr_count
= CTA_MAX
,
3780 .policy
= ct_nla_policy
3782 [IPCTNL_MSG_CT_DELETE
] = {
3783 .call
= ctnetlink_del_conntrack
,
3784 .type
= NFNL_CB_MUTEX
,
3785 .attr_count
= CTA_MAX
,
3786 .policy
= ct_nla_policy
3788 [IPCTNL_MSG_CT_GET_CTRZERO
] = {
3789 .call
= ctnetlink_get_conntrack
,
3790 .type
= NFNL_CB_MUTEX
,
3791 .attr_count
= CTA_MAX
,
3792 .policy
= ct_nla_policy
3794 [IPCTNL_MSG_CT_GET_STATS_CPU
] = {
3795 .call
= ctnetlink_stat_ct_cpu
,
3796 .type
= NFNL_CB_MUTEX
,
3798 [IPCTNL_MSG_CT_GET_STATS
] = {
3799 .call
= ctnetlink_stat_ct
,
3800 .type
= NFNL_CB_MUTEX
,
3802 [IPCTNL_MSG_CT_GET_DYING
] = {
3803 .call
= ctnetlink_get_ct_dying
,
3804 .type
= NFNL_CB_MUTEX
,
3806 [IPCTNL_MSG_CT_GET_UNCONFIRMED
] = {
3807 .call
= ctnetlink_get_ct_unconfirmed
,
3808 .type
= NFNL_CB_MUTEX
,
3812 static const struct nfnl_callback ctnl_exp_cb
[IPCTNL_MSG_EXP_MAX
] = {
3813 [IPCTNL_MSG_EXP_GET
] = {
3814 .call
= ctnetlink_get_expect
,
3815 .type
= NFNL_CB_MUTEX
,
3816 .attr_count
= CTA_EXPECT_MAX
,
3817 .policy
= exp_nla_policy
3819 [IPCTNL_MSG_EXP_NEW
] = {
3820 .call
= ctnetlink_new_expect
,
3821 .type
= NFNL_CB_MUTEX
,
3822 .attr_count
= CTA_EXPECT_MAX
,
3823 .policy
= exp_nla_policy
3825 [IPCTNL_MSG_EXP_DELETE
] = {
3826 .call
= ctnetlink_del_expect
,
3827 .type
= NFNL_CB_MUTEX
,
3828 .attr_count
= CTA_EXPECT_MAX
,
3829 .policy
= exp_nla_policy
3831 [IPCTNL_MSG_EXP_GET_STATS_CPU
] = {
3832 .call
= ctnetlink_stat_exp_cpu
,
3833 .type
= NFNL_CB_MUTEX
,
3837 static const struct nfnetlink_subsystem ctnl_subsys
= {
3838 .name
= "conntrack",
3839 .subsys_id
= NFNL_SUBSYS_CTNETLINK
,
3840 .cb_count
= IPCTNL_MSG_MAX
,
3844 static const struct nfnetlink_subsystem ctnl_exp_subsys
= {
3845 .name
= "conntrack_expect",
3846 .subsys_id
= NFNL_SUBSYS_CTNETLINK_EXP
,
3847 .cb_count
= IPCTNL_MSG_EXP_MAX
,
3851 MODULE_ALIAS("ip_conntrack_netlink");
3852 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK
);
3853 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP
);
3855 static int __net_init
ctnetlink_net_init(struct net
*net
)
3857 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3858 nf_conntrack_register_notifier(net
, &ctnl_notifier
);
3863 static void ctnetlink_net_pre_exit(struct net
*net
)
3865 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3866 nf_conntrack_unregister_notifier(net
);
3870 static struct pernet_operations ctnetlink_net_ops
= {
3871 .init
= ctnetlink_net_init
,
3872 .pre_exit
= ctnetlink_net_pre_exit
,
3875 static int __init
ctnetlink_init(void)
3879 ret
= nfnetlink_subsys_register(&ctnl_subsys
);
3881 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3885 ret
= nfnetlink_subsys_register(&ctnl_exp_subsys
);
3887 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3888 goto err_unreg_subsys
;
3891 ret
= register_pernet_subsys(&ctnetlink_net_ops
);
3893 pr_err("ctnetlink_init: cannot register pernet operations\n");
3894 goto err_unreg_exp_subsys
;
3896 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3897 /* setup interaction between nf_queue and nf_conntrack_netlink. */
3898 RCU_INIT_POINTER(nfnl_ct_hook
, &ctnetlink_glue_hook
);
3902 err_unreg_exp_subsys
:
3903 nfnetlink_subsys_unregister(&ctnl_exp_subsys
);
3905 nfnetlink_subsys_unregister(&ctnl_subsys
);
3910 static void __exit
ctnetlink_exit(void)
3912 unregister_pernet_subsys(&ctnetlink_net_ops
);
3913 nfnetlink_subsys_unregister(&ctnl_exp_subsys
);
3914 nfnetlink_subsys_unregister(&ctnl_subsys
);
3915 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3916 RCU_INIT_POINTER(nfnl_ct_hook
, NULL
);
3921 module_init(ctnetlink_init
);
3922 module_exit(ctnetlink_exit
);