1 // SPDX-License-Identifier: GPL-2.0
2 /* Bareudp: UDP tunnel encapsulation for different Payload types like
4 * Copyright (c) 2019 Nokia, Inc.
5 * Authors: Martin Varghese, <martin.varghese@nokia.com>
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/etherdevice.h>
13 #include <linux/hash.h>
14 #include <net/dst_metadata.h>
15 #include <net/gro_cells.h>
16 #include <net/rtnetlink.h>
17 #include <net/protocol.h>
18 #include <net/ip6_tunnel.h>
19 #include <net/ip_tunnels.h>
20 #include <net/udp_tunnel.h>
21 #include <net/bareudp.h>
23 #define BAREUDP_BASE_HLEN sizeof(struct udphdr)
24 #define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
25 sizeof(struct udphdr))
26 #define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
27 sizeof(struct udphdr))
29 static bool log_ecn_error
= true;
30 module_param(log_ecn_error
, bool, 0644);
31 MODULE_PARM_DESC(log_ecn_error
, "Log packets received with corrupted ECN");
33 /* per-network namespace private data for this module */
35 static unsigned int bareudp_net_id
;
38 struct list_head bareudp_list
;
41 /* Pseudo network device */
43 struct net
*net
; /* netns for packet i/o */
44 struct net_device
*dev
; /* netdev for bareudp tunnel */
48 bool multi_proto_mode
;
49 struct socket __rcu
*sock
;
50 struct list_head next
; /* bareudp node on namespace list */
51 struct gro_cells gro_cells
;
54 static int bareudp_udp_encap_recv(struct sock
*sk
, struct sk_buff
*skb
)
56 struct metadata_dst
*tun_dst
= NULL
;
57 struct pcpu_sw_netstats
*stats
;
58 struct bareudp_dev
*bareudp
;
59 unsigned short family
;
65 bareudp
= rcu_dereference_sk_user_data(sk
);
69 if (skb
->protocol
== htons(ETH_P_IP
))
74 if (bareudp
->ethertype
== htons(ETH_P_IP
)) {
77 iphdr
= (struct iphdr
*)(skb
->data
+ BAREUDP_BASE_HLEN
);
78 if (iphdr
->version
== 4) {
79 proto
= bareudp
->ethertype
;
80 } else if (bareudp
->multi_proto_mode
&& (iphdr
->version
== 6)) {
81 proto
= htons(ETH_P_IPV6
);
83 bareudp
->dev
->stats
.rx_dropped
++;
86 } else if (bareudp
->ethertype
== htons(ETH_P_MPLS_UC
)) {
87 struct iphdr
*tunnel_hdr
;
89 tunnel_hdr
= (struct iphdr
*)skb_network_header(skb
);
90 if (tunnel_hdr
->version
== 4) {
91 if (!ipv4_is_multicast(tunnel_hdr
->daddr
)) {
92 proto
= bareudp
->ethertype
;
93 } else if (bareudp
->multi_proto_mode
&&
94 ipv4_is_multicast(tunnel_hdr
->daddr
)) {
95 proto
= htons(ETH_P_MPLS_MC
);
97 bareudp
->dev
->stats
.rx_dropped
++;
102 struct ipv6hdr
*tunnel_hdr_v6
;
104 tunnel_hdr_v6
= (struct ipv6hdr
*)skb_network_header(skb
);
106 ipv6_addr_type((struct in6_addr
*)&tunnel_hdr_v6
->daddr
);
107 if (!(addr_type
& IPV6_ADDR_MULTICAST
)) {
108 proto
= bareudp
->ethertype
;
109 } else if (bareudp
->multi_proto_mode
&&
110 (addr_type
& IPV6_ADDR_MULTICAST
)) {
111 proto
= htons(ETH_P_MPLS_MC
);
113 bareudp
->dev
->stats
.rx_dropped
++;
118 proto
= bareudp
->ethertype
;
121 if (iptunnel_pull_header(skb
, BAREUDP_BASE_HLEN
,
123 !net_eq(bareudp
->net
,
124 dev_net(bareudp
->dev
)))) {
125 bareudp
->dev
->stats
.rx_dropped
++;
129 tun_dst
= udp_tun_rx_dst(skb
, family
, TUNNEL_KEY
, 0, 0);
131 bareudp
->dev
->stats
.rx_dropped
++;
134 skb_dst_set(skb
, &tun_dst
->dst
);
135 skb
->dev
= bareudp
->dev
;
136 oiph
= skb_network_header(skb
);
137 skb_reset_network_header(skb
);
139 if (family
== AF_INET
)
140 err
= IP_ECN_decapsulate(oiph
, skb
);
141 #if IS_ENABLED(CONFIG_IPV6)
143 err
= IP6_ECN_decapsulate(oiph
, skb
);
148 if (family
== AF_INET
)
149 net_info_ratelimited("non-ECT from %pI4 "
151 &((struct iphdr
*)oiph
)->saddr
,
152 ((struct iphdr
*)oiph
)->tos
);
153 #if IS_ENABLED(CONFIG_IPV6)
155 net_info_ratelimited("non-ECT from %pI6\n",
156 &((struct ipv6hdr
*)oiph
)->saddr
);
160 ++bareudp
->dev
->stats
.rx_frame_errors
;
161 ++bareudp
->dev
->stats
.rx_errors
;
167 err
= gro_cells_receive(&bareudp
->gro_cells
, skb
);
168 if (likely(err
== NET_RX_SUCCESS
)) {
169 stats
= this_cpu_ptr(bareudp
->dev
->tstats
);
170 u64_stats_update_begin(&stats
->syncp
);
172 stats
->rx_bytes
+= len
;
173 u64_stats_update_end(&stats
->syncp
);
177 /* Consume bad packet */
/* ICMP error lookup callback for the encap socket: bareudp keeps no flow
 * state, so there is nothing to match — report success unconditionally.
 */
static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
188 static int bareudp_init(struct net_device
*dev
)
190 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
193 dev
->tstats
= netdev_alloc_pcpu_stats(struct pcpu_sw_netstats
);
197 err
= gro_cells_init(&bareudp
->gro_cells
, dev
);
199 free_percpu(dev
->tstats
);
205 static void bareudp_uninit(struct net_device
*dev
)
207 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
209 gro_cells_destroy(&bareudp
->gro_cells
);
210 free_percpu(dev
->tstats
);
213 static struct socket
*bareudp_create_sock(struct net
*net
, __be16 port
)
215 struct udp_port_cfg udp_conf
;
219 memset(&udp_conf
, 0, sizeof(udp_conf
));
220 #if IS_ENABLED(CONFIG_IPV6)
221 udp_conf
.family
= AF_INET6
;
223 udp_conf
.family
= AF_INET
;
225 udp_conf
.local_udp_port
= port
;
226 /* Open UDP socket */
227 err
= udp_sock_create(net
, &udp_conf
, &sock
);
234 /* Create new listen socket if needed */
235 static int bareudp_socket_create(struct bareudp_dev
*bareudp
, __be16 port
)
237 struct udp_tunnel_sock_cfg tunnel_cfg
;
240 sock
= bareudp_create_sock(bareudp
->net
, port
);
242 return PTR_ERR(sock
);
244 /* Mark socket as an encapsulation socket */
245 memset(&tunnel_cfg
, 0, sizeof(tunnel_cfg
));
246 tunnel_cfg
.sk_user_data
= bareudp
;
247 tunnel_cfg
.encap_type
= 1;
248 tunnel_cfg
.encap_rcv
= bareudp_udp_encap_recv
;
249 tunnel_cfg
.encap_err_lookup
= bareudp_err_lookup
;
250 tunnel_cfg
.encap_destroy
= NULL
;
251 setup_udp_tunnel_sock(bareudp
->net
, sock
, &tunnel_cfg
);
253 /* As the setup_udp_tunnel_sock does not call udp_encap_enable if the
254 * socket type is v6 an explicit call to udp_encap_enable is needed.
256 if (sock
->sk
->sk_family
== AF_INET6
)
259 rcu_assign_pointer(bareudp
->sock
, sock
);
263 static int bareudp_open(struct net_device
*dev
)
265 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
268 ret
= bareudp_socket_create(bareudp
, bareudp
->port
);
272 static void bareudp_sock_release(struct bareudp_dev
*bareudp
)
276 sock
= bareudp
->sock
;
277 rcu_assign_pointer(bareudp
->sock
, NULL
);
279 udp_tunnel_sock_release(sock
);
/* ndo_stop: tear down the listening socket. */
static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}
290 static int bareudp_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
,
291 struct bareudp_dev
*bareudp
,
292 const struct ip_tunnel_info
*info
)
294 bool xnet
= !net_eq(bareudp
->net
, dev_net(bareudp
->dev
));
295 bool use_cache
= ip_tunnel_dst_cache_usable(skb
, info
);
296 struct socket
*sock
= rcu_dereference(bareudp
->sock
);
297 bool udp_sum
= !!(info
->key
.tun_flags
& TUNNEL_CSUM
);
298 const struct ip_tunnel_key
*key
= &info
->key
;
309 rt
= ip_route_output_tunnel(skb
, dev
, bareudp
->net
, &saddr
, info
,
310 IPPROTO_UDP
, use_cache
);
315 skb_tunnel_check_pmtu(skb
, &rt
->dst
,
316 BAREUDP_IPV4_HLEN
+ info
->options_len
);
318 sport
= udp_flow_src_port(bareudp
->net
, skb
,
319 bareudp
->sport_min
, USHRT_MAX
,
321 tos
= ip_tunnel_ecn_encap(key
->tos
, ip_hdr(skb
), skb
);
323 df
= key
->tun_flags
& TUNNEL_DONT_FRAGMENT
? htons(IP_DF
) : 0;
324 skb_scrub_packet(skb
, xnet
);
327 if (!skb_pull(skb
, skb_network_offset(skb
)))
330 min_headroom
= LL_RESERVED_SPACE(rt
->dst
.dev
) + rt
->dst
.header_len
+
331 BAREUDP_BASE_HLEN
+ info
->options_len
+ sizeof(struct iphdr
);
333 err
= skb_cow_head(skb
, min_headroom
);
337 err
= udp_tunnel_handle_offloads(skb
, udp_sum
);
341 skb_set_inner_protocol(skb
, bareudp
->ethertype
);
342 udp_tunnel_xmit_skb(rt
, sock
->sk
, skb
, saddr
, info
->key
.u
.ipv4
.dst
,
343 tos
, ttl
, df
, sport
, bareudp
->port
,
344 !net_eq(bareudp
->net
, dev_net(bareudp
->dev
)),
345 !(info
->key
.tun_flags
& TUNNEL_CSUM
));
349 dst_release(&rt
->dst
);
353 #if IS_ENABLED(CONFIG_IPV6)
354 static int bareudp6_xmit_skb(struct sk_buff
*skb
, struct net_device
*dev
,
355 struct bareudp_dev
*bareudp
,
356 const struct ip_tunnel_info
*info
)
358 bool xnet
= !net_eq(bareudp
->net
, dev_net(bareudp
->dev
));
359 bool use_cache
= ip_tunnel_dst_cache_usable(skb
, info
);
360 struct socket
*sock
= rcu_dereference(bareudp
->sock
);
361 bool udp_sum
= !!(info
->key
.tun_flags
& TUNNEL_CSUM
);
362 const struct ip_tunnel_key
*key
= &info
->key
;
363 struct dst_entry
*dst
= NULL
;
364 struct in6_addr saddr
, daddr
;
373 dst
= ip6_dst_lookup_tunnel(skb
, dev
, bareudp
->net
, sock
, &saddr
, info
,
374 IPPROTO_UDP
, use_cache
);
378 skb_tunnel_check_pmtu(skb
, dst
, BAREUDP_IPV6_HLEN
+ info
->options_len
);
380 sport
= udp_flow_src_port(bareudp
->net
, skb
,
381 bareudp
->sport_min
, USHRT_MAX
,
383 prio
= ip_tunnel_ecn_encap(key
->tos
, ip_hdr(skb
), skb
);
386 skb_scrub_packet(skb
, xnet
);
389 if (!skb_pull(skb
, skb_network_offset(skb
)))
392 min_headroom
= LL_RESERVED_SPACE(dst
->dev
) + dst
->header_len
+
393 BAREUDP_BASE_HLEN
+ info
->options_len
+ sizeof(struct iphdr
);
395 err
= skb_cow_head(skb
, min_headroom
);
399 err
= udp_tunnel_handle_offloads(skb
, udp_sum
);
403 daddr
= info
->key
.u
.ipv6
.dst
;
404 udp_tunnel6_xmit_skb(dst
, sock
->sk
, skb
, dev
,
405 &saddr
, &daddr
, prio
, ttl
,
406 info
->key
.label
, sport
, bareudp
->port
,
407 !(info
->key
.tun_flags
& TUNNEL_CSUM
));
416 static netdev_tx_t
bareudp_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
418 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
419 struct ip_tunnel_info
*info
= NULL
;
422 if (skb
->protocol
!= bareudp
->ethertype
) {
423 if (!bareudp
->multi_proto_mode
||
424 (skb
->protocol
!= htons(ETH_P_MPLS_MC
) &&
425 skb
->protocol
!= htons(ETH_P_IPV6
))) {
431 info
= skb_tunnel_info(skb
);
432 if (unlikely(!info
|| !(info
->mode
& IP_TUNNEL_INFO_TX
))) {
438 #if IS_ENABLED(CONFIG_IPV6)
439 if (info
->mode
& IP_TUNNEL_INFO_IPV6
)
440 err
= bareudp6_xmit_skb(skb
, dev
, bareudp
, info
);
443 err
= bareudp_xmit_skb(skb
, dev
, bareudp
, info
);
453 dev
->stats
.collisions
++;
454 else if (err
== -ENETUNREACH
)
455 dev
->stats
.tx_carrier_errors
++;
457 dev
->stats
.tx_errors
++;
461 static int bareudp_fill_metadata_dst(struct net_device
*dev
,
464 struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
465 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
468 use_cache
= ip_tunnel_dst_cache_usable(skb
, info
);
470 if (ip_tunnel_info_af(info
) == AF_INET
) {
474 rt
= ip_route_output_tunnel(skb
, dev
, bareudp
->net
, &saddr
,
475 info
, IPPROTO_UDP
, use_cache
);
480 info
->key
.u
.ipv4
.src
= saddr
;
481 #if IS_ENABLED(CONFIG_IPV6)
482 } else if (ip_tunnel_info_af(info
) == AF_INET6
) {
483 struct dst_entry
*dst
;
484 struct in6_addr saddr
;
485 struct socket
*sock
= rcu_dereference(bareudp
->sock
);
487 dst
= ip6_dst_lookup_tunnel(skb
, dev
, bareudp
->net
, sock
,
488 &saddr
, info
, IPPROTO_UDP
,
494 info
->key
.u
.ipv6
.src
= saddr
;
500 info
->key
.tp_src
= udp_flow_src_port(bareudp
->net
, skb
,
503 info
->key
.tp_dst
= bareudp
->port
;
507 static const struct net_device_ops bareudp_netdev_ops
= {
508 .ndo_init
= bareudp_init
,
509 .ndo_uninit
= bareudp_uninit
,
510 .ndo_open
= bareudp_open
,
511 .ndo_stop
= bareudp_stop
,
512 .ndo_start_xmit
= bareudp_xmit
,
513 .ndo_get_stats64
= ip_tunnel_get_stats64
,
514 .ndo_fill_metadata_dst
= bareudp_fill_metadata_dst
,
517 static const struct nla_policy bareudp_policy
[IFLA_BAREUDP_MAX
+ 1] = {
518 [IFLA_BAREUDP_PORT
] = { .type
= NLA_U16
},
519 [IFLA_BAREUDP_ETHERTYPE
] = { .type
= NLA_U16
},
520 [IFLA_BAREUDP_SRCPORT_MIN
] = { .type
= NLA_U16
},
521 [IFLA_BAREUDP_MULTIPROTO_MODE
] = { .type
= NLA_FLAG
},
524 /* Info for udev, that this is a virtual tunnel endpoint */
525 static struct device_type bareudp_type
= {
529 /* Initialize the device structure. */
530 static void bareudp_setup(struct net_device
*dev
)
532 dev
->netdev_ops
= &bareudp_netdev_ops
;
533 dev
->needs_free_netdev
= true;
534 SET_NETDEV_DEVTYPE(dev
, &bareudp_type
);
535 dev
->features
|= NETIF_F_SG
| NETIF_F_HW_CSUM
;
536 dev
->features
|= NETIF_F_RXCSUM
;
537 dev
->features
|= NETIF_F_GSO_SOFTWARE
;
538 dev
->hw_features
|= NETIF_F_SG
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
;
539 dev
->hw_features
|= NETIF_F_GSO_SOFTWARE
;
540 dev
->hard_header_len
= 0;
542 dev
->mtu
= ETH_DATA_LEN
;
543 dev
->min_mtu
= IPV4_MIN_MTU
;
544 dev
->max_mtu
= IP_MAX_MTU
- BAREUDP_BASE_HLEN
;
545 dev
->type
= ARPHRD_NONE
;
547 dev
->priv_flags
|= IFF_NO_QUEUE
;
548 dev
->flags
= IFF_POINTOPOINT
| IFF_NOARP
| IFF_MULTICAST
;
551 static int bareudp_validate(struct nlattr
*tb
[], struct nlattr
*data
[],
552 struct netlink_ext_ack
*extack
)
555 NL_SET_ERR_MSG(extack
,
556 "Not enough attributes provided to perform the operation");
562 static int bareudp2info(struct nlattr
*data
[], struct bareudp_conf
*conf
,
563 struct netlink_ext_ack
*extack
)
565 if (!data
[IFLA_BAREUDP_PORT
]) {
566 NL_SET_ERR_MSG(extack
, "port not specified");
569 if (!data
[IFLA_BAREUDP_ETHERTYPE
]) {
570 NL_SET_ERR_MSG(extack
, "ethertype not specified");
574 if (data
[IFLA_BAREUDP_PORT
])
575 conf
->port
= nla_get_u16(data
[IFLA_BAREUDP_PORT
]);
577 if (data
[IFLA_BAREUDP_ETHERTYPE
])
578 conf
->ethertype
= nla_get_u16(data
[IFLA_BAREUDP_ETHERTYPE
]);
580 if (data
[IFLA_BAREUDP_SRCPORT_MIN
])
581 conf
->sport_min
= nla_get_u16(data
[IFLA_BAREUDP_SRCPORT_MIN
]);
586 static struct bareudp_dev
*bareudp_find_dev(struct bareudp_net
*bn
,
587 const struct bareudp_conf
*conf
)
589 struct bareudp_dev
*bareudp
, *t
= NULL
;
591 list_for_each_entry(bareudp
, &bn
->bareudp_list
, next
) {
592 if (conf
->port
== bareudp
->port
)
598 static int bareudp_configure(struct net
*net
, struct net_device
*dev
,
599 struct bareudp_conf
*conf
)
601 struct bareudp_net
*bn
= net_generic(net
, bareudp_net_id
);
602 struct bareudp_dev
*t
, *bareudp
= netdev_priv(dev
);
607 t
= bareudp_find_dev(bn
, conf
);
611 if (conf
->multi_proto_mode
&&
612 (conf
->ethertype
!= htons(ETH_P_MPLS_UC
) &&
613 conf
->ethertype
!= htons(ETH_P_IP
)))
616 bareudp
->port
= conf
->port
;
617 bareudp
->ethertype
= conf
->ethertype
;
618 bareudp
->sport_min
= conf
->sport_min
;
619 bareudp
->multi_proto_mode
= conf
->multi_proto_mode
;
620 err
= register_netdevice(dev
);
624 list_add(&bareudp
->next
, &bn
->bareudp_list
);
628 static int bareudp_link_config(struct net_device
*dev
,
634 err
= dev_set_mtu(dev
, nla_get_u32(tb
[IFLA_MTU
]));
641 static int bareudp_newlink(struct net
*net
, struct net_device
*dev
,
642 struct nlattr
*tb
[], struct nlattr
*data
[],
643 struct netlink_ext_ack
*extack
)
645 struct bareudp_conf conf
;
648 err
= bareudp2info(data
, &conf
, extack
);
652 err
= bareudp_configure(net
, dev
, &conf
);
656 err
= bareudp_link_config(dev
, tb
);
663 static void bareudp_dellink(struct net_device
*dev
, struct list_head
*head
)
665 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
667 list_del(&bareudp
->next
);
668 unregister_netdevice_queue(dev
, head
);
671 static size_t bareudp_get_size(const struct net_device
*dev
)
673 return nla_total_size(sizeof(__be16
)) + /* IFLA_BAREUDP_PORT */
674 nla_total_size(sizeof(__be16
)) + /* IFLA_BAREUDP_ETHERTYPE */
675 nla_total_size(sizeof(__u16
)) + /* IFLA_BAREUDP_SRCPORT_MIN */
676 nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */
680 static int bareudp_fill_info(struct sk_buff
*skb
, const struct net_device
*dev
)
682 struct bareudp_dev
*bareudp
= netdev_priv(dev
);
684 if (nla_put_be16(skb
, IFLA_BAREUDP_PORT
, bareudp
->port
))
685 goto nla_put_failure
;
686 if (nla_put_be16(skb
, IFLA_BAREUDP_ETHERTYPE
, bareudp
->ethertype
))
687 goto nla_put_failure
;
688 if (nla_put_u16(skb
, IFLA_BAREUDP_SRCPORT_MIN
, bareudp
->sport_min
))
689 goto nla_put_failure
;
690 if (bareudp
->multi_proto_mode
&&
691 nla_put_flag(skb
, IFLA_BAREUDP_MULTIPROTO_MODE
))
692 goto nla_put_failure
;
700 static struct rtnl_link_ops bareudp_link_ops __read_mostly
= {
702 .maxtype
= IFLA_BAREUDP_MAX
,
703 .policy
= bareudp_policy
,
704 .priv_size
= sizeof(struct bareudp_dev
),
705 .setup
= bareudp_setup
,
706 .validate
= bareudp_validate
,
707 .newlink
= bareudp_newlink
,
708 .dellink
= bareudp_dellink
,
709 .get_size
= bareudp_get_size
,
710 .fill_info
= bareudp_fill_info
,
713 struct net_device
*bareudp_dev_create(struct net
*net
, const char *name
,
715 struct bareudp_conf
*conf
)
717 struct nlattr
*tb
[IFLA_MAX
+ 1];
718 struct net_device
*dev
;
719 LIST_HEAD(list_kill
);
722 memset(tb
, 0, sizeof(tb
));
723 dev
= rtnl_create_link(net
, name
, name_assign_type
,
724 &bareudp_link_ops
, tb
, NULL
);
728 err
= bareudp_configure(net
, dev
, conf
);
733 err
= dev_set_mtu(dev
, IP_MAX_MTU
- BAREUDP_BASE_HLEN
);
737 err
= rtnl_configure_link(dev
, NULL
);
743 bareudp_dellink(dev
, &list_kill
);
744 unregister_netdevice_many(&list_kill
);
747 EXPORT_SYMBOL_GPL(bareudp_dev_create
);
749 static __net_init
int bareudp_init_net(struct net
*net
)
751 struct bareudp_net
*bn
= net_generic(net
, bareudp_net_id
);
753 INIT_LIST_HEAD(&bn
->bareudp_list
);
757 static void bareudp_destroy_tunnels(struct net
*net
, struct list_head
*head
)
759 struct bareudp_net
*bn
= net_generic(net
, bareudp_net_id
);
760 struct bareudp_dev
*bareudp
, *next
;
762 list_for_each_entry_safe(bareudp
, next
, &bn
->bareudp_list
, next
)
763 unregister_netdevice_queue(bareudp
->dev
, head
);
766 static void __net_exit
bareudp_exit_batch_net(struct list_head
*net_list
)
772 list_for_each_entry(net
, net_list
, exit_list
)
773 bareudp_destroy_tunnels(net
, &list
);
775 /* unregister the devices gathered above */
776 unregister_netdevice_many(&list
);
780 static struct pernet_operations bareudp_net_ops
= {
781 .init
= bareudp_init_net
,
782 .exit_batch
= bareudp_exit_batch_net
,
783 .id
= &bareudp_net_id
,
784 .size
= sizeof(struct bareudp_net
),
787 static int __init
bareudp_init_module(void)
791 rc
= register_pernet_subsys(&bareudp_net_ops
);
795 rc
= rtnl_link_register(&bareudp_link_ops
);
801 unregister_pernet_subsys(&bareudp_net_ops
);
805 late_initcall(bareudp_init_module
);
807 static void __exit
bareudp_cleanup_module(void)
809 rtnl_link_unregister(&bareudp_link_ops
);
810 unregister_pernet_subsys(&bareudp_net_ops
);
812 module_exit(bareudp_cleanup_module
);
814 MODULE_ALIAS_RTNL_LINK("bareudp");
815 MODULE_LICENSE("GPL");
816 MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
817 MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");