]> git.ipfire.org Git - thirdparty/linux.git/blame - net/openvswitch/datapath.c
Merge tag 'hyperv-fixes-signed' of git://git.kernel.org/pub/scm/linux/kernel/git...
[thirdparty/linux.git] / net / openvswitch / datapath.c
CommitLineData
c9422999 1// SPDX-License-Identifier: GPL-2.0-only
ccb1352e 2/*
ad552007 3 * Copyright (c) 2007-2014 Nicira, Inc.
ccb1352e
JG
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/if_arp.h>
11#include <linux/if_vlan.h>
12#include <linux/in.h>
13#include <linux/ip.h>
14#include <linux/jhash.h>
15#include <linux/delay.h>
16#include <linux/time.h>
17#include <linux/etherdevice.h>
18#include <linux/genetlink.h>
19#include <linux/kernel.h>
20#include <linux/kthread.h>
21#include <linux/mutex.h>
22#include <linux/percpu.h>
23#include <linux/rcupdate.h>
24#include <linux/tcp.h>
25#include <linux/udp.h>
ccb1352e
JG
26#include <linux/ethtool.h>
27#include <linux/wait.h>
ccb1352e
JG
28#include <asm/div64.h>
29#include <linux/highmem.h>
30#include <linux/netfilter_bridge.h>
31#include <linux/netfilter_ipv4.h>
32#include <linux/inetdevice.h>
33#include <linux/list.h>
34#include <linux/openvswitch.h>
35#include <linux/rculist.h>
36#include <linux/dmi.h>
ccb1352e 37#include <net/genetlink.h>
46df7b81
PS
38#include <net/net_namespace.h>
39#include <net/netns/generic.h>
ccb1352e
JG
40
41#include "datapath.h"
42#include "flow.h"
e80857cc 43#include "flow_table.h"
e6445719 44#include "flow_netlink.h"
96fbc13d 45#include "meter.h"
ccb1352e 46#include "vport-internal_dev.h"
cff63a52 47#include "vport-netdev.h"
ccb1352e 48
c7d03a00 49unsigned int ovs_net_id __read_mostly;
8e4e1713 50
0c200ef9
PS
51static struct genl_family dp_packet_genl_family;
52static struct genl_family dp_flow_genl_family;
53static struct genl_family dp_datapath_genl_family;
54
74ed7ab9
JS
55static const struct nla_policy flow_policy[];
56
48e48a70 57static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
58 .name = OVS_FLOW_MCGROUP,
0c200ef9
PS
59};
60
48e48a70 61static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
62 .name = OVS_DATAPATH_MCGROUP,
0c200ef9
PS
63};
64
48e48a70 65static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
66 .name = OVS_VPORT_MCGROUP,
0c200ef9
PS
67};
68
fb5d1e9e
JR
69/* Check if need to build a reply message.
70 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
9b67aa4a
SG
71static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
72 unsigned int group)
fb5d1e9e
JR
73{
74 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
f8403a2e 75 genl_has_listeners(family, genl_info_net(info), group);
fb5d1e9e
JR
76}
77
68eb5503 78static void ovs_notify(struct genl_family *family,
2a94fe48 79 struct sk_buff *skb, struct genl_info *info)
ed661185 80{
92c14d9b 81 genl_notify(family, skb, info, 0, GFP_KERNEL);
ed661185
TG
82}
83
ccb1352e
JG
84/**
85 * DOC: Locking:
86 *
8e4e1713
PS
87 * All writes e.g. Writes to device state (add/remove datapath, port, set
88 * operations on vports, etc.), Writes to other state (flow table
89 * modifications, set miscellaneous datapath parameters, etc.) are protected
90 * by ovs_lock.
ccb1352e
JG
91 *
92 * Reads are protected by RCU.
93 *
94 * There are a few special cases (mostly stats) that have their own
95 * synchronization but they nest under all of above and don't interact with
96 * each other.
8e4e1713
PS
97 *
98 * The RTNL lock nests inside ovs_mutex.
ccb1352e
JG
99 */
100
8e4e1713
PS
101static DEFINE_MUTEX(ovs_mutex);
102
103void ovs_lock(void)
104{
105 mutex_lock(&ovs_mutex);
106}
107
108void ovs_unlock(void)
109{
110 mutex_unlock(&ovs_mutex);
111}
112
113#ifdef CONFIG_LOCKDEP
114int lockdep_ovsl_is_held(void)
115{
116 if (debug_locks)
117 return lockdep_is_held(&ovs_mutex);
118 else
119 return 1;
120}
121#endif
122
ccb1352e 123static struct vport *new_vport(const struct vport_parms *);
8055a89c 124static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
e8eedb85 125 const struct sw_flow_key *,
f2a4d086
WT
126 const struct dp_upcall_info *,
127 uint32_t cutlen);
8055a89c 128static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
e8eedb85 129 const struct sw_flow_key *,
f2a4d086
WT
130 const struct dp_upcall_info *,
131 uint32_t cutlen);
ccb1352e 132
8e4e1713 133/* Must be called with rcu_read_lock or ovs_mutex. */
971427f3 134const char *ovs_dp_name(const struct datapath *dp)
ccb1352e 135{
8e4e1713 136 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
c9db965c 137 return ovs_vport_name(vport);
ccb1352e
JG
138}
139
12eb18f7 140static int get_dpifindex(const struct datapath *dp)
ccb1352e
JG
141{
142 struct vport *local;
143 int ifindex;
144
145 rcu_read_lock();
146
15eac2a7 147 local = ovs_vport_rcu(dp, OVSP_LOCAL);
ccb1352e 148 if (local)
be4ace6e 149 ifindex = local->dev->ifindex;
ccb1352e
JG
150 else
151 ifindex = 0;
152
153 rcu_read_unlock();
154
155 return ifindex;
156}
157
158static void destroy_dp_rcu(struct rcu_head *rcu)
159{
160 struct datapath *dp = container_of(rcu, struct datapath, rcu);
161
9b996e54 162 ovs_flow_tbl_destroy(&dp->table);
ccb1352e 163 free_percpu(dp->stats_percpu);
15eac2a7 164 kfree(dp->ports);
96fbc13d 165 ovs_meters_exit(dp);
ccb1352e
JG
166 kfree(dp);
167}
168
15eac2a7
PS
169static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
170 u16 port_no)
171{
172 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
173}
174
bb6f9a70 175/* Called with ovs_mutex or RCU read lock. */
15eac2a7
PS
176struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
177{
178 struct vport *vport;
15eac2a7
PS
179 struct hlist_head *head;
180
181 head = vport_hash_bucket(dp, port_no);
b67bfe0d 182 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
15eac2a7
PS
183 if (vport->port_no == port_no)
184 return vport;
185 }
186 return NULL;
187}
188
8e4e1713 189/* Called with ovs_mutex. */
ccb1352e
JG
190static struct vport *new_vport(const struct vport_parms *parms)
191{
192 struct vport *vport;
193
194 vport = ovs_vport_add(parms);
195 if (!IS_ERR(vport)) {
196 struct datapath *dp = parms->dp;
15eac2a7 197 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
ccb1352e 198
15eac2a7 199 hlist_add_head_rcu(&vport->dp_hash_node, head);
ccb1352e 200 }
ccb1352e
JG
201 return vport;
202}
203
ccb1352e
JG
204void ovs_dp_detach_port(struct vport *p)
205{
8e4e1713 206 ASSERT_OVSL();
ccb1352e
JG
207
208 /* First drop references to device. */
15eac2a7 209 hlist_del_rcu(&p->dp_hash_node);
ccb1352e
JG
210
211 /* Then destroy it. */
212 ovs_vport_del(p);
213}
214
215/* Must be called with rcu_read_lock. */
8c8b1b83 216void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
ccb1352e 217{
83c8df26 218 const struct vport *p = OVS_CB(skb)->input_vport;
ccb1352e
JG
219 struct datapath *dp = p->dp;
220 struct sw_flow *flow;
d98612b8 221 struct sw_flow_actions *sf_acts;
ccb1352e 222 struct dp_stats_percpu *stats;
ccb1352e 223 u64 *stats_counter;
1bd7116f 224 u32 n_mask_hit;
ccb1352e 225
404f2f10 226 stats = this_cpu_ptr(dp->stats_percpu);
ccb1352e 227
ccb1352e 228 /* Look up flow. */
8c8b1b83 229 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
ccb1352e
JG
230 if (unlikely(!flow)) {
231 struct dp_upcall_info upcall;
8c8b1b83 232 int error;
ccb1352e 233
ccea7445 234 memset(&upcall, 0, sizeof(upcall));
ccb1352e 235 upcall.cmd = OVS_PACKET_CMD_MISS;
5cd667b0 236 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
7f8a436e 237 upcall.mru = OVS_CB(skb)->mru;
f2a4d086 238 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
c5eba0b6
LR
239 if (unlikely(error))
240 kfree_skb(skb);
241 else
242 consume_skb(skb);
ccb1352e
JG
243 stats_counter = &stats->n_missed;
244 goto out;
245 }
246
d98612b8
LJ
247 ovs_flow_stats_update(flow, key->tp.flags, skb);
248 sf_acts = rcu_dereference(flow->sf_acts);
249 ovs_execute_actions(dp, skb, sf_acts, key);
ccb1352e 250
e298e505 251 stats_counter = &stats->n_hit;
ccb1352e
JG
252
253out:
254 /* Update datapath statistics. */
df9d9fdf 255 u64_stats_update_begin(&stats->syncp);
ccb1352e 256 (*stats_counter)++;
1bd7116f 257 stats->n_mask_hit += n_mask_hit;
df9d9fdf 258 u64_stats_update_end(&stats->syncp);
ccb1352e
JG
259}
260
ccb1352e 261int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
e8eedb85 262 const struct sw_flow_key *key,
f2a4d086
WT
263 const struct dp_upcall_info *upcall_info,
264 uint32_t cutlen)
ccb1352e
JG
265{
266 struct dp_stats_percpu *stats;
ccb1352e
JG
267 int err;
268
15e47304 269 if (upcall_info->portid == 0) {
ccb1352e
JG
270 err = -ENOTCONN;
271 goto err;
272 }
273
ccb1352e 274 if (!skb_is_gso(skb))
f2a4d086 275 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
ccb1352e 276 else
f2a4d086 277 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
ccb1352e
JG
278 if (err)
279 goto err;
280
281 return 0;
282
283err:
404f2f10 284 stats = this_cpu_ptr(dp->stats_percpu);
ccb1352e 285
df9d9fdf 286 u64_stats_update_begin(&stats->syncp);
ccb1352e 287 stats->n_lost++;
df9d9fdf 288 u64_stats_update_end(&stats->syncp);
ccb1352e
JG
289
290 return err;
291}
292
8055a89c 293static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
e8eedb85 294 const struct sw_flow_key *key,
f2a4d086
WT
295 const struct dp_upcall_info *upcall_info,
296 uint32_t cutlen)
ccb1352e 297{
2734166e 298 unsigned int gso_type = skb_shinfo(skb)->gso_type;
0c19f846 299 struct sw_flow_key later_key;
ccb1352e
JG
300 struct sk_buff *segs, *nskb;
301 int err;
302
9207f9d4 303 BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
09c5e605 304 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
92e5dfc3
PS
305 if (IS_ERR(segs))
306 return PTR_ERR(segs);
330966e5
FW
307 if (segs == NULL)
308 return -EINVAL;
ccb1352e 309
0c19f846
WB
310 if (gso_type & SKB_GSO_UDP) {
311 /* The initial flow key extracted by ovs_flow_key_extract()
312 * in this case is for a first fragment, so we need to
313 * properly mark later fragments.
314 */
315 later_key = *key;
316 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
317 }
318
ccb1352e
JG
319 /* Queue all of the segments. */
320 skb = segs;
321 do {
0c19f846
WB
322 if (gso_type & SKB_GSO_UDP && skb != segs)
323 key = &later_key;
324
f2a4d086 325 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
ccb1352e
JG
326 if (err)
327 break;
328
ccb1352e
JG
329 } while ((skb = skb->next));
330
331 /* Free all of the segments. */
332 skb = segs;
333 do {
334 nskb = skb->next;
335 if (err)
336 kfree_skb(skb);
337 else
338 consume_skb(skb);
339 } while ((skb = nskb));
340 return err;
341}
342
8f0aad6f 343static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
494bea39 344 unsigned int hdrlen, int actions_attrlen)
c3ff8cfe
TG
345{
346 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
bda56f14 347 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
b95e5928
WT
348 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
349 + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
c3ff8cfe
TG
350
351 /* OVS_PACKET_ATTR_USERDATA */
8f0aad6f
WZ
352 if (upcall_info->userdata)
353 size += NLA_ALIGN(upcall_info->userdata->nla_len);
354
355 /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
356 if (upcall_info->egress_tun_info)
357 size += nla_total_size(ovs_tun_key_attr_size());
c3ff8cfe 358
ccea7445
NM
359 /* OVS_PACKET_ATTR_ACTIONS */
360 if (upcall_info->actions_len)
494bea39 361 size += nla_total_size(actions_attrlen);
ccea7445 362
7f8a436e
JS
363 /* OVS_PACKET_ATTR_MRU */
364 if (upcall_info->mru)
365 size += nla_total_size(sizeof(upcall_info->mru));
366
c3ff8cfe
TG
367 return size;
368}
369
7f8a436e
JS
370static void pad_packet(struct datapath *dp, struct sk_buff *skb)
371{
372 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
373 size_t plen = NLA_ALIGN(skb->len) - skb->len;
374
375 if (plen > 0)
b080db58 376 skb_put_zero(skb, plen);
7f8a436e
JS
377 }
378}
379
8055a89c 380static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
e8eedb85 381 const struct sw_flow_key *key,
f2a4d086
WT
382 const struct dp_upcall_info *upcall_info,
383 uint32_t cutlen)
ccb1352e
JG
384{
385 struct ovs_header *upcall;
386 struct sk_buff *nskb = NULL;
4ee45ea0 387 struct sk_buff *user_skb = NULL; /* to be queued to userspace */
ccb1352e 388 struct nlattr *nla;
795449d8 389 size_t len;
bda56f14 390 unsigned int hlen;
8055a89c
TG
391 int err, dp_ifindex;
392
393 dp_ifindex = get_dpifindex(dp);
394 if (!dp_ifindex)
395 return -ENODEV;
ccb1352e 396
df8a39de 397 if (skb_vlan_tag_present(skb)) {
ccb1352e
JG
398 nskb = skb_clone(skb, GFP_ATOMIC);
399 if (!nskb)
400 return -ENOMEM;
401
5968250c 402 nskb = __vlan_hwaccel_push_inside(nskb);
8aa51d64 403 if (!nskb)
ccb1352e
JG
404 return -ENOMEM;
405
ccb1352e
JG
406 skb = nskb;
407 }
408
409 if (nla_attr_size(skb->len) > USHRT_MAX) {
410 err = -EFBIG;
411 goto out;
412 }
413
bda56f14
TG
414 /* Complete checksum if needed */
415 if (skb->ip_summed == CHECKSUM_PARTIAL &&
7529390d 416 (err = skb_csum_hwoffload_help(skb, 0)))
bda56f14
TG
417 goto out;
418
419 /* Older versions of OVS user space enforce alignment of the last
420 * Netlink attribute to NLA_ALIGNTO which would require extensive
421 * padding logic. Only perform zerocopy if padding is not required.
422 */
423 if (dp->user_features & OVS_DP_F_UNALIGNED)
424 hlen = skb_zerocopy_headlen(skb);
425 else
426 hlen = skb->len;
427
494bea39
LZ
428 len = upcall_msg_size(upcall_info, hlen - cutlen,
429 OVS_CB(skb)->acts_origlen);
551ddc05 430 user_skb = genlmsg_new(len, GFP_ATOMIC);
ccb1352e
JG
431 if (!user_skb) {
432 err = -ENOMEM;
433 goto out;
434 }
435
436 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
437 0, upcall_info->cmd);
6f19893b
KL
438 if (!upcall) {
439 err = -EINVAL;
440 goto out;
441 }
ccb1352e
JG
442 upcall->dp_ifindex = dp_ifindex;
443
5b4237bb 444 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
a734d1f4
EC
445 if (err)
446 goto out;
ccb1352e
JG
447
448 if (upcall_info->userdata)
4490108b
BP
449 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
450 nla_len(upcall_info->userdata),
451 nla_data(upcall_info->userdata));
ccb1352e 452
8f0aad6f 453 if (upcall_info->egress_tun_info) {
ae0be8de
MK
454 nla = nla_nest_start_noflag(user_skb,
455 OVS_PACKET_ATTR_EGRESS_TUN_KEY);
0fff9bd4
KL
456 if (!nla) {
457 err = -EMSGSIZE;
458 goto out;
459 }
fc4099f1
PS
460 err = ovs_nla_put_tunnel_info(user_skb,
461 upcall_info->egress_tun_info);
a734d1f4
EC
462 if (err)
463 goto out;
464
8f0aad6f
WZ
465 nla_nest_end(user_skb, nla);
466 }
467
ccea7445 468 if (upcall_info->actions_len) {
ae0be8de 469 nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
0fff9bd4
KL
470 if (!nla) {
471 err = -EMSGSIZE;
472 goto out;
473 }
ccea7445
NM
474 err = ovs_nla_put_actions(upcall_info->actions,
475 upcall_info->actions_len,
476 user_skb);
477 if (!err)
478 nla_nest_end(user_skb, nla);
479 else
480 nla_nest_cancel(user_skb, nla);
481 }
482
7f8a436e
JS
483 /* Add OVS_PACKET_ATTR_MRU */
484 if (upcall_info->mru) {
485 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
486 upcall_info->mru)) {
487 err = -ENOBUFS;
488 goto out;
489 }
490 pad_packet(dp, user_skb);
491 }
492
b95e5928
WT
493 /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
494 if (cutlen > 0) {
495 if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
496 skb->len)) {
497 err = -ENOBUFS;
498 goto out;
499 }
500 pad_packet(dp, user_skb);
501 }
502
bda56f14
TG
503 /* Only reserve room for attribute header, packet data is added
504 * in skb_zerocopy() */
505 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
506 err = -ENOBUFS;
507 goto out;
508 }
f2a4d086 509 nla->nla_len = nla_attr_size(skb->len - cutlen);
ccb1352e 510
f2a4d086 511 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
36d5fe6a
ZK
512 if (err)
513 goto out;
ccb1352e 514
aea0bb4f 515 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
7f8a436e 516 pad_packet(dp, user_skb);
aea0bb4f 517
bda56f14 518 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
ccb1352e 519
bda56f14 520 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
4ee45ea0 521 user_skb = NULL;
ccb1352e 522out:
36d5fe6a
ZK
523 if (err)
524 skb_tx_error(skb);
4ee45ea0 525 kfree_skb(user_skb);
ccb1352e
JG
526 kfree_skb(nskb);
527 return err;
528}
529
ccb1352e
JG
530static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
531{
532 struct ovs_header *ovs_header = info->userhdr;
7f8a436e 533 struct net *net = sock_net(skb->sk);
ccb1352e
JG
534 struct nlattr **a = info->attrs;
535 struct sw_flow_actions *acts;
536 struct sk_buff *packet;
537 struct sw_flow *flow;
d98612b8 538 struct sw_flow_actions *sf_acts;
ccb1352e 539 struct datapath *dp;
83c8df26 540 struct vport *input_vport;
7f8a436e 541 u16 mru = 0;
ccb1352e
JG
542 int len;
543 int err;
1ba39804 544 bool log = !a[OVS_PACKET_ATTR_PROBE];
ccb1352e
JG
545
546 err = -EINVAL;
547 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
dded45fc 548 !a[OVS_PACKET_ATTR_ACTIONS])
ccb1352e
JG
549 goto err;
550
551 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
552 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
553 err = -ENOMEM;
554 if (!packet)
555 goto err;
556 skb_reserve(packet, NET_IP_ALIGN);
557
32686a9d 558 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
ccb1352e 559
7f8a436e
JS
560 /* Set packet's mru */
561 if (a[OVS_PACKET_ATTR_MRU]) {
562 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
563 packet->ignore_df = 1;
564 }
565 OVS_CB(packet)->mru = mru;
566
ccb1352e 567 /* Build an sw_flow for sending this packet. */
23dabf88 568 flow = ovs_flow_alloc();
ccb1352e
JG
569 err = PTR_ERR(flow);
570 if (IS_ERR(flow))
571 goto err_kfree_skb;
572
c2ac6673
JS
573 err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
574 packet, &flow->key, log);
ccb1352e
JG
575 if (err)
576 goto err_flow_free;
577
7f8a436e 578 err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
05da5898 579 &flow->key, &acts, log);
74f84a57
PS
580 if (err)
581 goto err_flow_free;
ccb1352e 582
f5796684 583 rcu_assign_pointer(flow->sf_acts, acts);
ccb1352e 584 packet->priority = flow->key.phy.priority;
39c7caeb 585 packet->mark = flow->key.phy.skb_mark;
ccb1352e
JG
586
587 rcu_read_lock();
7f8a436e 588 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
ccb1352e
JG
589 err = -ENODEV;
590 if (!dp)
591 goto err_unlock;
592
83c8df26
PS
593 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
594 if (!input_vport)
595 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
596
597 if (!input_vport)
598 goto err_unlock;
599
7f8a436e 600 packet->dev = input_vport->dev;
83c8df26 601 OVS_CB(packet)->input_vport = input_vport;
d98612b8 602 sf_acts = rcu_dereference(flow->sf_acts);
83c8df26 603
ccb1352e 604 local_bh_disable();
d98612b8 605 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
ccb1352e
JG
606 local_bh_enable();
607 rcu_read_unlock();
608
03f0d916 609 ovs_flow_free(flow, false);
ccb1352e
JG
610 return err;
611
612err_unlock:
613 rcu_read_unlock();
614err_flow_free:
03f0d916 615 ovs_flow_free(flow, false);
ccb1352e
JG
616err_kfree_skb:
617 kfree_skb(packet);
618err:
619 return err;
620}
621
622static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
dded45fc 623 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
ccb1352e
JG
624 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
625 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
1ba39804 626 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
7f8a436e 627 [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
ccb1352e
JG
628};
629
4534de83 630static const struct genl_ops dp_packet_genl_ops[] = {
ccb1352e 631 { .cmd = OVS_PACKET_CMD_EXECUTE,
ef6243ac 632 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 633 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
634 .doit = ovs_packet_cmd_execute
635 }
636};
637
56989f6d 638static struct genl_family dp_packet_genl_family __ro_after_init = {
0c200ef9
PS
639 .hdrsize = sizeof(struct ovs_header),
640 .name = OVS_PACKET_FAMILY,
641 .version = OVS_PACKET_VERSION,
642 .maxattr = OVS_PACKET_ATTR_MAX,
3b0f31f2 643 .policy = packet_policy,
0c200ef9
PS
644 .netnsok = true,
645 .parallel_ops = true,
646 .ops = dp_packet_genl_ops,
647 .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
489111e5 648 .module = THIS_MODULE,
0c200ef9
PS
649};
650
12eb18f7 651static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
1bd7116f 652 struct ovs_dp_megaflow_stats *mega_stats)
ccb1352e
JG
653{
654 int i;
ccb1352e 655
1bd7116f
AZ
656 memset(mega_stats, 0, sizeof(*mega_stats));
657
b637e498 658 stats->n_flows = ovs_flow_tbl_count(&dp->table);
1bd7116f 659 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
ccb1352e
JG
660
661 stats->n_hit = stats->n_missed = stats->n_lost = 0;
1bd7116f 662
ccb1352e
JG
663 for_each_possible_cpu(i) {
664 const struct dp_stats_percpu *percpu_stats;
665 struct dp_stats_percpu local_stats;
666 unsigned int start;
667
668 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
669
670 do {
57a7744e 671 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
ccb1352e 672 local_stats = *percpu_stats;
57a7744e 673 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
ccb1352e
JG
674
675 stats->n_hit += local_stats.n_hit;
676 stats->n_missed += local_stats.n_missed;
677 stats->n_lost += local_stats.n_lost;
1bd7116f 678 mega_stats->n_mask_hit += local_stats.n_mask_hit;
ccb1352e
JG
679 }
680}
681
74ed7ab9
JS
682static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
683{
684 return ovs_identifier_is_ufid(sfid) &&
685 !(ufid_flags & OVS_UFID_F_OMIT_KEY);
686}
687
688static bool should_fill_mask(uint32_t ufid_flags)
689{
690 return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
691}
692
693static bool should_fill_actions(uint32_t ufid_flags)
c3ff8cfe 694{
74ed7ab9
JS
695 return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
696}
697
698static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
699 const struct sw_flow_id *sfid,
700 uint32_t ufid_flags)
701{
702 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
703
704 /* OVS_FLOW_ATTR_UFID */
705 if (sfid && ovs_identifier_is_ufid(sfid))
706 len += nla_total_size(sfid->ufid_len);
707
708 /* OVS_FLOW_ATTR_KEY */
709 if (!sfid || should_fill_key(sfid, ufid_flags))
710 len += nla_total_size(ovs_key_attr_size());
711
712 /* OVS_FLOW_ATTR_MASK */
713 if (should_fill_mask(ufid_flags))
714 len += nla_total_size(ovs_key_attr_size());
715
716 /* OVS_FLOW_ATTR_ACTIONS */
717 if (should_fill_actions(ufid_flags))
8e2fed1c 718 len += nla_total_size(acts->orig_len);
74ed7ab9
JS
719
720 return len
66c7a5ee 721 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
c3ff8cfe 722 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
66c7a5ee 723 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
c3ff8cfe
TG
724}
725
ca7105f2
JS
726/* Called with ovs_mutex or RCU read lock. */
727static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
728 struct sk_buff *skb)
729{
730 struct ovs_flow_stats stats;
731 __be16 tcp_flags;
732 unsigned long used;
ccb1352e 733
e298e505 734 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
0e9796b4 735
028d6a67 736 if (used &&
0238b720
ND
737 nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
738 OVS_FLOW_ATTR_PAD))
ca7105f2 739 return -EMSGSIZE;
ccb1352e 740
028d6a67 741 if (stats.n_packets &&
66c7a5ee
ND
742 nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
743 sizeof(struct ovs_flow_stats), &stats,
744 OVS_FLOW_ATTR_PAD))
ca7105f2 745 return -EMSGSIZE;
ccb1352e 746
e298e505
PS
747 if ((u8)ntohs(tcp_flags) &&
748 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
ca7105f2
JS
749 return -EMSGSIZE;
750
751 return 0;
752}
753
754/* Called with ovs_mutex or RCU read lock. */
755static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
756 struct sk_buff *skb, int skb_orig_len)
757{
758 struct nlattr *start;
759 int err;
ccb1352e
JG
760
761 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
762 * this is the first flow to be dumped into 'skb'. This is unusual for
763 * Netlink but individual action lists can be longer than
764 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
765 * The userspace caller can always fetch the actions separately if it
766 * really wants them. (Most userspace callers in fact don't care.)
767 *
768 * This can only fail for dump operations because the skb is always
769 * properly sized for single flows.
770 */
ae0be8de 771 start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
74f84a57 772 if (start) {
d57170b1
PS
773 const struct sw_flow_actions *sf_acts;
774
663efa36 775 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
e6445719
PS
776 err = ovs_nla_put_actions(sf_acts->actions,
777 sf_acts->actions_len, skb);
0e9796b4 778
74f84a57
PS
779 if (!err)
780 nla_nest_end(skb, start);
781 else {
782 if (skb_orig_len)
ca7105f2 783 return err;
74f84a57
PS
784
785 nla_nest_cancel(skb, start);
786 }
ca7105f2
JS
787 } else if (skb_orig_len) {
788 return -EMSGSIZE;
789 }
790
791 return 0;
792}
793
794/* Called with ovs_mutex or RCU read lock. */
795static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
796 struct sk_buff *skb, u32 portid,
74ed7ab9 797 u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
ca7105f2
JS
798{
799 const int skb_orig_len = skb->len;
800 struct ovs_header *ovs_header;
801 int err;
802
803 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
804 flags, cmd);
805 if (!ovs_header)
806 return -EMSGSIZE;
807
808 ovs_header->dp_ifindex = dp_ifindex;
809
74ed7ab9 810 err = ovs_nla_put_identifier(flow, skb);
5b4237bb
JS
811 if (err)
812 goto error;
813
74ed7ab9
JS
814 if (should_fill_key(&flow->id, ufid_flags)) {
815 err = ovs_nla_put_masked_key(flow, skb);
816 if (err)
817 goto error;
818 }
819
820 if (should_fill_mask(ufid_flags)) {
821 err = ovs_nla_put_mask(flow, skb);
822 if (err)
823 goto error;
824 }
ca7105f2
JS
825
826 err = ovs_flow_cmd_fill_stats(flow, skb);
827 if (err)
828 goto error;
829
74ed7ab9
JS
830 if (should_fill_actions(ufid_flags)) {
831 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
832 if (err)
833 goto error;
834 }
ccb1352e 835
053c095a
JB
836 genlmsg_end(skb, ovs_header);
837 return 0;
ccb1352e 838
ccb1352e
JG
839error:
840 genlmsg_cancel(skb, ovs_header);
841 return err;
842}
843
0e9796b4
JR
844/* May not be called with RCU read lock. */
845static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
74ed7ab9 846 const struct sw_flow_id *sfid,
fb5d1e9e 847 struct genl_info *info,
74ed7ab9
JS
848 bool always,
849 uint32_t ufid_flags)
ccb1352e 850{
fb5d1e9e 851 struct sk_buff *skb;
74ed7ab9 852 size_t len;
ccb1352e 853
9b67aa4a 854 if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
fb5d1e9e
JR
855 return NULL;
856
74ed7ab9 857 len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
551ddc05 858 skb = genlmsg_new(len, GFP_KERNEL);
fb5d1e9e
JR
859 if (!skb)
860 return ERR_PTR(-ENOMEM);
861
862 return skb;
ccb1352e
JG
863}
864
0e9796b4
JR
865/* Called with ovs_mutex. */
866static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
867 int dp_ifindex,
868 struct genl_info *info, u8 cmd,
74ed7ab9 869 bool always, u32 ufid_flags)
ccb1352e
JG
870{
871 struct sk_buff *skb;
872 int retval;
873
74ed7ab9
JS
874 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
875 &flow->id, info, always, ufid_flags);
d0e992aa 876 if (IS_ERR_OR_NULL(skb))
fb5d1e9e 877 return skb;
ccb1352e 878
0e9796b4
JR
879 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
880 info->snd_portid, info->snd_seq, 0,
74ed7ab9 881 cmd, ufid_flags);
ccb1352e
JG
882 BUG_ON(retval < 0);
883 return skb;
884}
885
37bdc87b 886static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
ccb1352e 887{
7f8a436e 888 struct net *net = sock_net(skb->sk);
ccb1352e
JG
889 struct nlattr **a = info->attrs;
890 struct ovs_header *ovs_header = info->userhdr;
74ed7ab9 891 struct sw_flow *flow = NULL, *new_flow;
03f0d916 892 struct sw_flow_mask mask;
ccb1352e
JG
893 struct sk_buff *reply;
894 struct datapath *dp;
37bdc87b 895 struct sw_flow_actions *acts;
03f0d916 896 struct sw_flow_match match;
74ed7ab9 897 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
ccb1352e 898 int error;
05da5898 899 bool log = !a[OVS_FLOW_ATTR_PROBE];
ccb1352e 900
893f139b 901 /* Must have key and actions. */
ccb1352e 902 error = -EINVAL;
426cda5c 903 if (!a[OVS_FLOW_ATTR_KEY]) {
05da5898 904 OVS_NLERR(log, "Flow key attr not present in new flow.");
ccb1352e 905 goto error;
426cda5c
JG
906 }
907 if (!a[OVS_FLOW_ATTR_ACTIONS]) {
05da5898 908 OVS_NLERR(log, "Flow actions attr not present in new flow.");
893f139b 909 goto error;
426cda5c 910 }
03f0d916 911
893f139b
JR
912 /* Most of the time we need to allocate a new flow, do it before
913 * locking.
914 */
915 new_flow = ovs_flow_alloc();
916 if (IS_ERR(new_flow)) {
917 error = PTR_ERR(new_flow);
918 goto error;
919 }
920
921 /* Extract key. */
2279994d 922 ovs_match_init(&match, &new_flow->key, false, &mask);
c2ac6673 923 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
05da5898 924 a[OVS_FLOW_ATTR_MASK], log);
ccb1352e 925 if (error)
893f139b 926 goto err_kfree_flow;
ccb1352e 927
74ed7ab9
JS
928 /* Extract flow identifier. */
929 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
190aa3e7 930 &new_flow->key, log);
74ed7ab9
JS
931 if (error)
932 goto err_kfree_flow;
74f84a57 933
190aa3e7 934 /* unmasked key is needed to match when ufid is not used. */
935 if (ovs_identifier_is_key(&new_flow->id))
936 match.key = new_flow->id.unmasked_key;
937
938 ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
939
893f139b 940 /* Validate actions. */
7f8a436e
JS
941 error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
942 &new_flow->key, &acts, log);
37bdc87b 943 if (error) {
05da5898 944 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
2fdb957d 945 goto err_kfree_flow;
893f139b
JR
946 }
947
74ed7ab9
JS
948 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
949 ufid_flags);
893f139b
JR
950 if (IS_ERR(reply)) {
951 error = PTR_ERR(reply);
952 goto err_kfree_acts;
ccb1352e
JG
953 }
954
8e4e1713 955 ovs_lock();
7f8a436e 956 dp = get_dp(net, ovs_header->dp_ifindex);
893f139b
JR
957 if (unlikely(!dp)) {
958 error = -ENODEV;
8e4e1713 959 goto err_unlock_ovs;
893f139b 960 }
74ed7ab9 961
03f0d916 962 /* Check if this is a duplicate flow */
74ed7ab9
JS
963 if (ovs_identifier_is_ufid(&new_flow->id))
964 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
965 if (!flow)
190aa3e7 966 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
893f139b
JR
967 if (likely(!flow)) {
968 rcu_assign_pointer(new_flow->sf_acts, acts);
ccb1352e
JG
969
970 /* Put flow in bucket. */
893f139b
JR
971 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
972 if (unlikely(error)) {
618ed0c8 973 acts = NULL;
893f139b
JR
974 goto err_unlock_ovs;
975 }
976
977 if (unlikely(reply)) {
978 error = ovs_flow_cmd_fill_info(new_flow,
979 ovs_header->dp_ifindex,
980 reply, info->snd_portid,
981 info->snd_seq, 0,
74ed7ab9
JS
982 OVS_FLOW_CMD_NEW,
983 ufid_flags);
893f139b 984 BUG_ON(error < 0);
618ed0c8 985 }
893f139b 986 ovs_unlock();
ccb1352e 987 } else {
37bdc87b
JR
988 struct sw_flow_actions *old_acts;
989
ccb1352e
JG
990 /* Bail out if we're not allowed to modify an existing flow.
991 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
992 * because Generic Netlink treats the latter as a dump
993 * request. We also accept NLM_F_EXCL in case that bug ever
994 * gets fixed.
995 */
893f139b
JR
996 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
997 | NLM_F_EXCL))) {
998 error = -EEXIST;
8e4e1713 999 goto err_unlock_ovs;
893f139b 1000 }
74ed7ab9
JS
1001 /* The flow identifier has to be the same for flow updates.
1002 * Look for any overlapping flow.
1003 */
1004 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1005 if (ovs_identifier_is_key(&flow->id))
1006 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1007 &match);
1008 else /* UFID matches but key is different */
1009 flow = NULL;
4a46b24e
AW
1010 if (!flow) {
1011 error = -ENOENT;
1012 goto err_unlock_ovs;
1013 }
893f139b 1014 }
37bdc87b
JR
1015 /* Update actions. */
1016 old_acts = ovsl_dereference(flow->sf_acts);
1017 rcu_assign_pointer(flow->sf_acts, acts);
37bdc87b 1018
893f139b
JR
1019 if (unlikely(reply)) {
1020 error = ovs_flow_cmd_fill_info(flow,
1021 ovs_header->dp_ifindex,
1022 reply, info->snd_portid,
1023 info->snd_seq, 0,
74ed7ab9
JS
1024 OVS_FLOW_CMD_NEW,
1025 ufid_flags);
893f139b
JR
1026 BUG_ON(error < 0);
1027 }
1028 ovs_unlock();
37bdc87b 1029
34ae932a 1030 ovs_nla_free_flow_actions_rcu(old_acts);
893f139b 1031 ovs_flow_free(new_flow, false);
37bdc87b 1032 }
893f139b
JR
1033
1034 if (reply)
1035 ovs_notify(&dp_flow_genl_family, reply, info);
37bdc87b
JR
1036 return 0;
1037
37bdc87b
JR
1038err_unlock_ovs:
1039 ovs_unlock();
893f139b
JR
1040 kfree_skb(reply);
1041err_kfree_acts:
34ae932a 1042 ovs_nla_free_flow_actions(acts);
893f139b
JR
1043err_kfree_flow:
1044 ovs_flow_free(new_flow, false);
37bdc87b
JR
1045error:
1046 return error;
1047}
ccb1352e 1048
2fdb957d 1049/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
26063790 1050static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
7f8a436e 1051 const struct nlattr *a,
6b205b2c 1052 const struct sw_flow_key *key,
05da5898
JR
1053 const struct sw_flow_mask *mask,
1054 bool log)
6b205b2c
JG
1055{
1056 struct sw_flow_actions *acts;
1057 struct sw_flow_key masked_key;
1058 int error;
1059
ae5f2fb1 1060 ovs_flow_mask_key(&masked_key, key, true, mask);
7f8a436e 1061 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
6b205b2c 1062 if (error) {
05da5898
JR
1063 OVS_NLERR(log,
1064 "Actions may not be safe on all matching packets");
6b205b2c
JG
1065 return ERR_PTR(error);
1066 }
1067
1068 return acts;
1069}
1070
9cc9a5cb
TZ
1071/* Factor out match-init and action-copy to avoid
1072 * "Wframe-larger-than=1024" warning. Because mask is only
1073 * used to get actions, we new a function to save some
1074 * stack space.
1075 *
1076 * If there are not key and action attrs, we return 0
1077 * directly. In the case, the caller will also not use the
1078 * match as before. If there is action attr, we try to get
1079 * actions and save them to *acts. Before returning from
1080 * the function, we reset the match->mask pointer. Because
1081 * we should not to return match object with dangling reference
1082 * to mask.
1083 * */
26063790
AB
1084static noinline_for_stack int
1085ovs_nla_init_match_and_action(struct net *net,
1086 struct sw_flow_match *match,
1087 struct sw_flow_key *key,
1088 struct nlattr **a,
1089 struct sw_flow_actions **acts,
1090 bool log)
9cc9a5cb
TZ
1091{
1092 struct sw_flow_mask mask;
1093 int error = 0;
1094
1095 if (a[OVS_FLOW_ATTR_KEY]) {
1096 ovs_match_init(match, key, true, &mask);
1097 error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
1098 a[OVS_FLOW_ATTR_MASK], log);
1099 if (error)
1100 goto error;
1101 }
1102
1103 if (a[OVS_FLOW_ATTR_ACTIONS]) {
1104 if (!a[OVS_FLOW_ATTR_KEY]) {
1105 OVS_NLERR(log,
1106 "Flow key attribute not present in set flow.");
5829e62a
CJ
1107 error = -EINVAL;
1108 goto error;
9cc9a5cb
TZ
1109 }
1110
1111 *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
1112 &mask, log);
1113 if (IS_ERR(*acts)) {
1114 error = PTR_ERR(*acts);
1115 goto error;
1116 }
1117 }
1118
1119 /* On success, error is 0. */
1120error:
1121 match->mask = NULL;
1122 return error;
1123}
1124
37bdc87b
JR
1125static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1126{
7f8a436e 1127 struct net *net = sock_net(skb->sk);
37bdc87b
JR
1128 struct nlattr **a = info->attrs;
1129 struct ovs_header *ovs_header = info->userhdr;
6b205b2c 1130 struct sw_flow_key key;
37bdc87b 1131 struct sw_flow *flow;
37bdc87b
JR
1132 struct sk_buff *reply = NULL;
1133 struct datapath *dp;
893f139b 1134 struct sw_flow_actions *old_acts = NULL, *acts = NULL;
37bdc87b 1135 struct sw_flow_match match;
74ed7ab9
JS
1136 struct sw_flow_id sfid;
1137 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
6f15cdbf 1138 int error = 0;
05da5898 1139 bool log = !a[OVS_FLOW_ATTR_PROBE];
74ed7ab9 1140 bool ufid_present;
37bdc87b 1141
74ed7ab9 1142 ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
9cc9a5cb 1143 if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
6f15cdbf
SG
1144 OVS_NLERR(log,
1145 "Flow set message rejected, Key attribute missing.");
9cc9a5cb 1146 return -EINVAL;
6f15cdbf 1147 }
9cc9a5cb
TZ
1148
1149 error = ovs_nla_init_match_and_action(net, &match, &key, a,
1150 &acts, log);
37bdc87b
JR
1151 if (error)
1152 goto error;
1153
9cc9a5cb 1154 if (acts) {
2fdb957d 1155 /* Can allocate before locking if have acts. */
74ed7ab9
JS
1156 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1157 ufid_flags);
893f139b
JR
1158 if (IS_ERR(reply)) {
1159 error = PTR_ERR(reply);
1160 goto err_kfree_acts;
be52c9e9 1161 }
37bdc87b 1162 }
0e9796b4 1163
37bdc87b 1164 ovs_lock();
7f8a436e 1165 dp = get_dp(net, ovs_header->dp_ifindex);
893f139b
JR
1166 if (unlikely(!dp)) {
1167 error = -ENODEV;
37bdc87b 1168 goto err_unlock_ovs;
893f139b 1169 }
37bdc87b 1170 /* Check that the flow exists. */
74ed7ab9
JS
1171 if (ufid_present)
1172 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1173 else
1174 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
893f139b
JR
1175 if (unlikely(!flow)) {
1176 error = -ENOENT;
37bdc87b 1177 goto err_unlock_ovs;
893f139b 1178 }
4a46b24e 1179
37bdc87b 1180 /* Update actions, if present. */
893f139b 1181 if (likely(acts)) {
37bdc87b
JR
1182 old_acts = ovsl_dereference(flow->sf_acts);
1183 rcu_assign_pointer(flow->sf_acts, acts);
893f139b
JR
1184
1185 if (unlikely(reply)) {
1186 error = ovs_flow_cmd_fill_info(flow,
1187 ovs_header->dp_ifindex,
1188 reply, info->snd_portid,
1189 info->snd_seq, 0,
804fe108 1190 OVS_FLOW_CMD_SET,
74ed7ab9 1191 ufid_flags);
893f139b
JR
1192 BUG_ON(error < 0);
1193 }
1194 } else {
1195 /* Could not alloc without acts before locking. */
1196 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
804fe108 1197 info, OVS_FLOW_CMD_SET, false,
74ed7ab9
JS
1198 ufid_flags);
1199
b5ffe634 1200 if (IS_ERR(reply)) {
893f139b
JR
1201 error = PTR_ERR(reply);
1202 goto err_unlock_ovs;
1203 }
ccb1352e 1204 }
37bdc87b 1205
37bdc87b
JR
1206 /* Clear stats. */
1207 if (a[OVS_FLOW_ATTR_CLEAR])
1208 ovs_flow_stats_clear(flow);
8e4e1713 1209 ovs_unlock();
ccb1352e 1210
893f139b
JR
1211 if (reply)
1212 ovs_notify(&dp_flow_genl_family, reply, info);
1213 if (old_acts)
34ae932a 1214 ovs_nla_free_flow_actions_rcu(old_acts);
fb5d1e9e 1215
ccb1352e
JG
1216 return 0;
1217
8e4e1713
PS
1218err_unlock_ovs:
1219 ovs_unlock();
893f139b
JR
1220 kfree_skb(reply);
1221err_kfree_acts:
34ae932a 1222 ovs_nla_free_flow_actions(acts);
ccb1352e
JG
1223error:
1224 return error;
1225}
1226
1227static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1228{
1229 struct nlattr **a = info->attrs;
1230 struct ovs_header *ovs_header = info->userhdr;
c2ac6673 1231 struct net *net = sock_net(skb->sk);
ccb1352e
JG
1232 struct sw_flow_key key;
1233 struct sk_buff *reply;
1234 struct sw_flow *flow;
1235 struct datapath *dp;
03f0d916 1236 struct sw_flow_match match;
74ed7ab9
JS
1237 struct sw_flow_id ufid;
1238 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1239 int err = 0;
05da5898 1240 bool log = !a[OVS_FLOW_ATTR_PROBE];
74ed7ab9 1241 bool ufid_present;
ccb1352e 1242
74ed7ab9
JS
1243 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1244 if (a[OVS_FLOW_ATTR_KEY]) {
2279994d 1245 ovs_match_init(&match, &key, true, NULL);
c2ac6673 1246 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
74ed7ab9
JS
1247 log);
1248 } else if (!ufid_present) {
05da5898
JR
1249 OVS_NLERR(log,
1250 "Flow get message rejected, Key attribute missing.");
74ed7ab9 1251 err = -EINVAL;
03f0d916 1252 }
ccb1352e
JG
1253 if (err)
1254 return err;
1255
8e4e1713 1256 ovs_lock();
46df7b81 1257 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
8e4e1713
PS
1258 if (!dp) {
1259 err = -ENODEV;
1260 goto unlock;
1261 }
ccb1352e 1262
74ed7ab9
JS
1263 if (ufid_present)
1264 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1265 else
1266 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
4a46b24e 1267 if (!flow) {
8e4e1713
PS
1268 err = -ENOENT;
1269 goto unlock;
1270 }
ccb1352e 1271
0e9796b4 1272 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
804fe108 1273 OVS_FLOW_CMD_GET, true, ufid_flags);
8e4e1713
PS
1274 if (IS_ERR(reply)) {
1275 err = PTR_ERR(reply);
1276 goto unlock;
1277 }
ccb1352e 1278
8e4e1713 1279 ovs_unlock();
ccb1352e 1280 return genlmsg_reply(reply, info);
8e4e1713
PS
1281unlock:
1282 ovs_unlock();
1283 return err;
ccb1352e
JG
1284}
1285
1286static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1287{
1288 struct nlattr **a = info->attrs;
1289 struct ovs_header *ovs_header = info->userhdr;
c2ac6673 1290 struct net *net = sock_net(skb->sk);
ccb1352e
JG
1291 struct sw_flow_key key;
1292 struct sk_buff *reply;
74ed7ab9 1293 struct sw_flow *flow = NULL;
ccb1352e 1294 struct datapath *dp;
03f0d916 1295 struct sw_flow_match match;
74ed7ab9
JS
1296 struct sw_flow_id ufid;
1297 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
ccb1352e 1298 int err;
05da5898 1299 bool log = !a[OVS_FLOW_ATTR_PROBE];
74ed7ab9 1300 bool ufid_present;
ccb1352e 1301
74ed7ab9
JS
1302 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1303 if (a[OVS_FLOW_ATTR_KEY]) {
2279994d 1304 ovs_match_init(&match, &key, true, NULL);
c2ac6673
JS
1305 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1306 NULL, log);
aed06778
JR
1307 if (unlikely(err))
1308 return err;
1309 }
1310
8e4e1713 1311 ovs_lock();
46df7b81 1312 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
aed06778 1313 if (unlikely(!dp)) {
8e4e1713
PS
1314 err = -ENODEV;
1315 goto unlock;
1316 }
46df7b81 1317
74ed7ab9 1318 if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
b637e498 1319 err = ovs_flow_tbl_flush(&dp->table);
8e4e1713
PS
1320 goto unlock;
1321 }
03f0d916 1322
74ed7ab9
JS
1323 if (ufid_present)
1324 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1325 else
1326 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
4a46b24e 1327 if (unlikely(!flow)) {
8e4e1713
PS
1328 err = -ENOENT;
1329 goto unlock;
1330 }
ccb1352e 1331
b637e498 1332 ovs_flow_tbl_remove(&dp->table, flow);
aed06778 1333 ovs_unlock();
ccb1352e 1334
aed06778 1335 reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
74ed7ab9 1336 &flow->id, info, false, ufid_flags);
aed06778 1337 if (likely(reply)) {
b90f5aa4 1338 if (!IS_ERR(reply)) {
aed06778
JR
1339 rcu_read_lock(); /*To keep RCU checker happy. */
1340 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1341 reply, info->snd_portid,
1342 info->snd_seq, 0,
74ed7ab9
JS
1343 OVS_FLOW_CMD_DEL,
1344 ufid_flags);
aed06778
JR
1345 rcu_read_unlock();
1346 BUG_ON(err < 0);
1347
1348 ovs_notify(&dp_flow_genl_family, reply, info);
1349 } else {
1350 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1351 }
fb5d1e9e 1352 }
ccb1352e 1353
aed06778 1354 ovs_flow_free(flow, true);
ccb1352e 1355 return 0;
8e4e1713
PS
1356unlock:
1357 ovs_unlock();
1358 return err;
ccb1352e
JG
1359}
1360
1361static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1362{
74ed7ab9 1363 struct nlattr *a[__OVS_FLOW_ATTR_MAX];
ccb1352e 1364 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
b637e498 1365 struct table_instance *ti;
ccb1352e 1366 struct datapath *dp;
74ed7ab9
JS
1367 u32 ufid_flags;
1368 int err;
1369
8cb08174
JB
1370 err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
1371 OVS_FLOW_ATTR_MAX, flow_policy, NULL);
74ed7ab9
JS
1372 if (err)
1373 return err;
1374 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
ccb1352e 1375
d57170b1 1376 rcu_read_lock();
cc3a5ae6 1377 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
8e4e1713 1378 if (!dp) {
d57170b1 1379 rcu_read_unlock();
ccb1352e 1380 return -ENODEV;
8e4e1713 1381 }
ccb1352e 1382
b637e498 1383 ti = rcu_dereference(dp->table.ti);
ccb1352e
JG
1384 for (;;) {
1385 struct sw_flow *flow;
1386 u32 bucket, obj;
1387
1388 bucket = cb->args[0];
1389 obj = cb->args[1];
b637e498 1390 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
ccb1352e
JG
1391 if (!flow)
1392 break;
1393
0e9796b4 1394 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
15e47304 1395 NETLINK_CB(cb->skb).portid,
ccb1352e 1396 cb->nlh->nlmsg_seq, NLM_F_MULTI,
804fe108 1397 OVS_FLOW_CMD_GET, ufid_flags) < 0)
ccb1352e
JG
1398 break;
1399
1400 cb->args[0] = bucket;
1401 cb->args[1] = obj;
1402 }
d57170b1 1403 rcu_read_unlock();
ccb1352e
JG
1404 return skb->len;
1405}
1406
0c200ef9
PS
1407static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1408 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
05da5898 1409 [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
0c200ef9
PS
1410 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1411 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
05da5898 1412 [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
74ed7ab9
JS
1413 [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1414 [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
0c200ef9
PS
1415};
1416
48e48a70 1417static const struct genl_ops dp_flow_genl_ops[] = {
ccb1352e 1418 { .cmd = OVS_FLOW_CMD_NEW,
ef6243ac 1419 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 1420 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
37bdc87b 1421 .doit = ovs_flow_cmd_new
ccb1352e
JG
1422 },
1423 { .cmd = OVS_FLOW_CMD_DEL,
ef6243ac 1424 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 1425 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
1426 .doit = ovs_flow_cmd_del
1427 },
1428 { .cmd = OVS_FLOW_CMD_GET,
ef6243ac 1429 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
ccb1352e 1430 .flags = 0, /* OK for unprivileged users. */
ccb1352e
JG
1431 .doit = ovs_flow_cmd_get,
1432 .dumpit = ovs_flow_cmd_dump
1433 },
1434 { .cmd = OVS_FLOW_CMD_SET,
ef6243ac 1435 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 1436 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
37bdc87b 1437 .doit = ovs_flow_cmd_set,
ccb1352e
JG
1438 },
1439};
1440
56989f6d 1441static struct genl_family dp_flow_genl_family __ro_after_init = {
ccb1352e 1442 .hdrsize = sizeof(struct ovs_header),
0c200ef9
PS
1443 .name = OVS_FLOW_FAMILY,
1444 .version = OVS_FLOW_VERSION,
1445 .maxattr = OVS_FLOW_ATTR_MAX,
3b0f31f2 1446 .policy = flow_policy,
3a4e0d6a
PS
1447 .netnsok = true,
1448 .parallel_ops = true,
0c200ef9
PS
1449 .ops = dp_flow_genl_ops,
1450 .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1451 .mcgrps = &ovs_dp_flow_multicast_group,
1452 .n_mcgrps = 1,
489111e5 1453 .module = THIS_MODULE,
ccb1352e
JG
1454};
1455
c3ff8cfe
TG
1456static size_t ovs_dp_cmd_msg_size(void)
1457{
1458 size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1459
1460 msgsize += nla_total_size(IFNAMSIZ);
66c7a5ee
ND
1461 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1462 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
45fb9c35 1463 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
c3ff8cfe
TG
1464
1465 return msgsize;
1466}
1467
8ec609d8 1468/* Called with ovs_mutex. */
ccb1352e 1469static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
15e47304 1470 u32 portid, u32 seq, u32 flags, u8 cmd)
ccb1352e
JG
1471{
1472 struct ovs_header *ovs_header;
1473 struct ovs_dp_stats dp_stats;
1bd7116f 1474 struct ovs_dp_megaflow_stats dp_megaflow_stats;
ccb1352e
JG
1475 int err;
1476
15e47304 1477 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
ccb1352e
JG
1478 flags, cmd);
1479 if (!ovs_header)
1480 goto error;
1481
1482 ovs_header->dp_ifindex = get_dpifindex(dp);
1483
ccb1352e 1484 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
ccb1352e
JG
1485 if (err)
1486 goto nla_put_failure;
1487
1bd7116f 1488 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
66c7a5ee
ND
1489 if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1490 &dp_stats, OVS_DP_ATTR_PAD))
1bd7116f
AZ
1491 goto nla_put_failure;
1492
66c7a5ee
ND
1493 if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1494 sizeof(struct ovs_dp_megaflow_stats),
1495 &dp_megaflow_stats, OVS_DP_ATTR_PAD))
028d6a67 1496 goto nla_put_failure;
ccb1352e 1497
43d4be9c
TG
1498 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1499 goto nla_put_failure;
1500
053c095a
JB
1501 genlmsg_end(skb, ovs_header);
1502 return 0;
ccb1352e
JG
1503
1504nla_put_failure:
1505 genlmsg_cancel(skb, ovs_header);
1506error:
1507 return -EMSGSIZE;
1508}
1509
263ea090 1510static struct sk_buff *ovs_dp_cmd_alloc_info(void)
ccb1352e 1511{
551ddc05 1512 return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
ccb1352e
JG
1513}
1514
bb6f9a70 1515/* Called with rcu_read_lock or ovs_mutex. */
46df7b81 1516static struct datapath *lookup_datapath(struct net *net,
12eb18f7 1517 const struct ovs_header *ovs_header,
ccb1352e
JG
1518 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1519{
1520 struct datapath *dp;
1521
1522 if (!a[OVS_DP_ATTR_NAME])
46df7b81 1523 dp = get_dp(net, ovs_header->dp_ifindex);
ccb1352e
JG
1524 else {
1525 struct vport *vport;
1526
46df7b81 1527 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
ccb1352e 1528 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
ccb1352e
JG
1529 }
1530 return dp ? dp : ERR_PTR(-ENODEV);
1531}
1532
44da5ae5
TG
1533static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1534{
1535 struct datapath *dp;
1536
1537 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
3c7eacfc 1538 if (IS_ERR(dp))
44da5ae5
TG
1539 return;
1540
1541 WARN(dp->user_features, "Dropping previously announced user features\n");
1542 dp->user_features = 0;
1543}
1544
12eb18f7 1545static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
43d4be9c
TG
1546{
1547 if (a[OVS_DP_ATTR_USER_FEATURES])
1548 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1549}
1550
ccb1352e
JG
1551static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1552{
1553 struct nlattr **a = info->attrs;
1554 struct vport_parms parms;
1555 struct sk_buff *reply;
1556 struct datapath *dp;
1557 struct vport *vport;
46df7b81 1558 struct ovs_net *ovs_net;
15eac2a7 1559 int err, i;
ccb1352e
JG
1560
1561 err = -EINVAL;
1562 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1563 goto err;
1564
263ea090 1565 reply = ovs_dp_cmd_alloc_info();
6093ae9a
JR
1566 if (!reply)
1567 return -ENOMEM;
ccb1352e
JG
1568
1569 err = -ENOMEM;
1570 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1571 if (dp == NULL)
6093ae9a 1572 goto err_free_reply;
46df7b81 1573
efd7ef1c 1574 ovs_dp_set_net(dp, sock_net(skb->sk));
ccb1352e
JG
1575
1576 /* Allocate table. */
b637e498
PS
1577 err = ovs_flow_tbl_init(&dp->table);
1578 if (err)
ccb1352e
JG
1579 goto err_free_dp;
1580
1c213bd2 1581 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
ccb1352e
JG
1582 if (!dp->stats_percpu) {
1583 err = -ENOMEM;
1584 goto err_destroy_table;
1585 }
1586
6da2ec56
KC
1587 dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1588 sizeof(struct hlist_head),
1589 GFP_KERNEL);
15eac2a7
PS
1590 if (!dp->ports) {
1591 err = -ENOMEM;
1592 goto err_destroy_percpu;
1593 }
1594
1595 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1596 INIT_HLIST_HEAD(&dp->ports[i]);
1597
96fbc13d
AZ
1598 err = ovs_meters_init(dp);
1599 if (err)
1600 goto err_destroy_ports_array;
1601
ccb1352e
JG
1602 /* Set up our datapath device. */
1603 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1604 parms.type = OVS_VPORT_TYPE_INTERNAL;
1605 parms.options = NULL;
1606 parms.dp = dp;
1607 parms.port_no = OVSP_LOCAL;
5cd667b0 1608 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
ccb1352e 1609
43d4be9c
TG
1610 ovs_dp_change(dp, a);
1611
6093ae9a
JR
1612 /* So far only local changes have been made, now need the lock. */
1613 ovs_lock();
1614
ccb1352e
JG
1615 vport = new_vport(&parms);
1616 if (IS_ERR(vport)) {
1617 err = PTR_ERR(vport);
1618 if (err == -EBUSY)
1619 err = -EEXIST;
1620
44da5ae5
TG
1621 if (err == -EEXIST) {
1622 /* An outdated user space instance that does not understand
1623 * the concept of user_features has attempted to create a new
1624 * datapath and is likely to reuse it. Drop all user features.
1625 */
1626 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1627 ovs_dp_reset_user_features(skb, info);
1628 }
1629
96fbc13d 1630 goto err_destroy_meters;
ccb1352e
JG
1631 }
1632
6093ae9a
JR
1633 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1634 info->snd_seq, 0, OVS_DP_CMD_NEW);
1635 BUG_ON(err < 0);
ccb1352e 1636
46df7b81 1637 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
59a35d60 1638 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
8e4e1713
PS
1639
1640 ovs_unlock();
ccb1352e 1641
2a94fe48 1642 ovs_notify(&dp_datapath_genl_family, reply, info);
ccb1352e
JG
1643 return 0;
1644
96fbc13d 1645err_destroy_meters:
6093ae9a 1646 ovs_unlock();
96fbc13d
AZ
1647 ovs_meters_exit(dp);
1648err_destroy_ports_array:
15eac2a7 1649 kfree(dp->ports);
ccb1352e
JG
1650err_destroy_percpu:
1651 free_percpu(dp->stats_percpu);
1652err_destroy_table:
9b996e54 1653 ovs_flow_tbl_destroy(&dp->table);
ccb1352e
JG
1654err_free_dp:
1655 kfree(dp);
6093ae9a
JR
1656err_free_reply:
1657 kfree_skb(reply);
ccb1352e
JG
1658err:
1659 return err;
1660}
1661
8e4e1713 1662/* Called with ovs_mutex. */
46df7b81 1663static void __dp_destroy(struct datapath *dp)
ccb1352e 1664{
15eac2a7 1665 int i;
ccb1352e 1666
15eac2a7
PS
1667 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1668 struct vport *vport;
b67bfe0d 1669 struct hlist_node *n;
15eac2a7 1670
b67bfe0d 1671 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
15eac2a7
PS
1672 if (vport->port_no != OVSP_LOCAL)
1673 ovs_dp_detach_port(vport);
1674 }
ccb1352e 1675
59a35d60 1676 list_del_rcu(&dp->list_node);
ccb1352e 1677
8e4e1713 1678 /* OVSP_LOCAL is datapath internal port. We need to make sure that
e80857cc 1679 * all ports in datapath are destroyed first before freeing datapath.
ccb1352e 1680 */
8e4e1713 1681 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
ccb1352e 1682
e80857cc 1683 /* RCU destroy the flow table */
ccb1352e 1684 call_rcu(&dp->rcu, destroy_dp_rcu);
46df7b81
PS
1685}
1686
1687static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1688{
1689 struct sk_buff *reply;
1690 struct datapath *dp;
1691 int err;
1692
263ea090 1693 reply = ovs_dp_cmd_alloc_info();
6093ae9a
JR
1694 if (!reply)
1695 return -ENOMEM;
1696
8e4e1713 1697 ovs_lock();
46df7b81
PS
1698 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1699 err = PTR_ERR(dp);
1700 if (IS_ERR(dp))
6093ae9a 1701 goto err_unlock_free;
46df7b81 1702
6093ae9a
JR
1703 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1704 info->snd_seq, 0, OVS_DP_CMD_DEL);
1705 BUG_ON(err < 0);
46df7b81
PS
1706
1707 __dp_destroy(dp);
8e4e1713 1708 ovs_unlock();
ccb1352e 1709
2a94fe48 1710 ovs_notify(&dp_datapath_genl_family, reply, info);
ccb1352e
JG
1711
1712 return 0;
6093ae9a
JR
1713
1714err_unlock_free:
8e4e1713 1715 ovs_unlock();
6093ae9a 1716 kfree_skb(reply);
8e4e1713 1717 return err;
ccb1352e
JG
1718}
1719
1720static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1721{
1722 struct sk_buff *reply;
1723 struct datapath *dp;
1724 int err;
1725
263ea090 1726 reply = ovs_dp_cmd_alloc_info();
6093ae9a
JR
1727 if (!reply)
1728 return -ENOMEM;
1729
8e4e1713 1730 ovs_lock();
46df7b81 1731 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
8e4e1713 1732 err = PTR_ERR(dp);
ccb1352e 1733 if (IS_ERR(dp))
6093ae9a 1734 goto err_unlock_free;
ccb1352e 1735
43d4be9c
TG
1736 ovs_dp_change(dp, info->attrs);
1737
6093ae9a 1738 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
804fe108 1739 info->snd_seq, 0, OVS_DP_CMD_SET);
6093ae9a 1740 BUG_ON(err < 0);
ccb1352e 1741
8e4e1713 1742 ovs_unlock();
2a94fe48 1743 ovs_notify(&dp_datapath_genl_family, reply, info);
ccb1352e
JG
1744
1745 return 0;
6093ae9a
JR
1746
1747err_unlock_free:
8e4e1713 1748 ovs_unlock();
6093ae9a 1749 kfree_skb(reply);
8e4e1713 1750 return err;
ccb1352e
JG
1751}
1752
1753static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1754{
1755 struct sk_buff *reply;
1756 struct datapath *dp;
8e4e1713 1757 int err;
ccb1352e 1758
263ea090 1759 reply = ovs_dp_cmd_alloc_info();
6093ae9a
JR
1760 if (!reply)
1761 return -ENOMEM;
1762
8ec609d8 1763 ovs_lock();
46df7b81 1764 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
8e4e1713
PS
1765 if (IS_ERR(dp)) {
1766 err = PTR_ERR(dp);
6093ae9a 1767 goto err_unlock_free;
8e4e1713 1768 }
6093ae9a 1769 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
804fe108 1770 info->snd_seq, 0, OVS_DP_CMD_GET);
6093ae9a 1771 BUG_ON(err < 0);
8ec609d8 1772 ovs_unlock();
ccb1352e
JG
1773
1774 return genlmsg_reply(reply, info);
8e4e1713 1775
6093ae9a 1776err_unlock_free:
8ec609d8 1777 ovs_unlock();
6093ae9a 1778 kfree_skb(reply);
8e4e1713 1779 return err;
ccb1352e
JG
1780}
1781
1782static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1783{
46df7b81 1784 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
ccb1352e
JG
1785 struct datapath *dp;
1786 int skip = cb->args[0];
1787 int i = 0;
1788
8ec609d8
PS
1789 ovs_lock();
1790 list_for_each_entry(dp, &ovs_net->dps, list_node) {
77676fdb 1791 if (i >= skip &&
15e47304 1792 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
ccb1352e 1793 cb->nlh->nlmsg_seq, NLM_F_MULTI,
804fe108 1794 OVS_DP_CMD_GET) < 0)
ccb1352e
JG
1795 break;
1796 i++;
1797 }
8ec609d8 1798 ovs_unlock();
ccb1352e
JG
1799
1800 cb->args[0] = i;
1801
1802 return skb->len;
1803}
1804
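/*
 * Illustrative userspace sketch, not part of this file: walking every
 * datapath through the ovs_dp_cmd_dump() handler above.  A dump is simply
 * OVS_DP_CMD_GET sent with NLM_F_DUMP; the kernel keeps its resume cursor
 * in cb->args[0] between dump passes.  Assumes libnl-genl-3.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int show_dp(struct nl_msg *msg, void *arg)
{
        struct nlattr *tb[OVS_DP_ATTR_MAX + 1];

        /* Skip the genetlink header plus the struct ovs_header user header. */
        if (genlmsg_parse(nlmsg_hdr(msg), sizeof(struct ovs_header),
                          tb, OVS_DP_ATTR_MAX, NULL) < 0)
                return NL_SKIP;
        if (tb[OVS_DP_ATTR_NAME])
                printf("datapath: %s\n", nla_get_string(tb[OVS_DP_ATTR_NAME]));
        return NL_OK;
}

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct ovs_header *ovs;
        struct nl_msg *msg;
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
        msg = nlmsg_alloc();
        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*ovs),
                          NLM_F_REQUEST | NLM_F_DUMP, OVS_DP_CMD_GET,
                          OVS_DATAPATH_VERSION);
        ovs->dp_ifindex = 0;                    /* ignored for dumps */
        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, show_dp, NULL);
        nl_send_auto(sk, msg);
        nlmsg_free(msg);
        nl_recvmsgs_default(sk);                /* one callback per NLM_F_MULTI part */
        nl_socket_free(sk);
        return 0;
}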
0c200ef9
PS
1805static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1806 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1807 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1808 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1809};
1810
48e48a70 1811static const struct genl_ops dp_datapath_genl_ops[] = {
ccb1352e 1812 { .cmd = OVS_DP_CMD_NEW,
ef6243ac 1813 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 1814 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
1815 .doit = ovs_dp_cmd_new
1816 },
1817 { .cmd = OVS_DP_CMD_DEL,
ef6243ac 1818 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 1819 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
1820 .doit = ovs_dp_cmd_del
1821 },
1822 { .cmd = OVS_DP_CMD_GET,
ef6243ac 1823 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
ccb1352e 1824 .flags = 0, /* OK for unprivileged users. */
ccb1352e
JG
1825 .doit = ovs_dp_cmd_get,
1826 .dumpit = ovs_dp_cmd_dump
1827 },
1828 { .cmd = OVS_DP_CMD_SET,
ef6243ac 1829 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 1830 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
1831 .doit = ovs_dp_cmd_set,
1832 },
1833};
1834
56989f6d 1835static struct genl_family dp_datapath_genl_family __ro_after_init = {
ccb1352e 1836 .hdrsize = sizeof(struct ovs_header),
0c200ef9
PS
1837 .name = OVS_DATAPATH_FAMILY,
1838 .version = OVS_DATAPATH_VERSION,
1839 .maxattr = OVS_DP_ATTR_MAX,
3b0f31f2 1840 .policy = datapath_policy,
3a4e0d6a
PS
1841 .netnsok = true,
1842 .parallel_ops = true,
0c200ef9
PS
1843 .ops = dp_datapath_genl_ops,
1844 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1845 .mcgrps = &ovs_dp_datapath_multicast_group,
1846 .n_mcgrps = 1,
489111e5 1847 .module = THIS_MODULE,
ccb1352e
JG
1848};
1849
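/*
 * Illustrative userspace sketch, not part of this file: listening for the
 * datapath notifications that ovs_notify() multicasts to OVS_DATAPATH_MCGROUP
 * whenever a datapath is created, changed or deleted.  Assumes libnl-genl-3.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int on_event(struct nl_msg *msg, void *arg)
{
        struct genlmsghdr *gnlh = genlmsg_hdr(nlmsg_hdr(msg));
        struct nlattr *tb[OVS_DP_ATTR_MAX + 1];

        if (genlmsg_parse(nlmsg_hdr(msg), sizeof(struct ovs_header),
                          tb, OVS_DP_ATTR_MAX, NULL) < 0)
                return NL_SKIP;
        printf("datapath cmd %u on %s\n", gnlh->cmd,
               tb[OVS_DP_ATTR_NAME] ? nla_get_string(tb[OVS_DP_ATTR_NAME])
                                    : "(unnamed)");
        return NL_OK;
}

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        int grp;

        genl_connect(sk);
        grp = genl_ctrl_resolve_grp(sk, OVS_DATAPATH_FAMILY,
                                    OVS_DATAPATH_MCGROUP);
        if (grp < 0)
                return 1;
        nl_socket_disable_seq_check(sk);        /* notifications are unsolicited */
        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);
        nl_socket_add_membership(sk, grp);
        for (;;)                                /* Ctrl-C to stop */
                nl_recvmsgs_default(sk);
        return 0;
}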
8e4e1713 1850/* Called with ovs_mutex or RCU read lock. */
ccb1352e 1851static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
9354d452
JB
1852 struct net *net, u32 portid, u32 seq,
1853 u32 flags, u8 cmd)
ccb1352e
JG
1854{
1855 struct ovs_header *ovs_header;
1856 struct ovs_vport_stats vport_stats;
1857 int err;
1858
15e47304 1859 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
ccb1352e
JG
1860 flags, cmd);
1861 if (!ovs_header)
1862 return -EMSGSIZE;
1863
1864 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1865
028d6a67
DM
1866 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1867 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
5cd667b0 1868 nla_put_string(skb, OVS_VPORT_ATTR_NAME,
9354d452
JB
1869 ovs_vport_name(vport)) ||
1870 nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
028d6a67 1871 goto nla_put_failure;
ccb1352e 1872
9354d452
JB
1873 if (!net_eq(net, dev_net(vport->dev))) {
1874 int id = peernet2id_alloc(net, dev_net(vport->dev));
1875
1876 if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
1877 goto nla_put_failure;
1878 }
1879
ccb1352e 1880 ovs_vport_get_stats(vport, &vport_stats);
66c7a5ee
ND
1881 if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1882 sizeof(struct ovs_vport_stats), &vport_stats,
1883 OVS_VPORT_ATTR_PAD))
028d6a67 1884 goto nla_put_failure;
ccb1352e 1885
5cd667b0
AW
1886 if (ovs_vport_get_upcall_portids(vport, skb))
1887 goto nla_put_failure;
1888
ccb1352e
JG
1889 err = ovs_vport_get_options(vport, skb);
1890 if (err == -EMSGSIZE)
1891 goto error;
1892
053c095a
JB
1893 genlmsg_end(skb, ovs_header);
1894 return 0;
ccb1352e
JG
1895
1896nla_put_failure:
1897 err = -EMSGSIZE;
1898error:
1899 genlmsg_cancel(skb, ovs_header);
1900 return err;
1901}
1902
6093ae9a
JR
1903static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1904{
1905 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1906}
1907
1908/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
9354d452
JB
1909struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
1910 u32 portid, u32 seq, u8 cmd)
ccb1352e
JG
1911{
1912 struct sk_buff *skb;
1913 int retval;
1914
1915 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1916 if (!skb)
1917 return ERR_PTR(-ENOMEM);
1918
9354d452 1919 retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
a9341512
JG
1920 BUG_ON(retval < 0);
1921
ccb1352e
JG
1922 return skb;
1923}
1924
8e4e1713 1925/* Called with ovs_mutex or RCU read lock. */
46df7b81 1926static struct vport *lookup_vport(struct net *net,
12eb18f7 1927 const struct ovs_header *ovs_header,
ccb1352e
JG
1928 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1929{
1930 struct datapath *dp;
1931 struct vport *vport;
1932
9354d452
JB
1933 if (a[OVS_VPORT_ATTR_IFINDEX])
1934 return ERR_PTR(-EOPNOTSUPP);
ccb1352e 1935 if (a[OVS_VPORT_ATTR_NAME]) {
46df7b81 1936 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
ccb1352e
JG
1937 if (!vport)
1938 return ERR_PTR(-ENODEV);
651a68ea
BP
1939 if (ovs_header->dp_ifindex &&
1940 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1941 return ERR_PTR(-ENODEV);
ccb1352e
JG
1942 return vport;
1943 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1944 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1945
1946 if (port_no >= DP_MAX_PORTS)
1947 return ERR_PTR(-EFBIG);
1948
46df7b81 1949 dp = get_dp(net, ovs_header->dp_ifindex);
ccb1352e
JG
1950 if (!dp)
1951 return ERR_PTR(-ENODEV);
1952
8e4e1713 1953 vport = ovs_vport_ovsl_rcu(dp, port_no);
ccb1352e 1954 if (!vport)
14408dba 1955 return ERR_PTR(-ENODEV);
ccb1352e
JG
1956 return vport;
1957 } else
1958 return ERR_PTR(-EINVAL);
9354d452 1959
ccb1352e
JG
1960}
1961
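/*
 * Illustrative userspace sketch, not part of this file: the two addressing
 * modes accepted by lookup_vport() above -- globally by OVS_VPORT_ATTR_NAME,
 * or by the datapath ifindex in struct ovs_header plus OVS_VPORT_ATTR_PORT_NO.
 * Assumes libnl-genl-3; "vport0" and the ifindex value are placeholders.
 */
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static struct nl_msg *vport_get_by_name(int family, const char *name)
{
        struct nl_msg *msg = nlmsg_alloc();
        struct ovs_header *ovs;

        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*ovs),
                          NLM_F_REQUEST, OVS_VPORT_CMD_GET, OVS_VPORT_VERSION);
        ovs->dp_ifindex = 0;    /* 0 = don't pin the lookup to one datapath */
        nla_put_string(msg, OVS_VPORT_ATTR_NAME, name);
        return msg;
}

static struct nl_msg *vport_get_by_port_no(int family, int dp_ifindex,
                                           uint32_t port_no)
{
        struct nl_msg *msg = nlmsg_alloc();
        struct ovs_header *ovs;

        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*ovs),
                          NLM_F_REQUEST, OVS_VPORT_CMD_GET, OVS_VPORT_VERSION);
        ovs->dp_ifindex = dp_ifindex;   /* ifindex of the datapath's local port */
        nla_put_u32(msg, OVS_VPORT_ATTR_PORT_NO, port_no);
        return msg;
}

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *by_name, *by_no;
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);
        by_name = vport_get_by_name(family, "vport0");
        by_no = vport_get_by_port_no(family, 3 /* placeholder ifindex */, 1);
        nl_send_auto(sk, by_name);      /* reply (or -ENODEV) comes back unicast */
        nl_recvmsgs_default(sk);
        nlmsg_free(by_name);
        nlmsg_free(by_no);
        nl_socket_free(sk);
        return 0;
}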
6b660c41 1962static unsigned int ovs_get_max_headroom(struct datapath *dp)
3a927bc7 1963{
6b660c41 1964 unsigned int dev_headroom, max_headroom = 0;
3a927bc7
PA
1965 struct net_device *dev;
1966 struct vport *vport;
1967 int i;
1968
1969 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1970 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1971 dev = vport->dev;
1972 dev_headroom = netdev_get_fwd_headroom(dev);
1973 if (dev_headroom > max_headroom)
1974 max_headroom = dev_headroom;
1975 }
1976 }
1977
6b660c41
TY
1978 return max_headroom;
1979}
1980
1981/* Called with ovs_mutex */
1982static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
1983{
1984 struct vport *vport;
1985 int i;
1986
1987 dp->max_headroom = new_headroom;
3a927bc7
PA
1988 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1989 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
6b660c41 1990 netdev_set_rx_headroom(vport->dev, new_headroom);
3a927bc7
PA
1991}
1992
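/*
 * Illustrative sketch, not part of this file, of the bookkeeping done by
 * ovs_get_max_headroom()/ovs_update_headroom() above: the cached maximum
 * only needs a full rescan when it might shrink (the port that owned the
 * maximum went away); growth is handled cheaply when a port is added.
 * Plain C with a made-up array standing in for the vport hash table.
 */
#include <stdio.h>

struct demo_port { unsigned int headroom; };

static unsigned int demo_max_headroom(const struct demo_port *p, int n)
{
        unsigned int max = 0;
        int i;

        for (i = 0; i < n; i++)
                if (p[i].headroom > max)
                        max = p[i].headroom;
        return max;
}

int main(void)
{
        struct demo_port ports[3] = { { 32 }, { 128 }, { 64 } };
        struct demo_port added = { 256 };
        unsigned int dp_max;

        dp_max = demo_max_headroom(ports, 3);
        printf("initial max: %u\n", dp_max);

        /* Port added: grow the cached value if needed, no rescan. */
        if (added.headroom > dp_max)
                dp_max = added.headroom;
        printf("after add:   %u\n", dp_max);

        /* The port holding the maximum is removed: rescan the survivors. */
        if (added.headroom == dp_max)
                dp_max = demo_max_headroom(ports, 3);
        printf("after del:   %u\n", dp_max);
        return 0;
}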
ccb1352e
JG
1993static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1994{
1995 struct nlattr **a = info->attrs;
1996 struct ovs_header *ovs_header = info->userhdr;
1997 struct vport_parms parms;
1998 struct sk_buff *reply;
1999 struct vport *vport;
2000 struct datapath *dp;
6b660c41 2001 unsigned int new_headroom;
ccb1352e
JG
2002 u32 port_no;
2003 int err;
2004
ccb1352e
JG
2005 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2006 !a[OVS_VPORT_ATTR_UPCALL_PID])
6093ae9a 2007 return -EINVAL;
9354d452
JB
2008 if (a[OVS_VPORT_ATTR_IFINDEX])
2009 return -EOPNOTSUPP;
6093ae9a
JR
2010
2011 port_no = a[OVS_VPORT_ATTR_PORT_NO]
2012 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2013 if (port_no >= DP_MAX_PORTS)
2014 return -EFBIG;
2015
2016 reply = ovs_vport_cmd_alloc_info();
2017 if (!reply)
2018 return -ENOMEM;
ccb1352e 2019
8e4e1713 2020 ovs_lock();
62b9c8d0 2021restart:
46df7b81 2022 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
ccb1352e
JG
2023 err = -ENODEV;
2024 if (!dp)
6093ae9a 2025 goto exit_unlock_free;
ccb1352e 2026
6093ae9a 2027 if (port_no) {
8e4e1713 2028 vport = ovs_vport_ovsl(dp, port_no);
ccb1352e
JG
2029 err = -EBUSY;
2030 if (vport)
6093ae9a 2031 goto exit_unlock_free;
ccb1352e
JG
2032 } else {
2033 for (port_no = 1; ; port_no++) {
2034 if (port_no >= DP_MAX_PORTS) {
2035 err = -EFBIG;
6093ae9a 2036 goto exit_unlock_free;
ccb1352e 2037 }
8e4e1713 2038 vport = ovs_vport_ovsl(dp, port_no);
ccb1352e
JG
2039 if (!vport)
2040 break;
2041 }
2042 }
2043
2044 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2045 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2046 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2047 parms.dp = dp;
2048 parms.port_no = port_no;
5cd667b0 2049 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
ccb1352e
JG
2050
2051 vport = new_vport(&parms);
2052 err = PTR_ERR(vport);
62b9c8d0
TG
2053 if (IS_ERR(vport)) {
2054 if (err == -EAGAIN)
2055 goto restart;
6093ae9a 2056 goto exit_unlock_free;
62b9c8d0 2057 }
ccb1352e 2058
9354d452
JB
2059 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2060 info->snd_portid, info->snd_seq, 0,
2061 OVS_VPORT_CMD_NEW);
3a927bc7 2062
6b660c41
TY
2063 new_headroom = netdev_get_fwd_headroom(vport->dev);
2064
2065 if (new_headroom > dp->max_headroom)
2066 ovs_update_headroom(dp, new_headroom);
3a927bc7
PA
2067 else
2068 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2069
6093ae9a
JR
2070 BUG_ON(err < 0);
2071 ovs_unlock();
ed661185 2072
2a94fe48 2073 ovs_notify(&dp_vport_genl_family, reply, info);
6093ae9a 2074 return 0;
ccb1352e 2075
6093ae9a 2076exit_unlock_free:
8e4e1713 2077 ovs_unlock();
6093ae9a 2078 kfree_skb(reply);
ccb1352e
JG
2079 return err;
2080}
2081
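/*
 * Illustrative userspace sketch, not part of this file: attaching an existing
 * network device to a datapath through ovs_vport_cmd_new() above.  Assumes
 * libnl-genl-3 and CAP_NET_ADMIN; "eth1" and the datapath ifindex are
 * placeholders the caller must supply (e.g. from a prior OVS_DP_CMD_GET).
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg;
        struct ovs_header *ovs;
        int family, dp_ifindex = 3;     /* placeholder: local port ifindex */

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);
        msg = nlmsg_alloc();
        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*ovs),
                          NLM_F_REQUEST | NLM_F_ACK, OVS_VPORT_CMD_NEW,
                          OVS_VPORT_VERSION);
        ovs->dp_ifindex = dp_ifindex;
        /* NAME, TYPE and UPCALL_PID are mandatory; PORT_NO is optional and
         * would otherwise be picked by the kernel (first free slot >= 1). */
        nla_put_string(msg, OVS_VPORT_ATTR_NAME, "eth1");
        nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_NETDEV);
        nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID, 0); /* 0: drop upcalls */
        nl_send_auto(sk, msg);
        nlmsg_free(msg);
        if (nl_wait_for_ack(sk) < 0)
                fprintf(stderr, "OVS_VPORT_CMD_NEW failed\n");
        nl_socket_free(sk);
        return 0;
}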
2082static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2083{
2084 struct nlattr **a = info->attrs;
2085 struct sk_buff *reply;
2086 struct vport *vport;
2087 int err;
2088
6093ae9a
JR
2089 reply = ovs_vport_cmd_alloc_info();
2090 if (!reply)
2091 return -ENOMEM;
2092
8e4e1713 2093 ovs_lock();
46df7b81 2094 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
ccb1352e
JG
2095 err = PTR_ERR(vport);
2096 if (IS_ERR(vport))
6093ae9a 2097 goto exit_unlock_free;
ccb1352e 2098
ccb1352e 2099 if (a[OVS_VPORT_ATTR_TYPE] &&
f44f3408 2100 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
ccb1352e 2101 err = -EINVAL;
6093ae9a 2102 goto exit_unlock_free;
a9341512
JG
2103 }
2104
f44f3408 2105 if (a[OVS_VPORT_ATTR_OPTIONS]) {
ccb1352e 2106 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
f44f3408 2107 if (err)
6093ae9a 2108 goto exit_unlock_free;
f44f3408 2109 }
a9341512 2110
5cd667b0
AW
2111
2112 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2113 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2114
2115 err = ovs_vport_set_upcall_portids(vport, ids);
2116 if (err)
2117 goto exit_unlock_free;
2118 }
ccb1352e 2119
9354d452
JB
2120 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2121 info->snd_portid, info->snd_seq, 0,
804fe108 2122 OVS_VPORT_CMD_SET);
a9341512 2123 BUG_ON(err < 0);
ccb1352e 2124
8e4e1713 2125 ovs_unlock();
2a94fe48 2126 ovs_notify(&dp_vport_genl_family, reply, info);
8e4e1713 2127 return 0;
ccb1352e 2128
6093ae9a 2129exit_unlock_free:
8e4e1713 2130 ovs_unlock();
6093ae9a 2131 kfree_skb(reply);
ccb1352e
JG
2132 return err;
2133}
2134
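/*
 * Illustrative userspace sketch, not part of this file: redirecting a vport's
 * upcalls to this process through ovs_vport_cmd_set() above, by writing our
 * own netlink port id into OVS_VPORT_ATTR_UPCALL_PID.  Assumes libnl-genl-3
 * and CAP_NET_ADMIN; the vport name "eth1" is a placeholder.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg;
        struct ovs_header *ovs;
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);
        msg = nlmsg_alloc();
        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*ovs),
                          NLM_F_REQUEST | NLM_F_ACK, OVS_VPORT_CMD_SET,
                          OVS_VPORT_VERSION);
        ovs->dp_ifindex = 0;                    /* vport addressed by name */
        nla_put_string(msg, OVS_VPORT_ATTR_NAME, "eth1");
        nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID,
                    nl_socket_get_local_port(sk));
        nl_send_auto(sk, msg);
        nlmsg_free(msg);
        if (nl_wait_for_ack(sk) < 0)
                fprintf(stderr, "OVS_VPORT_CMD_SET failed\n");
        nl_socket_free(sk);
        return 0;
}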
2135static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2136{
6b660c41 2137 bool update_headroom = false;
ccb1352e
JG
2138 struct nlattr **a = info->attrs;
2139 struct sk_buff *reply;
3a927bc7 2140 struct datapath *dp;
ccb1352e 2141 struct vport *vport;
6b660c41 2142 unsigned int new_headroom;
ccb1352e
JG
2143 int err;
2144
6093ae9a
JR
2145 reply = ovs_vport_cmd_alloc_info();
2146 if (!reply)
2147 return -ENOMEM;
2148
8e4e1713 2149 ovs_lock();
46df7b81 2150 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
ccb1352e
JG
2151 err = PTR_ERR(vport);
2152 if (IS_ERR(vport))
6093ae9a 2153 goto exit_unlock_free;
ccb1352e
JG
2154
2155 if (vport->port_no == OVSP_LOCAL) {
2156 err = -EINVAL;
6093ae9a 2157 goto exit_unlock_free;
ccb1352e
JG
2158 }
2159
9354d452
JB
2160 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2161 info->snd_portid, info->snd_seq, 0,
2162 OVS_VPORT_CMD_DEL);
6093ae9a 2163 BUG_ON(err < 0);
3a927bc7
PA
2164
2165 /* Deleting this vport may require the datapath's max headroom to be recomputed. */
2166 dp = vport->dp;
2167 if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
6b660c41
TY
2168 update_headroom = true;
2169
3a927bc7 2170 netdev_reset_rx_headroom(vport->dev);
ccb1352e 2171 ovs_dp_detach_port(vport);
3a927bc7 2172
6b660c41
TY
2173 if (update_headroom) {
2174 new_headroom = ovs_get_max_headroom(dp);
2175
2176 if (new_headroom < dp->max_headroom)
2177 ovs_update_headroom(dp, new_headroom);
2178 }
6093ae9a 2179 ovs_unlock();
ccb1352e 2180
2a94fe48 2181 ovs_notify(&dp_vport_genl_family, reply, info);
6093ae9a 2182 return 0;
ccb1352e 2183
6093ae9a 2184exit_unlock_free:
8e4e1713 2185 ovs_unlock();
6093ae9a 2186 kfree_skb(reply);
ccb1352e
JG
2187 return err;
2188}
2189
2190static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2191{
2192 struct nlattr **a = info->attrs;
2193 struct ovs_header *ovs_header = info->userhdr;
2194 struct sk_buff *reply;
2195 struct vport *vport;
2196 int err;
2197
6093ae9a
JR
2198 reply = ovs_vport_cmd_alloc_info();
2199 if (!reply)
2200 return -ENOMEM;
2201
ccb1352e 2202 rcu_read_lock();
46df7b81 2203 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
ccb1352e
JG
2204 err = PTR_ERR(vport);
2205 if (IS_ERR(vport))
6093ae9a 2206 goto exit_unlock_free;
9354d452
JB
2207 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2208 info->snd_portid, info->snd_seq, 0,
804fe108 2209 OVS_VPORT_CMD_GET);
6093ae9a 2210 BUG_ON(err < 0);
ccb1352e
JG
2211 rcu_read_unlock();
2212
2213 return genlmsg_reply(reply, info);
2214
6093ae9a 2215exit_unlock_free:
ccb1352e 2216 rcu_read_unlock();
6093ae9a 2217 kfree_skb(reply);
ccb1352e
JG
2218 return err;
2219}
2220
2221static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2222{
2223 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2224 struct datapath *dp;
15eac2a7
PS
2225 int bucket = cb->args[0], skip = cb->args[1];
2226 int i, j = 0;
ccb1352e 2227
42ee19e2 2228 rcu_read_lock();
cc3a5ae6 2229 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
42ee19e2
JR
2230 if (!dp) {
2231 rcu_read_unlock();
ccb1352e 2232 return -ENODEV;
42ee19e2 2233 }
15eac2a7 2234 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
ccb1352e 2235 struct vport *vport;
15eac2a7
PS
2236
2237 j = 0;
b67bfe0d 2238 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
15eac2a7
PS
2239 if (j >= skip &&
2240 ovs_vport_cmd_fill_info(vport, skb,
9354d452 2241 sock_net(skb->sk),
15e47304 2242 NETLINK_CB(cb->skb).portid,
15eac2a7
PS
2243 cb->nlh->nlmsg_seq,
2244 NLM_F_MULTI,
804fe108 2245 OVS_VPORT_CMD_GET) < 0)
15eac2a7
PS
2246 goto out;
2247
2248 j++;
2249 }
2250 skip = 0;
ccb1352e 2251 }
15eac2a7 2252out:
ccb1352e
JG
2253 rcu_read_unlock();
2254
15eac2a7
PS
2255 cb->args[0] = i;
2256 cb->args[1] = j;
ccb1352e 2257
15eac2a7 2258 return skb->len;
ccb1352e
JG
2259}
2260
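/*
 * Illustrative userspace sketch, not part of this file: listing the ports of
 * one datapath through ovs_vport_cmd_dump() above and reading back the
 * attributes written by ovs_vport_cmd_fill_info().  Assumes libnl-genl-3;
 * the datapath ifindex is a placeholder supplied by the caller.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int show_vport(struct nl_msg *msg, void *arg)
{
        struct nlattr *tb[OVS_VPORT_ATTR_MAX + 1];

        if (genlmsg_parse(nlmsg_hdr(msg), sizeof(struct ovs_header),
                          tb, OVS_VPORT_ATTR_MAX, NULL) < 0)
                return NL_SKIP;
        if (tb[OVS_VPORT_ATTR_PORT_NO] && tb[OVS_VPORT_ATTR_NAME])
                printf("port %u: %s\n",
                       nla_get_u32(tb[OVS_VPORT_ATTR_PORT_NO]),
                       nla_get_string(tb[OVS_VPORT_ATTR_NAME]));
        return NL_OK;
}

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct ovs_header *ovs;
        struct nl_msg *msg;
        int family, dp_ifindex = 3;     /* placeholder: the datapath to list */

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);
        msg = nlmsg_alloc();
        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, sizeof(*ovs),
                          NLM_F_REQUEST | NLM_F_DUMP, OVS_VPORT_CMD_GET,
                          OVS_VPORT_VERSION);
        ovs->dp_ifindex = dp_ifindex;
        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, show_vport, NULL);
        nl_send_auto(sk, msg);
        nlmsg_free(msg);
        nl_recvmsgs_default(sk);
        nl_socket_free(sk);
        return 0;
}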
0c200ef9
PS
2261static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2262 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2263 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2264 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2265 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2266 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2267 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
9354d452
JB
2268 [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2269 [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
0c200ef9
PS
2270};
2271
48e48a70 2272static const struct genl_ops dp_vport_genl_ops[] = {
ccb1352e 2273 { .cmd = OVS_VPORT_CMD_NEW,
ef6243ac 2274 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 2275 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
2276 .doit = ovs_vport_cmd_new
2277 },
2278 { .cmd = OVS_VPORT_CMD_DEL,
ef6243ac 2279 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 2280 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
2281 .doit = ovs_vport_cmd_del
2282 },
2283 { .cmd = OVS_VPORT_CMD_GET,
ef6243ac 2284 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
ccb1352e 2285 .flags = 0, /* OK for unprivileged users. */
ccb1352e
JG
2286 .doit = ovs_vport_cmd_get,
2287 .dumpit = ovs_vport_cmd_dump
2288 },
2289 { .cmd = OVS_VPORT_CMD_SET,
ef6243ac 2290 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
4a92602a 2291 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
ccb1352e
JG
2292 .doit = ovs_vport_cmd_set,
2293 },
2294};
2295
56989f6d 2296struct genl_family dp_vport_genl_family __ro_after_init = {
0c200ef9
PS
2297 .hdrsize = sizeof(struct ovs_header),
2298 .name = OVS_VPORT_FAMILY,
2299 .version = OVS_VPORT_VERSION,
2300 .maxattr = OVS_VPORT_ATTR_MAX,
3b0f31f2 2301 .policy = vport_policy,
0c200ef9
PS
2302 .netnsok = true,
2303 .parallel_ops = true,
2304 .ops = dp_vport_genl_ops,
2305 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2306 .mcgrps = &ovs_dp_vport_multicast_group,
2307 .n_mcgrps = 1,
489111e5 2308 .module = THIS_MODULE,
ccb1352e
JG
2309};
2310
0c200ef9
PS
2311static struct genl_family * const dp_genl_families[] = {
2312 &dp_datapath_genl_family,
2313 &dp_vport_genl_family,
2314 &dp_flow_genl_family,
2315 &dp_packet_genl_family,
96fbc13d 2316 &dp_meter_genl_family,
11efd5cb
YHW
2317#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2318 &dp_ct_limit_genl_family,
2319#endif
ccb1352e
JG
2320};
2321
2322static void dp_unregister_genl(int n_families)
2323{
2324 int i;
2325
2326 for (i = 0; i < n_families; i++)
0c200ef9 2327 genl_unregister_family(dp_genl_families[i]);
ccb1352e
JG
2328}
2329
56989f6d 2330static int __init dp_register_genl(void)
ccb1352e 2331{
ccb1352e
JG
2332 int err;
2333 int i;
2334
ccb1352e 2335 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
ccb1352e 2336
0c200ef9 2337 err = genl_register_family(dp_genl_families[i]);
ccb1352e
JG
2338 if (err)
2339 goto error;
ccb1352e
JG
2340 }
2341
2342 return 0;
2343
2344error:
0c200ef9 2345 dp_unregister_genl(i);
ccb1352e
JG
2346 return err;
2347}
2348
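/*
 * Illustrative sketch, not part of this file, of the registration pattern
 * used by dp_register_genl()/dp_unregister_genl() above: try to register
 * every entry of an array and, on the first failure, unregister exactly the
 * i entries that already succeeded.  Plain C with stand-in register and
 * unregister functions.
 */
#include <stdio.h>

#define N_ITEMS 4

static int demo_register(int i)
{
        if (i == 2)                     /* simulate a failure on the third item */
                return -1;
        printf("registered %d\n", i);
        return 0;
}

static void demo_unregister(int i)
{
        printf("unregistered %d\n", i);
}

static void demo_unregister_first(int n)
{
        int i;

        for (i = 0; i < n; i++)
                demo_unregister(i);
}

static int demo_register_all(void)
{
        int err, i;

        for (i = 0; i < N_ITEMS; i++) {
                err = demo_register(i);
                if (err)
                        goto error;
        }
        return 0;

error:
        demo_unregister_first(i);       /* roll back only what succeeded */
        return err;
}

int main(void)
{
        return demo_register_all() ? 1 : 0;
}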
46df7b81
PS
2349static int __net_init ovs_init_net(struct net *net)
2350{
2351 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2352
2353 INIT_LIST_HEAD(&ovs_net->dps);
8e4e1713 2354 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
11efd5cb 2355 return ovs_ct_init(net);
46df7b81
PS
2356}
2357
7b4577a9
PS
2358static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2359 struct list_head *head)
46df7b81 2360{
8e4e1713 2361 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
7b4577a9
PS
2362 struct datapath *dp;
2363
2364 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2365 int i;
2366
2367 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2368 struct vport *vport;
2369
2370 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
7b4577a9
PS
2371 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2372 continue;
2373
be4ace6e 2374 if (dev_net(vport->dev) == dnet)
7b4577a9
PS
2375 list_add(&vport->detach_list, head);
2376 }
2377 }
2378 }
2379}
2380
2381static void __net_exit ovs_exit_net(struct net *dnet)
2382{
2383 struct datapath *dp, *dp_next;
2384 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2385 struct vport *vport, *vport_next;
2386 struct net *net;
2387 LIST_HEAD(head);
46df7b81 2388
c2ac6673 2389 ovs_ct_exit(dnet);
8e4e1713 2390 ovs_lock();
46df7b81
PS
2391 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2392 __dp_destroy(dp);
7b4577a9 2393
f0b07bb1 2394 down_read(&net_rwsem);
7b4577a9
PS
2395 for_each_net(net)
2396 list_vports_from_net(net, dnet, &head);
f0b07bb1 2397 up_read(&net_rwsem);
7b4577a9
PS
2398
2399 /* Detach all vports from given namespace. */
2400 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2401 list_del(&vport->detach_list);
2402 ovs_dp_detach_port(vport);
2403 }
2404
8e4e1713
PS
2405 ovs_unlock();
2406
2407 cancel_work_sync(&ovs_net->dp_notify_work);
46df7b81
PS
2408}
2409
2410static struct pernet_operations ovs_net_ops = {
2411 .init = ovs_init_net,
2412 .exit = ovs_exit_net,
2413 .id = &ovs_net_id,
2414 .size = sizeof(struct ovs_net),
2415};
2416
ccb1352e
JG
2417static int __init dp_init(void)
2418{
ccb1352e
JG
2419 int err;
2420
3523b29b 2421 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
ccb1352e
JG
2422
2423 pr_info("Open vSwitch switching datapath\n");
2424
971427f3 2425 err = action_fifos_init();
ccb1352e
JG
2426 if (err)
2427 goto error;
2428
971427f3
AZ
2429 err = ovs_internal_dev_rtnl_link_register();
2430 if (err)
2431 goto error_action_fifos_exit;
2432
5b9e7e16
JP
2433 err = ovs_flow_init();
2434 if (err)
2435 goto error_unreg_rtnl_link;
2436
ccb1352e
JG
2437 err = ovs_vport_init();
2438 if (err)
2439 goto error_flow_exit;
2440
46df7b81 2441 err = register_pernet_device(&ovs_net_ops);
ccb1352e
JG
2442 if (err)
2443 goto error_vport_exit;
2444
46df7b81
PS
2445 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2446 if (err)
2447 goto error_netns_exit;
2448
62b9c8d0
TG
2449 err = ovs_netdev_init();
2450 if (err)
2451 goto error_unreg_notifier;
2452
ccb1352e
JG
2453 err = dp_register_genl();
2454 if (err < 0)
62b9c8d0 2455 goto error_unreg_netdev;
ccb1352e 2456
ccb1352e
JG
2457 return 0;
2458
62b9c8d0
TG
2459error_unreg_netdev:
2460 ovs_netdev_exit();
ccb1352e
JG
2461error_unreg_notifier:
2462 unregister_netdevice_notifier(&ovs_dp_device_notifier);
46df7b81
PS
2463error_netns_exit:
2464 unregister_pernet_device(&ovs_net_ops);
ccb1352e
JG
2465error_vport_exit:
2466 ovs_vport_exit();
2467error_flow_exit:
2468 ovs_flow_exit();
5b9e7e16
JP
2469error_unreg_rtnl_link:
2470 ovs_internal_dev_rtnl_link_unregister();
971427f3
AZ
2471error_action_fifos_exit:
2472 action_fifos_exit();
ccb1352e
JG
2473error:
2474 return err;
2475}
2476
2477static void dp_cleanup(void)
2478{
ccb1352e 2479 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
62b9c8d0 2480 ovs_netdev_exit();
ccb1352e 2481 unregister_netdevice_notifier(&ovs_dp_device_notifier);
46df7b81
PS
2482 unregister_pernet_device(&ovs_net_ops);
2483 rcu_barrier();
ccb1352e
JG
2484 ovs_vport_exit();
2485 ovs_flow_exit();
5b9e7e16 2486 ovs_internal_dev_rtnl_link_unregister();
971427f3 2487 action_fifos_exit();
ccb1352e
JG
2488}
2489
2490module_init(dp_init);
2491module_exit(dp_cleanup);
2492
2493MODULE_DESCRIPTION("Open vSwitch switching datapath");
2494MODULE_LICENSE("GPL");
ed227099
TLSC
2495MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2496MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2497MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2498MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
96fbc13d 2499MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
11efd5cb 2500MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);