]>
Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
007f790c JP |
2 | /* |
3 | * net/switchdev/switchdev.c - Switch device API | |
7ea6eb3f | 4 | * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us> |
f8f21471 | 5 | * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com> |
007f790c JP |
6 | */ |
7 | ||
8 | #include <linux/kernel.h> | |
9 | #include <linux/types.h> | |
10 | #include <linux/init.h> | |
03bf0c28 JP |
11 | #include <linux/mutex.h> |
12 | #include <linux/notifier.h> | |
007f790c | 13 | #include <linux/netdevice.h> |
850d0cbc | 14 | #include <linux/etherdevice.h> |
47f8328b | 15 | #include <linux/if_bridge.h> |
7ea6eb3f | 16 | #include <linux/list.h> |
793f4014 | 17 | #include <linux/workqueue.h> |
87aaf2ca | 18 | #include <linux/if_vlan.h> |
4f2c6ae5 | 19 | #include <linux/rtnetlink.h> |
007f790c JP |
20 | #include <net/switchdev.h> |
21 | ||
dc489f86 TW |
22 | static bool switchdev_obj_eq(const struct switchdev_obj *a, |
23 | const struct switchdev_obj *b) | |
24 | { | |
25 | const struct switchdev_obj_port_vlan *va, *vb; | |
26 | const struct switchdev_obj_port_mdb *ma, *mb; | |
27 | ||
28 | if (a->id != b->id || a->orig_dev != b->orig_dev) | |
29 | return false; | |
30 | ||
31 | switch (a->id) { | |
32 | case SWITCHDEV_OBJ_ID_PORT_VLAN: | |
33 | va = SWITCHDEV_OBJ_PORT_VLAN(a); | |
34 | vb = SWITCHDEV_OBJ_PORT_VLAN(b); | |
35 | return va->flags == vb->flags && | |
36 | va->vid == vb->vid && | |
37 | va->changed == vb->changed; | |
38 | case SWITCHDEV_OBJ_ID_PORT_MDB: | |
39 | case SWITCHDEV_OBJ_ID_HOST_MDB: | |
40 | ma = SWITCHDEV_OBJ_PORT_MDB(a); | |
41 | mb = SWITCHDEV_OBJ_PORT_MDB(b); | |
42 | return ma->vid == mb->vid && | |
43 | ether_addr_equal(ma->addr, mb->addr); | |
44 | default: | |
45 | break; | |
46 | } | |
47 | ||
48 | BUG(); | |
49 | } | |
50 | ||
793f4014 JP |
51 | static LIST_HEAD(deferred); |
52 | static DEFINE_SPINLOCK(deferred_lock); | |
53 | ||
54 | typedef void switchdev_deferred_func_t(struct net_device *dev, | |
55 | const void *data); | |
56 | ||
57 | struct switchdev_deferred_item { | |
58 | struct list_head list; | |
59 | struct net_device *dev; | |
4fc003fe | 60 | netdevice_tracker dev_tracker; |
793f4014 | 61 | switchdev_deferred_func_t *func; |
fbfc8502 | 62 | unsigned long data[]; |
793f4014 JP |
63 | }; |
64 | ||
65 | static struct switchdev_deferred_item *switchdev_deferred_dequeue(void) | |
66 | { | |
67 | struct switchdev_deferred_item *dfitem; | |
68 | ||
69 | spin_lock_bh(&deferred_lock); | |
70 | if (list_empty(&deferred)) { | |
71 | dfitem = NULL; | |
72 | goto unlock; | |
73 | } | |
74 | dfitem = list_first_entry(&deferred, | |
75 | struct switchdev_deferred_item, list); | |
76 | list_del(&dfitem->list); | |
77 | unlock: | |
78 | spin_unlock_bh(&deferred_lock); | |
79 | return dfitem; | |
80 | } | |
81 | ||
/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		/* drop the reference taken at enqueue time */
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
101 | ||
/* Workqueue callback: flush the deferred queue in process context, where
 * taking rtnl_lock (required by switchdev_deferred_process()) is allowed.
 */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
110 | ||
/* Queue a deferred operation: copy @data (@data_len bytes) into a freshly
 * allocated item, take a tracked reference on @dev, append to the global
 * queue, and kick the process work. GFP_ATOMIC because deferred ops may be
 * requested from atomic context (that is what SWITCHDEV_F_DEFER is for).
 *
 * Returns 0 on success or -ENOMEM.
 */
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	/* released by switchdev_deferred_process() via netdev_put() */
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}
130 | ||
/* Notify the blocking notifier chain of a port attribute operation.
 *
 * Returns the error a driver reported through the notifier, -EOPNOTSUPP
 * when no driver claimed the attribute, or 0 on success.
 */
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		/* an unhandled attribute must not produce an error */
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}
157 | ||
/* Synchronous (non-deferred) attribute set: notify drivers immediately. */
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}
0bc05d58 JP |
/* Deferred-queue callback for attribute set. @data is the copied
 * struct switchdev_attr. Failures (other than -EOPNOTSUPP) can only be
 * logged here, since the original caller has long returned.
 */
static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	/* let interested callers observe the final status */
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}
179 | ||
180 | static int switchdev_port_attr_set_defer(struct net_device *dev, | |
181 | const struct switchdev_attr *attr) | |
182 | { | |
183 | return switchdev_deferred_enqueue(dev, attr, sizeof(*attr), | |
184 | switchdev_port_attr_set_deferred); | |
185 | } | |
186 | ||
/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 * @extack: netlink extended ack, for error message propagation
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 *
 * Returns 0 on success, -ENOMEM if deferral could not be queued, or the
 * driver's error (including -EOPNOTSUPP) for the synchronous path.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
207 | ||
e258d919 SF |
208 | static size_t switchdev_obj_size(const struct switchdev_obj *obj) |
209 | { | |
210 | switch (obj->id) { | |
211 | case SWITCHDEV_OBJ_ID_PORT_VLAN: | |
212 | return sizeof(struct switchdev_obj_port_vlan); | |
4d41e125 ER |
213 | case SWITCHDEV_OBJ_ID_PORT_MDB: |
214 | return sizeof(struct switchdev_obj_port_mdb); | |
47d5b6db AL |
215 | case SWITCHDEV_OBJ_ID_HOST_MDB: |
216 | return sizeof(struct switchdev_obj_port_mdb); | |
e258d919 SF |
217 | default: |
218 | BUG(); | |
219 | } | |
220 | return 0; | |
221 | } | |
222 | ||
/* Notify the blocking notifier chain of a port object add/del.
 *
 * Returns the error a driver reported, -EOPNOTSUPP when no driver claimed
 * the object, or 0 on success.
 */
static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		/* an unhandled object must not produce an error */
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}
246 | ||
/* Log a human-readable explanation of a failed object add/del: which
 * object type was involved, why the failure matters, and — for known
 * errno values — a likely reason. Purely diagnostic; does not change the
 * error path.
 */
static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
					    enum switchdev_obj_id obj_id,
					    int err, bool add)
{
	const char *action = add ? "add" : "del";
	const char *reason = "";
	const char *problem;
	const char *obj_str;

	switch (obj_id) {
	case SWITCHDEV_OBJ_ID_UNDEFINED:
		obj_str = "Undefined object";
		problem = "Attempted operation is undefined, indicating a possible programming\n"
			  "error.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		obj_str = "VLAN entry";
		problem = "Failure in VLAN settings on this port might disrupt network\n"
			  "segmentation or traffic isolation, affecting network partitioning.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		obj_str = "Port Multicast Database entry";
		problem = "Failure in updating the port's Multicast Database could lead to\n"
			  "multicast forwarding issues.\n";
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		obj_str = "Host Multicast Database entry";
		problem = "Failure in updating the host's Multicast Database may impact multicast\n"
			  "group memberships or traffic delivery, affecting multicast\n"
			  "communication.\n";
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		obj_str = "Media Redundancy Protocol configuration for port";
		problem = "Failure to set MRP ring ID on this port prevents communication with\n"
			  "the specified redundancy ring, resulting in an inability to engage\n"
			  "in MRP-based network operations.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
		obj_str = "MRP Test Frame Operations for port";
		problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
			  "assess the ring's operational integrity and fault response, hindering\n"
			  "proactive network management.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		obj_str = "MRP Ring Role Configuration";
		problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
			  "disrupting communication for all participants, or isolate the local\n"
			  "system from the ring, hindering its ability to communicate with other\n"
			  "participants.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
		obj_str = "MRP Ring State Configuration";
		problem = "Failure to correctly set the MRP ring state can result in network\n"
			  "loops or leave segments without communication. In a Closed state,\n"
			  "it maintains loop prevention by blocking one MRM port, while an Open\n"
			  "state activates in response to failures, changing port states to\n"
			  "preserve network connectivity.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
		obj_str = "MRP_InTest Frame Generation Configuration";
		problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
			  "interconnection ring's state, leading to incorrect blocking or\n"
			  "unblocking of the I/C port. This misconfiguration might result\n"
			  "in unintended network loops or isolate critical network segments,\n"
			  "compromising network integrity and reliability.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
		obj_str = "Interconnection Ring Role Configuration";
		problem = "Failure in incorrect assignment of interconnection ring roles\n"
			  "(MIM/MIC) can impair the formation of the interconnection rings.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
		obj_str = "Interconnection Ring State Configuration";
		problem = "Failure in updating the interconnection ring state can lead in\n"
			  "case of Open state to incorrect blocking or unblocking of the\n"
			  "I/C port, resulting in unintended network loops or isolation\n"
			  "of critical network\n";
		break;
	default:
		obj_str = "Unknown object";
		problem = "Indicating a possible programming error.\n";
	}

	/* Translate well-known errno values into a likely root cause */
	switch (err) {
	case -ENOSPC:
		reason = "Current HW/SW setup lacks sufficient resources.\n";
		break;
	}

	netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
		   action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
}
339 | ||
/* Deferred-queue callback for object add. @data is the copied switchdev
 * object. Runs under rtnl_lock from switchdev_deferred_process(); failures
 * are only logged, since the original caller has already returned.
 */
static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
	/* let interested callers observe the final status */
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}
354 | ||
/* Queue an object add for later processing under rtnl_lock. */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
491d0f15 SF |
361 | |
362 | /** | |
4d429c5d | 363 | * switchdev_port_obj_add - Add port object |
491d0f15 SF |
364 | * |
365 | * @dev: port device | |
4d429c5d | 366 | * @obj: object to add |
c8af73f0 | 367 | * @extack: netlink extended ack |
4d429c5d | 368 | * |
4d429c5d JP |
369 | * rtnl_lock must be held and must not be in atomic section, |
370 | * in case SWITCHDEV_F_DEFER flag is not set. | |
491d0f15 | 371 | */ |
4d429c5d | 372 | int switchdev_port_obj_add(struct net_device *dev, |
69b7320e PM |
373 | const struct switchdev_obj *obj, |
374 | struct netlink_ext_ack *extack) | |
4d429c5d JP |
375 | { |
376 | if (obj->flags & SWITCHDEV_F_DEFER) | |
377 | return switchdev_port_obj_add_defer(dev, obj); | |
378 | ASSERT_RTNL(); | |
cf6def51 VO |
379 | return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, |
380 | dev, obj, extack); | |
4d429c5d JP |
381 | } |
382 | EXPORT_SYMBOL_GPL(switchdev_port_obj_add); | |
383 | ||
/* Synchronous (non-deferred) object delete: notify drivers immediately. */
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}
/* Deferred-queue callback for object delete. @data is the copied switchdev
 * object; failures are only logged, since the original caller has already
 * returned.
 */
static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
	/* let interested callers observe the final status */
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}
403 | ||
/* Queue an object delete for later processing under rtnl_lock. */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
410 | ||
/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 *
 * Returns 0 on success, -ENOMEM if deferral could not be queued, or the
 * driver's error (including -EOPNOTSUPP) for the synchronous path.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
429 | ||
/**
 * switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 * @dev: port device
 * @nt: type of action; add or delete
 * @obj: object to test
 *
 * Returns true if a deferred item is pending, which is
 * equivalent to the action @nt on an object @obj.
 *
 * rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		/* Match the queued callback against the requested action,
		 * then compare the copied object payload itself.
		 */
		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
473 | ||
ff5cf100 | 474 | static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); |
a93e3b17 | 475 | static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain); |
03bf0c28 JP |
476 | |
/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
03bf0c28 JP |
488 | |
/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
03bf0c28 JP |
500 | |
/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 * Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	/* fill in the common fields before dispatching to the chain */
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
8a44dbb2 | 518 | |
a93e3b17 PM |
519 | int register_switchdev_blocking_notifier(struct notifier_block *nb) |
520 | { | |
521 | struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; | |
522 | ||
523 | return blocking_notifier_chain_register(chain, nb); | |
524 | } | |
525 | EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); | |
526 | ||
527 | int unregister_switchdev_blocking_notifier(struct notifier_block *nb) | |
528 | { | |
529 | struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; | |
530 | ||
531 | return blocking_notifier_chain_unregister(chain, nb); | |
532 | } | |
533 | EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); | |
534 | ||
/* Invoke the blocking switchdev notifier chain with @val and @info,
 * filling in the common device/extack fields first. Returns the raw
 * notifier chain result (convert with notifier_to_errno()).
 */
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
545 | ||
/* State threaded through netdev_walk_all_lower_dev*() when searching for a
 * switchdev lower interface; lower_dev is the walk's output.
 */
struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;	/* device the search started from */
	struct net_device *lower_dev;	/* result: first matching lower dev */
};
553 | ||
554 | static int switchdev_lower_dev_walk(struct net_device *lower_dev, | |
555 | struct netdev_nested_priv *priv) | |
556 | { | |
557 | struct switchdev_nested_priv *switchdev_priv = priv->data; | |
558 | bool (*foreign_dev_check_cb)(const struct net_device *dev, | |
559 | const struct net_device *foreign_dev); | |
560 | bool (*check_cb)(const struct net_device *dev); | |
561 | const struct net_device *dev; | |
562 | ||
563 | check_cb = switchdev_priv->check_cb; | |
564 | foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb; | |
565 | dev = switchdev_priv->dev; | |
566 | ||
567 | if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) { | |
568 | switchdev_priv->lower_dev = lower_dev; | |
569 | return 1; | |
570 | } | |
571 | ||
572 | return 0; | |
573 | } | |
574 | ||
/* Find the first lower interface of @dev that passes @check_cb and is not
 * foreign according to @foreign_dev_check_cb, walking under RCU. Returns
 * NULL when none is found.
 */
static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
595 | ||
/* Same search as switchdev_lower_dev_find_rcu(), but using the non-RCU
 * walk (callers hold rtnl_lock instead of the RCU read lock).
 */
static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
616 | ||
/* Recursive worker for switchdev_handle_fdb_event_to_device().
 *
 * Route an FDB event notified on @dev down to the switchdev port(s) that
 * should offload it: directly when @dev is ours, by recursing through
 * bridge/LAG lower interfaces otherwise, and finally — for events on a
 * foreign interface — by replicating towards the bridge it shares with us.
 * Returns 0 or a driver error; -EOPNOTSUPP means "nobody handled it".
 */
static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}
677 | ||
/* Public entry point: dispatch an FDB event towards the relevant switchdev
 * ports (see __switchdev_handle_fdb_event_to_device()). An unhandled event
 * (-EOPNOTSUPP) is not an error for the caller, so it is mapped to 0.
 */
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
8ca07176 | 698 | |
/* Recursive worker for switchdev_handle_port_obj_add{,_foreign}().
 *
 * Deliver a port object addition to @dev if it is ours, otherwise recurse
 * into its lower interfaces (LAG members, bridge ports). When
 * @foreign_dev_check_cb is set, also replicate objects notified on a
 * foreign interface towards the switchdev ports that share a bridge with
 * it. Returns 0, -EOPNOTSUPP if nobody handled the object, or a hard
 * driver error.
 */
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}
769 | ||
/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	/* no foreign-dev callback: foreign replication is disabled */
	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
790 | ||
c4076cdd VO |
791 | /* Same as switchdev_handle_port_obj_add(), except if object is notified on a |
792 | * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices | |
793 | * that pass @check_cb and are in the same bridge as @dev. | |
794 | */ | |
795 | int switchdev_handle_port_obj_add_foreign(struct net_device *dev, | |
796 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
797 | bool (*check_cb)(const struct net_device *dev), | |
798 | bool (*foreign_dev_check_cb)(const struct net_device *dev, | |
799 | const struct net_device *foreign_dev), | |
800 | int (*add_cb)(struct net_device *dev, const void *ctx, | |
801 | const struct switchdev_obj *obj, | |
802 | struct netlink_ext_ack *extack)) | |
803 | { | |
804 | int err; | |
805 | ||
806 | err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb, | |
807 | foreign_dev_check_cb, add_cb); | |
808 | if (err == -EOPNOTSUPP) | |
809 | err = 0; | |
810 | return err; | |
811 | } | |
812 | EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign); | |
813 | ||
f30f0601 PM |
/* Recursive worker for the port object deletion helpers.
 *
 * If @dev itself passes @check_cb, the object in @port_obj_info is delivered
 * to @del_cb and the event is marked as handled (unless the driver returned
 * -EOPNOTSUPP). Otherwise the deletion is replicated towards @dev's lower
 * interfaces (e.g. LAG members). When @foreign_dev_check_cb is non-NULL and
 * @dev turns out to sit in a bridge together with a switchdev interface that
 * considers @dev foreign, the deletion is additionally replayed on that
 * bridge so the switchdev neighbor can react.
 *
 * Returns 0 or a negative errno; -EOPNOTSUPP means no device handled the
 * object.
 */
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* @dev is a switchdev port itself: deliver directly. */
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Skip lower interfaces that are themselves bridge masters. */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	/* Find a switchdev interface under the same bridge as @dev. */
	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	/* Only replay if that switchdev interface considers @dev foreign. */
	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	/* Replay the deletion on the bridge, which will replicate it towards
	 * the switchdev ports below it.
	 */
	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}
880 | ||
c4076cdd VO |
881 | /* Pass through a port object deletion, if @dev passes @check_cb, or replicate |
882 | * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a | |
883 | * bridge or a LAG. | |
884 | */ | |
f30f0601 PM |
885 | int switchdev_handle_port_obj_del(struct net_device *dev, |
886 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
887 | bool (*check_cb)(const struct net_device *dev), | |
69bfac96 | 888 | int (*del_cb)(struct net_device *dev, const void *ctx, |
f30f0601 PM |
889 | const struct switchdev_obj *obj)) |
890 | { | |
891 | int err; | |
892 | ||
893 | err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb, | |
c4076cdd | 894 | NULL, del_cb); |
f30f0601 PM |
895 | if (err == -EOPNOTSUPP) |
896 | err = 0; | |
897 | return err; | |
898 | } | |
899 | EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del); | |
1cb33af1 | 900 | |
c4076cdd VO |
901 | /* Same as switchdev_handle_port_obj_del(), except if object is notified on a |
902 | * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices | |
903 | * that pass @check_cb and are in the same bridge as @dev. | |
904 | */ | |
905 | int switchdev_handle_port_obj_del_foreign(struct net_device *dev, | |
906 | struct switchdev_notifier_port_obj_info *port_obj_info, | |
907 | bool (*check_cb)(const struct net_device *dev), | |
908 | bool (*foreign_dev_check_cb)(const struct net_device *dev, | |
909 | const struct net_device *foreign_dev), | |
910 | int (*del_cb)(struct net_device *dev, const void *ctx, | |
911 | const struct switchdev_obj *obj)) | |
912 | { | |
913 | int err; | |
914 | ||
915 | err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb, | |
916 | foreign_dev_check_cb, del_cb); | |
917 | if (err == -EOPNOTSUPP) | |
918 | err = 0; | |
919 | return err; | |
920 | } | |
921 | EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign); | |
922 | ||
1cb33af1 FF |
/* Recursive worker for switchdev_handle_port_attr_set(): deliver the
 * attribute in @port_attr_info to @dev via @set_cb if @dev passes @check_cb,
 * otherwise replicate it towards @dev's lower interfaces (e.g. LAG members),
 * skipping lower interfaces that are bridge masters.
 *
 * Returns 0 or a negative errno; -EOPNOTSUPP means no device handled the
 * attribute.
 */
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	/* Extract the extack carried inside the notifier info, so the driver
	 * callback can report detailed errors.
	 */
	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		/* @dev is a switchdev port itself: deliver directly, and mark
		 * the event handled unless the driver returned -EOPNOTSUPP.
		 */
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Skip lower interfaces that are themselves bridge masters. */
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
964 | ||
965 | int switchdev_handle_port_attr_set(struct net_device *dev, | |
966 | struct switchdev_notifier_port_attr_info *port_attr_info, | |
967 | bool (*check_cb)(const struct net_device *dev), | |
69bfac96 | 968 | int (*set_cb)(struct net_device *dev, const void *ctx, |
4c08c586 VO |
969 | const struct switchdev_attr *attr, |
970 | struct netlink_ext_ack *extack)) | |
1cb33af1 FF |
971 | { |
972 | int err; | |
973 | ||
974 | err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb, | |
975 | set_cb); | |
976 | if (err == -EOPNOTSUPP) | |
977 | err = 0; | |
978 | return err; | |
979 | } | |
980 | EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set); | |
957e2235 VO |
981 | |
982 | int switchdev_bridge_port_offload(struct net_device *brport_dev, | |
983 | struct net_device *dev, const void *ctx, | |
984 | struct notifier_block *atomic_nb, | |
985 | struct notifier_block *blocking_nb, | |
986 | bool tx_fwd_offload, | |
987 | struct netlink_ext_ack *extack) | |
988 | { | |
989 | struct switchdev_notifier_brport_info brport_info = { | |
990 | .brport = { | |
991 | .dev = dev, | |
992 | .ctx = ctx, | |
993 | .atomic_nb = atomic_nb, | |
994 | .blocking_nb = blocking_nb, | |
995 | .tx_fwd_offload = tx_fwd_offload, | |
996 | }, | |
997 | }; | |
998 | int err; | |
999 | ||
1000 | ASSERT_RTNL(); | |
1001 | ||
1002 | err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED, | |
1003 | brport_dev, &brport_info.info, | |
1004 | extack); | |
1005 | return notifier_to_errno(err); | |
1006 | } | |
1007 | EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload); | |
1008 | ||
1009 | void switchdev_bridge_port_unoffload(struct net_device *brport_dev, | |
1010 | const void *ctx, | |
1011 | struct notifier_block *atomic_nb, | |
1012 | struct notifier_block *blocking_nb) | |
1013 | { | |
1014 | struct switchdev_notifier_brport_info brport_info = { | |
1015 | .brport = { | |
1016 | .ctx = ctx, | |
1017 | .atomic_nb = atomic_nb, | |
1018 | .blocking_nb = blocking_nb, | |
1019 | }, | |
1020 | }; | |
1021 | ||
1022 | ASSERT_RTNL(); | |
1023 | ||
1024 | call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED, | |
1025 | brport_dev, &brport_info.info, | |
1026 | NULL); | |
1027 | } | |
1028 | EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload); | |
f2e2857b PM |
1029 | |
1030 | int switchdev_bridge_port_replay(struct net_device *brport_dev, | |
1031 | struct net_device *dev, const void *ctx, | |
1032 | struct notifier_block *atomic_nb, | |
1033 | struct notifier_block *blocking_nb, | |
1034 | struct netlink_ext_ack *extack) | |
1035 | { | |
1036 | struct switchdev_notifier_brport_info brport_info = { | |
1037 | .brport = { | |
1038 | .dev = dev, | |
1039 | .ctx = ctx, | |
1040 | .atomic_nb = atomic_nb, | |
1041 | .blocking_nb = blocking_nb, | |
1042 | }, | |
1043 | }; | |
1044 | int err; | |
1045 | ||
1046 | ASSERT_RTNL(); | |
1047 | ||
1048 | err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY, | |
1049 | brport_dev, &brport_info.info, | |
1050 | extack); | |
1051 | return notifier_to_errno(err); | |
1052 | } | |
1053 | EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay); |