// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

struct mlx5_bridge_switchdev_fdb_work {
        struct work_struct work;
        struct switchdev_notifier_fdb_info fdb_info;
        struct net_device *dev;
        struct mlx5_esw_bridge_offloads *br_offloads;
        bool add;
};

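/* Check that the representor netdevice belongs to the given eswitch instance. */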
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return esw == priv->mdev->priv.eswitch;
}

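/* Check that the representor and the eswitch are backed by the same physical
 * device by comparing their NIC system image GUIDs.
 */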
static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev, *esw_mdev;
        u64 system_guid, esw_system_guid;

        mdev = priv->mdev;
        esw_mdev = esw->dev;

        system_guid = mlx5_query_nic_system_image_guid(mdev);
        esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);

        return system_guid == esw_system_guid;
}

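/* Walk the lower devices of a LAG master and return the first representor
 * that belongs to the given eswitch and has shared FDB enabled.
 */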
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct net_device *lower;
        struct list_head *iter;

        netdev_for_each_lower_dev(dev, lower, iter) {
                struct mlx5_core_dev *mdev;
                struct mlx5e_priv *priv;

                if (!mlx5e_eswitch_rep(lower))
                        continue;

                priv = netdev_priv(lower);
                mdev = priv->mdev;
                if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
                        return lower;
        }

        return NULL;
}

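/* Resolve a netdevice (or its LAG master) to the underlying representor and
 * fill in its vport number and owning vhca id. Returns NULL when the device
 * is not a representor on the same hardware or bridge offloads are not
 * initialized on its eswitch.
 */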
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
                                          u16 *vport_num, u16 *esw_owner_vhca_id)
{
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *priv;

        if (netif_is_lag_master(dev))
                dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

        if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
                return NULL;

        priv = netdev_priv(dev);

        if (!priv->mdev->priv.eswitch->br_offloads)
                return NULL;

        rpriv = priv->ppriv;
        *vport_num = rpriv->rep->vport;
        *esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
        return dev;
}

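/* Recursively descend through lower devices (e.g. a VLAN device stacked on a
 * representor or LAG) until the underlying representor is found.
 */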
static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
                                                u16 *vport_num, u16 *esw_owner_vhca_id)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
                return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
                                                                 esw_owner_vhca_id);

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                struct net_device *rep;

                if (netif_is_bridge_master(lower_dev))
                        continue;

                rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
                                                                      esw_owner_vhca_id);
                if (rep)
                        return rep;
        }

        return NULL;
}

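/* A bridge port is local when its representor belongs to this eswitch; a LAG
 * master additionally has to be the shared-FDB LAG master device.
 */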
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
                                     struct mlx5_eswitch *esw)
{
        struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;

        if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
                return false;

        priv = netdev_priv(rep);
        mdev = priv->mdev;
        if (netif_is_lag_master(dev))
                return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
        return true;
}

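/* Handle NETDEV_CHANGEUPPER: (un)link the representor vport to/from the
 * bridge, either as a local port or as a peer port on the same hardware.
 */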
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    netdev_nb);
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev, *rep;
        struct mlx5_eswitch *esw = br_offloads->esw;
        u16 vport_num, esw_owner_vhca_id;
        struct netlink_ext_ack *extack;
        int err = 0;

        if (!netif_is_bridge_master(upper))
                return 0;

        rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
        if (!rep)
                return 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        if (mlx5_esw_bridge_is_local(dev, rep, esw))
                err = info->linking ?
                        mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
                                                   br_offloads, extack) :
                        mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
                                                     br_offloads, extack);
        else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
                err = info->linking ?
                        mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
                                                        br_offloads, extack) :
                        mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
                                                          br_offloads, extack);

        return err;
}

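/* Validate NETDEV_PRECHANGEUPPER for a LAG master being enslaved to a bridge:
 * require shared FDB on all representor lowers and ask the caller to retry
 * (-EAGAIN) while LAG is not yet active.
 */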
static int
mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev;
        struct net_device *lower;
        struct list_head *iter;

        if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
                return 0;

        netdev_for_each_lower_dev(dev, lower, iter) {
                struct mlx5_core_dev *mdev;
                struct mlx5e_priv *priv;

                if (!mlx5e_eswitch_rep(lower))
                        continue;

                priv = netdev_priv(lower);
                mdev = priv->mdev;
                if (!mlx5_lag_is_active(mdev))
                        return -EAGAIN;
                if (!mlx5_lag_is_shared_fdb(mdev))
                        return -EOPNOTSUPP;
        }

        return 0;
}

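/* Netdevice notifier: validate PRECHANGEUPPER and handle CHANGEUPPER events. */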
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
                                                unsigned long event, void *ptr)
{
        int err = 0;

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
                break;

        case NETDEV_CHANGEUPPER:
                err = mlx5_esw_bridge_port_changeupper(nb, ptr);
                break;
        }

        return notifier_from_errno(err);
}

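/* Offload port objects (VLANs and MDB entries) added on a representor port. */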
static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
                             struct switchdev_notifier_port_obj_info *port_obj_info,
                             struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
        const struct switchdev_obj *obj = port_obj_info->obj;
        const struct switchdev_obj_port_vlan *vlan;
        const struct switchdev_obj_port_mdb *mdb;
        u16 vport_num, esw_owner_vhca_id;
        int err;

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                return 0;

        port_obj_info->handled = true;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
                                                    vlan->flags, br_offloads, extack);
                break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
                err = mlx5_esw_bridge_port_mdb_add(dev, vport_num, esw_owner_vhca_id, mdb->addr,
                                                   mdb->vid, br_offloads, extack);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return err;
}

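/* Remove offloaded port objects (VLANs and MDB entries) from a representor port. */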
static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
                             struct switchdev_notifier_port_obj_info *port_obj_info,
                             struct mlx5_esw_bridge_offloads *br_offloads)
{
        const struct switchdev_obj *obj = port_obj_info->obj;
        const struct switchdev_obj_port_vlan *vlan;
        const struct switchdev_obj_port_mdb *mdb;
        u16 vport_num, esw_owner_vhca_id;

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                return 0;

        port_obj_info->handled = true;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
                break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
                mlx5_esw_bridge_port_mdb_del(dev, vport_num, esw_owner_vhca_id, mdb->addr, mdb->vid,
                                             br_offloads);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

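/* Offload bridge port attributes. Only the BR_LEARNING, BR_FLOOD and
 * BR_MCAST_FLOOD port flags are supported, alongside ageing time, VLAN
 * filtering, VLAN protocol and multicast enablement.
 */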
static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
                                  struct switchdev_notifier_port_attr_info *port_attr_info,
                                  struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
        const struct switchdev_attr *attr = port_attr_info->attr;
        u16 vport_num, esw_owner_vhca_id;
        int err = 0;

        if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                             &esw_owner_vhca_id))
                return 0;

        port_attr_info->handled = true;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
                if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
                        NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
                        err = -EINVAL;
                }
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
                                                      attr->u.ageing_time, br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
                                                         attr->u.vlan_filtering, br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
                err = mlx5_esw_bridge_vlan_proto_set(vport_num,
                                                     esw_owner_vhca_id,
                                                     attr->u.vlan_protocol,
                                                     br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
                err = mlx5_esw_bridge_mcast_set(vport_num, esw_owner_vhca_id,
                                                !attr->u.mc_disabled, br_offloads);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

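/* Blocking switchdev notifier: dispatch port object add/del and attribute set
 * events.
 */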
static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
                                          unsigned long event, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    nb_blk);
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        int err;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
                err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
                break;
        case SWITCHDEV_PORT_OBJ_DEL:
                err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
                break;
        case SWITCHDEV_PORT_ATTR_SET:
                err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
                break;
        default:
                err = 0;
        }

        return notifier_from_errno(err);
}

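/* Drop the references taken by mlx5_esw_bridge_init_switchdev_fdb_work(). */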
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
        dev_put(fdb_work->dev);
        kfree(fdb_work->fdb_info.addr);
        kfree(fdb_work);
}

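/* Process a deferred FDB add/remove under RTNL on the bridge offloads
 * workqueue.
 */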
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
        struct mlx5_bridge_switchdev_fdb_work *fdb_work =
                container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
        struct switchdev_notifier_fdb_info *fdb_info =
                &fdb_work->fdb_info;
        struct mlx5_esw_bridge_offloads *br_offloads =
                fdb_work->br_offloads;
        struct net_device *dev = fdb_work->dev;
        u16 vport_num, esw_owner_vhca_id;

        rtnl_lock();

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                goto out;

        if (fdb_work->add)
                mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                           fdb_info);
        else
                mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                           fdb_info);

out:
        rtnl_unlock();
        mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

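/* Allocate a deferred FDB work item in atomic (notifier) context, copying the
 * FDB info and taking a reference on the netdevice.
 */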
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
                                        struct switchdev_notifier_fdb_info *fdb_info,
                                        struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_bridge_switchdev_fdb_work *work;
        u8 *addr;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return ERR_PTR(-ENOMEM);

        INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
        memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

        addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
        if (!addr) {
                kfree(work);
                return ERR_PTR(-ENOMEM);
        }
        ether_addr_copy(addr, fdb_info->addr);
        work->fdb_info.addr = addr;

        dev_hold(dev);
        work->dev = dev;
        work->br_offloads = br_offloads;
        work->add = add;
        return work;
}

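/* Atomic switchdev notifier: update entry usage on FDB_ADD_TO_BRIDGE, mark
 * peer entries deleted on FDB_DEL_TO_BRIDGE and defer FDB_{ADD,DEL}_TO_DEVICE
 * handling to the workqueue.
 */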
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
                                           unsigned long event, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    nb);
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct switchdev_notifier_fdb_info *fdb_info;
        struct mlx5_bridge_switchdev_fdb_work *work;
        struct mlx5_eswitch *esw = br_offloads->esw;
        struct switchdev_notifier_info *info = ptr;
        u16 vport_num, esw_owner_vhca_id;
        struct net_device *upper, *rep;

        if (event == SWITCHDEV_PORT_ATTR_SET) {
                int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

                return notifier_from_errno(err);
        }

        upper = netdev_master_upper_dev_get_rcu(dev);
        if (!upper)
                return NOTIFY_DONE;
        if (!netif_is_bridge_master(upper))
                return NOTIFY_DONE;

        rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
        if (!rep)
                return NOTIFY_DONE;

        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);
                mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                                fdb_info);
                break;
        case SWITCHDEV_FDB_DEL_TO_BRIDGE:
                /* only handle the event on peers */
                if (mlx5_esw_bridge_is_local(dev, rep, esw))
                        break;

                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);
                /* Mark for deletion to prevent the update wq task from
                 * spuriously refreshing the entry which would mark it again as
                 * offloaded in SW bridge. After this fallthrough to regular
                 * async delete code.
                 */
                mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                                 fdb_info);
                fallthrough;
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);

                work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
                                                               event == SWITCHDEV_FDB_ADD_TO_DEVICE,
                                                               fdb_info,
                                                               br_offloads);
                if (IS_ERR(work)) {
                        WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
                                  PTR_ERR(work));
                        return notifier_from_errno(PTR_ERR(work));
                }

                queue_work(br_offloads->wq, &work->work);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

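/* Refresh offloaded FDB entries under RTNL and re-arm itself every
 * MLX5_ESW_BRIDGE_UPDATE_INTERVAL milliseconds.
 */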
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    update_work.work);

        rtnl_lock();
        mlx5_esw_bridge_update(br_offloads);
        rtnl_unlock();

        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

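/* Initialize bridge offloads for the eswitch: allocate the ordered workqueue,
 * register the switchdev, blocking switchdev and netdevice notifiers, and
 * start the periodic update work.
 */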
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
        struct mlx5_esw_bridge_offloads *br_offloads;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int err;

        rtnl_lock();
        br_offloads = mlx5_esw_bridge_init(esw);
        rtnl_unlock();
        if (IS_ERR(br_offloads)) {
                esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
                return;
        }

        br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
        if (!br_offloads->wq) {
                esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
                goto err_alloc_wq;
        }

        br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
        err = register_switchdev_notifier(&br_offloads->nb);
        if (err) {
                esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
                goto err_register_swdev;
        }

        br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
        err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
        if (err) {
                esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
                goto err_register_swdev_blk;
        }

        br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
        err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
        if (err) {
                esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
                         err);
                goto err_register_netdev;
        }
        INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
        return;

err_register_netdev:
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
        unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
        destroy_workqueue(br_offloads->wq);
err_alloc_wq:
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        rtnl_unlock();
}

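/* Tear down bridge offloads in reverse order of mlx5e_rep_bridge_init(). */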
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5_esw_bridge_offloads *br_offloads;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        br_offloads = esw->br_offloads;
        if (!br_offloads)
                return;

        cancel_delayed_work_sync(&br_offloads->update_work);
        unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
        unregister_switchdev_notifier(&br_offloads->nb);
        destroy_workqueue(br_offloads->wq);
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        rtnl_unlock();
}