/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

static bool dsa_slave_dev_check(struct net_device *dev);

/* slave mii_bus handling ***************************************************/
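/* Reads and writes are forwarded to the switch driver only for the PHY
 * addresses set in ds->phys_mii_mask; any other address reads back as
 * 0xffff (no device).  A hedged sketch of the driver side, with purely
 * hypothetical names, might look like:
 *
 *	static int foo_phy_read(struct dsa_switch *ds, int addr, int reg)
 *	{
 *		return foo_hw_mdio_read(ds->priv, addr, reg);
 *	}
 *
 * hooked up through dsa_switch_ops::phy_read.
 */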
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->tree, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	return dsa_master_netdev(p)->ifindex;
}

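/* Bring up the slave interface: the master interface must already be up.
 * Unless the slave inherited the master's MAC address, program it as a
 * secondary unicast address on the master so frames for this port are not
 * filtered out, then mirror the allmulti/promisc flags and enable the port
 * in the switch.
 */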
static int dsa_slave_open(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	if (dev->phydev)
		phy_start(dev->phydev);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dsa_port_disable(dp, dev->phydev);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

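/* Context carried across dsa_slave_port_fdb_do_dump() calls while walking the
 * port's FDB on behalf of a netlink neighbour dump; idx counts emitted entries
 * so an interrupted dump can be resumed.
 */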
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

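/* switchdev attribute handlers: called through dsa_slave_switchdev_ops when
 * e.g. the bridge layer changes this port's STP state, VLAN filtering mode or
 * FDB ageing time.
 */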
static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
					trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_port_attr_get(struct net_device *dev,
				   struct switchdev_attr *attr)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(ds->index);
		memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
		attr->u.brport_flags_support = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}

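/* p->xmit points at the tagging protocol's xmit op (taken from the CPU port's
 * tag_ops at slave creation time), which typically reallocates the SKB to
 * insert the switch-specific tag before it is queued on the master interface.
 */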
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, nskb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	nskb->dev = dsa_master_netdev(p);
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static u32 dsa_slave_get_link(struct net_device *dev)
{
	if (!dev->phydev)
		return -ENODEV;

	genphy_update_link(dev->phydev);

	return dev->phydev->link;
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, data + 4 * len);
	}
}

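/* The first four u64 slots are the software counters declared above in
 * dsa_slave_get_strings(); per-port hardware counters from the switch driver
 * are appended starting at data[4].
 */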
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds);

		return count;
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	if (e->eee_enabled) {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret)
			return ret;
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phy_ethtool_get_eee(dev->phydev, e);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free_async(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

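/* Matchall offload is how port mirroring reaches the switch driver.  As a
 * rough usage sketch (interface names are illustrative), mirroring swp0's
 * ingress traffic to swp1 would be requested with something like:
 *
 *	tc qdisc add dev swp0 clsact
 *	tc filter add dev swp0 ingress matchall skip_sw \
 *		action mirred egress mirror dev swp1
 */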
static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct net *net = dev_net(dev);
	struct dsa_switch *ds = dp->ds;
	struct net_device *to_dev;
	const struct tc_action *a;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;
	LIST_HEAD(actions);
	int ifindex;

	if (!ds->ops->port_mirror_add)
		return err;

	if (!tcf_exts_has_one_action(cls->exts))
		return err;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		ifindex = tcf_mirred_ifindex(a);
		to_dev = __dev_get_by_index(net, ifindex);
		if (!to_dev)
			return -EINVAL;

		if (!dsa_slave_dev_check(to_dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(to_dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return 0;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls)
{
	bool ingress;

	if (is_classid_clsact_ingress(cls->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(cls->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;

	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo = dsa_slave_get_drvinfo,
	.get_regs_len = dsa_slave_get_regs_len,
	.get_regs = dsa_slave_get_regs,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = dsa_slave_get_link,
	.get_eeprom_len = dsa_slave_get_eeprom_len,
	.get_eeprom = dsa_slave_get_eeprom,
	.set_eeprom = dsa_slave_set_eeprom,
	.get_strings = dsa_slave_get_strings,
	.get_ethtool_stats = dsa_slave_get_ethtool_stats,
	.get_sset_count = dsa_slave_get_sset_count,
	.set_wol = dsa_slave_set_wol,
	.get_wol = dsa_slave_get_wol,
	.set_eee = dsa_slave_set_eee,
	.get_eee = dsa_slave_get_eee,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_rxnfc = dsa_slave_get_rxnfc,
	.set_rxnfc = dsa_slave_set_rxnfc,
};

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open = dsa_slave_open,
	.ndo_stop = dsa_slave_close,
	.ndo_start_xmit = dsa_slave_xmit,
	.ndo_change_rx_flags = dsa_slave_change_rx_flags,
	.ndo_set_rx_mode = dsa_slave_set_rx_mode,
	.ndo_set_mac_address = dsa_slave_set_mac_address,
	.ndo_fdb_add = dsa_legacy_fdb_add,
	.ndo_fdb_del = dsa_legacy_fdb_del,
	.ndo_fdb_dump = dsa_slave_fdb_dump,
	.ndo_do_ioctl = dsa_slave_ioctl,
	.ndo_get_iflink = dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
	.ndo_poll_controller = dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
	.ndo_setup_tc = dsa_slave_setup_tc,
	.ndo_get_stats64 = dsa_slave_get_stats64,
};

static const struct switchdev_ops dsa_slave_switchdev_ops = {
	.switchdev_port_attr_get = dsa_slave_port_attr_get,
	.switchdev_port_attr_set = dsa_slave_port_attr_set,
	.switchdev_port_obj_add = dsa_slave_port_obj_add,
	.switchdev_port_obj_del = dsa_slave_port_obj_del,
};

static struct device_type dsa_type = {
	.name = "dsa",
};

static void dsa_slave_adjust_link(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	unsigned int status_changed = 0;

	if (p->old_link != dev->phydev->link) {
		status_changed = 1;
		p->old_link = dev->phydev->link;
	}

	if (p->old_duplex != dev->phydev->duplex) {
		status_changed = 1;
		p->old_duplex = dev->phydev->duplex;
	}

	if (p->old_pause != dev->phydev->pause) {
		status_changed = 1;
		p->old_pause = dev->phydev->pause;
	}

	if (ds->ops->adjust_link && status_changed)
		ds->ops->adjust_link(ds, dp->index, dev->phydev);

	if (status_changed)
		phy_print_status(dev->phydev);
}

static int dsa_slave_fixed_link_update(struct net_device *dev,
				       struct fixed_phy_status *status)
{
	struct dsa_switch *ds;
	struct dsa_port *dp;

	if (dev) {
		dp = dsa_slave_to_port(dev);
		ds = dp->ds;
		if (ds->ops->fixed_link_update)
			ds->ops->fixed_link_update(ds, dp->index, status);
	}

	return 0;
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	/* Use already configured phy mode */
	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
		p->phy_interface = slave_dev->phydev->interface;

	return phy_connect_direct(slave_dev, slave_dev->phydev,
				  dsa_slave_adjust_link, p->phy_interface);
}

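/* Bind the slave to its PHY: a "phy-handle" PHY if present (diverted through
 * the DSA slave MII bus when its address is in phys_mii_mask), otherwise a
 * fixed-link described in the port's DT node, and as a last resort the slave
 * MII bus at the port index.  A typical (purely illustrative) port node might
 * look like:
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *		phy-handle = <&phy0>;
 *	};
 *
 * or carry a fixed-link { speed = <1000>; full-duplex; }; child instead of
 * the phy-handle.
 */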
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_dn;
	bool phy_is_fixed = false;
	u32 phy_flags = 0;
	int mode, ret;

	mode = of_get_phy_mode(port_dn);
	if (mode < 0)
		mode = PHY_INTERFACE_MODE_NA;
	p->phy_interface = mode;

	phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
	if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
		/* In the case of a fixed PHY, the DT node associated
		 * with the fixed PHY is the port DT node
		 */
		ret = of_phy_register_fixed_link(port_dn);
		if (ret) {
			netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret);
			return ret;
		}
		phy_is_fixed = true;
		phy_dn = of_node_get(port_dn);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	if (phy_dn) {
		int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn);

		/* If this PHY address is part of phys_mii_mask, which means
		 * that we need to divert reads and writes to/from it, then we
		 * want to bind this device using the slave MII bus created by
		 * DSA to make that happen.
		 */
		if (!phy_is_fixed && phy_id >= 0 &&
		    (ds->phys_mii_mask & (1 << phy_id))) {
			ret = dsa_slave_phy_connect(slave_dev, phy_id);
			if (ret) {
				netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
				of_node_put(phy_dn);
				return ret;
			}
		} else {
			slave_dev->phydev = of_phy_connect(slave_dev, phy_dn,
							   dsa_slave_adjust_link,
							   phy_flags,
							   p->phy_interface);
		}

		of_node_put(phy_dn);
	}

	if (slave_dev->phydev && phy_is_fixed)
		fixed_phy_set_link_update(slave_dev->phydev,
					  dsa_slave_fixed_link_update);

	/* We could not connect to a designated PHY, so use the switch internal
	 * MDIO bus instead
	 */
	if (!slave_dev->phydev) {
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev, "failed to connect to port %d: %d\n",
				   dp->index, ret);
			if (phy_is_fixed)
				of_phy_deregister_fixed_link(port_dn);
			return ret;
		}
	}

	phy_attached_info(slave_dev->phydev);

	return 0;
}

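/* Slave transmission recurses into the master's xmit path, so give slave TX
 * queues their own lockdep class to avoid false positive recursive-locking
 * reports from the nested dev_queue_xmit().
 */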
static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_device_detach(slave_dev);

	if (slave_dev->phydev) {
		phy_stop(slave_dev->phydev);
		p->old_pause = -1;
		p->old_link = -1;
		p->old_duplex = -1;
		phy_suspend(slave_dev->phydev);
	}

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	netif_device_attach(slave_dev);

	if (slave_dev->phydev) {
		phy_resume(slave_dev->phydev);
		phy_start(slave_dev->phydev);
	}

	return 0;
}

static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = dsa_master_netdev(p);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}

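/* Create, set up and register the slave net_device backing @port, named
 * @name.  The device inherits its MAC address and vlan_features from the
 * master and is bound to a PHY before being exposed to userspace.
 */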
int dsa_slave_create(struct dsa_port *port, const char *name)
{
	struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->netdev;
	struct dsa_switch *ds = port->ds;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;

	p->old_pause = -1;
	p->old_link = -1;
	p->old_duplex = -1;

	port->netdev = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	return 0;

out_phy:
	phy_disconnect(slave_dev->phydev);
	if (of_phy_is_fixed_link(port->dn))
		of_phy_deregister_fixed_link(port->dn);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->netdev = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);
	struct device_node *port_dn = dp->dn;

	netif_carrier_off(slave_dev);
	if (slave_dev->phydev) {
		phy_disconnect(slave_dev->phydev);

		if (of_phy_is_fixed_link(port_dn))
			of_phy_deregister_fixed_link(port_dn);
	}
	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	unregister_netdev(slave_dev);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}

static bool dsa_slave_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	if (event == NETDEV_CHANGEUPPER)
		return dsa_slave_changeupper(dev, ptr);

	return NOTIFY_DONE;
}

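/* SWITCHDEV_FDB_ADD/DEL_TO_DEVICE notifications arrive in atomic context, but
 * programming the switch may sleep, so the FDB operation is deferred to this
 * work item and completed under rtnl_lock().
 */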
struct dsa_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work,
						      ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

int dsa_slave_register_notifier(void)
{
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	return 0;

err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	int err;

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}