// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

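/**
 * dsa_port_notify - notify the DSA switch tree about a port event
 * @dp: port on which the event occurred
 * @e: DSA_NOTIFIER_* event identifier
 * @v: event-specific notifier info structure
 *
 * Run the event through the raw notifier chain of the switch tree that the
 * port belongs to, and convert the notifier return value to an errno.
 */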
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dp->ds->dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

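/**
 * dsa_port_set_state - set the STP state of a switch port
 * @dp: port to configure
 * @state: new bridge port state (BR_STATE_*)
 * @trans: switchdev transaction, or NULL when called outside of switchdev
 *
 * Program the STP state into the switch driver and fast-age the FDB when the
 * port stops forwarding or learning. During the switchdev prepare phase, only
 * check that the operation is supported.
 */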
int dsa_port_set_state(struct dsa_port *dp, u8 state,
		       struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (switchdev_trans_ph_prepare(trans))
		return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;

	if (ds->ops->port_stp_state_set)
		ds->ops->port_stp_state_set(ds, port, state);

	if (ds->ops->port_fast_age) {
		/* Fast-age FDB entries (flush the forwarding database) for
		 * this port if it is leaving the Learning or Forwarding state
		 * for the Disabled, Blocking or Listening state.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			ds->ops->port_fast_age(ds, port);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
{
	int err;

	err = dsa_port_set_state(dp, state, NULL);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

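/**
 * dsa_port_enable_rt - enable a switch port, rtnl_lock already held
 * @dp: port to enable
 * @phy: PHY device attached to the port, if any
 *
 * Enable the port in the switch driver, move a standalone (non-bridged) port
 * to the forwarding STP state and start its phylink instance. The caller must
 * hold rtnl_lock; dsa_port_enable() is the locking wrapper.
 */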
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

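/**
 * dsa_port_disable_rt - disable a switch port, rtnl_lock already held
 * @dp: port to disable
 *
 * Stop the port's phylink instance, move a standalone (non-bridged) port to
 * the disabled STP state and disable the port in the switch driver. The
 * caller must hold rtnl_lock; dsa_port_disable() is the locking wrapper.
 */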
void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

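/**
 * dsa_port_bridge_join - notify a switch port that it joined a bridge
 * @dp: port that was added to the bridge
 * @br: bridge net device being joined
 *
 * Enable unknown-unicast and multicast flooding on the port, record the
 * bridge in the port structure and notify the switch tree so that drivers
 * can program their chips. On error, the flags and the bridge pointer are
 * rolled back.
 */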
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Set the flooding mode before joining the port in the switch */
	err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD, NULL);
	if (err)
		return err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_JOIN, &info);

	/* The bridging is rolled back on error */
	if (err) {
		dsa_port_bridge_flags(dp, 0, NULL);
		dp->bridge_dev = NULL;
	}

	return err;
}

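/**
 * dsa_port_bridge_leave - notify a switch port that it left a bridge
 * @dp: port that was removed from the bridge
 * @br: bridge net device being left
 *
 * Clear the bridge pointer, notify the switch tree, disable flooding and put
 * the now standalone port back into the forwarding STP state so that it
 * stays usable.
 */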
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	/* Port is leaving the bridge, disable flooding */
	dsa_port_bridge_flags(dp, 0, NULL);

	/* The port has left the bridge and was put in BR_STATE_DISABLED by
	 * the bridge layer, so set it back to BR_STATE_FORWARDING to keep it
	 * functional as a standalone port.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}

static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering)
{
	struct dsa_switch *ds = dp->ds;
	int i;

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has the same
		 * vlan_filtering setting, so there is no need to check.
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			dev_err(ds->dev, "VLAN filtering is a global setting\n");
			return false;
		}
	}
	return true;
}

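/**
 * dsa_port_vlan_filtering - toggle VLAN filtering (VLAN awareness) on a port
 * @dp: port to configure
 * @vlan_filtering: true to make the port VLAN-aware, false otherwise
 * @trans: switchdev transaction
 *
 * When VLAN filtering is a global switch setting, refuse the change if
 * another bridge on the same switch requires a conflicting value. The
 * prepare phase is skipped because the bridge layer ignores -EOPNOTSUPP for
 * this attribute.
 */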
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	/* bridge skips -EOPNOTSUPP, so skip the prepare phase */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (!ds->ops->port_vlan_filtering)
		return 0;

	if (!dsa_port_can_apply_vlan_filtering(dp, vlan_filtering))
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index,
					   vlan_filtering);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;
	return 0;
}

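/**
 * dsa_port_ageing_time - set the FDB ageing time for the switch tree
 * @dp: port on which the attribute was set
 * @ageing_clock: ageing time requested by the bridge, in clock_t units
 * @trans: switchdev transaction
 *
 * Convert the bridge's ageing time to milliseconds and propagate it through
 * the switch tree notifier chain, in both the prepare and the commit phase.
 */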
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
			 struct switchdev_trans *trans)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info = {
		.ageing_time = ageing_time,
		.trans = trans,
	};

	if (switchdev_trans_ph_prepare(trans))
		return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);

	dp->ageing_time = ageing_time;

	return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
			      struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_egress_floods ||
	    (flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
		return -EINVAL;

	return 0;
}

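/**
 * dsa_port_bridge_flags - apply bridge port flags to a switch port
 * @dp: port to configure
 * @flags: bridge port flags (BR_FLOOD, BR_MCAST_FLOOD)
 * @trans: switchdev transaction
 *
 * Program unknown-unicast and multicast egress flooding through the driver's
 * port_egress_floods operation. Validation of unsupported flags is done
 * separately by dsa_port_pre_bridge_flags().
 */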
int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
			  struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (ds->ops->port_egress_floods)
		err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
						  flags & BR_MCAST_FLOOD);

	return err;
}

int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
		     struct switchdev_trans *trans)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (switchdev_trans_ph_prepare(trans))
		return ds->ops->port_egress_floods ? 0 : -EOPNOTSUPP;

	return ds->ops->port_egress_floods(ds, port, true, mrouter);
}

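/**
 * dsa_port_mtu_change - notify the switch tree of a port MTU change
 * @dp: port whose MTU changed
 * @new_mtu: new MTU value
 * @propagate_upstream: whether the change should also be propagated upstream
 *
 * Send a DSA_NOTIFIER_MTU event so that the driver can reconfigure the port
 * for the new MTU.
 */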
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool propagate_upstream)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.propagate_upstream = propagate_upstream,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb,
		     struct switchdev_trans *trans)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.trans = trans,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct switchdev_trans *trans)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.trans = trans,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

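/**
 * dsa_port_vid_add - install a single VLAN ID on a port
 * @dp: port to configure
 * @vid: VLAN ID to install
 * @flags: bridge VLAN flags
 *
 * Build a one-entry switchdev VLAN object and run it through both phases of
 * the switchdev transaction model: a prepare pass that may fail, followed by
 * the commit pass that programs the hardware.
 */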
int dsa_port_vid_add(struct dsa_port *dp, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct switchdev_trans trans;
	int err;

	trans.ph_prepare = true;
	err = dsa_port_vlan_add(dp, &vlan, &trans);
	if (err)
		return err;

	trans.ph_prepare = false;
	return dsa_port_vlan_add(dp, &vlan, &trans);
}
EXPORT_SYMBOL(dsa_port_vid_add);

int dsa_port_vid_del(struct dsa_port *dp, u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};

	return dsa_port_vlan_del(dp, &vlan);
}
EXPORT_SYMBOL(dsa_port_vid_del);

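/* Look up the PHY device referenced by the port's "phy-handle" OF property.
 * Returns NULL when no handle is present, ERR_PTR(-EPROBE_DEFER) when the
 * PHY has not been probed yet, and a referenced phy_device otherwise (the
 * caller must drop the reference with put_device()).
 */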
static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

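/* phylink MAC operations shared by all DSA ports. Each callback dispatches
 * to the corresponding dsa_switch_ops hook when the driver provides one; the
 * link_up/link_down callbacks fall back to the legacy adjust_link hook for
 * drivers that have not been converted to phylink yet.
 */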
static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
						struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "%s port's phy: %s", enable ? "enabled" : "disabled",
		phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

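/**
 * dsa_port_link_register_of - set up link management for a port described
 *			       in the device tree
 * @dp: port to set up
 *
 * For drivers without the legacy adjust_link callback, create a phylink
 * instance when the port has a fixed link or a "phy-handle" property, taking
 * the MAC link down first. Drivers that still use adjust_link get the old
 * PHYLIB-based fixed-link or PHY setup and a warning asking them to migrate
 * to PHYLINK.
 */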
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

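/**
 * dsa_port_link_unregister_of - tear down link management for a port
 * @dp: port to tear down
 *
 * Undo dsa_port_link_register_of(): disconnect and destroy the phylink
 * instance when one was created, otherwise deregister the fixed link or
 * suspend the PHY that was set up through the legacy PHYLIB path.
 */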
void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);