1 /*
2 * B53 switch driver main logic
3 *
4 * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5 * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/delay.h>
23 #include <linux/export.h>
24 #include <linux/gpio.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/platform_data/b53.h>
28 #include <linux/phy.h>
29 #include <linux/phylink.h>
30 #include <linux/etherdevice.h>
31 #include <linux/if_bridge.h>
32 #include <net/dsa.h>
33
34 #include "b53_regs.h"
35 #include "b53_priv.h"
36
37 struct b53_mib_desc {
38 u8 size;
39 u8 offset;
40 const char *name;
41 };
42
43 /* BCM5365 MIB counters */
44 static const struct b53_mib_desc b53_mibs_65[] = {
45 { 8, 0x00, "TxOctets" },
46 { 4, 0x08, "TxDropPkts" },
47 { 4, 0x10, "TxBroadcastPkts" },
48 { 4, 0x14, "TxMulticastPkts" },
49 { 4, 0x18, "TxUnicastPkts" },
50 { 4, 0x1c, "TxCollisions" },
51 { 4, 0x20, "TxSingleCollision" },
52 { 4, 0x24, "TxMultipleCollision" },
53 { 4, 0x28, "TxDeferredTransmit" },
54 { 4, 0x2c, "TxLateCollision" },
55 { 4, 0x30, "TxExcessiveCollision" },
56 { 4, 0x38, "TxPausePkts" },
57 { 8, 0x44, "RxOctets" },
58 { 4, 0x4c, "RxUndersizePkts" },
59 { 4, 0x50, "RxPausePkts" },
60 { 4, 0x54, "Pkts64Octets" },
61 { 4, 0x58, "Pkts65to127Octets" },
62 { 4, 0x5c, "Pkts128to255Octets" },
63 { 4, 0x60, "Pkts256to511Octets" },
64 { 4, 0x64, "Pkts512to1023Octets" },
65 { 4, 0x68, "Pkts1024to1522Octets" },
66 { 4, 0x6c, "RxOversizePkts" },
67 { 4, 0x70, "RxJabbers" },
68 { 4, 0x74, "RxAlignmentErrors" },
69 { 4, 0x78, "RxFCSErrors" },
70 { 8, 0x7c, "RxGoodOctets" },
71 { 4, 0x84, "RxDropPkts" },
72 { 4, 0x88, "RxUnicastPkts" },
73 { 4, 0x8c, "RxMulticastPkts" },
74 { 4, 0x90, "RxBroadcastPkts" },
75 { 4, 0x94, "RxSAChanges" },
76 { 4, 0x98, "RxFragments" },
77 };
78
79 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
80
81 /* BCM63xx MIB counters */
82 static const struct b53_mib_desc b53_mibs_63xx[] = {
83 { 8, 0x00, "TxOctets" },
84 { 4, 0x08, "TxDropPkts" },
85 { 4, 0x0c, "TxQoSPkts" },
86 { 4, 0x10, "TxBroadcastPkts" },
87 { 4, 0x14, "TxMulticastPkts" },
88 { 4, 0x18, "TxUnicastPkts" },
89 { 4, 0x1c, "TxCollisions" },
90 { 4, 0x20, "TxSingleCollision" },
91 { 4, 0x24, "TxMultipleCollision" },
92 { 4, 0x28, "TxDeferredTransmit" },
93 { 4, 0x2c, "TxLateCollision" },
94 { 4, 0x30, "TxExcessiveCollision" },
95 { 4, 0x38, "TxPausePkts" },
96 { 8, 0x3c, "TxQoSOctets" },
97 { 8, 0x44, "RxOctets" },
98 { 4, 0x4c, "RxUndersizePkts" },
99 { 4, 0x50, "RxPausePkts" },
100 { 4, 0x54, "Pkts64Octets" },
101 { 4, 0x58, "Pkts65to127Octets" },
102 { 4, 0x5c, "Pkts128to255Octets" },
103 { 4, 0x60, "Pkts256to511Octets" },
104 { 4, 0x64, "Pkts512to1023Octets" },
105 { 4, 0x68, "Pkts1024to1522Octets" },
106 { 4, 0x6c, "RxOversizePkts" },
107 { 4, 0x70, "RxJabbers" },
108 { 4, 0x74, "RxAlignmentErrors" },
109 { 4, 0x78, "RxFCSErrors" },
110 { 8, 0x7c, "RxGoodOctets" },
111 { 4, 0x84, "RxDropPkts" },
112 { 4, 0x88, "RxUnicastPkts" },
113 { 4, 0x8c, "RxMulticastPkts" },
114 { 4, 0x90, "RxBroadcastPkts" },
115 { 4, 0x94, "RxSAChanges" },
116 { 4, 0x98, "RxFragments" },
117 { 4, 0xa0, "RxSymbolErrors" },
118 { 4, 0xa4, "RxQoSPkts" },
119 { 8, 0xa8, "RxQoSOctets" },
120 { 4, 0xb0, "Pkts1523to2047Octets" },
121 { 4, 0xb4, "Pkts2048to4095Octets" },
122 { 4, 0xb8, "Pkts4096to8191Octets" },
123 { 4, 0xbc, "Pkts8192to9728Octets" },
124 { 4, 0xc0, "RxDiscarded" },
125 };
126
127 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
128
129 /* MIB counters */
130 static const struct b53_mib_desc b53_mibs[] = {
131 { 8, 0x00, "TxOctets" },
132 { 4, 0x08, "TxDropPkts" },
133 { 4, 0x10, "TxBroadcastPkts" },
134 { 4, 0x14, "TxMulticastPkts" },
135 { 4, 0x18, "TxUnicastPkts" },
136 { 4, 0x1c, "TxCollisions" },
137 { 4, 0x20, "TxSingleCollision" },
138 { 4, 0x24, "TxMultipleCollision" },
139 { 4, 0x28, "TxDeferredTransmit" },
140 { 4, 0x2c, "TxLateCollision" },
141 { 4, 0x30, "TxExcessiveCollision" },
142 { 4, 0x38, "TxPausePkts" },
143 { 8, 0x50, "RxOctets" },
144 { 4, 0x58, "RxUndersizePkts" },
145 { 4, 0x5c, "RxPausePkts" },
146 { 4, 0x60, "Pkts64Octets" },
147 { 4, 0x64, "Pkts65to127Octets" },
148 { 4, 0x68, "Pkts128to255Octets" },
149 { 4, 0x6c, "Pkts256to511Octets" },
150 { 4, 0x70, "Pkts512to1023Octets" },
151 { 4, 0x74, "Pkts1024to1522Octets" },
152 { 4, 0x78, "RxOversizePkts" },
153 { 4, 0x7c, "RxJabbers" },
154 { 4, 0x80, "RxAlignmentErrors" },
155 { 4, 0x84, "RxFCSErrors" },
156 { 8, 0x88, "RxGoodOctets" },
157 { 4, 0x90, "RxDropPkts" },
158 { 4, 0x94, "RxUnicastPkts" },
159 { 4, 0x98, "RxMulticastPkts" },
160 { 4, 0x9c, "RxBroadcastPkts" },
161 { 4, 0xa0, "RxSAChanges" },
162 { 4, 0xa4, "RxFragments" },
163 { 4, 0xa8, "RxJumboPkts" },
164 { 4, 0xac, "RxSymbolErrors" },
165 { 4, 0xc0, "RxDiscarded" },
166 };
167
168 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
169
170 static const struct b53_mib_desc b53_mibs_58xx[] = {
171 { 8, 0x00, "TxOctets" },
172 { 4, 0x08, "TxDropPkts" },
173 { 4, 0x0c, "TxQPKTQ0" },
174 { 4, 0x10, "TxBroadcastPkts" },
175 { 4, 0x14, "TxMulticastPkts" },
176 { 4, 0x18, "TxUnicastPKts" },
177 { 4, 0x1c, "TxCollisions" },
178 { 4, 0x20, "TxSingleCollision" },
179 { 4, 0x24, "TxMultipleCollision" },
180 { 4, 0x28, "TxDeferredCollision" },
181 { 4, 0x2c, "TxLateCollision" },
182 { 4, 0x30, "TxExcessiveCollision" },
183 { 4, 0x34, "TxFrameInDisc" },
184 { 4, 0x38, "TxPausePkts" },
185 { 4, 0x3c, "TxQPKTQ1" },
186 { 4, 0x40, "TxQPKTQ2" },
187 { 4, 0x44, "TxQPKTQ3" },
188 { 4, 0x48, "TxQPKTQ4" },
189 { 4, 0x4c, "TxQPKTQ5" },
190 { 8, 0x50, "RxOctets" },
191 { 4, 0x58, "RxUndersizePkts" },
192 { 4, 0x5c, "RxPausePkts" },
193 { 4, 0x60, "RxPkts64Octets" },
194 { 4, 0x64, "RxPkts65to127Octets" },
195 { 4, 0x68, "RxPkts128to255Octets" },
196 { 4, 0x6c, "RxPkts256to511Octets" },
197 { 4, 0x70, "RxPkts512to1023Octets" },
198 { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
199 { 4, 0x78, "RxOversizePkts" },
200 { 4, 0x7c, "RxJabbers" },
201 { 4, 0x80, "RxAlignmentErrors" },
202 { 4, 0x84, "RxFCSErrors" },
203 { 8, 0x88, "RxGoodOctets" },
204 { 4, 0x90, "RxDropPkts" },
205 { 4, 0x94, "RxUnicastPkts" },
206 { 4, 0x98, "RxMulticastPkts" },
207 { 4, 0x9c, "RxBroadcastPkts" },
208 { 4, 0xa0, "RxSAChanges" },
209 { 4, 0xa4, "RxFragments" },
210 { 4, 0xa8, "RxJumboPkt" },
211 { 4, 0xac, "RxSymblErr" },
212 { 4, 0xb0, "InRangeErrCount" },
213 { 4, 0xb4, "OutRangeErrCount" },
214 { 4, 0xb8, "EEELpiEvent" },
215 { 4, 0xbc, "EEELpiDuration" },
216 { 4, 0xc0, "RxDiscard" },
217 { 4, 0xc8, "TxQPKTQ6" },
218 { 4, 0xcc, "TxQPKTQ7" },
219 { 4, 0xd0, "TxPkts64Octets" },
220 { 4, 0xd4, "TxPkts65to127Octets" },
221 { 4, 0xd8, "TxPkts128to255Octets" },
222 { 4, 0xdc, "TxPkts256to511Ocets" },
223 { 4, 0xe0, "TxPkts512to1023Ocets" },
224 { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
225 };
226
227 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
228
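/* Issue a VLAN table access (VTA) command and poll until the hardware
 * clears VTA_START_CMD; returns 0 on completion or -EIO on timeout.
 */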
229 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
230 {
231 unsigned int i;
232
233 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
234
235 for (i = 0; i < 10; i++) {
236 u8 vta;
237
238 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
239 if (!(vta & VTA_START_CMD))
240 return 0;
241
242 usleep_range(100, 200);
243 }
244
245 return -EIO;
246 }
247
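/* Program one VLAN table entry (member and untagged port maps) using the
 * chip-specific access method: dedicated registers on 5325/5365, the
 * VLAN table access (VTA) interface otherwise.
 */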
248 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
249 struct b53_vlan *vlan)
250 {
251 if (is5325(dev)) {
252 u32 entry = 0;
253
254 if (vlan->members) {
255 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
256 VA_UNTAG_S_25) | vlan->members;
257 if (dev->core_rev >= 3)
258 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
259 else
260 entry |= VA_VALID_25;
261 }
262
263 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
264 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
265 VTA_RW_STATE_WR | VTA_RW_OP_EN);
266 } else if (is5365(dev)) {
267 u16 entry = 0;
268
269 if (vlan->members)
270 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
271 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
272
273 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
274 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
275 VTA_RW_STATE_WR | VTA_RW_OP_EN);
276 } else {
277 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
278 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
279 (vlan->untag << VTE_UNTAG_S) | vlan->members);
280
281 b53_do_vlan_op(dev, VTA_CMD_WRITE);
282 }
283
284 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
285 vid, vlan->members, vlan->untag);
286 }
287
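/* Read one VLAN table entry back into @vlan, using the same chip-specific
 * access paths as b53_set_vlan_entry().
 */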
288 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
289 struct b53_vlan *vlan)
290 {
291 if (is5325(dev)) {
292 u32 entry = 0;
293
294 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
295 VTA_RW_STATE_RD | VTA_RW_OP_EN);
296 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
297
298 if (dev->core_rev >= 3)
299 vlan->valid = !!(entry & VA_VALID_25_R4);
300 else
301 vlan->valid = !!(entry & VA_VALID_25);
302 vlan->members = entry & VA_MEMBER_MASK;
303 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
304
305 } else if (is5365(dev)) {
306 u16 entry = 0;
307
308 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
309 VTA_RW_STATE_WR | VTA_RW_OP_EN);
310 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
311
312 vlan->valid = !!(entry & VA_VALID_65);
313 vlan->members = entry & VA_MEMBER_MASK;
314 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
315 } else {
316 u32 entry = 0;
317
318 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
319 b53_do_vlan_op(dev, VTA_CMD_READ);
320 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
321 vlan->members = entry & VTE_MEMBERS;
322 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
323 vlan->valid = true;
324 }
325 }
326
327 static void b53_set_forwarding(struct b53_device *dev, int enable)
328 {
329 u8 mgmt;
330
331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
332
333 if (enable)
334 mgmt |= SM_SW_FWD_EN;
335 else
336 mgmt &= ~SM_SW_FWD_EN;
337
338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
339
340 /* Include IMP port in dumb forwarding mode */
342 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
343 mgmt |= B53_MII_DUMB_FWDG_EN;
344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
345
346 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
347 * frames should be flooded or not.
348 */
349 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
350 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
351 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
352 }
353
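/* Enable or disable global 802.1Q operation and, when enabled, select
 * whether ingress VID violations and VLAN table misses are dropped
 * (filtering) or forwarded.
 */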
354 static void b53_enable_vlan(struct b53_device *dev, bool enable,
355 bool enable_filtering)
356 {
357 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
358
359 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
360 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
361 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
362
363 if (is5325(dev) || is5365(dev)) {
364 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
365 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
366 } else if (is63xx(dev)) {
367 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
368 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
369 } else {
370 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
371 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
372 }
373
374 if (enable) {
375 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
376 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
377 vc4 &= ~VC4_ING_VID_CHECK_MASK;
378 if (enable_filtering) {
379 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
380 vc5 |= VC5_DROP_VTABLE_MISS;
381 } else {
382 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
383 vc5 &= ~VC5_DROP_VTABLE_MISS;
384 }
385
386 if (is5325(dev))
387 vc0 &= ~VC0_RESERVED_1;
388
389 if (is5325(dev) || is5365(dev))
390 vc1 |= VC1_RX_MCST_TAG_EN;
391
392 } else {
393 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
394 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
395 vc4 &= ~VC4_ING_VID_CHECK_MASK;
396 vc5 &= ~VC5_DROP_VTABLE_MISS;
397
398 if (is5325(dev) || is5365(dev))
399 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
400 else
401 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
402
403 if (is5325(dev) || is5365(dev))
404 vc1 &= ~VC1_RX_MCST_TAG_EN;
405 }
406
407 if (!is5325(dev) && !is5365(dev))
408 vc5 &= ~VC5_VID_FFF_EN;
409
410 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
411 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
412
413 if (is5325(dev) || is5365(dev)) {
414 /* enable the high 8 bit vid check on 5325 */
415 if (is5325(dev) && enable)
416 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
417 VC3_HIGH_8BIT_EN);
418 else
419 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
420
421 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
422 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
423 } else if (is63xx(dev)) {
424 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
425 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
426 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
427 } else {
428 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
429 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
430 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
431 }
432
433 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
434
435 dev->vlan_enabled = enable;
436 }
437
438 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
439 {
440 u32 port_mask = 0;
441 u16 max_size = JMS_MIN_SIZE;
442
443 if (is5325(dev) || is5365(dev))
444 return -EINVAL;
445
446 if (enable) {
447 port_mask = dev->enabled_ports;
448 max_size = JMS_MAX_SIZE;
449 if (allow_10_100)
450 port_mask |= JPM_10_100_JUMBO_EN;
451 }
452
453 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
454 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
455 }
456
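/* Trigger a fast ageing operation for the scope selected by @mask
 * (per-port, per-VLAN or static entries), wait for FAST_AGE_DONE to
 * clear, then restore the default dynamic-only ageing behaviour.
 */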
457 static int b53_flush_arl(struct b53_device *dev, u8 mask)
458 {
459 unsigned int i;
460
461 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
462 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
463
464 for (i = 0; i < 10; i++) {
465 u8 fast_age_ctrl;
466
467 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
468 &fast_age_ctrl);
469
470 if (!(fast_age_ctrl & FAST_AGE_DONE))
471 goto out;
472
473 msleep(1);
474 }
475
476 return -ETIMEDOUT;
477 out:
478 /* Only age dynamic entries (default behavior) */
479 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
480 return 0;
481 }
482
483 static int b53_fast_age_port(struct b53_device *dev, int port)
484 {
485 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
486
487 return b53_flush_arl(dev, FAST_AGE_PORT);
488 }
489
490 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
491 {
492 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
493
494 return b53_flush_arl(dev, FAST_AGE_VLAN);
495 }
496
497 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
498 {
499 struct b53_device *dev = ds->priv;
500 unsigned int i;
501 u16 pvlan;
502
503 /* Enable the IMP port to be in the same VLAN as the other ports
504 * on a per-port basis such that we only have Port i and IMP in
505 * the same VLAN.
506 */
507 b53_for_each_port(dev, i) {
508 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
509 pvlan |= BIT(cpu_port);
510 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
511 }
512 }
513 EXPORT_SYMBOL(b53_imp_vlan_setup);
514
515 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
516 {
517 struct b53_device *dev = ds->priv;
518 unsigned int cpu_port;
519 int ret = 0;
520 u16 pvlan;
521
522 if (!dsa_is_user_port(ds, port))
523 return 0;
524
525 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
526
527 b53_br_egress_floods(ds, port, true, true);
528
529 if (dev->ops->irq_enable)
530 ret = dev->ops->irq_enable(dev, port);
531 if (ret)
532 return ret;
533
534 /* Clear the Rx and Tx disable bits and set to no spanning tree */
535 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
536
537 /* Set this port, and only this one, to be in the default VLAN;
538 * if it is a member of a bridge, restore the membership it had
539 * prior to bringing down this port.
540 */
541 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
542 pvlan &= ~0x1ff;
543 pvlan |= BIT(port);
544 pvlan |= dev->ports[port].vlan_ctl_mask;
545 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
546
547 b53_imp_vlan_setup(ds, cpu_port);
548
549 /* If EEE was enabled, restore it */
550 if (dev->ports[port].eee.eee_enabled)
551 b53_eee_enable_set(ds, port, true);
552
553 return 0;
554 }
555 EXPORT_SYMBOL(b53_enable_port);
556
557 void b53_disable_port(struct dsa_switch *ds, int port)
558 {
559 struct b53_device *dev = ds->priv;
560 u8 reg;
561
562 /* Disable Tx/Rx for the port */
563 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
564 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
565 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
566
567 if (dev->ops->irq_disable)
568 dev->ops->irq_disable(dev, port);
569 }
570 EXPORT_SYMBOL(b53_disable_port);
571
572 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
573 {
574 struct b53_device *dev = ds->priv;
575 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
576 u8 hdr_ctl, val;
577 u16 reg;
578
579 /* Resolve which bit controls the Broadcom tag */
580 switch (port) {
581 case 8:
582 val = BRCM_HDR_P8_EN;
583 break;
584 case 7:
585 val = BRCM_HDR_P7_EN;
586 break;
587 case 5:
588 val = BRCM_HDR_P5_EN;
589 break;
590 default:
591 val = 0;
592 break;
593 }
594
595 /* Enable management mode if tagging is requested */
596 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
597 if (tag_en)
598 hdr_ctl |= SM_SW_FWD_MODE;
599 else
600 hdr_ctl &= ~SM_SW_FWD_MODE;
601 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
602
603 /* Configure the appropriate IMP port */
604 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
605 if (port == 8)
606 hdr_ctl |= GC_FRM_MGMT_PORT_MII;
607 else if (port == 5)
608 hdr_ctl |= GC_FRM_MGMT_PORT_M;
609 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
610
611 /* Enable Broadcom tags for IMP port */
612 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
613 if (tag_en)
614 hdr_ctl |= val;
615 else
616 hdr_ctl &= ~val;
617 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
618
619 /* Registers below are only accessible on newer devices */
620 if (!is58xx(dev))
621 return;
622
623 /* Enable reception of Broadcom tags for CPU TX (switch RX) to
624 * allow us to tag outgoing frames.
625 */
626 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
627 if (tag_en)
628 reg &= ~BIT(port);
629 else
630 reg |= BIT(port);
631 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
632
633 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
634 * allow delivering frames to the per-port net_devices
635 */
636 b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
637 if (tag_en)
638 reg &= ~BIT(port);
639 else
640 reg |= BIT(port);
641 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
642 }
643 EXPORT_SYMBOL(b53_brcm_hdr_setup);
644
645 static void b53_enable_cpu_port(struct b53_device *dev, int port)
646 {
647 u8 port_ctrl;
648
649 /* BCM5325 CPU port is at 8 */
650 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
651 port = B53_CPU_PORT;
652
653 port_ctrl = PORT_CTRL_RX_BCST_EN |
654 PORT_CTRL_RX_MCST_EN |
655 PORT_CTRL_RX_UCST_EN;
656 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
657
658 b53_brcm_hdr_setup(dev->ds, port);
659
660 b53_br_egress_floods(dev->ds, port, true, true);
661 }
662
663 static void b53_enable_mib(struct b53_device *dev)
664 {
665 u8 gc;
666
667 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
668 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
669 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
670 }
671
672 static u16 b53_default_pvid(struct b53_device *dev)
673 {
674 if (is5325(dev) || is5365(dev))
675 return 1;
676 else
677 return 0;
678 }
679
680 int b53_configure_vlan(struct dsa_switch *ds)
681 {
682 struct b53_device *dev = ds->priv;
683 struct b53_vlan vl = { 0 };
684 struct b53_vlan *v;
685 int i, def_vid;
686 u16 vid;
687
688 def_vid = b53_default_pvid(dev);
689
690 /* clear all vlan entries */
691 if (is5325(dev) || is5365(dev)) {
692 for (i = def_vid; i < dev->num_vlans; i++)
693 b53_set_vlan_entry(dev, i, &vl);
694 } else {
695 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
696 }
697
698 b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
699
700 b53_for_each_port(dev, i)
701 b53_write16(dev, B53_VLAN_PAGE,
702 B53_VLAN_PORT_DEF_TAG(i), def_vid);
703
704 /* Upon the initial call we have not set up any VLANs, but upon
705 * system resume we need to restore all VLAN entries.
706 */
707 for (vid = def_vid; vid < dev->num_vlans; vid++) {
708 v = &dev->vlans[vid];
709
710 if (!v->members)
711 continue;
712
713 b53_set_vlan_entry(dev, vid, v);
714 b53_fast_age_vlan(dev, vid);
715 }
716
717 return 0;
718 }
719 EXPORT_SYMBOL(b53_configure_vlan);
720
721 static void b53_switch_reset_gpio(struct b53_device *dev)
722 {
723 int gpio = dev->reset_gpio;
724
725 if (gpio < 0)
726 return;
727
728 /* Reset sequence: RESET low (50ms) -> high (20ms) */
730 gpio_set_value(gpio, 0);
731 mdelay(50);
732
733 gpio_set_value(gpio, 1);
734 mdelay(20);
735
736 dev->current_page = 0xff;
737 }
738
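/* Perform a software reset of the switch: pulse the optional reset GPIO,
 * run the chip-specific soft-reset sequence, make sure frame forwarding
 * is enabled, re-enable the MIB counters and flush the ARL.
 */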
739 static int b53_switch_reset(struct b53_device *dev)
740 {
741 unsigned int timeout = 1000;
742 u8 mgmt, reg;
743
744 b53_switch_reset_gpio(dev);
745
746 if (is539x(dev)) {
747 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
748 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
749 }
750
751 /* This is specific to 58xx devices, do not use is58xx() which
752 * covers the larger Starfighter 2 family, including 7445/7278,
753 * which still use this driver as a library and need to perform
754 * the reset earlier.
755 */
756 if (dev->chip_id == BCM58XX_DEVICE_ID ||
757 dev->chip_id == BCM583XX_DEVICE_ID) {
758 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
759 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
760 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
761
762 do {
763 b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
764 if (!(reg & SW_RST))
765 break;
766
767 usleep_range(1000, 2000);
768 } while (timeout-- > 0);
769
770 if (timeout == 0)
771 return -ETIMEDOUT;
772 }
773
774 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
775
776 if (!(mgmt & SM_SW_FWD_EN)) {
777 mgmt &= ~SM_SW_FWD_MODE;
778 mgmt |= SM_SW_FWD_EN;
779
780 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
781 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
782
783 if (!(mgmt & SM_SW_FWD_EN)) {
784 dev_err(dev->dev, "Failed to enable switch!\n");
785 return -EINVAL;
786 }
787 }
788
789 b53_enable_mib(dev);
790
791 return b53_flush_arl(dev, FAST_AGE_STATIC);
792 }
793
794 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
795 {
796 struct b53_device *priv = ds->priv;
797 u16 value = 0;
798 int ret;
799
800 if (priv->ops->phy_read16)
801 ret = priv->ops->phy_read16(priv, addr, reg, &value);
802 else
803 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
804 reg * 2, &value);
805
806 return ret ? ret : value;
807 }
808
809 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
810 {
811 struct b53_device *priv = ds->priv;
812
813 if (priv->ops->phy_write16)
814 return priv->ops->phy_write16(priv, addr, reg, val);
815
816 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
817 }
818
819 static int b53_reset_switch(struct b53_device *priv)
820 {
821 /* reset vlans */
822 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
823 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
824
825 priv->serdes_lane = B53_INVALID_LANE;
826
827 return b53_switch_reset(priv);
828 }
829
830 static int b53_apply_config(struct b53_device *priv)
831 {
832 /* disable switching */
833 b53_set_forwarding(priv, 0);
834
835 b53_configure_vlan(priv->ds);
836
837 /* enable switching */
838 b53_set_forwarding(priv, 1);
839
840 return 0;
841 }
842
843 static void b53_reset_mib(struct b53_device *priv)
844 {
845 u8 gc;
846
847 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
848
849 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
850 msleep(1);
851 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
852 msleep(1);
853 }
854
855 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
856 {
857 if (is5365(dev))
858 return b53_mibs_65;
859 else if (is63xx(dev))
860 return b53_mibs_63xx;
861 else if (is58xx(dev))
862 return b53_mibs_58xx;
863 else
864 return b53_mibs;
865 }
866
867 static unsigned int b53_get_mib_size(struct b53_device *dev)
868 {
869 if (is5365(dev))
870 return B53_MIBS_65_SIZE;
871 else if (is63xx(dev))
872 return B53_MIBS_63XX_SIZE;
873 else if (is58xx(dev))
874 return B53_MIBS_58XX_SIZE;
875 else
876 return B53_MIBS_SIZE;
877 }
878
879 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
880 {
881 /* These ports typically do not have built-in PHYs */
882 switch (port) {
883 case B53_CPU_PORT_25:
884 case 7:
885 case B53_CPU_PORT:
886 return NULL;
887 }
888
889 return mdiobus_get_phy(ds->slave_mii_bus, port);
890 }
891
892 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
893 uint8_t *data)
894 {
895 struct b53_device *dev = ds->priv;
896 const struct b53_mib_desc *mibs = b53_get_mib(dev);
897 unsigned int mib_size = b53_get_mib_size(dev);
898 struct phy_device *phydev;
899 unsigned int i;
900
901 if (stringset == ETH_SS_STATS) {
902 for (i = 0; i < mib_size; i++)
903 strlcpy(data + i * ETH_GSTRING_LEN,
904 mibs[i].name, ETH_GSTRING_LEN);
905 } else if (stringset == ETH_SS_PHY_STATS) {
906 phydev = b53_get_phy_device(ds, port);
907 if (!phydev)
908 return;
909
910 phy_ethtool_get_strings(phydev, data);
911 }
912 }
913 EXPORT_SYMBOL(b53_get_strings);
914
915 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
916 {
917 struct b53_device *dev = ds->priv;
918 const struct b53_mib_desc *mibs = b53_get_mib(dev);
919 unsigned int mib_size = b53_get_mib_size(dev);
920 const struct b53_mib_desc *s;
921 unsigned int i;
922 u64 val = 0;
923
924 if (is5365(dev) && port == 5)
925 port = 8;
926
927 mutex_lock(&dev->stats_mutex);
928
929 for (i = 0; i < mib_size; i++) {
930 s = &mibs[i];
931
932 if (s->size == 8) {
933 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
934 } else {
935 u32 val32;
936
937 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
938 &val32);
939 val = val32;
940 }
941 data[i] = (u64)val;
942 }
943
944 mutex_unlock(&dev->stats_mutex);
945 }
946 EXPORT_SYMBOL(b53_get_ethtool_stats);
947
948 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
949 {
950 struct phy_device *phydev;
951
952 phydev = b53_get_phy_device(ds, port);
953 if (!phydev)
954 return;
955
956 phy_ethtool_get_stats(phydev, NULL, data);
957 }
958 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
959
960 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
961 {
962 struct b53_device *dev = ds->priv;
963 struct phy_device *phydev;
964
965 if (sset == ETH_SS_STATS) {
966 return b53_get_mib_size(dev);
967 } else if (sset == ETH_SS_PHY_STATS) {
968 phydev = b53_get_phy_device(ds, port);
969 if (!phydev)
970 return 0;
971
972 return phy_ethtool_get_sset_count(phydev);
973 }
974
975 return 0;
976 }
977 EXPORT_SYMBOL(b53_get_sset_count);
978
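/* DSA .setup callback: reset the switch, clear the MIB counters, apply
 * the base VLAN/forwarding configuration and leave only the CPU/IMP port
 * enabled; user ports are brought up later via .port_enable.
 */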
979 static int b53_setup(struct dsa_switch *ds)
980 {
981 struct b53_device *dev = ds->priv;
982 unsigned int port;
983 int ret;
984
985 ret = b53_reset_switch(dev);
986 if (ret) {
987 dev_err(ds->dev, "failed to reset switch\n");
988 return ret;
989 }
990
991 b53_reset_mib(dev);
992
993 ret = b53_apply_config(dev);
994 if (ret)
995 dev_err(ds->dev, "failed to apply configuration\n");
996
997 /* Configure IMP/CPU port, disable all other ports. Enabled
998 * ports will be configured with .port_enable
999 */
1000 for (port = 0; port < dev->num_ports; port++) {
1001 if (dsa_is_cpu_port(ds, port))
1002 b53_enable_cpu_port(dev, port);
1003 else
1004 b53_disable_port(ds, port);
1005 }
1006
1007 /* Let DSA handle the case where multiple bridges span the same switch
1008 * device and different VLAN awareness settings are requested, which
1009 * would break filtering semantics for any of the other bridge
1010 * devices (this is not supported by the hardware).
1011 */
1012 ds->vlan_filtering_is_global = true;
1013
1014 return ret;
1015 }
1016
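/* Force the link indication in the per-port override register:
 * B53_PORT_OVERRIDE_CTRL for the CPU port, B53_GMII_PORT_OVERRIDE_CTRL
 * for the other ports.
 */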
1017 static void b53_force_link(struct b53_device *dev, int port, int link)
1018 {
1019 u8 reg, val, off;
1020
1021 /* Override the port settings */
1022 if (port == dev->cpu_port) {
1023 off = B53_PORT_OVERRIDE_CTRL;
1024 val = PORT_OVERRIDE_EN;
1025 } else {
1026 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1027 val = GMII_PO_EN;
1028 }
1029
1030 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1031 reg |= val;
1032 if (link)
1033 reg |= PORT_OVERRIDE_LINK;
1034 else
1035 reg &= ~PORT_OVERRIDE_LINK;
1036 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1037 }
1038
1039 static void b53_force_port_config(struct b53_device *dev, int port,
1040 int speed, int duplex, int pause)
1041 {
1042 u8 reg, val, off;
1043
1044 /* Override the port settings */
1045 if (port == dev->cpu_port) {
1046 off = B53_PORT_OVERRIDE_CTRL;
1047 val = PORT_OVERRIDE_EN;
1048 } else {
1049 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1050 val = GMII_PO_EN;
1051 }
1052
1053 b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1054 reg |= val;
1055 if (duplex == DUPLEX_FULL)
1056 reg |= PORT_OVERRIDE_FULL_DUPLEX;
1057 else
1058 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1059
1060 switch (speed) {
1061 case 2000:
1062 reg |= PORT_OVERRIDE_SPEED_2000M;
1063 /* fallthrough */
1064 case SPEED_1000:
1065 reg |= PORT_OVERRIDE_SPEED_1000M;
1066 break;
1067 case SPEED_100:
1068 reg |= PORT_OVERRIDE_SPEED_100M;
1069 break;
1070 case SPEED_10:
1071 reg |= PORT_OVERRIDE_SPEED_10M;
1072 break;
1073 default:
1074 dev_err(dev->dev, "unknown speed: %d\n", speed);
1075 return;
1076 }
1077
1078 if (pause & MLO_PAUSE_RX)
1079 reg |= PORT_OVERRIDE_RX_FLOW;
1080 if (pause & MLO_PAUSE_TX)
1081 reg |= PORT_OVERRIDE_TX_FLOW;
1082
1083 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1084 }
1085
1086 static void b53_adjust_link(struct dsa_switch *ds, int port,
1087 struct phy_device *phydev)
1088 {
1089 struct b53_device *dev = ds->priv;
1090 struct ethtool_eee *p = &dev->ports[port].eee;
1091 u8 rgmii_ctrl = 0, reg = 0, off;
1092 int pause = 0;
1093
1094 if (!phy_is_pseudo_fixed_link(phydev))
1095 return;
1096
1097 /* Enable flow control on BCM5301x's CPU port */
1098 if (is5301x(dev) && port == dev->cpu_port)
1099 pause = MLO_PAUSE_TXRX_MASK;
1100
1101 if (phydev->pause) {
1102 if (phydev->asym_pause)
1103 pause |= MLO_PAUSE_TX;
1104 pause |= MLO_PAUSE_RX;
1105 }
1106
1107 b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
1108 b53_force_link(dev, port, phydev->link);
1109
1110 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
1111 if (port == 8)
1112 off = B53_RGMII_CTRL_IMP;
1113 else
1114 off = B53_RGMII_CTRL_P(port);
1115
1116 /* Configure the port RGMII clock delay with the DLL disabled and
1117 * tx_clk aligned timing (restoring the reset defaults).
1118 */
1119 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1120 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
1121 RGMII_CTRL_TIMING_SEL);
1122
1123 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay; make
1124 * sure that we enable the port TX clock internal delay to
1125 * account for this internal delay that is inserted, otherwise
1126 * the switch won't be able to receive correctly.
1127 *
1128 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
1129 * any delay on either transmission or reception, so the
1130 * BCM53125 must also be configured accordingly to account for
1131 * the lack of delay and introduce it itself.
1132 *
1133 * The BCM53125 switch has its RX clock and TX clock control
1134 * swapped, hence the reason why we modify the TX clock path in
1135 * the "RGMII" case.
1136 */
1137 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
1138 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1139 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
1140 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1141 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1142 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1143
1144 dev_info(ds->dev, "Configured port %d for %s\n", port,
1145 phy_modes(phydev->interface));
1146 }
1147
1148 /* configure MII port if necessary */
1149 if (is5325(dev)) {
1150 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1151 &reg);
1152
1153 /* reverse mii needs to be enabled */
1154 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1155 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1156 reg | PORT_OVERRIDE_RV_MII_25);
1157 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1158 &reg);
1159
1160 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1161 dev_err(ds->dev,
1162 "Failed to enable reverse MII mode\n");
1163 return;
1164 }
1165 }
1166 } else if (is5301x(dev)) {
1167 if (port != dev->cpu_port) {
1168 b53_force_port_config(dev, dev->cpu_port, 2000,
1169 DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
1170 b53_force_link(dev, dev->cpu_port, 1);
1171 }
1172 }
1173
1174 /* Re-negotiate EEE if it was enabled already */
1175 p->eee_enabled = b53_eee_init(ds, port, phydev);
1176 }
1177
1178 void b53_port_event(struct dsa_switch *ds, int port)
1179 {
1180 struct b53_device *dev = ds->priv;
1181 bool link;
1182 u16 sts;
1183
1184 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1185 link = !!(sts & BIT(port));
1186 dsa_port_phylink_mac_change(ds, port, link);
1187 }
1188 EXPORT_SYMBOL(b53_port_event);
1189
1190 void b53_phylink_validate(struct dsa_switch *ds, int port,
1191 unsigned long *supported,
1192 struct phylink_link_state *state)
1193 {
1194 struct b53_device *dev = ds->priv;
1195 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1196
1197 if (dev->ops->serdes_phylink_validate)
1198 dev->ops->serdes_phylink_validate(dev, port, mask, state);
1199
1200 /* Allow all the expected bits */
1201 phylink_set(mask, Autoneg);
1202 phylink_set_port_modes(mask);
1203 phylink_set(mask, Pause);
1204 phylink_set(mask, Asym_Pause);
1205
1206 /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
1207 * support Gigabit, including Half duplex.
1208 */
1209 if (state->interface != PHY_INTERFACE_MODE_MII &&
1210 state->interface != PHY_INTERFACE_MODE_REVMII &&
1211 !phy_interface_mode_is_8023z(state->interface) &&
1212 !(is5325(dev) || is5365(dev))) {
1213 phylink_set(mask, 1000baseT_Full);
1214 phylink_set(mask, 1000baseT_Half);
1215 }
1216
1217 if (!phy_interface_mode_is_8023z(state->interface)) {
1218 phylink_set(mask, 10baseT_Half);
1219 phylink_set(mask, 10baseT_Full);
1220 phylink_set(mask, 100baseT_Half);
1221 phylink_set(mask, 100baseT_Full);
1222 }
1223
1224 bitmap_and(supported, supported, mask,
1225 __ETHTOOL_LINK_MODE_MASK_NBITS);
1226 bitmap_and(state->advertising, state->advertising, mask,
1227 __ETHTOOL_LINK_MODE_MASK_NBITS);
1228
1229 phylink_helper_basex_speed(state);
1230 }
1231 EXPORT_SYMBOL(b53_phylink_validate);
1232
1233 int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1234 struct phylink_link_state *state)
1235 {
1236 struct b53_device *dev = ds->priv;
1237 int ret = -EOPNOTSUPP;
1238
1239 if ((phy_interface_mode_is_8023z(state->interface) ||
1240 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1241 dev->ops->serdes_link_state)
1242 ret = dev->ops->serdes_link_state(dev, port, state);
1243
1244 return ret;
1245 }
1246 EXPORT_SYMBOL(b53_phylink_mac_link_state);
1247
1248 void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1249 unsigned int mode,
1250 const struct phylink_link_state *state)
1251 {
1252 struct b53_device *dev = ds->priv;
1253
1254 if (mode == MLO_AN_PHY)
1255 return;
1256
1257 if (mode == MLO_AN_FIXED) {
1258 b53_force_port_config(dev, port, state->speed,
1259 state->duplex, state->pause);
1260 return;
1261 }
1262
1263 if ((phy_interface_mode_is_8023z(state->interface) ||
1264 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1265 dev->ops->serdes_config)
1266 dev->ops->serdes_config(dev, port, mode, state);
1267 }
1268 EXPORT_SYMBOL(b53_phylink_mac_config);
1269
1270 void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1271 {
1272 struct b53_device *dev = ds->priv;
1273
1274 if (dev->ops->serdes_an_restart)
1275 dev->ops->serdes_an_restart(dev, port);
1276 }
1277 EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1278
1279 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1280 unsigned int mode,
1281 phy_interface_t interface)
1282 {
1283 struct b53_device *dev = ds->priv;
1284
1285 if (mode == MLO_AN_PHY)
1286 return;
1287
1288 if (mode == MLO_AN_FIXED) {
1289 b53_force_link(dev, port, false);
1290 return;
1291 }
1292
1293 if (phy_interface_mode_is_8023z(interface) &&
1294 dev->ops->serdes_link_set)
1295 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1296 }
1297 EXPORT_SYMBOL(b53_phylink_mac_link_down);
1298
1299 void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1300 unsigned int mode,
1301 phy_interface_t interface,
1302 struct phy_device *phydev,
1303 int speed, int duplex,
1304 bool tx_pause, bool rx_pause)
1305 {
1306 struct b53_device *dev = ds->priv;
1307
1308 if (mode == MLO_AN_PHY)
1309 return;
1310
1311 if (mode == MLO_AN_FIXED) {
1312 b53_force_link(dev, port, true);
1313 return;
1314 }
1315
1316 if (phy_interface_mode_is_8023z(interface) &&
1317 dev->ops->serdes_link_set)
1318 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1319 }
1320 EXPORT_SYMBOL(b53_phylink_mac_link_up);
1321
1322 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1323 {
1324 struct b53_device *dev = ds->priv;
1325 u16 pvid, new_pvid;
1326
1327 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1328 new_pvid = pvid;
1329 if (!vlan_filtering) {
1330 /* Filtering is currently enabled, use the default PVID since
1331 * the bridge does not expect tagging anymore
1332 */
1333 dev->ports[port].pvid = pvid;
1334 new_pvid = b53_default_pvid(dev);
1335 } else {
1336 /* Filtering is currently disabled, restore the previous PVID */
1337 new_pvid = dev->ports[port].pvid;
1338 }
1339
1340 if (pvid != new_pvid)
1341 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1342 new_pvid);
1343
1344 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1345
1346 return 0;
1347 }
1348 EXPORT_SYMBOL(b53_vlan_filtering);
1349
1350 int b53_vlan_prepare(struct dsa_switch *ds, int port,
1351 const struct switchdev_obj_port_vlan *vlan)
1352 {
1353 struct b53_device *dev = ds->priv;
1354
1355 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
1356 return -EOPNOTSUPP;
1357
1358 /* Port 7 on 7278 connects to the ASP's UniMAC, which is not capable of
1359 * receiving VLAN tagged frames at all; we can still allow the port to
1360 * be configured for egress untagged.
1361 */
1362 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
1363 !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1364 return -EINVAL;
1365
1366 if (vlan->vid_end > dev->num_vlans)
1367 return -ERANGE;
1368
1369 b53_enable_vlan(dev, true, ds->vlan_filtering);
1370
1371 return 0;
1372 }
1373 EXPORT_SYMBOL(b53_vlan_prepare);
1374
1375 void b53_vlan_add(struct dsa_switch *ds, int port,
1376 const struct switchdev_obj_port_vlan *vlan)
1377 {
1378 struct b53_device *dev = ds->priv;
1379 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1380 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1381 struct b53_vlan *vl;
1382 u16 vid;
1383
1384 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1385 vl = &dev->vlans[vid];
1386
1387 b53_get_vlan_entry(dev, vid, vl);
1388
1389 if (vid == 0 && vid == b53_default_pvid(dev))
1390 untagged = true;
1391
1392 vl->members |= BIT(port);
1393 if (untagged && !dsa_is_cpu_port(ds, port))
1394 vl->untag |= BIT(port);
1395 else
1396 vl->untag &= ~BIT(port);
1397
1398 b53_set_vlan_entry(dev, vid, vl);
1399 b53_fast_age_vlan(dev, vid);
1400 }
1401
1402 if (pvid && !dsa_is_cpu_port(ds, port)) {
1403 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1404 vlan->vid_end);
1405 b53_fast_age_vlan(dev, vid);
1406 }
1407 }
1408 EXPORT_SYMBOL(b53_vlan_add);
1409
1410 int b53_vlan_del(struct dsa_switch *ds, int port,
1411 const struct switchdev_obj_port_vlan *vlan)
1412 {
1413 struct b53_device *dev = ds->priv;
1414 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1415 struct b53_vlan *vl;
1416 u16 vid;
1417 u16 pvid;
1418
1419 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1420
1421 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1422 vl = &dev->vlans[vid];
1423
1424 b53_get_vlan_entry(dev, vid, vl);
1425
1426 vl->members &= ~BIT(port);
1427
1428 if (pvid == vid)
1429 pvid = b53_default_pvid(dev);
1430
1431 if (untagged && !dsa_is_cpu_port(ds, port))
1432 vl->untag &= ~(BIT(port));
1433
1434 b53_set_vlan_entry(dev, vid, vl);
1435 b53_fast_age_vlan(dev, vid);
1436 }
1437
1438 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1439 b53_fast_age_vlan(dev, pvid);
1440
1441 return 0;
1442 }
1443 EXPORT_SYMBOL(b53_vlan_del);
1444
1445 /* Address Resolution Logic routines */
1446 static int b53_arl_op_wait(struct b53_device *dev)
1447 {
1448 unsigned int timeout = 10;
1449 u8 reg;
1450
1451 do {
1452 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1453 if (!(reg & ARLTBL_START_DONE))
1454 return 0;
1455
1456 usleep_range(1000, 2000);
1457 } while (timeout--);
1458
1459 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1460
1461 return -ETIMEDOUT;
1462 }
1463
1464 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1465 {
1466 u8 reg;
1467
1468 if (op > ARLTBL_RW)
1469 return -EINVAL;
1470
1471 b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1472 reg |= ARLTBL_START_DONE;
1473 if (op)
1474 reg |= ARLTBL_RW;
1475 else
1476 reg &= ~ARLTBL_RW;
1477 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1478
1479 return b53_arl_op_wait(dev);
1480 }
1481
1482 static int b53_arl_read(struct b53_device *dev, u64 mac,
1483 u16 vid, struct b53_arl_entry *ent, u8 *idx,
1484 bool is_valid)
1485 {
1486 unsigned int i;
1487 int ret;
1488
1489 ret = b53_arl_op_wait(dev);
1490 if (ret)
1491 return ret;
1492
1493 /* Read the bins */
1494 for (i = 0; i < dev->num_arl_entries; i++) {
1495 u64 mac_vid;
1496 u32 fwd_entry;
1497
1498 b53_read64(dev, B53_ARLIO_PAGE,
1499 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1500 b53_read32(dev, B53_ARLIO_PAGE,
1501 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1502 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1503
1504 if (!(fwd_entry & ARLTBL_VALID))
1505 continue;
1506 if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1507 continue;
1508 *idx = i;
return 0;
1509 }
1510
1511 return -ENOENT;
1512 }
1513
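/* Read-modify-write of a single ARL entry for @addr/@vid: look up the
 * MAC, reuse the matching bin if one is found (otherwise start from a
 * fresh entry), update port membership and validity, then commit the
 * result back to the table.
 */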
1514 static int b53_arl_op(struct b53_device *dev, int op, int port,
1515 const unsigned char *addr, u16 vid, bool is_valid)
1516 {
1517 struct b53_arl_entry ent;
1518 u32 fwd_entry;
1519 u64 mac, mac_vid = 0;
1520 u8 idx = 0;
1521 int ret;
1522
1523 /* Convert the array into a 64-bit MAC */
1524 mac = ether_addr_to_u64(addr);
1525
1526 /* Perform a read for the given MAC and VID */
1527 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1528 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1529
1530 /* Issue a read operation for this MAC */
1531 ret = b53_arl_rw_op(dev, 1);
1532 if (ret)
1533 return ret;
1534
1535 ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
1536 /* If this is a read, just finish now */
1537 if (op)
1538 return ret;
1539
1540 /* We could not find a matching MAC, so reset to a new entry */
1541 if (ret) {
1542 fwd_entry = 0;
1543 idx = 1;
1544 }
1545
1546 /* For a multicast address, the port is a bitmask and the entry
1547 * remains valid as long as at least one port is still active.
1548 */
1549 if (!is_multicast_ether_addr(addr)) {
1550 ent.port = port;
1551 ent.is_valid = is_valid;
1552 } else {
1553 if (is_valid)
1554 ent.port |= BIT(port);
1555 else
1556 ent.port &= ~BIT(port);
1557
1558 ent.is_valid = !!(ent.port);
1559 }
1560
1562 ent.vid = vid;
1563 ent.is_static = true;
1564 ent.is_age = false;
1565 memcpy(ent.mac, addr, ETH_ALEN);
1566 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1567
1568 b53_write64(dev, B53_ARLIO_PAGE,
1569 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1570 b53_write32(dev, B53_ARLIO_PAGE,
1571 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1572
1573 return b53_arl_rw_op(dev, 0);
1574 }
1575
1576 int b53_fdb_add(struct dsa_switch *ds, int port,
1577 const unsigned char *addr, u16 vid)
1578 {
1579 struct b53_device *priv = ds->priv;
1580
1581 /* 5325 and 5365 require some more massaging, but could
1582 * be supported eventually
1583 */
1584 if (is5325(priv) || is5365(priv))
1585 return -EOPNOTSUPP;
1586
1587 return b53_arl_op(priv, 0, port, addr, vid, true);
1588 }
1589 EXPORT_SYMBOL(b53_fdb_add);
1590
1591 int b53_fdb_del(struct dsa_switch *ds, int port,
1592 const unsigned char *addr, u16 vid)
1593 {
1594 struct b53_device *priv = ds->priv;
1595
1596 return b53_arl_op(priv, 0, port, addr, vid, false);
1597 }
1598 EXPORT_SYMBOL(b53_fdb_del);
1599
1600 static int b53_arl_search_wait(struct b53_device *dev)
1601 {
1602 unsigned int timeout = 1000;
1603 u8 reg;
1604
1605 do {
1606 b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1607 if (!(reg & ARL_SRCH_STDN))
1608 return 0;
1609
1610 if (reg & ARL_SRCH_VLID)
1611 return 0;
1612
1613 usleep_range(1000, 2000);
1614 } while (timeout--);
1615
1616 return -ETIMEDOUT;
1617 }
1618
1619 static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1620 struct b53_arl_entry *ent)
1621 {
1622 u64 mac_vid;
1623 u32 fwd_entry;
1624
1625 b53_read64(dev, B53_ARLIO_PAGE,
1626 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1627 b53_read32(dev, B53_ARLIO_PAGE,
1628 B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1629 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1630 }
1631
1632 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1633 dsa_fdb_dump_cb_t *cb, void *data)
1634 {
1635 if (!ent->is_valid)
1636 return 0;
1637
1638 if (port != ent->port)
1639 return 0;
1640
1641 return cb(ent->mac, ent->vid, ent->is_static, data);
1642 }
1643
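/* Start an ARL search and iterate over the result slots (one or two per
 * step depending on the ARL geometry), reporting every valid entry that
 * belongs to @port through the DSA dump callback.
 */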
1644 int b53_fdb_dump(struct dsa_switch *ds, int port,
1645 dsa_fdb_dump_cb_t *cb, void *data)
1646 {
1647 struct b53_device *priv = ds->priv;
1648 struct b53_arl_entry results[2];
1649 unsigned int count = 0;
1650 int ret;
1651 u8 reg;
1652
1653 /* Start search operation */
1654 reg = ARL_SRCH_STDN;
1655 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1656
1657 do {
1658 ret = b53_arl_search_wait(priv);
1659 if (ret)
1660 return ret;
1661
1662 b53_arl_search_rd(priv, 0, &results[0]);
1663 ret = b53_fdb_copy(port, &results[0], cb, data);
1664 if (ret)
1665 return ret;
1666
1667 if (priv->num_arl_entries > 2) {
1668 b53_arl_search_rd(priv, 1, &results[1]);
1669 ret = b53_fdb_copy(port, &results[1], cb, data);
1670 if (ret)
1671 return ret;
1672
1673 if (!results[0].is_valid && !results[1].is_valid)
1674 break;
1675 }
1676
1677 } while (count++ < 1024);
1678
1679 return 0;
1680 }
1681 EXPORT_SYMBOL(b53_fdb_dump);
1682
1683 int b53_mdb_prepare(struct dsa_switch *ds, int port,
1684 const struct switchdev_obj_port_mdb *mdb)
1685 {
1686 struct b53_device *priv = ds->priv;
1687
1688 /* 5325 and 5365 require some more massaging, but could
1689 * be supported eventually
1690 */
1691 if (is5325(priv) || is5365(priv))
1692 return -EOPNOTSUPP;
1693
1694 return 0;
1695 }
1696 EXPORT_SYMBOL(b53_mdb_prepare);
1697
1698 void b53_mdb_add(struct dsa_switch *ds, int port,
1699 const struct switchdev_obj_port_mdb *mdb)
1700 {
1701 struct b53_device *priv = ds->priv;
1702 int ret;
1703
1704 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
1705 if (ret)
1706 dev_err(ds->dev, "failed to add MDB entry\n");
1707 }
1708 EXPORT_SYMBOL(b53_mdb_add);
1709
1710 int b53_mdb_del(struct dsa_switch *ds, int port,
1711 const struct switchdev_obj_port_mdb *mdb)
1712 {
1713 struct b53_device *priv = ds->priv;
1714 int ret;
1715
1716 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
1717 if (ret)
1718 dev_err(ds->dev, "failed to delete MDB entry\n");
1719
1720 return ret;
1721 }
1722 EXPORT_SYMBOL(b53_mdb_del);
1723
1724 int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
1725 {
1726 struct b53_device *dev = ds->priv;
1727 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1728 u16 pvlan, reg;
1729 unsigned int i;
1730
1731 /* On 7278, port 7 which connects to the ASP should only receive
1732 * traffic from matching CFP rules.
1733 */
1734 if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
1735 return -EINVAL;
1736
1737 /* Make this port leave the "join all VLANs" mode since we will have
1738 * proper VLAN entries from now on.
1739 */
1740 if (is58xx(dev)) {
1741 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1742 reg &= ~BIT(port);
1743 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
1744 reg &= ~BIT(cpu_port);
1745 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1746 }
1747
1748 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1749
1750 b53_for_each_port(dev, i) {
1751 if (dsa_to_port(ds, i)->bridge_dev != br)
1752 continue;
1753
1754 /* Add this local port to the remote port VLAN control
1755 * membership and update the remote port bitmask
1756 */
1757 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1758 reg |= BIT(port);
1759 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1760 dev->ports[i].vlan_ctl_mask = reg;
1761
1762 pvlan |= BIT(i);
1763 }
1764
1765 /* Configure the local port VLAN control membership to include
1766 * remote ports and update the local port bitmask
1767 */
1768 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1769 dev->ports[port].vlan_ctl_mask = pvlan;
1770
1771 return 0;
1772 }
1773 EXPORT_SYMBOL(b53_br_join);
1774
1775 void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1776 {
1777 struct b53_device *dev = ds->priv;
1778 struct b53_vlan *vl = &dev->vlans[0];
1779 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1780 unsigned int i;
1781 u16 pvlan, reg, pvid;
1782
1783 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1784
1785 b53_for_each_port(dev, i) {
1786 /* Don't touch the remaining ports */
1787 if (dsa_to_port(ds, i)->bridge_dev != br)
1788 continue;
1789
1790 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1791 reg &= ~BIT(port);
1792 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1793 dev->ports[port].vlan_ctl_mask = reg;
1794
1795 /* Prevent self removal to preserve isolation */
1796 if (port != i)
1797 pvlan &= ~BIT(i);
1798 }
1799
1800 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1801 dev->ports[port].vlan_ctl_mask = pvlan;
1802
1803 pvid = b53_default_pvid(dev);
1804
1805 /* Make this port join all VLANs without VLAN entries */
1806 if (is58xx(dev)) {
1807 b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1808 reg |= BIT(port);
1809 if (!(reg & BIT(cpu_port)))
1810 reg |= BIT(cpu_port);
1811 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1812 } else {
1813 b53_get_vlan_entry(dev, pvid, vl);
1814 vl->members |= BIT(port) | BIT(cpu_port);
1815 vl->untag |= BIT(port) | BIT(cpu_port);
1816 b53_set_vlan_entry(dev, pvid, vl);
1817 }
1818 }
1819 EXPORT_SYMBOL(b53_br_leave);
1820
1821 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
1822 {
1823 struct b53_device *dev = ds->priv;
1824 u8 hw_state;
1825 u8 reg;
1826
1827 switch (state) {
1828 case BR_STATE_DISABLED:
1829 hw_state = PORT_CTRL_DIS_STATE;
1830 break;
1831 case BR_STATE_LISTENING:
1832 hw_state = PORT_CTRL_LISTEN_STATE;
1833 break;
1834 case BR_STATE_LEARNING:
1835 hw_state = PORT_CTRL_LEARN_STATE;
1836 break;
1837 case BR_STATE_FORWARDING:
1838 hw_state = PORT_CTRL_FWD_STATE;
1839 break;
1840 case BR_STATE_BLOCKING:
1841 hw_state = PORT_CTRL_BLOCK_STATE;
1842 break;
1843 default:
1844 dev_err(ds->dev, "invalid STP state: %d\n", state);
1845 return;
1846 }
1847
1848 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1849 reg &= ~PORT_CTRL_STP_STATE_MASK;
1850 reg |= hw_state;
1851 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1852 }
1853 EXPORT_SYMBOL(b53_br_set_stp_state);
1854
1855 void b53_br_fast_age(struct dsa_switch *ds, int port)
1856 {
1857 struct b53_device *dev = ds->priv;
1858
1859 if (b53_fast_age_port(dev, port))
1860 dev_err(ds->dev, "fast ageing failed\n");
1861 }
1862 EXPORT_SYMBOL(b53_br_fast_age);
1863
1864 int b53_br_egress_floods(struct dsa_switch *ds, int port,
1865 bool unicast, bool multicast)
1866 {
1867 struct b53_device *dev = ds->priv;
1868 u16 uc, mc;
1869
1870 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
1871 if (unicast)
1872 uc |= BIT(port);
1873 else
1874 uc &= ~BIT(port);
1875 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
1876
1877 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
1878 if (multicast)
1879 mc |= BIT(port);
1880 else
1881 mc &= ~BIT(port);
1882 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
1883
1884 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
1885 if (multicast)
1886 mc |= BIT(port);
1887 else
1888 mc &= ~BIT(port);
1889 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
1890
1891 return 0;
1892
1893 }
1894 EXPORT_SYMBOL(b53_br_egress_floods);
1895
1896 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1897 {
1898 /* Broadcom switches will accept enabling Broadcom tags on the
1899 * following ports: 5, 7 and 8; any other port is not supported.
1900 */
1901 switch (port) {
1902 case B53_CPU_PORT_25:
1903 case 7:
1904 case B53_CPU_PORT:
1905 return true;
1906 }
1907
1908 return false;
1909 }
1910
1911 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
1912 enum dsa_tag_protocol tag_protocol)
1913 {
1914 bool ret = b53_possible_cpu_port(ds, port);
1915
1916 if (!ret) {
1917 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1918 port);
1919 return ret;
1920 }
1921
1922 switch (tag_protocol) {
1923 case DSA_TAG_PROTO_BRCM:
1924 case DSA_TAG_PROTO_BRCM_PREPEND:
1925 dev_warn(ds->dev,
1926 "Port %d is stacked to Broadcom tag switch\n", port);
1927 ret = false;
1928 break;
1929 default:
1930 ret = true;
1931 break;
1932 }
1933
1934 return ret;
1935 }
1936
1937 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
1938 enum dsa_tag_protocol mprot)
1939 {
1940 struct b53_device *dev = ds->priv;
1941
1942 /* Older models (5325, 5365) support a different tag format that we do
1943 * not support in net/dsa/tag_brcm.c yet.
1944 */
1945 if (is5325(dev) || is5365(dev) ||
1946 !b53_can_enable_brcm_tags(ds, port, mprot)) {
1947 dev->tag_protocol = DSA_TAG_PROTO_NONE;
1948 goto out;
1949 }
1950
1951 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
1952 * which requires us to use the prepended Broadcom tag type
1953 */
1954 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
1955 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
1956 goto out;
1957 }
1958
1959 dev->tag_protocol = DSA_TAG_PROTO_BRCM;
1960 out:
1961 return dev->tag_protocol;
1962 }
1963 EXPORT_SYMBOL(b53_get_tag_protocol);
1964
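/* Add @port to the ingress or egress mirror mask and program the capture
 * port from mirror->to_local_port, enabling mirroring globally.
 */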
1965 int b53_mirror_add(struct dsa_switch *ds, int port,
1966 struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
1967 {
1968 struct b53_device *dev = ds->priv;
1969 u16 reg, loc;
1970
1971 if (ingress)
1972 loc = B53_IG_MIR_CTL;
1973 else
1974 loc = B53_EG_MIR_CTL;
1975
1976 b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
1977 reg |= BIT(port);
1978 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
1979
1980 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
1981 reg &= ~CAP_PORT_MASK;
1982 reg |= mirror->to_local_port;
1983 reg |= MIRROR_EN;
1984 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
1985
1986 return 0;
1987 }
1988 EXPORT_SYMBOL(b53_mirror_add);
1989
1990 void b53_mirror_del(struct dsa_switch *ds, int port,
1991 struct dsa_mall_mirror_tc_entry *mirror)
1992 {
1993 struct b53_device *dev = ds->priv;
1994 bool loc_disable = false, other_loc_disable = false;
1995 u16 reg, loc;
1996
1997 if (mirror->ingress)
1998 loc = B53_IG_MIR_CTL;
1999 else
2000 loc = B53_EG_MIR_CTL;
2001
2002 /* Update the desired ingress/egress register */
2003 b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2004 reg &= ~BIT(port);
2005 if (!(reg & MIRROR_MASK))
2006 loc_disable = true;
2007 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2008
2009 /* Now look at the other one to know if we can disable mirroring
2010 * entirely
2011 */
2012 if (mirror->ingress)
2013 b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
2014 else
2015 b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
2016 if (!(reg & MIRROR_MASK))
2017 other_loc_disable = true;
2018
2019 b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2020 /* Neither direction has any mirrored port left, so disable mirroring */
2021 if (loc_disable && other_loc_disable) {
2022 reg &= ~MIRROR_EN;
2023 reg &= ~CAP_PORT_MASK;
2024 }
2025 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2026 }
2027 EXPORT_SYMBOL(b53_mirror_del);
2028
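/* Turn EEE (Energy Efficient Ethernet) on or off for a port by setting
 * or clearing its bit in the EEE enable control register.
 */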
2029 void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
2030 {
2031 struct b53_device *dev = ds->priv;
2032 u16 reg;
2033
2034 b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
2035 if (enable)
2036 reg |= BIT(port);
2037 else
2038 reg &= ~BIT(port);
2039 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
2040 }
2041 EXPORT_SYMBOL(b53_eee_enable_set);
2042
2044 /* Returns 0 if EEE was not enabled, or 1 otherwise */
2046 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
2047 {
2048 int ret;
2049
2050 ret = phy_init_eee(phy, 0);
2051 if (ret)
2052 return 0;
2053
2054 b53_eee_enable_set(ds, port, true);
2055
2056 return 1;
2057 }
2058 EXPORT_SYMBOL(b53_eee_init);
2059
2060 int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2061 {
2062 struct b53_device *dev = ds->priv;
2063 struct ethtool_eee *p = &dev->ports[port].eee;
2064 u16 reg;
2065
2066 if (is5325(dev) || is5365(dev))
2067 return -EOPNOTSUPP;
2068
2069 b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
2070 e->eee_enabled = p->eee_enabled;
2071 e->eee_active = !!(reg & BIT(port));
2072
2073 return 0;
2074 }
2075 EXPORT_SYMBOL(b53_get_mac_eee);
2076
2077 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2078 {
2079 struct b53_device *dev = ds->priv;
2080 struct ethtool_eee *p = &dev->ports[port].eee;
2081
2082 if (is5325(dev) || is5365(dev))
2083 return -EOPNOTSUPP;
2084
2085 p->eee_enabled = e->eee_enabled;
2086 b53_eee_enable_set(ds, port, e->eee_enabled);
2087
2088 return 0;
2089 }
2090 EXPORT_SYMBOL(b53_set_mac_eee);
2091
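/* MTU changes translate into the jumbo frame configuration: jumbo mode
 * is requested once the MTU reaches JMS_MIN_SIZE, and BCM583xx also
 * allows jumbo frames at 10/100 speeds.
 */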
2092 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
2093 {
2094 struct b53_device *dev = ds->priv;
2095 bool enable_jumbo;
2096 bool allow_10_100;
2097
2098 if (is5325(dev) || is5365(dev))
2099 return -EOPNOTSUPP;
2100
2101 enable_jumbo = (mtu >= JMS_MIN_SIZE);
2102 allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
2103
2104 return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
2105 }
2106
2107 static int b53_get_max_mtu(struct dsa_switch *ds, int port)
2108 {
2109 return JMS_MAX_SIZE;
2110 }
2111
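/* dsa_switch_ops installed on the switch by b53_switch_alloc() */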
2112 static const struct dsa_switch_ops b53_switch_ops = {
2113 .get_tag_protocol = b53_get_tag_protocol,
2114 .setup = b53_setup,
2115 .get_strings = b53_get_strings,
2116 .get_ethtool_stats = b53_get_ethtool_stats,
2117 .get_sset_count = b53_get_sset_count,
2118 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
2119 .phy_read = b53_phy_read16,
2120 .phy_write = b53_phy_write16,
2121 .adjust_link = b53_adjust_link,
2122 .phylink_validate = b53_phylink_validate,
2123 .phylink_mac_link_state = b53_phylink_mac_link_state,
2124 .phylink_mac_config = b53_phylink_mac_config,
2125 .phylink_mac_an_restart = b53_phylink_mac_an_restart,
2126 .phylink_mac_link_down = b53_phylink_mac_link_down,
2127 .phylink_mac_link_up = b53_phylink_mac_link_up,
2128 .port_enable = b53_enable_port,
2129 .port_disable = b53_disable_port,
2130 .get_mac_eee = b53_get_mac_eee,
2131 .set_mac_eee = b53_set_mac_eee,
2132 .port_bridge_join = b53_br_join,
2133 .port_bridge_leave = b53_br_leave,
2134 .port_stp_state_set = b53_br_set_stp_state,
2135 .port_fast_age = b53_br_fast_age,
2136 .port_egress_floods = b53_br_egress_floods,
2137 .port_vlan_filtering = b53_vlan_filtering,
2138 .port_vlan_prepare = b53_vlan_prepare,
2139 .port_vlan_add = b53_vlan_add,
2140 .port_vlan_del = b53_vlan_del,
2141 .port_fdb_dump = b53_fdb_dump,
2142 .port_fdb_add = b53_fdb_add,
2143 .port_fdb_del = b53_fdb_del,
2144 .port_mirror_add = b53_mirror_add,
2145 .port_mirror_del = b53_mirror_del,
2146 .port_mdb_prepare = b53_mdb_prepare,
2147 .port_mdb_add = b53_mdb_add,
2148 .port_mdb_del = b53_mdb_del,
2149 .port_max_mtu = b53_get_max_mtu,
2150 .port_change_mtu = b53_change_mtu,
2151 };
2152
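/* Static description of a supported switch: exposed ports, VLAN and ARL
 * table sizes, default CPU port and the location of the chip-specific
 * duplex, jumbo and VLAN table access registers.
 */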
2153 struct b53_chip_data {
2154 u32 chip_id;
2155 const char *dev_name;
2156 u16 vlans;
2157 u16 enabled_ports;
2158 u8 cpu_port;
2159 u8 vta_regs[3];
2160 u8 arl_entries;
2161 u8 duplex_reg;
2162 u8 jumbo_pm_reg;
2163 u8 jumbo_size_reg;
2164 };
2165
2166 #define B53_VTA_REGS \
2167 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
2168 #define B53_VTA_REGS_9798 \
2169 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
2170 #define B53_VTA_REGS_63XX \
2171 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
2172
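/* Table of supported chips, matched against dev->chip_id in
 * b53_switch_init().
 */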
2173 static const struct b53_chip_data b53_switch_chips[] = {
2174 {
2175 .chip_id = BCM5325_DEVICE_ID,
2176 .dev_name = "BCM5325",
2177 .vlans = 16,
2178 .enabled_ports = 0x1f,
2179 .arl_entries = 2,
2180 .cpu_port = B53_CPU_PORT_25,
2181 .duplex_reg = B53_DUPLEX_STAT_FE,
2182 },
2183 {
2184 .chip_id = BCM5365_DEVICE_ID,
2185 .dev_name = "BCM5365",
2186 .vlans = 256,
2187 .enabled_ports = 0x1f,
2188 .arl_entries = 2,
2189 .cpu_port = B53_CPU_PORT_25,
2190 .duplex_reg = B53_DUPLEX_STAT_FE,
2191 },
2192 {
2193 .chip_id = BCM5389_DEVICE_ID,
2194 .dev_name = "BCM5389",
2195 .vlans = 4096,
2196 .enabled_ports = 0x1f,
2197 .arl_entries = 4,
2198 .cpu_port = B53_CPU_PORT,
2199 .vta_regs = B53_VTA_REGS,
2200 .duplex_reg = B53_DUPLEX_STAT_GE,
2201 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2202 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2203 },
2204 {
2205 .chip_id = BCM5395_DEVICE_ID,
2206 .dev_name = "BCM5395",
2207 .vlans = 4096,
2208 .enabled_ports = 0x1f,
2209 .arl_entries = 4,
2210 .cpu_port = B53_CPU_PORT,
2211 .vta_regs = B53_VTA_REGS,
2212 .duplex_reg = B53_DUPLEX_STAT_GE,
2213 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2214 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2215 },
2216 {
2217 .chip_id = BCM5397_DEVICE_ID,
2218 .dev_name = "BCM5397",
2219 .vlans = 4096,
2220 .enabled_ports = 0x1f,
2221 .arl_entries = 4,
2222 .cpu_port = B53_CPU_PORT,
2223 .vta_regs = B53_VTA_REGS_9798,
2224 .duplex_reg = B53_DUPLEX_STAT_GE,
2225 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2226 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2227 },
2228 {
2229 .chip_id = BCM5398_DEVICE_ID,
2230 .dev_name = "BCM5398",
2231 .vlans = 4096,
2232 .enabled_ports = 0x7f,
2233 .arl_entries = 4,
2234 .cpu_port = B53_CPU_PORT,
2235 .vta_regs = B53_VTA_REGS_9798,
2236 .duplex_reg = B53_DUPLEX_STAT_GE,
2237 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2238 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2239 },
2240 {
2241 .chip_id = BCM53115_DEVICE_ID,
2242 .dev_name = "BCM53115",
2243 .vlans = 4096,
2244 .enabled_ports = 0x1f,
2245 .arl_entries = 4,
2246 .vta_regs = B53_VTA_REGS,
2247 .cpu_port = B53_CPU_PORT,
2248 .duplex_reg = B53_DUPLEX_STAT_GE,
2249 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2250 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2251 },
2252 {
2253 .chip_id = BCM53125_DEVICE_ID,
2254 .dev_name = "BCM53125",
2255 .vlans = 4096,
2256 .enabled_ports = 0xff,
2257 .arl_entries = 4,
2258 .cpu_port = B53_CPU_PORT,
2259 .vta_regs = B53_VTA_REGS,
2260 .duplex_reg = B53_DUPLEX_STAT_GE,
2261 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2262 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2263 },
2264 {
2265 .chip_id = BCM53128_DEVICE_ID,
2266 .dev_name = "BCM53128",
2267 .vlans = 4096,
2268 .enabled_ports = 0x1ff,
2269 .arl_entries = 4,
2270 .cpu_port = B53_CPU_PORT,
2271 .vta_regs = B53_VTA_REGS,
2272 .duplex_reg = B53_DUPLEX_STAT_GE,
2273 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2274 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2275 },
2276 {
2277 .chip_id = BCM63XX_DEVICE_ID,
2278 .dev_name = "BCM63xx",
2279 .vlans = 4096,
2280 .enabled_ports = 0, /* pdata must provide them */
2281 .arl_entries = 4,
2282 .cpu_port = B53_CPU_PORT,
2283 .vta_regs = B53_VTA_REGS_63XX,
2284 .duplex_reg = B53_DUPLEX_STAT_63XX,
2285 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2286 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2287 },
2288 {
2289 .chip_id = BCM53010_DEVICE_ID,
2290 .dev_name = "BCM53010",
2291 .vlans = 4096,
2292 .enabled_ports = 0x1f,
2293 .arl_entries = 4,
2294 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2295 .vta_regs = B53_VTA_REGS,
2296 .duplex_reg = B53_DUPLEX_STAT_GE,
2297 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2298 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2299 },
2300 {
2301 .chip_id = BCM53011_DEVICE_ID,
2302 .dev_name = "BCM53011",
2303 .vlans = 4096,
2304 .enabled_ports = 0x1bf,
2305 .arl_entries = 4,
2306 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2307 .vta_regs = B53_VTA_REGS,
2308 .duplex_reg = B53_DUPLEX_STAT_GE,
2309 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2310 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2311 },
2312 {
2313 .chip_id = BCM53012_DEVICE_ID,
2314 .dev_name = "BCM53012",
2315 .vlans = 4096,
2316 .enabled_ports = 0x1bf,
2317 .arl_entries = 4,
2318 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2319 .vta_regs = B53_VTA_REGS,
2320 .duplex_reg = B53_DUPLEX_STAT_GE,
2321 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2322 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2323 },
2324 {
2325 .chip_id = BCM53018_DEVICE_ID,
2326 .dev_name = "BCM53018",
2327 .vlans = 4096,
2328 .enabled_ports = 0x1f,
2329 .arl_entries = 4,
2330 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2331 .vta_regs = B53_VTA_REGS,
2332 .duplex_reg = B53_DUPLEX_STAT_GE,
2333 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2334 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2335 },
2336 {
2337 .chip_id = BCM53019_DEVICE_ID,
2338 .dev_name = "BCM53019",
2339 .vlans = 4096,
2340 .enabled_ports = 0x1f,
2341 .arl_entries = 4,
2342 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2343 .vta_regs = B53_VTA_REGS,
2344 .duplex_reg = B53_DUPLEX_STAT_GE,
2345 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2346 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2347 },
2348 {
2349 .chip_id = BCM58XX_DEVICE_ID,
2350 .dev_name = "BCM585xx/586xx/88312",
2351 .vlans = 4096,
2352 .enabled_ports = 0x1ff,
2353 .arl_entries = 4,
2354 .cpu_port = B53_CPU_PORT,
2355 .vta_regs = B53_VTA_REGS,
2356 .duplex_reg = B53_DUPLEX_STAT_GE,
2357 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2358 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2359 },
2360 {
2361 .chip_id = BCM583XX_DEVICE_ID,
2362 .dev_name = "BCM583xx/11360",
2363 .vlans = 4096,
2364 .enabled_ports = 0x103,
2365 .arl_entries = 4,
2366 .cpu_port = B53_CPU_PORT,
2367 .vta_regs = B53_VTA_REGS,
2368 .duplex_reg = B53_DUPLEX_STAT_GE,
2369 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2370 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2371 },
2372 {
2373 .chip_id = BCM7445_DEVICE_ID,
2374 .dev_name = "BCM7445",
2375 .vlans = 4096,
2376 .enabled_ports = 0x1ff,
2377 .arl_entries = 4,
2378 .cpu_port = B53_CPU_PORT,
2379 .vta_regs = B53_VTA_REGS,
2380 .duplex_reg = B53_DUPLEX_STAT_GE,
2381 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2382 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2383 },
2384 {
2385 .chip_id = BCM7278_DEVICE_ID,
2386 .dev_name = "BCM7278",
2387 .vlans = 4096,
2388 .enabled_ports = 0x1ff,
2389 .arl_entries = 4,
2390 .cpu_port = B53_CPU_PORT,
2391 .vta_regs = B53_VTA_REGS,
2392 .duplex_reg = B53_DUPLEX_STAT_GE,
2393 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2394 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2395 },
2396 };
2397
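/* Copy the matching chip data into the device, apply run-time fixups
 * (BCM5325 variant detection, BCM53115 GMII strap selecting the second
 * IMP port), size the port and VLAN arrays and claim the optional reset
 * GPIO.
 */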
2398 static int b53_switch_init(struct b53_device *dev)
2399 {
2400 unsigned int i;
2401 int ret;
2402
2403 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
2404 const struct b53_chip_data *chip = &b53_switch_chips[i];
2405
2406 if (chip->chip_id == dev->chip_id) {
2407 if (!dev->enabled_ports)
2408 dev->enabled_ports = chip->enabled_ports;
2409 dev->name = chip->dev_name;
2410 dev->duplex_reg = chip->duplex_reg;
2411 dev->vta_regs[0] = chip->vta_regs[0];
2412 dev->vta_regs[1] = chip->vta_regs[1];
2413 dev->vta_regs[2] = chip->vta_regs[2];
2414 dev->jumbo_pm_reg = chip->jumbo_pm_reg;
2415 dev->cpu_port = chip->cpu_port;
2416 dev->num_vlans = chip->vlans;
2417 dev->num_arl_entries = chip->arl_entries;
2418 break;
2419 }
2420 }
2421
2422 /* check which BCM5325x version we have */
2423 if (is5325(dev)) {
2424 u8 vc4;
2425
2426 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
2427
2428 /* check reserved bits */
2429 switch (vc4 & 3) {
2430 case 1:
2431 /* BCM5325E */
2432 break;
2433 case 3:
2434 /* BCM5325F - do not use port 4 */
2435 dev->enabled_ports &= ~BIT(4);
2436 break;
2437 default:
2438 /* On the BCM47XX SoCs this is the supported internal switch. */
2439 #ifndef CONFIG_BCM47XX
2440 /* BCM5325M */
2441 return -EINVAL;
2442 #else
2443 break;
2444 #endif
2445 }
2446 } else if (dev->chip_id == BCM53115_DEVICE_ID) {
2447 u64 strap_value;
2448
2449 b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
2450 /* use second IMP port if GMII is enabled */
2451 if (strap_value & SV_GMII_CTRL_115)
2452 dev->cpu_port = 5;
2453 }
2454
2455 /* cpu port is always last */
2456 dev->num_ports = dev->cpu_port + 1;
2457 dev->enabled_ports |= BIT(dev->cpu_port);
2458
2459 /* Include the built-in PHYs of non-CPU-capable ports so they get probed */
2460 if (is539x(dev) || is531x5(dev)) {
2461 for (i = 0; i < dev->num_ports; i++) {
2462 if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2463 !b53_possible_cpu_port(dev->ds, i))
2464 dev->ds->phys_mii_mask |= BIT(i);
2465 }
2466 }
2467
2468 dev->ports = devm_kcalloc(dev->dev,
2469 dev->num_ports, sizeof(struct b53_port),
2470 GFP_KERNEL);
2471 if (!dev->ports)
2472 return -ENOMEM;
2473
2474 dev->vlans = devm_kcalloc(dev->dev,
2475 dev->num_vlans, sizeof(struct b53_vlan),
2476 GFP_KERNEL);
2477 if (!dev->vlans)
2478 return -ENOMEM;
2479
2480 dev->reset_gpio = b53_switch_get_reset_gpio(dev);
2481 if (dev->reset_gpio >= 0) {
2482 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
2483 GPIOF_OUT_INIT_HIGH, "robo_reset");
2484 if (ret)
2485 return ret;
2486 }
2487
2488 return 0;
2489 }
2490
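/* Allocate a dsa_switch and its b53_device context on behalf of a bus
 * glue driver; @ops provides the register accessors and @priv is stored
 * for the glue driver's own use.
 */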
2491 struct b53_device *b53_switch_alloc(struct device *base,
2492 const struct b53_io_ops *ops,
2493 void *priv)
2494 {
2495 struct dsa_switch *ds;
2496 struct b53_device *dev;
2497
2498 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
2499 if (!ds)
2500 return NULL;
2501
2502 ds->dev = base;
2503 ds->num_ports = DSA_MAX_PORTS;
2504
2505 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
2506 if (!dev)
2507 return NULL;
2508
2509 ds->priv = dev;
2510 dev->dev = base;
2511
2512 dev->ds = ds;
2513 dev->priv = priv;
2514 dev->ops = ops;
2515 ds->ops = &b53_switch_ops;
2516 mutex_init(&dev->reg_mutex);
2517 mutex_init(&dev->stats_mutex);
2518
2519 return dev;
2520 }
2521 EXPORT_SYMBOL(b53_switch_alloc);
2522
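/* Identify the switch: try the 8-bit device ID first, telling BCM5325
 * and BCM5365 apart by whether the VLAN table access register is
 * writable, then fall back to the 32-bit device ID for newer chips
 * before reading the core revision.
 */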
2523 int b53_switch_detect(struct b53_device *dev)
2524 {
2525 u32 id32;
2526 u16 tmp;
2527 u8 id8;
2528 int ret;
2529
2530 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
2531 if (ret)
2532 return ret;
2533
2534 switch (id8) {
2535 case 0:
2536 /* BCM5325 and BCM5365 do not have this register so reads
2537 * return 0. But the read operation did succeed, so assume this
2538 * is one of them.
2539 *
2540 * Next check if we can write to the 5325's VTA register; for
2541 * 5365 it is read only.
2542 */
2543 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
2544 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
2545
2546 if (tmp == 0xf)
2547 dev->chip_id = BCM5325_DEVICE_ID;
2548 else
2549 dev->chip_id = BCM5365_DEVICE_ID;
2550 break;
2551 case BCM5389_DEVICE_ID:
2552 case BCM5395_DEVICE_ID:
2553 case BCM5397_DEVICE_ID:
2554 case BCM5398_DEVICE_ID:
2555 dev->chip_id = id8;
2556 break;
2557 default:
2558 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
2559 if (ret)
2560 return ret;
2561
2562 switch (id32) {
2563 case BCM53115_DEVICE_ID:
2564 case BCM53125_DEVICE_ID:
2565 case BCM53128_DEVICE_ID:
2566 case BCM53010_DEVICE_ID:
2567 case BCM53011_DEVICE_ID:
2568 case BCM53012_DEVICE_ID:
2569 case BCM53018_DEVICE_ID:
2570 case BCM53019_DEVICE_ID:
2571 dev->chip_id = id32;
2572 break;
2573 default:
2574 pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
2575 id8, id32);
2576 return -ENODEV;
2577 }
2578 }
2579
2580 if (dev->chip_id == BCM5325_DEVICE_ID)
2581 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
2582 &dev->core_rev);
2583 else
2584 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
2585 &dev->core_rev);
2586 }
2587 EXPORT_SYMBOL(b53_switch_detect);
2588
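/* Entry point for the bus glue drivers: apply platform data overrides,
 * detect the chip if necessary, initialize the per-chip state and
 * register the switch with the DSA core.
 *
 * A minimal usage sketch (the glue driver's names, e.g. my_io_ops, are
 * purely illustrative):
 *
 *	dev = b53_switch_alloc(&pdev->dev, &my_io_ops, priv);
 *	if (!dev)
 *		return -ENOMEM;
 *	ret = b53_switch_register(dev);
 */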
2589 int b53_switch_register(struct b53_device *dev)
2590 {
2591 int ret;
2592
2593 if (dev->pdata) {
2594 dev->chip_id = dev->pdata->chip_id;
2595 dev->enabled_ports = dev->pdata->enabled_ports;
2596 }
2597
2598 if (!dev->chip_id && b53_switch_detect(dev))
2599 return -EINVAL;
2600
2601 ret = b53_switch_init(dev);
2602 if (ret)
2603 return ret;
2604
2605 pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
2606
2607 return dsa_register_switch(dev->ds);
2608 }
2609 EXPORT_SYMBOL(b53_switch_register);
2610
2611 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
2612 MODULE_DESCRIPTION("B53 switch library");
2613 MODULE_LICENSE("Dual BSD/GPL");