]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/net/dsa/sja1105/sja1105_main.c
net: dsa: sja1105: Add a state machine for RX timestamping
[thirdparty/linux.git] / drivers / net / dsa / sja1105 / sja1105_main.c
CommitLineData
8aa9ebcc
VO
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/printk.h>
11#include <linux/spi/spi.h>
12#include <linux/errno.h>
13#include <linux/gpio/consumer.h>
ad9f299a 14#include <linux/phylink.h>
8aa9ebcc
VO
15#include <linux/of.h>
16#include <linux/of_net.h>
17#include <linux/of_mdio.h>
18#include <linux/of_device.h>
19#include <linux/netdev_features.h>
20#include <linux/netdevice.h>
21#include <linux/if_bridge.h>
22#include <linux/if_ether.h>
227d07a0 23#include <linux/dsa/8021q.h>
8aa9ebcc
VO
24#include "sja1105.h"
25
/* Pulse the switch's external hardware reset line.
 *
 * @gpio: reset GPIO descriptor (active level driven to 1 to assert)
 * @pulse_len: time in ms to hold the chip in reset
 * @startup_delay: time in ms to wait after deassertion before the chip
 *		   may be talked to again
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}
36
37static void
38sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
39 int from, int to, bool allow)
40{
41 if (allow) {
42 l2_fwd[from].bc_domain |= BIT(to);
43 l2_fwd[from].reach_port |= BIT(to);
44 l2_fwd[from].fl_domain |= BIT(to);
45 } else {
46 l2_fwd[from].bc_domain &= ~BIT(to);
47 l2_fwd[from].reach_port &= ~BIT(to);
48 l2_fwd[from].fl_domain &= ~BIT(to);
49 }
50}
51
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;	/* parsed from the "phy-mode" DT property */
	sja1105_mii_role_t role;	/* XMII_MAC or XMII_PHY, from phy-handle / overrides */
};
59
/* Build the static MAC Configuration Table: one entry per port, all set to
 * the same conservative defaults. The upstream (CPU-facing) port is the
 * exception: STP never runs on it, so its learning and I/O bits are enabled
 * statically here rather than by the STP state machine.
 *
 * Returns 0, or -ENOMEM if the table cannot be allocated.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * retrieved from the PHY object through phylib and
		 * sja1105_adjust_port_config.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 0,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on phylib DT bindings */
	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
138
139static int sja1105_init_mii_settings(struct sja1105_private *priv,
140 struct sja1105_dt_port *ports)
141{
142 struct device *dev = &priv->spidev->dev;
143 struct sja1105_xmii_params_entry *mii;
144 struct sja1105_table *table;
145 int i;
146
147 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
148
149 /* Discard previous xMII Mode Parameters Table */
150 if (table->entry_count) {
151 kfree(table->entries);
152 table->entry_count = 0;
153 }
154
155 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
156 table->ops->unpacked_entry_size, GFP_KERNEL);
157 if (!table->entries)
158 return -ENOMEM;
159
160 /* Override table based on phylib DT bindings */
161 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
162
163 mii = table->entries;
164
165 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
166 switch (ports[i].phy_mode) {
167 case PHY_INTERFACE_MODE_MII:
168 mii->xmii_mode[i] = XMII_MODE_MII;
169 break;
170 case PHY_INTERFACE_MODE_RMII:
171 mii->xmii_mode[i] = XMII_MODE_RMII;
172 break;
173 case PHY_INTERFACE_MODE_RGMII:
174 case PHY_INTERFACE_MODE_RGMII_ID:
175 case PHY_INTERFACE_MODE_RGMII_RXID:
176 case PHY_INTERFACE_MODE_RGMII_TXID:
177 mii->xmii_mode[i] = XMII_MODE_RGMII;
178 break;
179 default:
180 dev_err(dev, "Unsupported PHY mode %s!\n",
181 phy_modes(ports[i].phy_mode));
182 }
183
184 mii->phy_mac[i] = ports[i].role;
185 }
186 return 0;
187}
188
189static int sja1105_init_static_fdb(struct sja1105_private *priv)
190{
191 struct sja1105_table *table;
192
193 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
194
291d1e72
VO
195 /* We only populate the FDB table through dynamic
196 * L2 Address Lookup entries
197 */
8aa9ebcc
VO
198 if (table->entry_count) {
199 kfree(table->entries);
200 table->entry_count = 0;
201 }
202 return 0;
203}
204
/* Build the single-entry L2 Lookup Parameters Table, which governs how the
 * switch learns, ages and hashes FDB entries.
 *
 * Returns 0, or -ENOMEM if the table cannot be allocated.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = false,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard any previous L2 Lookup Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
259
260static int sja1105_init_static_vlan(struct sja1105_private *priv)
261{
262 struct sja1105_table *table;
263 struct sja1105_vlan_lookup_entry pvid = {
264 .ving_mirr = 0,
265 .vegr_mirr = 0,
266 .vmemb_port = 0,
267 .vlan_bc = 0,
268 .tag_port = 0,
269 .vlanid = 0,
270 };
271 int i;
272
273 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
274
275 /* The static VLAN table will only contain the initial pvid of 0.
6666cebc
VO
276 * All other VLANs are to be configured through dynamic entries,
277 * and kept in the static configuration table as backing memory.
278 * The pvid of 0 is sufficient to pass traffic while the ports are
279 * standalone and when vlan_filtering is disabled. When filtering
280 * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
281 * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
282 * vlan' even when vlan_filtering is off, but it has no effect.
8aa9ebcc
VO
283 */
284 if (table->entry_count) {
285 kfree(table->entries);
286 table->entry_count = 0;
287 }
288
289 table->entries = kcalloc(1, table->ops->unpacked_entry_size,
290 GFP_KERNEL);
291 if (!table->entries)
292 return -ENOMEM;
293
294 table->entry_count = 1;
295
296 /* VLAN ID 0: all DT-defined ports are members; no restrictions on
297 * forwarding; always transmit priority-tagged frames as untagged.
298 */
299 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
300 pvid.vmemb_port |= BIT(i);
301 pvid.vlan_bc |= BIT(i);
302 pvid.tag_port &= ~BIT(i);
303 }
304
305 ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
306 return 0;
307}
308
309static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
310{
311 struct sja1105_l2_forwarding_entry *l2fwd;
312 struct sja1105_table *table;
313 int i, j;
314
315 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
316
317 if (table->entry_count) {
318 kfree(table->entries);
319 table->entry_count = 0;
320 }
321
322 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
323 table->ops->unpacked_entry_size, GFP_KERNEL);
324 if (!table->entries)
325 return -ENOMEM;
326
327 table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
328
329 l2fwd = table->entries;
330
331 /* First 5 entries define the forwarding rules */
332 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
333 unsigned int upstream = dsa_upstream_port(priv->ds, i);
334
335 for (j = 0; j < SJA1105_NUM_TC; j++)
336 l2fwd[i].vlan_pmap[j] = j;
337
338 if (i == upstream)
339 continue;
340
341 sja1105_port_allow_traffic(l2fwd, i, upstream, true);
342 sja1105_port_allow_traffic(l2fwd, upstream, i, true);
343 }
344 /* Next 8 entries define VLAN PCP mapping from ingress to egress.
345 * Create a one-to-one mapping.
346 */
347 for (i = 0; i < SJA1105_NUM_TC; i++)
348 for (j = 0; j < SJA1105_NUM_PORTS; j++)
349 l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
350
351 return 0;
352}
353
354static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
355{
356 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
357 /* Disallow dynamic reconfiguration of vlan_pmap */
358 .max_dynp = 0,
359 /* Use a single memory partition for all ingress queues */
360 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
361 };
362 struct sja1105_table *table;
363
364 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
365
366 if (table->entry_count) {
367 kfree(table->entries);
368 table->entry_count = 0;
369 }
370
371 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
372 table->ops->unpacked_entry_size, GFP_KERNEL);
373 if (!table->entries)
374 return -ENOMEM;
375
376 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
377
378 /* This table only has a single entry */
379 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
380 default_l2fwd_params;
381
382 return 0;
383}
384
/* Build the single-entry General Parameters table: host/mirror/cascade port
 * selection, link-local trapping filters and (initially) disabled VLAN
 * filtering. Also assigns one management slot per user port along the way.
 *
 * Returns 0, or -ENOMEM if the table cannot be allocated.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Disallow dynamic changing of the mirror port */
		.mirr_ptacu = 0,
		.switchid = priv->ds->index,
		/* Priority queue for link-local frames trapped to CPU */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Same as host port */
		.mirr_port = dsa_upstream_port(priv->ds, 0),
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = 0,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;
	int i, k = 0;

	/* Record the cascade port (if any) and hand out management slots
	 * (0..num_user_ports-1) to the user ports in ascending order.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_dsa_port(priv->ds, i))
			default_general_params.casc_port = i;
		else if (dsa_is_user_port(priv->ds, i))
			priv->ports[i].mgmt_slot = k++;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
459
460#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
461
462static inline void
463sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
464 int index)
465{
466 policing[index].sharindx = index;
467 policing[index].smax = 65535; /* Burst size in bytes */
468 policing[index].rate = SJA1105_RATE_MBPS(1000);
469 policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
470 policing[index].partition = 0;
471}
472
473static int sja1105_init_l2_policing(struct sja1105_private *priv)
474{
475 struct sja1105_l2_policing_entry *policing;
476 struct sja1105_table *table;
477 int i, j, k;
478
479 table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
480
481 /* Discard previous L2 Policing Table */
482 if (table->entry_count) {
483 kfree(table->entries);
484 table->entry_count = 0;
485 }
486
487 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
488 table->ops->unpacked_entry_size, GFP_KERNEL);
489 if (!table->entries)
490 return -ENOMEM;
491
492 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
493
494 policing = table->entries;
495
496 /* k sweeps through all unicast policers (0-39).
497 * bcast sweeps through policers 40-44.
498 */
499 for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
500 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
501
502 for (j = 0; j < SJA1105_NUM_TC; j++, k++)
503 sja1105_setup_policer(policing, k);
504
505 /* Set up this port's policer for broadcast traffic */
506 sja1105_setup_policer(policing, bcast);
507 }
508 return 0;
509}
510
24c01949
VO
511static int sja1105_init_avb_params(struct sja1105_private *priv,
512 bool on)
513{
514 struct sja1105_avb_params_entry *avb;
515 struct sja1105_table *table;
516
517 table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
518
519 /* Discard previous AVB Parameters Table */
520 if (table->entry_count) {
521 kfree(table->entries);
522 table->entry_count = 0;
523 }
524
525 /* Configure the reception of meta frames only if requested */
526 if (!on)
527 return 0;
528
529 table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
530 table->ops->unpacked_entry_size, GFP_KERNEL);
531 if (!table->entries)
532 return -ENOMEM;
533
534 table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
535
536 avb = table->entries;
537
538 avb->destmeta = SJA1105_META_DMAC;
539 avb->srcmeta = SJA1105_META_SMAC;
540
541 return 0;
542}
543
8aa9ebcc
VO
544static int sja1105_static_config_load(struct sja1105_private *priv,
545 struct sja1105_dt_port *ports)
546{
547 int rc;
548
549 sja1105_static_config_free(&priv->static_config);
550 rc = sja1105_static_config_init(&priv->static_config,
551 priv->info->static_ops,
552 priv->info->device_id);
553 if (rc)
554 return rc;
555
556 /* Build static configuration */
557 rc = sja1105_init_mac_settings(priv);
558 if (rc < 0)
559 return rc;
560 rc = sja1105_init_mii_settings(priv, ports);
561 if (rc < 0)
562 return rc;
563 rc = sja1105_init_static_fdb(priv);
564 if (rc < 0)
565 return rc;
566 rc = sja1105_init_static_vlan(priv);
567 if (rc < 0)
568 return rc;
569 rc = sja1105_init_l2_lookup_params(priv);
570 if (rc < 0)
571 return rc;
572 rc = sja1105_init_l2_forwarding(priv);
573 if (rc < 0)
574 return rc;
575 rc = sja1105_init_l2_forwarding_params(priv);
576 if (rc < 0)
577 return rc;
578 rc = sja1105_init_l2_policing(priv);
579 if (rc < 0)
580 return rc;
581 rc = sja1105_init_general_params(priv);
24c01949
VO
582 if (rc < 0)
583 return rc;
584 rc = sja1105_init_avb_params(priv, false);
8aa9ebcc
VO
585 if (rc < 0)
586 return rc;
587
588 /* Send initial configuration to hardware via SPI */
589 return sja1105_static_config_upload(priv);
590}
591
f5b8631c
VO
592static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
593 const struct sja1105_dt_port *ports)
594{
595 int i;
596
597 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
598 if (ports->role == XMII_MAC)
599 continue;
600
601 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
602 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
603 priv->rgmii_rx_delay[i] = true;
604
605 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
606 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
607 priv->rgmii_tx_delay[i] = true;
608
609 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
610 !priv->info->setup_rgmii_delay)
611 return -EINVAL;
612 }
613 return 0;
614}
615
8aa9ebcc
VO
616static int sja1105_parse_ports_node(struct sja1105_private *priv,
617 struct sja1105_dt_port *ports,
618 struct device_node *ports_node)
619{
620 struct device *dev = &priv->spidev->dev;
621 struct device_node *child;
622
623 for_each_child_of_node(ports_node, child) {
624 struct device_node *phy_node;
625 int phy_mode;
626 u32 index;
627
628 /* Get switch port number from DT */
629 if (of_property_read_u32(child, "reg", &index) < 0) {
630 dev_err(dev, "Port number not defined in device tree "
631 "(property \"reg\")\n");
632 return -ENODEV;
633 }
634
635 /* Get PHY mode from DT */
636 phy_mode = of_get_phy_mode(child);
637 if (phy_mode < 0) {
638 dev_err(dev, "Failed to read phy-mode or "
639 "phy-interface-type property for port %d\n",
640 index);
641 return -ENODEV;
642 }
643 ports[index].phy_mode = phy_mode;
644
645 phy_node = of_parse_phandle(child, "phy-handle", 0);
646 if (!phy_node) {
647 if (!of_phy_is_fixed_link(child)) {
648 dev_err(dev, "phy-handle or fixed-link "
649 "properties missing!\n");
650 return -ENODEV;
651 }
652 /* phy-handle is missing, but fixed-link isn't.
653 * So it's a fixed link. Default to PHY role.
654 */
655 ports[index].role = XMII_PHY;
656 } else {
657 /* phy-handle present => put port in MAC role */
658 ports[index].role = XMII_MAC;
659 of_node_put(phy_node);
660 }
661
662 /* The MAC/PHY role can be overridden with explicit bindings */
663 if (of_property_read_bool(child, "sja1105,role-mac"))
664 ports[index].role = XMII_MAC;
665 else if (of_property_read_bool(child, "sja1105,role-phy"))
666 ports[index].role = XMII_PHY;
667 }
668
669 return 0;
670}
671
672static int sja1105_parse_dt(struct sja1105_private *priv,
673 struct sja1105_dt_port *ports)
674{
675 struct device *dev = &priv->spidev->dev;
676 struct device_node *switch_node = dev->of_node;
677 struct device_node *ports_node;
678 int rc;
679
680 ports_node = of_get_child_by_name(switch_node, "ports");
681 if (!ports_node) {
682 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
683 return -ENODEV;
684 }
685
686 rc = sja1105_parse_ports_node(priv, ports, ports_node);
687 of_node_put(ports_node);
688
689 return rc;
690}
691
/* Convert back and forth MAC speed from Mbps to SJA1105 encoding
 * (indexed by sja1105_speed_t; AUTO maps to 0 Mbps).
 */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO] = 0,
	[SJA1105_SPEED_10MBPS] = 10,
	[SJA1105_SPEED_100MBPS] = 100,
	[SJA1105_SPEED_1000MBPS] = 1000,
};
699
/* Set link speed and enable/disable traffic I/O in the MAC configuration
 * for a specific port.
 *
 * @priv: driver private data
 * @port: index of the port to reconfigure
 * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
 * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
 *	     settings from the STP state, but not persistently (does not
 *	     overwrite the static MAC info for this port).
 *
 * Returns 0 on success, -EINVAL for an unsupported speed, or the error from
 * the dynamic reconfiguration write / clocking setup.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps, bool enabled)
{
	struct sja1105_mac_config_entry dyn_mac;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Translate the requested Mbps into the hardware's speed encoding */
	switch (speed_mbps) {
	case 0:
		/* No speed update requested */
		speed = SJA1105_SPEED_AUTO;
		break;
	case 10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case 100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case 1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
	 * configuration table, since this will be used for the clocking setup,
	 * and we no longer need to store it in the static config (already told
	 * hardware we want auto during upload phase).
	 */
	mac[port].speed = speed;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	dyn_mac = mac[port];
	dyn_mac.ingress = enabled && mac[port].ingress;
	dyn_mac.egress = enabled && mac[port].egress;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
					  port, &dyn_mac, true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	if (!enabled)
		return 0;

	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
781
af7cd036
VO
782static void sja1105_mac_config(struct dsa_switch *ds, int port,
783 unsigned int link_an_mode,
784 const struct phylink_link_state *state)
8aa9ebcc
VO
785{
786 struct sja1105_private *priv = ds->priv;
787
af7cd036 788 if (!state->link)
8aa9ebcc
VO
789 sja1105_adjust_port_config(priv, port, 0, false);
790 else
af7cd036 791 sja1105_adjust_port_config(priv, port, state->speed, true);
8aa9ebcc
VO
792}
793
/* phylink validate callback for the switch's MACs. */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	/* Gigabit is only advertised on ports configured for RGMII;
	 * MII/RMII ports top out at 100 Mbps.
	 */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
822
/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */
/* Flatten a (bin, way) pair into the linear FDB index used by the
 * dynamic config interface.
 */
static inline int sja1105et_fdb_index(int bin, int way)
{
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
833
9dfa6911
VO
834static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
835 const u8 *addr, u16 vid,
836 struct sja1105_l2_lookup_entry *match,
837 int *last_unused)
291d1e72
VO
838{
839 int way;
840
841 for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
842 struct sja1105_l2_lookup_entry l2_lookup = {0};
843 int index = sja1105et_fdb_index(bin, way);
844
845 /* Skip unused entries, optionally marking them
846 * into the return value
847 */
848 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
849 index, &l2_lookup)) {
850 if (last_unused)
851 *last_unused = way;
852 continue;
853 }
854
855 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
856 l2_lookup.vlanid == vid) {
857 if (match)
858 *match = l2_lookup;
859 return way;
860 }
861 }
862 /* Return an invalid entry index if not found */
863 return -1;
864}
865
9dfa6911
VO
866int sja1105et_fdb_add(struct dsa_switch *ds, int port,
867 const unsigned char *addr, u16 vid)
291d1e72
VO
868{
869 struct sja1105_l2_lookup_entry l2_lookup = {0};
870 struct sja1105_private *priv = ds->priv;
871 struct device *dev = ds->dev;
872 int last_unused = -1;
873 int bin, way;
874
9dfa6911 875 bin = sja1105et_fdb_hash(priv, addr, vid);
291d1e72 876
9dfa6911
VO
877 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
878 &l2_lookup, &last_unused);
291d1e72
VO
879 if (way >= 0) {
880 /* We have an FDB entry. Is our port in the destination
881 * mask? If yes, we need to do nothing. If not, we need
882 * to rewrite the entry by adding this port to it.
883 */
884 if (l2_lookup.destports & BIT(port))
885 return 0;
886 l2_lookup.destports |= BIT(port);
887 } else {
888 int index = sja1105et_fdb_index(bin, way);
889
890 /* We don't have an FDB entry. We construct a new one and
891 * try to find a place for it within the FDB table.
892 */
893 l2_lookup.macaddr = ether_addr_to_u64(addr);
894 l2_lookup.destports = BIT(port);
895 l2_lookup.vlanid = vid;
896
897 if (last_unused >= 0) {
898 way = last_unused;
899 } else {
900 /* Bin is full, need to evict somebody.
901 * Choose victim at random. If you get these messages
902 * often, you may need to consider changing the
903 * distribution function:
904 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
905 */
906 get_random_bytes(&way, sizeof(u8));
907 way %= SJA1105ET_FDB_BIN_SIZE;
908 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
909 bin, addr, way);
910 /* Evict entry */
911 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
912 index, NULL, false);
913 }
914 }
915 l2_lookup.index = sja1105et_fdb_index(bin, way);
916
917 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
918 l2_lookup.index, &l2_lookup,
919 true);
920}
921
9dfa6911
VO
922int sja1105et_fdb_del(struct dsa_switch *ds, int port,
923 const unsigned char *addr, u16 vid)
291d1e72
VO
924{
925 struct sja1105_l2_lookup_entry l2_lookup = {0};
926 struct sja1105_private *priv = ds->priv;
927 int index, bin, way;
928 bool keep;
929
9dfa6911
VO
930 bin = sja1105et_fdb_hash(priv, addr, vid);
931 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
932 &l2_lookup, NULL);
291d1e72
VO
933 if (way < 0)
934 return 0;
935 index = sja1105et_fdb_index(bin, way);
936
937 /* We have an FDB entry. Is our port in the destination mask? If yes,
938 * we need to remove it. If the resulting port mask becomes empty, we
939 * need to completely evict the FDB entry.
940 * Otherwise we just write it back.
941 */
7752e937
VO
942 l2_lookup.destports &= ~BIT(port);
943
291d1e72
VO
944 if (l2_lookup.destports)
945 keep = true;
946 else
947 keep = false;
948
949 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
950 index, &l2_lookup, keep);
951}
952
/* Add (or extend the destination mask of) an FDB entry on P/Q/R/S switches,
 * using the switch's own search (SJA1105_SEARCH) to find either an existing
 * entry for @addr/@vid or — failing that — the first free slot.
 */
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	/* Match on the full MAC address and full VLAN ID */
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.mask_iotag = BIT(0);
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		/* A failed read means index i is currently unoccupied */
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.index = i;

skip_finding_an_index:
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup.index, &l2_lookup,
					    true);
}
1005
/* P/Q/R/S style FDB deletion: look up the {addr, vid} entry through the
 * dynamic configuration interface, clear this port from its destination
 * mask, and either rewrite the entry or invalidate it entirely depending
 * on whether other ports still reference it.
 */
int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	bool keep;
	int rc;

	/* Key the search on MAC address, VLAN ID and iotag, with full masks
	 * so only an exact match is returned.
	 */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.mask_iotag = BIT(0);
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	/* Entry not present: nothing to delete, treat as success */
	if (rc < 0)
		return 0;

	l2_lookup.destports &= ~BIT(port);

	/* Decide whether we remove just this port from the FDB entry,
	 * or if we remove it completely.
	 */
	if (l2_lookup.destports)
		keep = true;
	else
		keep = false;

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup.index, &l2_lookup, keep);
}
1040
/* DSA .port_fdb_add entry point. Dispatches to the chip-specific add
 * implementation, translating the VID into dsa_8021q TX/RX VIDs when the
 * port is not under a vlan_filtering bridge.
 */
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	/* Since we make use of VLANs even when the bridge core doesn't tell us
	 * to, translate these FDB entries into the correct dsa_8021q ones.
	 */
	if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
		unsigned int upstream = dsa_upstream_port(priv->ds, port);
		u16 tx_vid = dsa_8021q_tx_vid(ds, port);
		u16 rx_vid = dsa_8021q_rx_vid(ds, port);

		/* One entry for frames egressing this port (TX VID), and one
		 * on the CPU-facing port for frames ingressing it (RX VID).
		 */
		rc = priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
		if (rc < 0)
			return rc;
		return priv->info->fdb_add_cmd(ds, upstream, addr, rx_vid);
	}
	return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
1062
/* DSA .port_fdb_del entry point. Mirror image of sja1105_fdb_add: removes
 * both dsa_8021q translations when not under a vlan_filtering bridge.
 */
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	/* Since we make use of VLANs even when the bridge core doesn't tell us
	 * to, translate these FDB entries into the correct dsa_8021q ones.
	 */
	if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
		unsigned int upstream = dsa_upstream_port(priv->ds, port);
		u16 tx_vid = dsa_8021q_tx_vid(ds, port);
		u16 rx_vid = dsa_8021q_rx_vid(ds, port);

		rc = priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
		if (rc < 0)
			return rc;
		return priv->info->fdb_del_cmd(ds, upstream, addr, rx_vid);
	}
	return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
1084
291d1e72
VO
1085static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1086 dsa_fdb_dump_cb_t *cb, void *data)
1087{
1088 struct sja1105_private *priv = ds->priv;
1089 struct device *dev = ds->dev;
1090 int i;
1091
1092 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1093 struct sja1105_l2_lookup_entry l2_lookup = {0};
1094 u8 macaddr[ETH_ALEN];
1095 int rc;
1096
1097 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1098 i, &l2_lookup);
1099 /* No fdb entry at i, not an issue */
def84604 1100 if (rc == -ENOENT)
291d1e72
VO
1101 continue;
1102 if (rc) {
1103 dev_err(dev, "Failed to dump FDB: %d\n", rc);
1104 return rc;
1105 }
1106
1107 /* FDB dump callback is per port. This means we have to
1108 * disregard a valid entry if it's not for this port, even if
1109 * only to revisit it later. This is inefficient because the
1110 * 1024-sized FDB table needs to be traversed 4 times through
1111 * SPI during a 'bridge fdb show' command.
1112 */
1113 if (!(l2_lookup.destports & BIT(port)))
1114 continue;
1115 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
93647594
VO
1116
1117 /* We need to hide the dsa_8021q VLAN from the user.
1118 * Convert the TX VID into the pvid that is active in
1119 * standalone and non-vlan_filtering modes, aka 1.
1120 * The RX VID is applied on the CPU port, which is not seen by
1121 * the bridge core anyway, so there's nothing to hide.
1122 */
1123 if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1124 l2_lookup.vlanid = 1;
291d1e72
VO
1125 cb(macaddr, l2_lookup.vlanid, false, data);
1126 }
1127 return 0;
1128}
1129
/* This callback needs to be present */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	/* Nothing to validate or preallocate; the DSA core only calls
	 * .port_mdb_add if a .port_mdb_prepare implementation exists.
	 */
	return 0;
}
1136
/* Multicast entries go into the same L2 lookup table as unicast ones, so
 * reuse the FDB add path. Return code is dropped because the DSA
 * .port_mdb_add hook is void in this kernel version.
 */
static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
1142
/* Remove a multicast entry; delegates to the unicast FDB deletion path
 * since both live in the L2 lookup table.
 */
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
1148
8aa9ebcc
VO
/* Add (member=true) or remove (member=false) @port to/from the L2
 * forwarding domain of every other user port attached to bridge @br,
 * pushing each modified row to hardware over SPI.
 */
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Finally commit this port's own (accumulated) forwarding row */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}
1189
640f763f
VO
/* DSA .port_stp_state_set hook. Maps bridge STP states onto the per-port
 * MAC configuration flags (ingress, egress, dyn_learn) and commits the
 * row to hardware. sja1105_stp_state_get() performs the reverse mapping.
 */
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the state
		 * of the INGRESS flag". So BPDUs are still be allowed to pass.
		 * At the moment no difference between DISABLED and BLOCKING.
		 */
		mac[port].ingress = false;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LISTENING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LEARNING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = true;
		break;
	case BR_STATE_FORWARDING:
		mac[port].ingress = true;
		mac[port].egress = true;
		mac[port].dyn_learn = true;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
				     &mac[port], true);
}
1233
8aa9ebcc
VO
/* DSA .port_bridge_join hook: open L2 forwarding between @port and the
 * other ports of @br.
 */
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	return sja1105_bridge_member(ds, port, br, true);
}
1239
/* DSA .port_bridge_leave hook: close L2 forwarding between @port and the
 * remaining ports of @br. The hook is void, so errors cannot propagate.
 */
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}
1245
640f763f
VO
1246static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
1247{
1248 struct sja1105_mac_config_entry *mac;
1249
1250 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1251
1252 if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1253 return BR_STATE_BLOCKING;
1254 if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1255 return BR_STATE_LISTENING;
1256 if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
1257 return BR_STATE_LEARNING;
1258 if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
1259 return BR_STATE_FORWARDING;
3b2c4f4d
VO
1260 /* This is really an error condition if the MAC was in none of the STP
1261 * states above. But treating the port as disabled does nothing, which
1262 * is adequate, and it also resets the MAC to a known state later on.
1263 */
1264 return BR_STATE_DISABLED;
640f763f
VO
1265}
1266
6666cebc
VO
/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
static int sja1105_static_config_reload(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	u8 stp_state[SJA1105_NUM_PORTS];
	int rc, i;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up settings changed by sja1105_adjust_port_config and
	 * sja1105_bridge_stp_state_set and restore their defaults.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* The CPU-facing port is always fully open */
			mac[i].ingress = true;
			mac[i].egress = true;
			mac[i].dyn_learn = true;
		} else {
			/* Remember the STP state so it can be re-applied
			 * after the reset.
			 */
			stp_state[i] = sja1105_stp_state_get(priv, i);
			mac[i].ingress = false;
			mac[i].egress = false;
			mac[i].dyn_learn = false;
		}
	}

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out;

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		bool enabled = (speed_mbps[i] != 0);

		if (i != dsa_upstream_port(priv->ds, i))
			sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);

		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
						enabled);
		if (rc < 0)
			goto out;
	}
out:
	return rc;
}
1327
6666cebc
VO
/* Program @pvid as the port-based default VLAN ID of @port, both in the
 * static config shadow and in hardware via the dynamic config interface.
 */
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	mac[port].vlanid = pvid;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					    &mac[port], true);
}
1339
1340static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
1341{
1342 struct sja1105_vlan_lookup_entry *vlan;
1343 int count, i;
1344
1345 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
1346 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
1347
1348 for (i = 0; i < count; i++)
1349 if (vlan[i].vlanid == vid)
1350 return i;
1351
1352 /* Return an invalid entry index if not found */
1353 return -1;
1354}
1355
/* Add (@enabled=true) or remove @port from VLAN @vid in the static VLAN
 * Lookup table, and push the change to hardware. The table entry is created
 * on first member and deleted once no port is a member anymore.
 * @untagged selects whether frames egress @port without the VLAN tag.
 */
static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	/* Keep the static config shadow in sync with the hardware */
	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}
1415
227d07a0
VO
/* Enable or disable dsa_8021q tagging (per-port VLANs used for source port
 * identification by the tagger) on all switch ports.
 */
static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
	int rc, i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
				i, rc);
			return rc;
		}
	}
	dev_info(ds->dev, "%s switch tagging\n",
		 enabled ? "Enabled" : "Disabled");
	return 0;
}
1432
8aa9ebcc
VO
/* Tell the DSA core which tagger to bind to the CPU port */
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
{
	return DSA_TAG_PROTO_SJA1105;
}
1438
6666cebc
VO
/* This callback needs to be present */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	/* No resources to reserve; the DSA core only calls .port_vlan_add
	 * when a .port_vlan_prepare implementation exists.
	 */
	return 0;
}
1445
070ca3bb
VO
/* The TPID setting belongs to the General Parameters table,
 * which can only be partially reconfigured at runtime (and not the TPID).
 * So a switch reset is required.
 * Note: @port is unused here; VLAN filtering is global on this switch
 * (see vlan_filtering_is_global in sja1105_setup).
 */
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	u16 tpid, tpid2;
	int rc;

	if (enabled) {
		/* Enable VLAN filtering. */
		tpid = ETH_P_8021AD;
		tpid2 = ETH_P_8021Q;
	} else {
		/* Disable VLAN filtering. */
		tpid = ETH_P_SJA1105;
		tpid2 = ETH_P_SJA1105;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;
	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
	general_params->tpid = tpid;
	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
	general_params->tpid2 = tpid2;
	/* When VLAN filtering is on, we need to at least be able to
	 * decode management traffic through the "backup plan".
	 */
	general_params->incl_srcpt1 = enabled;
	general_params->incl_srcpt0 = enabled;

	/* TPID change requires a full static config upload (switch reset) */
	rc = sja1105_static_config_reload(priv);
	if (rc)
		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

	/* Switch port identification based on 802.1Q is only passable
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive.
	 */
	return sja1105_setup_8021q_tagging(ds, !enabled);
}
1490
/* DSA .port_vlan_add hook: apply each VID in the requested range to @port,
 * honouring the UNTAGGED and PVID flags. Void return per the DSA API, so
 * errors are only logged.
 */
static void sja1105_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
				vid, port, rc);
			return;
		}
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
			rc = sja1105_pvid_apply(ds->priv, port, vid);
			if (rc < 0) {
				dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
					vid, port, rc);
				return;
			}
		}
	}
}
1516
/* DSA .port_vlan_del hook: remove @port from each VID in the requested
 * range; sja1105_vlan_apply() drops the table entry once it has no members.
 */
static int sja1105_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
				vid, port, rc);
			return rc;
		}
	}
	return 0;
}
1535
8aa9ebcc
VO
/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermode, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	/* Register the PTP clock before the static config is uploaded,
	 * so timestamping infrastructure is ready when ports come up.
	 */
	rc = sja1105_ptp_clock_register(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
		return rc;
	}
	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		return rc;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		return rc;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	return sja1105_setup_8021q_tagging(ds, true);
}
1602
f3097be2
VO
/* DSA .teardown hook: stop the deferred RX timestamping worker and drop any
 * skbs still queued for a timestamp, so nothing runs after unregistration.
 */
static void sja1105_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;

	cancel_work_sync(&priv->tagger_data.rxtstamp_work);
	skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
}
1610
227d07a0 1611static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
47ed985e 1612 struct sk_buff *skb, bool takets)
227d07a0
VO
1613{
1614 struct sja1105_mgmt_entry mgmt_route = {0};
1615 struct sja1105_private *priv = ds->priv;
1616 struct ethhdr *hdr;
1617 int timeout = 10;
1618 int rc;
1619
1620 hdr = eth_hdr(skb);
1621
1622 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
1623 mgmt_route.destports = BIT(port);
1624 mgmt_route.enfport = 1;
47ed985e
VO
1625 mgmt_route.tsreg = 0;
1626 mgmt_route.takets = takets;
227d07a0
VO
1627
1628 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
1629 slot, &mgmt_route, true);
1630 if (rc < 0) {
1631 kfree_skb(skb);
1632 return rc;
1633 }
1634
1635 /* Transfer skb to the host port. */
1636 dsa_enqueue_skb(skb, ds->ports[port].slave);
1637
1638 /* Wait until the switch has processed the frame */
1639 do {
1640 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
1641 slot, &mgmt_route);
1642 if (rc < 0) {
1643 dev_err_ratelimited(priv->ds->dev,
1644 "failed to poll for mgmt route\n");
1645 continue;
1646 }
1647
1648 /* UM10944: The ENFPORT flag of the respective entry is
1649 * cleared when a match is found. The host can use this
1650 * flag as an acknowledgment.
1651 */
1652 cpu_relax();
1653 } while (mgmt_route.enfport && --timeout);
1654
1655 if (!timeout) {
1656 /* Clean up the management route so that a follow-up
1657 * frame may not match on it by mistake.
2a7e7409
VO
1658 * This is only hardware supported on P/Q/R/S - on E/T it is
1659 * a no-op and we are silently discarding the -EOPNOTSUPP.
227d07a0
VO
1660 */
1661 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
1662 slot, &mgmt_route, false);
1663 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
1664 }
1665
1666 return NETDEV_TX_OK;
1667}
1668
/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomit context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
					      struct sk_buff *skb)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];
	struct skb_shared_hwtstamps shwt = {0};
	int slot = sp->mgmt_slot;
	struct sk_buff *clone;
	u64 now, ts;
	int rc;

	/* The tragic fact about the switch having 4x2 slots for installing
	 * management routes is that all of them except one are actually
	 * useless.
	 * If 2 slots are simultaneously configured for two BPDUs sent to the
	 * same (multicast) DMAC but on different egress ports, the switch
	 * would confuse them and redirect first frame it receives on the CPU
	 * port towards the port configured on the numerically first slot
	 * (therefore wrong port), then second received frame on second slot
	 * (also wrong port).
	 * So for all practical purposes, there needs to be a lock that
	 * prevents that from happening. The slot used here is utterly useless
	 * (could have simply been 0 just as fine), but we are doing it
	 * nonetheless, in case a smarter idea ever comes up in the future.
	 */
	mutex_lock(&priv->mgmt_lock);

	/* The clone, if there, was made by dsa_skb_tx_timestamp */
	clone = DSA_SKB_CB(skb)->clone;

	sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);

	/* No TX timestamp was requested for this frame */
	if (!clone)
		goto out;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	mutex_lock(&priv->ptp_lock);

	/* Read the full PTP clock now, to reconstruct the partial egress
	 * timestamp retrieved from the hardware below.
	 */
	now = priv->tstamp_cc.read(&priv->tstamp_cc);

	rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
	if (rc < 0) {
		dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
		kfree_skb(clone);
		goto out_unlock_ptp;
	}

	ts = sja1105_tstamp_reconstruct(priv, now, ts);
	ts = timecounter_cyc2time(&priv->tstamp_tc, ts);

	shwt.hwtstamp = ns_to_ktime(ts);
	skb_complete_tx_timestamp(clone, &shwt);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_lock);
out:
	mutex_unlock(&priv->mgmt_lock);
	return NETDEV_TX_OK;
}
1733
8456721d
VO
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
 * which cannot be reconfigured at runtime. So a switch reset is required.
 */
static int sja1105_set_ageing_time(struct dsa_switch *ds,
				   unsigned int ageing_time)
{
	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
	struct sja1105_private *priv = ds->priv;
	struct sja1105_table *table;
	unsigned int maxage;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
	l2_lookup_params = table->entries;

	maxage = SJA1105_AGEING_TIME_MS(ageing_time);

	/* Avoid the costly switch reset when the value is unchanged */
	if (l2_lookup_params->maxage == maxage)
		return 0;

	l2_lookup_params->maxage = maxage;

	return sja1105_static_config_reload(priv);
}
1757
f3097be2
VO
/* container_of helpers to recover the driver state from the work_struct */
#define to_tagger(d) \
	container_of((d), struct sja1105_tagger_data, rxtstamp_work)
#define to_sja1105(d) \
	container_of((d), struct sja1105_private, tagger_data)

/* Deferred RX timestamping: drain the queue of skbs deferred by
 * sja1105_port_rxtstamp, reconstruct each full timestamp from the partial
 * one carried in the meta frame plus a fresh PTP clock read, then deliver
 * the skbs up the stack. Runs in sleepable context (SPI access required).
 */
static void sja1105_rxtstamp_work(struct work_struct *work)
{
	struct sja1105_tagger_data *data = to_tagger(work);
	struct sja1105_private *priv = to_sja1105(data);
	struct sk_buff *skb;
	u64 now;

	mutex_lock(&priv->ptp_lock);

	/* One clock read serves all queued skbs */
	now = priv->tstamp_cc.read(&priv->tstamp_cc);

	while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
		struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
		u64 ts;

		*shwt = (struct skb_shared_hwtstamps) {0};

		ts = SJA1105_SKB_CB(skb)->meta_tstamp;
		ts = sja1105_tstamp_reconstruct(priv, now, ts);
		ts = timecounter_cyc2time(&priv->tstamp_tc, ts);

		shwt->hwtstamp = ns_to_ktime(ts);
		netif_rx_ni(skb);
	}

	mutex_unlock(&priv->ptp_lock);
}
1790
/* Called from dsa_skb_defer_rx_timestamp */
bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tagger_data *data = &priv->tagger_data;

	/* RX timestamping disabled: let the skb flow through normally */
	if (!data->hwts_rx_en)
		return false;

	/* We need to read the full PTP clock to reconstruct the Rx
	 * timestamp. For that we need a sleepable context.
	 */
	skb_queue_tail(&data->skb_rxtstamp_queue, skb);
	schedule_work(&data->rxtstamp_work);
	/* Returning true tells DSA we took ownership of the skb */
	return true;
}
1808
47ed985e
VO
/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
 * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
 * callback, where we will timestamp it synchronously.
 */
bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];

	/* Per-port TX timestamping enable flag, set via hwtstamp ioctl */
	if (!sp->hwts_tx_en)
		return false;

	return true;
}
1824
8aa9ebcc
VO
/* DSA switch operations implemented by this driver */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_config	= sja1105_mac_config,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_prepare	= sja1105_vlan_prepare,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_prepare	= sja1105_mdb_prepare,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_deferred_xmit	= sja1105_port_deferred_xmit,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
};
1853
/* Verify over SPI that the chip we probed matches the compatible string:
 * first the device ID register, then the part number from the PROD_ID
 * register (needed to distinguish pin-compatible variants).
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	u64 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
				  &device_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	if (device_id != priv->info->device_id) {
		dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
			priv->info->device_id, device_id);
		return -ENODEV;
	}

	rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
					 prod_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* Extract the part number field from the raw PROD_ID buffer */
	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	if (part_no != priv->info->part_no) {
		dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
			priv->info->part_no, part_no);
		return -ENODEV;
	}

	return 0;
}
1889
/* SPI probe: reset the chip (if a reset GPIO is given), set up the SPI bus,
 * verify the device ID against the matched compatible, wire up the tagger
 * data shared with the CPU port, and register the DSA switch.
 */
static int sja1105_probe(struct spi_device *spi)
{
	struct sja1105_tagger_data *tagger_data;
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, i;

	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	/* Per-variant register map and IDs from the OF match table */
	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
	if (!ds)
		return -ENOMEM;

	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	/* State shared with the sja1105 tagger for RX timestamping */
	tagger_data = &priv->tagger_data;
	skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
	INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);

	/* Connections between dsa_port and sja1105_port */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		struct sja1105_port *sp = &priv->ports[i];

		ds->ports[i].priv = sp;
		sp->dp = &ds->ports[i];
		sp->data = tagger_data;
	}
	mutex_init(&priv->mgmt_lock);

	return dsa_register_switch(priv->ds);
}
1963
/* SPI remove: unregister the PTP clock and the DSA switch, then free the
 * static config shadow. priv itself is devm-managed.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	sja1105_ptp_clock_unregister(priv);
	dsa_unregister_switch(priv->ds);
	sja1105_static_config_free(&priv->static_config);
	return 0;
}
1973
/* Device tree match table: one entry per supported chip variant, each
 * carrying its variant-specific sja1105_info descriptor.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
1984
1985static struct spi_driver sja1105_driver = {
1986 .driver = {
1987 .name = "sja1105",
1988 .owner = THIS_MODULE,
1989 .of_match_table = of_match_ptr(sja1105_dt_ids),
1990 },
1991 .probe = sja1105_probe,
1992 .remove = sja1105_remove,
1993};
1994
/* Registers/unregisters the SPI driver and generates module init/exit */
module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");