// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>

#include "tag.h"
#include "tag_8021q.h"

#define SJA1105_NAME				"sja1105"
#define SJA1110_NAME				"sja1110"

/* Is this a TX or an RX header? */
#define SJA1110_HEADER_HOST_TO_SWITCH		BIT(15)

/* RX header */
#define SJA1110_RX_HEADER_IS_METADATA		BIT(14)
#define SJA1110_RX_HEADER_HOST_ONLY		BIT(13)
#define SJA1110_RX_HEADER_HAS_TRAILER		BIT(12)

/* Trap-to-host format (no trailer present) */
#define SJA1110_RX_HEADER_SRC_PORT(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_HEADER_SWITCH_ID(x)		((x) & GENMASK(3, 0))

/* Timestamp format (trailer present) */
#define SJA1110_RX_HEADER_TRAILER_POS(x)	((x) & GENMASK(11, 0))

#define SJA1110_RX_TRAILER_SWITCH_ID(x)		(((x) & GENMASK(7, 4)) >> 4)
#define SJA1110_RX_TRAILER_SRC_PORT(x)		((x) & GENMASK(3, 0))

/* Meta frame format (for 2-step TX timestamps) */
#define SJA1110_RX_HEADER_N_TS(x)		(((x) & GENMASK(8, 4)) >> 4)

/* TX header */
#define SJA1110_TX_HEADER_UPDATE_TC		BIT(14)
#define SJA1110_TX_HEADER_TAKE_TS		BIT(13)
#define SJA1110_TX_HEADER_TAKE_TS_CASC		BIT(12)
#define SJA1110_TX_HEADER_HAS_TRAILER		BIT(11)

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */
#define SJA1110_TX_HEADER_PRIO(x)		(((x) << 7) & GENMASK(10, 7))
#define SJA1110_TX_HEADER_TSTAMP_ID(x)		((x) & GENMASK(7, 0))

/* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */
#define SJA1110_TX_HEADER_TRAILER_POS(x)	((x) & GENMASK(10, 0))

#define SJA1110_TX_TRAILER_TSTAMP_ID(x)		(((x) << 24) & GENMASK(31, 24))
#define SJA1110_TX_TRAILER_PRIO(x)		(((x) << 21) & GENMASK(23, 21))
#define SJA1110_TX_TRAILER_SWITCHID(x)		(((x) << 12) & GENMASK(15, 12))
#define SJA1110_TX_TRAILER_DESTPORTS(x)		(((x) << 1) & GENMASK(11, 1))

#define SJA1110_META_TSTAMP_SIZE		10

#define SJA1110_HEADER_LEN			4
#define SJA1110_RX_TRAILER_LEN			13
#define SJA1110_TX_TRAILER_LEN			4
#define SJA1110_MAX_PADDING_LEN			15

#define SJA1105_HWTS_RX_EN			0

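/* Tagger-private state, allocated in sja1105_connect(). The "data" member
 * must stay first, because the switch driver side expects to find the shared
 * struct sja1105_tagger_data at the start of ds->tagger_data.
 */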
struct sja1105_tagger_private {
	struct sja1105_tagger_data data; /* Must be first */
	unsigned long state;
	/* Protects concurrent access to the meta state machine
	 * from taggers running on multiple ports on SMP systems
	 */
	spinlock_t meta_lock;
	struct sk_buff *stampable_skb;
	struct kthread_worker *xmit_worker;
};

static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
	return ds->tagger_data;
}

/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
		return false;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_A)
		return true;
	if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
		    SJA1105_LINKLOCAL_FILTER_B)
		return true;
	return false;
}

struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};

static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
	packing(buf,     &meta->tstamp,      31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4,  7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3,  7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port,  7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,    7, 0, 1, UNPACK, 0);
}

static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = eth_hdr(skb);
	u64 smac = ether_addr_to_u64(hdr->h_source);
	u64 dmac = ether_addr_to_u64(hdr->h_dest);

	if (smac != SJA1105_META_SMAC)
		return false;
	if (dmac != SJA1105_META_DMAC)
		return false;
	if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
		return false;
	return true;
}

/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
	struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
	void (*xmit_work_fn)(struct kthread_work *work);
	struct sja1105_deferred_xmit_work *xmit_work;
	struct kthread_worker *xmit_worker;

	xmit_work_fn = tagger_data->xmit_work_fn;
	xmit_worker = priv->xmit_worker;

	if (!xmit_work_fn || !xmit_worker)
		return NULL;

	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return NULL;

	kthread_init_work(&xmit_work->work, xmit_work_fn);
	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	xmit_work->dp = dp;
	xmit_work->skb = skb_get(skb);

	kthread_queue_work(xmit_worker, &xmit_work->work);

	return NULL;
}

/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* Since VLAN awareness is global, then if this port is VLAN-unaware,
	 * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
	 */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
	 * we're sure about that). It may not be on this port though, so we
	 * need to find it.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *br = dsa_port_bridge_dev_get(other_dp);

		if (!br)
			continue;

		/* Error is returned only if CONFIG_BRIDGE_VLAN_FILTERING,
		 * which seems pointless to handle, as our port cannot become
		 * VLAN-aware in that case.
		 */
		br_vlan_get_proto(br, &proto);

		return proto;
	}

	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}

static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	unsigned int bridge_num = dsa_port_bridge_num_get(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 tx_vid;

	/* If the port is under a VLAN-aware bridge, just slide the
	 * VLAN-tagged packet into the FDB and hope for the best.
	 * This works because we support a single VLAN-aware bridge
	 * across the entire dst, and its VLANs cannot be shared with
	 * any standalone port.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* If the port is under a VLAN-unaware bridge, use an imprecise
	 * TX VLAN that targets the bridge's entire broadcast domain,
	 * instead of just the specific port.
	 */
	tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}

/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA master.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If VLAN tag is in hwaccel area, move it to the payload
	 * to deal with both cases uniformly and to ensure that
	 * the VLANs are added in the right order.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = skb_vlan_eth_hdr(skb);

	/* If skb is already VLAN-tagged, leave that VLAN ID in place */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}

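/* SJA1105 TX path: bridged packets (skb->offload_fwd_mark set) take the
 * imprecise path above, link-local (management) packets are pvid-tagged and
 * deferred to the switch driver, which transmits them over an SPI-installed
 * management route, and everything else gets a tag_8021q VLAN whose PCP is
 * derived from the TX queue.
 */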
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}

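/* SJA1110 TX path: data packets use tag_8021q VLANs just like SJA1105, while
 * control (link-local) packets carry an in-band control extension: 4 bytes at
 * the EtherType position (the ETH_P_SJA1110 EtherType plus a 16-bit TX
 * header) and a 4-byte trailer appended at the end of the frame, encoding the
 * priority, the destination switch and the destination port mask.
 */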
static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting control packets is done using in-band control
	 * extensions, while data packets are transmitted using
	 * tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}

static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}

/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;

		priv = sja1105_tagger_private(ds);

		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&priv->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (priv->stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(priv->stampable_skb);
		}

		/* Hold a reference to avoid dsa_switch_rcv
		 * from freeing the skb.
		 */
		priv->stampable_skb = skb_get(skb);
		spin_unlock(&priv->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;
		struct sk_buff *stampable_skb;

		priv = sja1105_tagger_private(ds);

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			return NULL;

		spin_lock(&priv->meta_lock);

		stampable_skb = priv->stampable_skb;
		priv->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&priv->meta_lock);
	}

	return skb;
}

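/* Exported to the switch driver through struct sja1105_tagger_data (see
 * sja1105_connect() below), so that it can query and toggle whether the meta
 * state machine should pair link-local frames with their RX timestamps.
 */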
static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
}

static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
{
	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);

	if (on)
		set_bit(SJA1105_HWTS_RX_EN, &priv->state);
	else
		clear_bit(SJA1105_HWTS_RX_EN, &priv->state);

	/* Initialize the meta state machine to a known state */
	if (!priv->stampable_skb)
		return;

	kfree_skb(priv->stampable_skb);
	priv->stampable_skb = NULL;
}

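/* On RX, the VLAN (tag_8021q or bridge VLAN) may either still be inside the
 * packet, with an ETH_P_SJA1105 or ETH_P_8021Q TPID, or already extracted
 * into the hwaccel area by the DSA master.
 */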
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
	u16 tpid = ntohs(eth_hdr(skb)->h_proto);

	return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
	       skb_vlan_tag_present(skb);
}

static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
{
	return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}

/* If the VLAN in the packet is a tag_8021q one, set @source_port and
 * @switch_id and strip the header. Otherwise set @vid and keep it in the
 * packet.
 */
static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
			     int *switch_id, int *vbid, u16 *vid)
{
	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
	u16 vlan_tci;

	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
		return dsa_8021q_rcv(skb, source_port, switch_id, vbid);

	/* Try our best with imprecise RX */
	*vid = vlan_tci & VLAN_VID_MASK;
}

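/* SJA1105 RX path: decode the source port either from the tag_8021q VLAN
 * (normal traffic), from the DMAC bytes mangled by the incl_srcpt option
 * (link-local/management traffic), or from the meta follow-up frame.
 */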
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		return NULL;
	}

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}

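/* SJA1110 meta frames carry up to SJA1110_RX_HEADER_N_TS(rx_header) + 1
 * timestamp records of SJA1110_META_TSTAMP_SIZE bytes each; every record
 * holds a timestamp ID, the source port, a direction bit and a 64-bit
 * timestamp, which are passed to the switch driver's meta_tstamp_handler().
 */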
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct sja1105_tagger_data *tagger_data;
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

	tagger_data = sja1105_tagger_data(ds);
	if (!tagger_data->meta_tstamp_handler)
		return NULL;

	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
						 tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame, we've consumed the timestamps it contained */
	return NULL;
}

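/* Parse the 4-byte in-band control extension that follows the ETH_P_SJA1110
 * EtherType: meta frames are consumed for their TX timestamps, timestamped
 * frames carry a trailer with the RX timestamp and the source port/switch ID,
 * and trap-to-host frames encode the source port/switch ID in the header
 * itself. The header (and any trailer plus padding) is stripped before the
 * frame continues up the stack.
 */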
static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							     int *source_port,
							     int *switch_id,
							     bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
	 * what we need because the caller has checked the EtherType (which is
	 * located 2 bytes back) and we just need a pointer to the header that
	 * comes afterwards.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame, we have a trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so we need to use packing()
		 * to get it
		 */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address. Right now skb->data
		 * is still as set by the DSA master, so to trim away the
		 * padding and trailer we need to account for the fact that
		 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
		 */
		if (pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN))
			return NULL;
	/* Trap-to-host frame, no timestamp trailer */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	/* Advance skb->data past the DSA header */
	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header
	 * so that eth_hdr() continues to work properly.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}

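/* SJA1110 RX path: strip the optional in-band control extension first, then
 * fall back to the same tag_8021q VLAN decoding as SJA1105.
 */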
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}

static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (unlikely(sja1105_is_link_local(skb)))
		return;

	dsa_tag_generic_flow_dissect(skb, proto, offset);
}

static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* Management frames have 2 DSA tags on RX, so the needed_headroom we
	 * declared is fine for the generic dissector adjustment procedure.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return dsa_tag_generic_flow_dissect(skb, proto, offset);

	/* For the rest, there is a single DSA tag, the tag_8021q one */
	*offset = VLAN_HLEN;
	*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}

static void sja1105_disconnect(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = ds->tagger_data;

	kthread_destroy_worker(priv->xmit_worker);
	kfree(priv);
	ds->tagger_data = NULL;
}

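/* Allocate the tagger-private state and the kthread worker used for deferred
 * (management route) transmission. Shared by the sja1105 and sja1110 taggers.
 */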
static int sja1105_connect(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data;
	struct sja1105_tagger_private *priv;
	struct kthread_worker *xmit_worker;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->meta_lock);

	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
					    ds->dst->index, ds->index);
	if (IS_ERR(xmit_worker)) {
		err = PTR_ERR(xmit_worker);
		kfree(priv);
		return err;
	}

	priv->xmit_worker = xmit_worker;
	/* Export functions for switch driver use */
	tagger_data = &priv->data;
	tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
	tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
	ds->tagger_data = priv;

	return 0;
}

static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = SJA1105_NAME,
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105, SJA1105_NAME);

static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = SJA1110_NAME,
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110, SJA1110_NAME);

static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");