// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2019 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static void validate_rx_csum(struct dpaa2_eth_priv *priv,
			     u32 fd_status,
			     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void free_rx_fd(struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
					const struct dpaa2_fd *fd,
					void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

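/* Buffer geometry assumed above (a sketch, not additional driver logic):
 * build_skb() wraps the existing buffer in place, so
 * DPAA2_ETH_RX_BUF_RAW_SIZE must cover both the data area and the
 * skb_shared_info tailroom. The hardware-written annotation occupies the
 * first fd_offset bytes and becomes skb headroom via skb_reserve(), while
 * skb_put() exposes the fd_length bytes of frame data.
 */
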
/* Build a non-linear (fragmented) skb based on a S/G table */
static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, DPAA2_ETH_RX_BUF_SIZE);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

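/* Worked example for the page_offset computation above (illustrative
 * values): with PAGE_SIZE = 4096 and sg_vaddr pointing 0x100 bytes into
 * the second subpage of a compound page, (sg_vaddr & (PAGE_SIZE - 1)) is
 * 0x100 and page_address(page) - page_address(head_page) is 4096, giving
 * a frag offset of 0x1100 relative to head_page.
 */
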
/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

static void xdp_release_buf(struct dpaa2_eth_priv *priv,
			    struct dpaa2_eth_channel *ch,
			    dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
		       void *buf_start, u16 queue_id)
{
	struct dpaa2_eth_fq *fq;
	struct dpaa2_faead *faead;
	u32 ctrl, frc;
	int i, err;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, fd, 0);
		if (err != -EBUSY)
			break;
	}

	return err;
}

static u32 run_xdp(struct dpaa2_eth_priv *priv,
		   struct dpaa2_eth_channel *ch,
		   struct dpaa2_eth_fq *rx_fq,
		   struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);
	xdp.rxq = &ch->xdp_rxq;

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
		if (err) {
			xdp_release_buf(priv, ch, addr);
			percpu_stats->tx_errors++;
			ch->stats.xdp_tx_err++;
		} else {
			percpu_stats->tx_packets++;
			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		/* fall through */
	case XDP_DROP:
		xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
		ch->buf_count--;
		xdp.data_hard_start = vaddr;
		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err))
			ch->stats.xdp_drop++;
		else
			ch->stats.xdp_redirect++;
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}

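/* For reference, the smallest program this hook can run is one returning a
 * fixed verdict; a sketch (not part of this driver):
 *
 *	SEC("xdp")
 *	int xdp_pass(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 *
 * XDP_PASS hands the frame to the regular skb path in dpaa2_eth_rx(), while
 * XDP_TX, XDP_REDIRECT and XDP_DROP are all resolved here without building
 * an skb.
 */
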
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
			       DMA_BIDIRECTIONAL);
		skb = build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
			       DMA_BIDIRECTIONAL);
		skb = build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int consume_frames(struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
{
	struct dpaa2_faead *faead;
	u32 ctrl, frc;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);
}

/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 * - offset is 0
	 * - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, sgt_buf);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

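/* Layout of the buffer produced above, as later consumed by free_tx_fd()
 * (a sketch of offsets already present in the code):
 *
 *	sgt_buf
 *	+---------------------------------------+
 *	| struct dpaa2_eth_swa (skb, scl,       |
 *	| num_sg, sgt_size backpointers)        |
 *	| ... up to priv->tx_data_offset ...    |
 *	+---------------------------------------+
 *	| struct dpaa2_sg_entry[num_dma_bufs]   |
 *	+---------------------------------------+
 */
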
/* Create a frame descriptor based on a linear skb */
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, buffer_start);

	return 0;
}

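/* Alignment sketch for the PTR_ALIGN() use above: subtracting
 * DPAA2_ETH_TX_BUF_ALIGN first and then rounding up yields the highest
 * aligned address at or below the original buffer_start, e.g. (assuming
 * 64-byte alignment) 0x...8c8 -> 0x...8c0. The candidate is only used when
 * it still lies inside the skb headroom (aligned_start >= skb->head).
 */
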
/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_fq *fq,
		       const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		skb = swa->sg.skb;

		/* Unmap the scatterlist */
		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
			     DMA_BIDIRECTIONAL);
		kfree(swa->sg.scl);

		/* Unmap the SGT buffer */
		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
				 DMA_BIDIRECTIONAL);
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single)
		skb_free_frag(buffer_start);

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
	if (skb_headroom(skb) < needed_headroom) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, needed_headroom);
		if (unlikely(!ns)) {
			percpu_stats->tx_dropped++;
			goto err_alloc_headroom;
		}
		percpu_extras->tx_reallocs++;

		if (skb->sk)
			skb_set_owner_w(ns, skb->sk);

		dev_kfree_skb(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueue might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
err_alloc_headroom:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

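/* Reverse-mapping example for the prio computation above: with
 * net_dev->num_tc = 4, netdev tc indices 0..3 map to hardware priority
 * levels 3..0, so tc 3 is serviced at level 0, which the hardware treats
 * as the highest priority.
 */
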
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int add_bufs(struct dpaa2_eth_priv *priv,
		    struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate buffer visible to WRIOP + skb shared info +
		 * alignment padding.
		 * Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, DPAA2_ETH_RX_BUF_SIZE,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

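/* Seeding arithmetic: each add_bufs() call releases at most
 * DPAA2_ETH_BUFS_PER_CMD buffers, so fully seeding one channel takes
 * roughly DPAA2_ETH_NUM_BUFS / DPAA2_ETH_BUFS_PER_CMD release commands;
 * e.g. a 1024-buffer target with 7 buffers per command (illustrative
 * values) needs about 147 commands per channel.
 */
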
/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int refill_pool(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_channel *ch,
		       u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

static int pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		refill_pool(priv, ch, priv->bpid);

		store_cleaned = consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();

	return work_done;
}

static void enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
{
	struct dpni_taildrop td = {0};
	int i, err;

	if (priv->rx_td_enabled == enable)
		return;

	td.enable = enable;
	td.threshold = DPAA2_ETH_TAILDROP_THRESH;

	for (i = 0; i < priv->num_fqs; i++) {
		if (priv->fq[i].type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
					priv->fq[i].flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop() failed\n");
			break;
		}
	}

	priv->rx_td_enabled = enable;
}

static int link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (priv->mac)
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}

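/* Truth table for the tx_pause XOR above: PAUSE=1/ASYM=0 and PAUSE=0/ASYM=1
 * both mean Tx pause frames are generated (tx_pause = true), while
 * PAUSE=ASYM (both set or both clear) means they are not; taildrop is
 * therefore enabled only in the latter two cases.
 */
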
static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	if (!priv->mac) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}
	enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (!priv->mac) {
		/* If the DPMAC object has already processed the link up
		 * interrupt, we have to learn the link state ourselves.
		 */
		err = link_state_update(priv);
		if (err < 0) {
			netdev_err(net_dev, "Can't update link state\n");
			goto link_state_err;
		}
	} else {
		phylink_start(priv->mac->phylink);
	}

	return 0;

link_state_err:
enable_err:
	disable_ch_napi(priv);
	drain_pool(priv);
	return err;
}

/* Total number of in-flight frames on ingress queues */
static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed");
			break;
		}
		total += fcnt;
	}

	return total;
}

static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}

#define DPNI_TX_PENDING_VER_MAJOR	7
#define DPNI_TX_PENDING_VER_MINOR	13
static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
	msleep(500);
}

static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	if (!priv->mac) {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	} else {
		phylink_stop(priv->mac->phylink);
	}

	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
	wait_for_egress_fq_empty(priv);

	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	wait_for_ingress_fq_empty(priv);
	disable_ch_napi(priv);

	/* Empty the buffer pool */
	drain_pool(priv);

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/** Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_uc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_mc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	add_mc_hw_addr(net_dev, priv);
	add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		priv->rx_tstamp = true;
		/* TS is set for all frame types, not only those requested */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
			-EFAULT : 0;
}

static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	if (cmd == SIOCSHWTSTAMP)
		return dpaa2_eth_ts_ioctl(dev, rq, cmd);

	if (priv->mac)
		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);

	return -EOPNOTSUPP;
}

static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{
	int mfl, linear_mfl;

	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;

	if (mfl > linear_mfl) {
		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
			    linear_mfl - VLAN_ETH_HLEN);
		return false;
	}

	return true;
}

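/* The linear_mfl bound above accounts for everything that must share one
 * Rx buffer with the frame:
 *
 *	linear_mfl = DPAA2_ETH_RX_BUF_SIZE
 *		     - DPAA2_ETH_RX_HWA_SIZE		(hardware annotation)
 *		     - dpaa2_eth_rx_head_room(priv)	(driver headroom)
 *		     - XDP_PACKET_HEADROOM		(256 bytes for XDP)
 *
 * Any MTU whose L2 frame exceeds this would force scatter/gather Rx, which
 * XDP cannot handle.
 */
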
static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
	int mfl, err;

	/* We enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than maximum size supported in hardware
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}

static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}

static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	if (prog)
		bpf_prog_add(prog, priv->num_channels);

	up = netif_running(dev);
	need_update = (!!priv->xdp_prog != !!prog);

	if (up)
		dpaa2_eth_stop(dev);

	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
	if (need_update) {
		err = set_rx_mfl(priv, dev->mtu, !!prog);
		if (err)
			goto out_err;
		err = update_rx_buffer_headroom(priv, !!prog);
		if (err)
			goto out_err;
	}

	old = xchg(&priv->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		old = xchg(&ch->xdp.prog, prog);
		if (old)
			bpf_prog_put(old);
	}

	if (up) {
		err = dpaa2_eth_open(dev);
		if (err)
			return err;
	}

	return 0;

out_err:
	if (prog)
		bpf_prog_sub(prog, priv->num_channels);
	if (up)
		dpaa2_eth_open(dev);

	return err;
}

static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return setup_xdp(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
				    struct xdp_frame *xdpf)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	unsigned int needed_headroom;
	struct dpaa2_eth_swa *swa;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd fd;
	void *buffer_start, *aligned_start;
	dma_addr_t addr;
	int err, i;

	/* We require a minimum headroom to be able to transmit the frame.
	 * Otherwise return an error and let the original net_device handle it
	 */
	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
	if (xdpf->headroom < needed_headroom)
		return -EINVAL;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	/* Align FD address, if possible */
	buffer_start = xdpf->data - needed_headroom;
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* fill in necessary fields here */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		percpu_stats->tx_dropped++;
		return -ENOMEM;
	}

	dpaa2_fd_set_addr(&fd, addr);
	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(&fd, xdpf->len);
	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);

	fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, 0);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* let the Rx device handle the cleanup */
		return err;
	}

	percpu_stats->tx_packets++;
	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);

	return 0;
}

static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	int drops = 0;
	int i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	return n - drops;
}

static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues, netdev_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);
	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < netdev_queues; i++) {
		fq = &priv->fq[i % num_queues];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}

static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc, num_queues;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_queues = dpaa2_eth_queue_count(priv);
	num_tc = mqprio->num_tc;

	if (num_tc == net_dev->num_tc)
		return 0;

	if (num_tc > dpaa2_eth_tc_count(priv)) {
		netdev_err(net_dev, "Max %d traffic classes supported\n",
			   dpaa2_eth_tc_count(priv));
		return -EOPNOTSUPP;
	}

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		netif_set_real_num_tx_queues(net_dev, num_queues);
		goto out;
	}

	netdev_set_num_tc(net_dev, num_tc);
	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);

out:
	update_xps(priv);

	return 0;
}

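/* Usage sketch (illustrative; interface name and queue counts depend on
 * the platform):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 8@0 8@8 hw 1
 *
 * maps skb priorities 0-3 to tc 0 and 4-7 to tc 1, which this callback
 * offloads onto the DPNI's hardware priority levels.
 */
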
static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
	.ndo_do_ioctl = dpaa2_eth_ioctl,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_bpf = dpaa2_eth_xdp,
	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
	.ndo_setup_tc = dpaa2_eth_setup_tc,
};

static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule_irqoff(&ch->napi);
}

/* Allocate and configure a DPCON object */
static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return ERR_PTR(err);
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto free;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto close;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto close;
	}

	return dpcon;

close:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
free:
	fsl_mc_object_free(dpcon);

	return ERR_PTR(err);
}

static void free_dpcon(struct dpaa2_eth_priv *priv,
		       struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (IS_ERR_OR_NULL(channel->dpcon)) {
		err = PTR_ERR_OR_ZERO(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}

static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there's fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification failed()\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER) {
		for (i = 0; i < priv->num_channels; i++) {
			channel = priv->channel[i];
			nctx = &channel->nctx;
			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
			free_channel(priv, channel);
		}
		priv->num_channels = 0;
		return err;
	}

	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

static void free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		free_channel(priv, ch);
	}
}

static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
						    int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here. Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = get_affine_channel(priv, fq->target_cpu);
	}

	update_xps(priv);
}


static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
}
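
/* Example (illustrative): for dpaa2_eth_queue_count() == 2 the fq array is
 * laid out as { TxConf/flow 0, TxConf/flow 1, Rx/flow 0, Rx/flow 1 }: all
 * Tx conf FQs first, then all Rx FQs, each indexed by its flowid.
 */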

/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}
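
/* The buffer pool id queried above identifies this interface's pool to the
 * hardware; the Rx buffer seeding and draining paths elsewhere in this file
 * pass priv->bpid when releasing buffers to and acquiring them from QBMan.
 */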

static void free_dpbp(struct dpaa2_eth_priv *priv)
{
	drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}

static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_buffer_layout buf_layout = {0};
	u16 rx_buf_align;
	int err;

	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always provided correctly on rev1.
	 * We need to check for both alternatives in this situation.
	 */
	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
	else
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;

	/* tx buffer */
	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		return err;
	}

	/* tx-confirm buffer */
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		return err;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		return err;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* rx buffer */
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.data_align = rx_buf_align;
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
	buf_layout.private_data_size = 0;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		return err;
	}

	return 0;
}

#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
#define DPNI_ENQUEUE_FQID_VER_MINOR 9

static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_fq *fq,
				       struct dpaa2_fd *fd, u8 prio)
{
	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
					   priv->tx_qdid, prio,
					   fq->tx_qdbin, fd);
}

static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_fq *fq,
				       struct dpaa2_fd *fd, u8 prio)
{
	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
					   fq->tx_fqid[prio], fd);
}

static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		priv->enqueue = dpaa2_eth_enqueue_qd;
	else
		priv->enqueue = dpaa2_eth_enqueue_fq;
}
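
/* Illustrative sketch (not part of the driver): the Tx datapath is expected
 * to transmit through the callback selected above, e.g.:
 *
 *	err = priv->enqueue(priv, fq, &fd, prio);
 *
 * so the QDID- vs FQID-based decision is made once here rather than on
 * every transmitted frame.
 */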

static int set_pause(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_link_cfg link_cfg = {0};
	int err;

	/* Get the default link options so we don't override other flags */
	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_get_link_cfg() failed\n");
		return err;
	}

	/* By default, enable both Rx and Tx pause frames */
	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = link_cfg.options;

	return 0;
}

static void update_tx_fqids(struct dpaa2_eth_priv *priv)
{
	struct dpni_queue_id qid = {0};
	struct dpaa2_eth_fq *fq;
	struct dpni_queue queue;
	int i, j, err;

	/* We only use Tx FQIDs for FQID-based enqueue, so check
	 * if DPNI version supports it before updating FQIDs
	 */
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		return;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_TX_CONF_FQ)
			continue;
		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
					     DPNI_QUEUE_TX, j, fq->flowid,
					     &queue, &qid);
			if (err)
				goto out_err;

			fq->tx_fqid[j] = qid.fqid;
			if (fq->tx_fqid[j] == 0)
				goto out_err;
		}
	}

	priv->enqueue = dpaa2_eth_enqueue_fq;

	return;

out_err:
	netdev_info(priv->net_dev,
		    "Error reading Tx FQID, fallback to QDID-based enqueue\n");
	priv->enqueue = dpaa2_eth_enqueue_qd;
}
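
/* update_tx_fqids() is invoked from the DPNI interrupt handler further down
 * when a DPNI_IRQ_EVENT_ENDPOINT_CHANGED event fires: reconnecting the DPNI
 * to another endpoint may reallocate its Tx frame queues, so the cached
 * FQIDs must be refreshed before FQID-based enqueue can be trusted again.
 */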

/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_eth_priv *priv;
	struct net_device *net_dev;
	int err;

	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	/* get a handle for the DPNI object */
	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_open() failed\n");
		return err;
	}

	/* Check if we can work with this DPNI object */
	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
				   &priv->dpni_ver_minor);
	if (err) {
		dev_err(dev, "dpni_get_api_version() failed\n");
		goto close;
	}
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
			priv->dpni_ver_major, priv->dpni_ver_minor,
			DPNI_VER_MAJOR, DPNI_VER_MINOR);
		err = -ENOTSUPP;
		goto close;
	}

	ls_dev->mc_io = priv->mc_io;
	ls_dev->mc_handle = priv->mc_token;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_reset() failed\n");
		goto close;
	}

	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
				  &priv->dpni_attrs);
	if (err) {
		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
		goto close;
	}

	err = set_buffer_layout(priv);
	if (err)
		goto close;

	set_enqueue_mode(priv);

	/* Enable pause frame support */
	if (dpaa2_eth_has_pause_support(priv)) {
		err = set_pause(priv);
		if (err)
			goto close;
	}

	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}

	return 0;

close:
	dpni_close(priv->mc_io, 0, priv->mc_token);

	return err;
}

static void free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}

static int setup_rx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 1;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(RX) failed\n");
		return err;
	}

	/* xdp_rxq setup */
	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
			       fq->flowid);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg failed\n");
		return err;
	}

	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
					 MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
		return err;
	}

	return 0;
}

static int setup_tx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int i, err;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, i, fq->flowid,
				     &queue, &qid);
		if (err) {
			dev_err(dev, "dpni_get_queue(TX) failed\n");
			return err;
		}
		fq->tx_fqid[i] = qid.fqid;
	}

	/* All Tx queues belonging to the same flowid have the same qdbin */
	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
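
/* Example (illustrative): a key built from DPAA2_ETH_DIST_IPSRC |
 * DPAA2_ETH_DIST_IPDST concatenates the two 4-byte IP address fields above,
 * so dpaa2_eth_cls_key_size() reports 8 bytes for it.
 */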

/* Configure the Rx hash key using the legacy API */
static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_tc_dist failed\n");

	return err;
}

/* Configure the Rx hash key using the new API */
static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_hash_dist failed\n");

	return err;
}

/* Configure the Rx flow classification key */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_fs_dist failed\n");

	return err;
}

/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(u64 fields)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (!(fields & dist_fields[i].id))
			continue;
		size += dist_fields[i].size;
	}

	return size;
}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}
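
/* Example: NH_FLD_IP_SRC is preceded in dist_fields by two 6-byte MAC
 * addresses, the 2-byte ethertype and the 2-byte VLAN TCI, so
 * dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC) returns 16.
 */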

/* Prune unused fields from the classification rule.
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}
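
/* Example (illustrative): for fields = DPAA2_ETH_DIST_IPSRC |
 * DPAA2_ETH_DIST_IPDST, the IP source bytes move from offset 16 to 0 and
 * the IP destination bytes from offset 20 to 4, compacting the rule into
 * an 8-byte key.
 */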

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = config_legacy_hash_key(priv, key_iova);
		else
			err = config_hash_key(priv, key_iova);
	} else {
		err = config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}
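
/* dpaa2_eth_set_hash() is the entry point used by the ethtool rx-flow-hash
 * configuration path: RXH_* bits requested by the user are translated into
 * the DPAA2_ETH_DIST_* field ids understood by dpaa2_eth_set_dist_key().
 */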

int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = setup_tx_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}
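
/* Address precedence implemented above: a firmware/bootloader provided port
 * MAC address wins, then a non-zero address already stored on the DPNI, and
 * only if both are all-zero do we generate and commit a random one.
 */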

static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_connect(mac);
	if (err) {
		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
		kfree(mac);
		return err;
	}
	priv->mac = mac;

	return 0;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (!priv->mac)
		return;

	dpaa2_mac_disconnect(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		set_mac_addr(netdev_priv(net_dev));
		update_tx_fqids(priv);

		rtnl_lock();
		if (priv->mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}
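
/* The handler above runs in threaded context (it is registered below in
 * setup_irqs() with a NULL hard-irq handler), so it is allowed to sleep;
 * this is what makes the blocking MC commands and rtnl locking safe here.
 */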

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}
*ls_dev
)
3673 struct net_device
*net_dev
;
3674 struct dpaa2_eth_priv
*priv
;
3677 net_dev
= dev_get_drvdata(dev
);
3678 priv
= netdev_priv(net_dev
);
3680 #ifdef CONFIG_DEBUG_FS
3681 dpaa2_dbg_remove(priv
);
3684 dpaa2_eth_disconnect_mac(priv
);
3687 unregister_netdev(net_dev
);
3689 if (priv
->do_link_poll
)
3690 kthread_stop(priv
->poll_thread
);
3692 fsl_mc_free_irqs(ls_dev
);
3695 free_percpu(priv
->percpu_stats
);
3696 free_percpu(priv
->percpu_extras
);
3703 fsl_mc_portal_free(priv
->mc_io
);
3705 free_netdev(net_dev
);
3707 dev_dbg(net_dev
->dev
.parent
, "Removed interface %s\n", net_dev
->name
);

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);