Commit | Line | Data |
---|---|---|
0bb29b25 | 1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
6e2387e8 | 2 | /* Copyright 2014-2016 Freescale Semiconductor Inc. |
71947923 | 3 | * Copyright 2016-2019 NXP |
6e2387e8 | 4 | */ |
5 | #include <linux/init.h> | |
6 | #include <linux/module.h> | |
7 | #include <linux/platform_device.h> | |
8 | #include <linux/etherdevice.h> | |
9 | #include <linux/of_net.h> | |
10 | #include <linux/interrupt.h> | |
11 | #include <linux/msi.h> | |
12 | #include <linux/kthread.h> | |
08eb2397 | 13 | #include <linux/iommu.h> |
859f998e | 14 | #include <linux/net_tstamp.h> |
6bd067c4 | 15 | #include <linux/fsl/mc.h> |
7e273a8e | 16 | #include <linux/bpf.h> |
17 | #include <linux/bpf_trace.h> | |
859f998e | 18 | #include <net/sock.h> |
19 | ||
6e2387e8 | 20 | #include "dpaa2-eth.h" |
21 | ||
5636187b | 22 | /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files |
23 | * using trace events only need to #include <trace/events/sched.h> | |
24 | */ | |
25 | #define CREATE_TRACE_POINTS | |
26 | #include "dpaa2-eth-trace.h" | |
27 | ||
6e2387e8 | 28 | MODULE_LICENSE("Dual BSD/GPL"); |
29 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | |
30 | MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); | |
31 | ||
08eb2397 | 32 | static void *dpaa2_iova_to_virt(struct iommu_domain *domain, |
33 | dma_addr_t iova_addr) | |
34 | { | |
35 | phys_addr_t phys_addr; | |
36 | ||
37 | phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; | |
38 | ||
39 | return phys_to_virt(phys_addr); | |
40 | } | |
41 | ||
6e2387e8 | 42 | static void validate_rx_csum(struct dpaa2_eth_priv *priv, |
43 | u32 fd_status, | |
44 | struct sk_buff *skb) | |
45 | { | |
46 | skb_checksum_none_assert(skb); | |
47 | ||
48 | /* HW checksum validation is disabled, nothing to do here */ | |
49 | if (!(priv->net_dev->features & NETIF_F_RXCSUM)) | |
50 | return; | |
51 | ||
52 | /* Read checksum validation bits */ | |
53 | if (!((fd_status & DPAA2_FAS_L3CV) && | |
54 | (fd_status & DPAA2_FAS_L4CV))) | |
55 | return; | |
56 | ||
57 | /* Inform the stack there's no need to compute L3/L4 csum anymore */ | |
58 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
59 | } | |
60 | ||
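The two-bit gate above is easy to misread, so here is a standalone sketch of the same rule. The FAS_L3CV/FAS_L4CV values are illustrative stand-ins, not the driver's real DPAA2_FAS_* definitions: the frame is trusted only when the hardware validated both the L3 and the L4 checksum.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAS_L3CV (1u << 3) /* hypothetical: L3 checksum validated */
#define FAS_L4CV (1u << 2) /* hypothetical: L4 checksum validated */

/* Mirror of the driver's rule: skip software checksumming only when
 * Rx csum offload is on AND both validation bits are set.
 */
static bool csum_unnecessary(uint32_t fd_status, bool rxcsum_enabled)
{
	if (!rxcsum_enabled)
		return false;
	return (fd_status & FAS_L3CV) && (fd_status & FAS_L4CV);
}

int main(void)
{
	printf("%d\n", csum_unnecessary(FAS_L3CV | FAS_L4CV, true)); /* 1 */
	printf("%d\n", csum_unnecessary(FAS_L3CV, true));            /* 0 */
	printf("%d\n", csum_unnecessary(FAS_L3CV | FAS_L4CV, false));/* 0 */
	return 0;
}
```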
61 | /* Free a received FD. | |
62 | * Not to be used for Tx conf FDs or on any other paths. | |
63 | */ | |
64 | static void free_rx_fd(struct dpaa2_eth_priv *priv, | |
65 | const struct dpaa2_fd *fd, | |
66 | void *vaddr) | |
67 | { | |
68 | struct device *dev = priv->net_dev->dev.parent; | |
69 | dma_addr_t addr = dpaa2_fd_get_addr(fd); | |
70 | u8 fd_format = dpaa2_fd_get_format(fd); | |
71 | struct dpaa2_sg_entry *sgt; | |
72 | void *sg_vaddr; | |
73 | int i; | |
74 | ||
75 | /* If single buffer frame, just free the data buffer */ | |
76 | if (fd_format == dpaa2_fd_single) | |
77 | goto free_buf; | |
78 | else if (fd_format != dpaa2_fd_sg) | |
79 | /* We don't support any other format */ | |
80 | return; | |
81 | ||
729d79b8 | 82 | /* For S/G frames, we first need to free all SG entries |
83 | * except the first one, which was taken care of already | |
84 | */ | |
6e2387e8 | 85 | sgt = vaddr + dpaa2_fd_get_offset(fd); |
729d79b8 | 86 | for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { |
6e2387e8 | 87 | addr = dpaa2_sg_get_addr(&sgt[i]); |
08eb2397 | 88 | sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); |
efa6a7d0 | 89 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
27c87486 | 90 | DMA_BIDIRECTIONAL); |
6e2387e8 | 91 | |
27c87486 | 92 | free_pages((unsigned long)sg_vaddr, 0); |
6e2387e8 | 93 | if (dpaa2_sg_is_final(&sgt[i])) |
94 | break; | |
95 | } | |
96 | ||
97 | free_buf: | |
27c87486 | 98 | free_pages((unsigned long)vaddr, 0); |
6e2387e8 | 99 | } |
100 | ||
101 | /* Build a linear skb based on a single-buffer frame descriptor */ | |
fdb6ca9e | 102 | static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, |
6e2387e8 | 103 | const struct dpaa2_fd *fd, |
104 | void *fd_vaddr) | |
105 | { | |
106 | struct sk_buff *skb = NULL; | |
107 | u16 fd_offset = dpaa2_fd_get_offset(fd); | |
108 | u32 fd_length = dpaa2_fd_get_len(fd); | |
109 | ||
cbb3ea40 | 110 | ch->buf_count--; |
111 | ||
27c87486 | 112 | skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); |
6e2387e8 | 113 | if (unlikely(!skb)) |
114 | return NULL; | |
115 | ||
116 | skb_reserve(skb, fd_offset); | |
117 | skb_put(skb, fd_length); | |
118 | ||
6e2387e8 | 119 | return skb; |
120 | } | |
121 | ||
122 | /* Build a non-linear (fragmented) skb based on an S/G table */ |
123 | static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, | |
124 | struct dpaa2_eth_channel *ch, | |
125 | struct dpaa2_sg_entry *sgt) | |
126 | { | |
127 | struct sk_buff *skb = NULL; | |
128 | struct device *dev = priv->net_dev->dev.parent; | |
129 | void *sg_vaddr; | |
130 | dma_addr_t sg_addr; | |
131 | u16 sg_offset; | |
132 | u32 sg_length; | |
133 | struct page *page, *head_page; | |
134 | int page_offset; | |
135 | int i; | |
136 | ||
137 | for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { | |
138 | struct dpaa2_sg_entry *sge = &sgt[i]; | |
139 | ||
140 | /* NOTE: We only support SG entries in dpaa2_sg_single format, | |
141 | * but this is the only format we may receive from HW anyway | |
142 | */ | |
143 | ||
144 | /* Get the address and length from the S/G entry */ | |
145 | sg_addr = dpaa2_sg_get_addr(sge); | |
08eb2397 | 146 | sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); |
efa6a7d0 | 147 | dma_unmap_page(dev, sg_addr, priv->rx_buf_size, |
27c87486 | 148 | DMA_BIDIRECTIONAL); |
6e2387e8 | 149 | |
6e2387e8 | 150 | sg_length = dpaa2_sg_get_len(sge); |
151 | ||
152 | if (i == 0) { | |
153 | /* We build the skb around the first data buffer */ | |
27c87486 | 154 | skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); |
cbb3ea40 | 155 | if (unlikely(!skb)) { |
729d79b8 | 156 | /* Free the first SG entry now, since we already |
157 | * unmapped it and obtained the virtual address | |
158 | */ | |
27c87486 | 159 | free_pages((unsigned long)sg_vaddr, 0); |
729d79b8 | 160 | |
cbb3ea40 | 161 | /* We still need to subtract the buffers used |
162 | * by this FD from our software counter | |
163 | */ | |
164 | while (!dpaa2_sg_is_final(&sgt[i]) && | |
165 | i < DPAA2_ETH_MAX_SG_ENTRIES) | |
166 | i++; | |
167 | break; | |
168 | } | |
6e2387e8 | 169 | |
170 | sg_offset = dpaa2_sg_get_offset(sge); | |
171 | skb_reserve(skb, sg_offset); | |
172 | skb_put(skb, sg_length); | |
173 | } else { | |
174 | /* Rest of the data buffers are stored as skb frags */ | |
175 | page = virt_to_page(sg_vaddr); | |
176 | head_page = virt_to_head_page(sg_vaddr); | |
177 | ||
178 | /* Offset in page (which may be compound). | |
179 | * Data in subsequent SG entries is stored from the | |
180 | * beginning of the buffer, so we don't need to add the | |
181 | * sg_offset. | |
182 | */ | |
183 | page_offset = ((unsigned long)sg_vaddr & | |
184 | (PAGE_SIZE - 1)) + | |
185 | (page_address(page) - page_address(head_page)); | |
186 | ||
187 | skb_add_rx_frag(skb, i - 1, head_page, page_offset, | |
efa6a7d0 | 188 | sg_length, priv->rx_buf_size); |
6e2387e8 | 189 | } |
190 | ||
191 | if (dpaa2_sg_is_final(sge)) | |
192 | break; | |
193 | } | |
194 | ||
b63baf71 | 195 | WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); |
196 | ||
6e2387e8 | 197 | /* Count all data buffers + SG table buffer */ |
198 | ch->buf_count -= i + 2; | |
199 | ||
200 | return skb; | |
201 | } | |
202 | ||
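The page_offset arithmetic in build_frag_skb() is the subtle part: the offset inside the (possibly compound) page plus that page's distance from the head page. A worked, userspace-runnable model of the same formula, assuming a 4 KiB page and emulating page_address()/virt_to_head_page() with plain pointer math:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* model an order-1 compound page: two contiguous 4 KiB pages */
	char *head_page = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
	if (!head_page)
		return 1;

	char *sg_vaddr = head_page + PAGE_SIZE + 128; /* buffer in page 2 */
	char *page = head_page + PAGE_SIZE;           /* page holding it  */

	/* same formula as the driver: offset within the page, plus the
	 * distance of that page from the head of the compound page
	 */
	size_t page_offset = ((uintptr_t)sg_vaddr & (PAGE_SIZE - 1)) +
			     (size_t)(page - head_page);

	printf("page_offset = %zu\n", page_offset); /* 4224 = 4096 + 128 */
	free(head_page);
	return 0;
}
```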
569375fb | 203 | /* Free buffers acquired from the buffer pool or which were meant to |
204 | * be released in the pool | |
205 | */ | |
206 | static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) | |
207 | { | |
208 | struct device *dev = priv->net_dev->dev.parent; | |
209 | void *vaddr; | |
210 | int i; | |
211 | ||
212 | for (i = 0; i < count; i++) { | |
213 | vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); | |
efa6a7d0 | 214 | dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, |
27c87486 | 215 | DMA_BIDIRECTIONAL); |
216 | free_pages((unsigned long)vaddr, 0); | |
569375fb | 217 | } |
218 | } | |
219 | ||
5d39dc21 | 220 | static void xdp_release_buf(struct dpaa2_eth_priv *priv, |
221 | struct dpaa2_eth_channel *ch, | |
222 | dma_addr_t addr) | |
223 | { | |
ef17bd7c | 224 | int retries = 0; |
5d39dc21 | 225 | int err; |
226 | ||
227 | ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr; | |
228 | if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD) | |
229 | return; | |
230 | ||
231 | while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, | |
232 | ch->xdp.drop_bufs, | |
ef17bd7c | 233 | ch->xdp.drop_cnt)) == -EBUSY) { |
234 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) | |
235 | break; | |
5d39dc21 | 236 | cpu_relax(); |
ef17bd7c | 237 | } |
5d39dc21 | 238 | |
239 | if (err) { | |
240 | free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); | |
241 | ch->buf_count -= ch->xdp.drop_cnt; | |
242 | } | |
243 | ||
244 | ch->xdp.drop_cnt = 0; | |
245 | } | |
246 | ||
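xdp_release_buf() above is a batching wrapper: dropped buffers are parked in ch->xdp.drop_bufs and only pushed back to the pool once a full release command's worth has accumulated, amortizing one portal access over several buffers. A minimal standalone model of that idea; flush() stands in for dpaa2_io_service_release() and the batch size of 7 is assumed for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define BUFS_PER_CMD 7 /* assumed: buffers per release command */

struct drop_batch {
	uint64_t bufs[BUFS_PER_CMD];
	int cnt;
};

static void flush(struct drop_batch *b)
{
	printf("releasing %d buffers in one command\n", b->cnt);
	b->cnt = 0;
}

static void release_buf(struct drop_batch *b, uint64_t addr)
{
	b->bufs[b->cnt++] = addr;
	if (b->cnt == BUFS_PER_CMD)
		flush(b);	/* one expensive call per full batch */
}

int main(void)
{
	struct drop_batch b = { .cnt = 0 };

	for (uint64_t a = 0; a < 20; a++)
		release_buf(&b, a);
	if (b.cnt)
		flush(&b);	/* drain the partial batch */
	return 0;
}
```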
99e43521 | 247 | static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, |
248 | void *buf_start, u16 queue_id) | |
249 | { | |
250 | struct dpaa2_eth_fq *fq; | |
251 | struct dpaa2_faead *faead; | |
252 | u32 ctrl, frc; | |
253 | int i, err; | |
254 | ||
255 | /* Mark the egress frame hardware annotation area as valid */ | |
256 | frc = dpaa2_fd_get_frc(fd); | |
257 | dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); | |
258 | dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); | |
259 | ||
260 | /* Instruct hardware to release the FD buffer directly into | |
261 | * the buffer pool once transmission is completed, instead of | |
262 | * sending a Tx confirmation frame to us | |
263 | */ | |
264 | ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; | |
265 | faead = dpaa2_get_faead(buf_start, false); | |
266 | faead->ctrl = cpu_to_le32(ctrl); | |
267 | faead->conf_fqid = 0; | |
268 | ||
269 | fq = &priv->fq[queue_id]; | |
270 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { | |
1fa0f68c | 271 | err = priv->enqueue(priv, fq, fd, 0); |
99e43521 | 272 | if (err != -EBUSY) |
273 | break; | |
274 | } | |
275 | ||
276 | return err; | |
277 | } | |
278 | ||
7e273a8e | 279 | static u32 run_xdp(struct dpaa2_eth_priv *priv, |
280 | struct dpaa2_eth_channel *ch, | |
99e43521 | 281 | struct dpaa2_eth_fq *rx_fq, |
7e273a8e | 282 | struct dpaa2_fd *fd, void *vaddr) |
283 | { | |
5d39dc21 | 284 | dma_addr_t addr = dpaa2_fd_get_addr(fd); |
99e43521 | 285 | struct rtnl_link_stats64 *percpu_stats; |
7e273a8e | 286 | struct bpf_prog *xdp_prog; |
287 | struct xdp_buff xdp; | |
288 | u32 xdp_act = XDP_PASS; | |
99e43521 | 289 | int err; |
290 | ||
291 | percpu_stats = this_cpu_ptr(priv->percpu_stats); | |
7e273a8e | 292 | |
293 | rcu_read_lock(); | |
294 | ||
295 | xdp_prog = READ_ONCE(ch->xdp.prog); | |
296 | if (!xdp_prog) | |
297 | goto out; | |
298 | ||
299 | xdp.data = vaddr + dpaa2_fd_get_offset(fd); | |
300 | xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); | |
7b1eea1a | 301 | xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; |
7e273a8e | 302 | xdp_set_data_meta_invalid(&xdp); |
d678be1d | 303 | xdp.rxq = &ch->xdp_rxq; |
7e273a8e | 304 | |
305 | xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); | |
306 | ||
7b1eea1a | 307 | /* xdp.data pointer may have changed */ |
308 | dpaa2_fd_set_offset(fd, xdp.data - vaddr); | |
309 | dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); | |
310 | ||
7e273a8e | 311 | switch (xdp_act) { |
312 | case XDP_PASS: | |
313 | break; | |
99e43521 | 314 | case XDP_TX: |
315 | err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid); | |
316 | if (err) { | |
317 | xdp_release_buf(priv, ch, addr); | |
318 | percpu_stats->tx_errors++; | |
a4a7b762 | 319 | ch->stats.xdp_tx_err++; |
99e43521 | 320 | } else { |
321 | percpu_stats->tx_packets++; | |
322 | percpu_stats->tx_bytes += dpaa2_fd_get_len(fd); | |
a4a7b762 | 323 | ch->stats.xdp_tx++; |
99e43521 | 324 | } |
325 | break; | |
7e273a8e | 326 | default: |
327 | bpf_warn_invalid_xdp_action(xdp_act); | |
c1cb11bc | 328 | /* fall through */ |
7e273a8e | 329 | case XDP_ABORTED: |
330 | trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); | |
c1cb11bc | 331 | /* fall through */ |
7e273a8e | 332 | case XDP_DROP: |
5d39dc21 | 333 | xdp_release_buf(priv, ch, addr); |
a4a7b762 | 334 | ch->stats.xdp_drop++; |
7e273a8e | 335 | break; |
d678be1d | 336 | case XDP_REDIRECT: |
337 | dma_unmap_page(priv->net_dev->dev.parent, addr, | |
efa6a7d0 | 338 | priv->rx_buf_size, DMA_BIDIRECTIONAL); |
d678be1d | 339 | ch->buf_count--; |
340 | xdp.data_hard_start = vaddr; | |
341 | err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); | |
342 | if (unlikely(err)) | |
343 | ch->stats.xdp_drop++; | |
344 | else | |
345 | ch->stats.xdp_redirect++; | |
346 | break; | |
7e273a8e | 347 | } |
348 | ||
d678be1d | 349 | ch->xdp.res |= xdp_act; |
7e273a8e | 350 | out: |
351 | rcu_read_unlock(); | |
352 | return xdp_act; | |
353 | } | |
354 | ||
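run_xdp() executes whatever BPF program was attached through ndo_bpf and then acts on the verdict. For context, a minimal XDP program of the kind it would run, written with the standard libbpf conventions (generic XDP usage, not part of this driver), built with clang -O2 -target bpf:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int drop_short_frames(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop anything shorter than an Ethernet header (14 bytes);
	 * the bounds check also satisfies the BPF verifier.
	 */
	if (data + 14 > data_end)
		return XDP_DROP;

	return XDP_PASS;	/* hand the frame to the stack */
}

char _license[] SEC("license") = "GPL";
```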
6e2387e8 | 355 | /* Main Rx frame processing routine */ |
356 | static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, | |
357 | struct dpaa2_eth_channel *ch, | |
358 | const struct dpaa2_fd *fd, | |
dbcdf728 | 359 | struct dpaa2_eth_fq *fq) |
6e2387e8 | 360 | { |
361 | dma_addr_t addr = dpaa2_fd_get_addr(fd); | |
362 | u8 fd_format = dpaa2_fd_get_format(fd); | |
363 | void *vaddr; | |
364 | struct sk_buff *skb; | |
365 | struct rtnl_link_stats64 *percpu_stats; | |
85047abd | 366 | struct dpaa2_eth_drv_stats *percpu_extras; |
6e2387e8 | 367 | struct device *dev = priv->net_dev->dev.parent; |
368 | struct dpaa2_fas *fas; | |
d695e764 | 369 | void *buf_data; |
6e2387e8 | 370 | u32 status = 0; |
7e273a8e | 371 | u32 xdp_act; |
6e2387e8 | 372 | |
5636187b | 373 | /* Tracing point */ |
374 | trace_dpaa2_rx_fd(priv->net_dev, fd); | |
375 | ||
08eb2397 | 376 | vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); |
efa6a7d0 | 377 | dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, |
18c2e770 | 378 | DMA_BIDIRECTIONAL); |
6e2387e8 | 379 | |
54ce8917 | 380 | fas = dpaa2_get_fas(vaddr, false); |
d695e764 | 381 | prefetch(fas); |
382 | buf_data = vaddr + dpaa2_fd_get_offset(fd); | |
383 | prefetch(buf_data); | |
6e2387e8 | 384 | |
385 | percpu_stats = this_cpu_ptr(priv->percpu_stats); | |
85047abd | 386 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
6e2387e8 | 387 | |
388 | if (fd_format == dpaa2_fd_single) { | |
99e43521 | 389 | xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); |
7e273a8e | 390 | if (xdp_act != XDP_PASS) { |
391 | percpu_stats->rx_packets++; | |
392 | percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); | |
393 | return; | |
394 | } | |
395 | ||
efa6a7d0 | 396 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
27c87486 | 397 | DMA_BIDIRECTIONAL); |
fdb6ca9e | 398 | skb = build_linear_skb(ch, fd, vaddr); |
6e2387e8 | 399 | } else if (fd_format == dpaa2_fd_sg) { |
7e273a8e | 400 | WARN_ON(priv->xdp_prog); |
401 | ||
efa6a7d0 | 402 | dma_unmap_page(dev, addr, priv->rx_buf_size, |
27c87486 | 403 | DMA_BIDIRECTIONAL); |
d695e764 | 404 | skb = build_frag_skb(priv, ch, buf_data); |
27c87486 | 405 | free_pages((unsigned long)vaddr, 0); |
85047abd | 406 | percpu_extras->rx_sg_frames++; |
407 | percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); | |
6e2387e8 | 408 | } else { |
409 | /* We don't support any other format */ | |
410 | goto err_frame_format; | |
411 | } | |
412 | ||
413 | if (unlikely(!skb)) | |
414 | goto err_build_skb; | |
415 | ||
416 | prefetch(skb->data); | |
417 | ||
859f998e | 418 | /* Get the timestamp value */ |
419 | if (priv->rx_tstamp) { | |
420 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
421 | __le64 *ts = dpaa2_get_ts(vaddr, false); | |
422 | u64 ns; | |
423 | ||
424 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | |
425 | ||
426 | ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); | |
427 | shhwtstamps->hwtstamp = ns_to_ktime(ns); | |
428 | } | |
429 | ||
6e2387e8 | 430 | /* Check if we need to validate the L4 csum */ |
431 | if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { | |
6e2387e8 | 432 | status = le32_to_cpu(fas->status); |
433 | validate_rx_csum(priv, status, skb); | |
434 | } | |
435 | ||
436 | skb->protocol = eth_type_trans(skb, priv->net_dev); | |
dbcdf728 | 437 | skb_record_rx_queue(skb, fq->flowid); |
6e2387e8 | 438 | |
439 | percpu_stats->rx_packets++; | |
440 | percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); | |
441 | ||
0a25d92c | 442 | list_add_tail(&skb->list, ch->rx_list); |
6e2387e8 | 443 | |
444 | return; | |
445 | ||
446 | err_build_skb: | |
447 | free_rx_fd(priv, fd, vaddr); | |
448 | err_frame_format: | |
449 | percpu_stats->rx_dropped++; | |
450 | } | |
451 | ||
452 | /* Consume all frames pull-dequeued into the store. This is the simplest way to | |
453 | * make sure we don't accidentally issue another volatile dequeue which would | |
454 | * overwrite (leak) frames already in the store. | |
455 | * | |
456 | * Observance of NAPI budget is not our concern, leaving that to the caller. | |
457 | */ | |
68049a5f | 458 | static int consume_frames(struct dpaa2_eth_channel *ch, |
569dac6a | 459 | struct dpaa2_eth_fq **src) |
6e2387e8 | 460 | { |
461 | struct dpaa2_eth_priv *priv = ch->priv; | |
68049a5f | 462 | struct dpaa2_eth_fq *fq = NULL; |
6e2387e8 | 463 | struct dpaa2_dq *dq; |
464 | const struct dpaa2_fd *fd; | |
ef17bd7c | 465 | int cleaned = 0, retries = 0; |
6e2387e8 | 466 | int is_last; |
467 | ||
468 | do { | |
469 | dq = dpaa2_io_store_next(ch->store, &is_last); | |
470 | if (unlikely(!dq)) { | |
471 | /* If we're here, we *must* have placed a | |
472 | * volatile dequeue command, so keep reading through |
473 | * the store until we get some sort of valid response | |
474 | * token (either a valid frame or an "empty dequeue") | |
475 | */ | |
ef17bd7c | 476 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) { |
477 | netdev_err_once(priv->net_dev, | |
478 | "Unable to read a valid dequeue response\n"); | |
479 | return -ETIMEDOUT; | |
480 | } | |
6e2387e8 | 481 | continue; |
482 | } | |
483 | ||
484 | fd = dpaa2_dq_fd(dq); | |
75c583ab | 485 | fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); |
6e2387e8 | 486 | |
dbcdf728 | 487 | fq->consume(priv, ch, fd, fq); |
6e2387e8 | 488 | cleaned++; |
ef17bd7c | 489 | retries = 0; |
6e2387e8 | 490 | } while (!is_last); |
491 | ||
68049a5f | 492 | if (!cleaned) |
493 | return 0; | |
494 | ||
495 | fq->stats.frames += cleaned; | |
68049a5f | 496 | |
497 | /* A dequeue operation only pulls frames from a single queue | |
569dac6a | 498 | * into the store. Return the frame queue as an out param. |
68049a5f | 499 | */ |
569dac6a | 500 | if (src) |
501 | *src = fq; | |
68049a5f | 502 | |
6e2387e8 | 503 | return cleaned; |
504 | } | |
505 | ||
859f998e | 506 | /* Configure the egress frame annotation for timestamp update */ |
507 | static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) | |
508 | { | |
509 | struct dpaa2_faead *faead; | |
510 | u32 ctrl, frc; | |
511 | ||
512 | /* Mark the egress frame annotation area as valid */ | |
513 | frc = dpaa2_fd_get_frc(fd); | |
514 | dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); | |
515 | ||
516 | /* Set hardware annotation size */ | |
517 | ctrl = dpaa2_fd_get_ctrl(fd); | |
518 | dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); | |
519 | ||
520 | /* enable UPD (update prepended data) bit in FAEAD field of |
521 | * hardware frame annotation area | |
522 | */ | |
523 | ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; | |
524 | faead = dpaa2_get_faead(buf_start, true); | |
525 | faead->ctrl = cpu_to_le32(ctrl); | |
526 | } | |
527 | ||
6e2387e8 | 528 | /* Create a frame descriptor based on a fragmented skb */ |
529 | static int build_sg_fd(struct dpaa2_eth_priv *priv, | |
530 | struct sk_buff *skb, | |
531 | struct dpaa2_fd *fd) | |
532 | { | |
533 | struct device *dev = priv->net_dev->dev.parent; | |
534 | void *sgt_buf = NULL; | |
6e2387e8 | 535 | dma_addr_t addr; |
536 | int nr_frags = skb_shinfo(skb)->nr_frags; | |
537 | struct dpaa2_sg_entry *sgt; | |
538 | int i, err; | |
539 | int sgt_buf_size; | |
540 | struct scatterlist *scl, *crt_scl; | |
541 | int num_sg; | |
542 | int num_dma_bufs; | |
543 | struct dpaa2_eth_swa *swa; | |
544 | ||
545 | /* Create and map scatterlist. | |
546 | * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have | |
547 | * to go beyond nr_frags+1. | |
548 | * Note: We don't support chained scatterlists | |
549 | */ | |
550 | if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) | |
551 | return -EINVAL; | |
552 | ||
553 | scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); | |
554 | if (unlikely(!scl)) | |
555 | return -ENOMEM; | |
556 | ||
557 | sg_init_table(scl, nr_frags + 1); | |
558 | num_sg = skb_to_sgvec(skb, scl, 0, skb->len); | |
1e5fa9e2 | 559 | num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); |
6e2387e8 | 560 | if (unlikely(!num_dma_bufs)) { |
561 | err = -ENOMEM; | |
562 | goto dma_map_sg_failed; | |
563 | } | |
564 | ||
565 | /* Prepare the HW SGT structure */ | |
566 | sgt_buf_size = priv->tx_data_offset + | |
fa722c00 | 567 | sizeof(struct dpaa2_sg_entry) * num_dma_bufs; |
90bc6d4b | 568 | sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); |
6e2387e8 | 569 | if (unlikely(!sgt_buf)) { |
570 | err = -ENOMEM; | |
571 | goto sgt_buf_alloc_failed; | |
572 | } | |
573 | sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); | |
6a9bbe53 | 574 | memset(sgt_buf, 0, sgt_buf_size); |
575 | ||
6e2387e8 | 576 | sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); |
577 | ||
578 | /* Fill in the HW SGT structure. | |
579 | * | |
580 | * sgt_buf is zeroed out, so the following fields are implicit | |
581 | * in all sgt entries: | |
582 | * - offset is 0 | |
583 | * - format is 'dpaa2_sg_single' | |
584 | */ | |
585 | for_each_sg(scl, crt_scl, num_dma_bufs, i) { | |
586 | dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); | |
587 | dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); | |
588 | } | |
589 | dpaa2_sg_set_final(&sgt[i - 1], true); | |
590 | ||
591 | /* Store the skb backpointer in the SGT buffer. | |
592 | * Fit the scatterlist and the number of buffers alongside the | |
593 | * skb backpointer in the software annotation area. We'll need | |
594 | * all of them on Tx Conf. | |
595 | */ | |
596 | swa = (struct dpaa2_eth_swa *)sgt_buf; | |
e3fdf6ba | 597 | swa->type = DPAA2_ETH_SWA_SG; |
598 | swa->sg.skb = skb; | |
599 | swa->sg.scl = scl; | |
600 | swa->sg.num_sg = num_sg; | |
601 | swa->sg.sgt_size = sgt_buf_size; | |
6e2387e8 | 602 | |
603 | /* Separately map the SGT buffer */ | |
1e5fa9e2 | 604 | addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); |
6e2387e8 | 605 | if (unlikely(dma_mapping_error(dev, addr))) { |
606 | err = -ENOMEM; | |
607 | goto dma_map_single_failed; | |
608 | } | |
609 | dpaa2_fd_set_offset(fd, priv->tx_data_offset); | |
610 | dpaa2_fd_set_format(fd, dpaa2_fd_sg); | |
611 | dpaa2_fd_set_addr(fd, addr); | |
612 | dpaa2_fd_set_len(fd, skb->len); | |
b948c8c6 | 613 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
6e2387e8 | 614 | |
859f998e | 615 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) |
616 | enable_tx_tstamp(fd, sgt_buf); | |
617 | ||
6e2387e8 | 618 | return 0; |
619 | ||
620 | dma_map_single_failed: | |
6a9bbe53 | 621 | skb_free_frag(sgt_buf); |
6e2387e8 | 622 | sgt_buf_alloc_failed: |
1e5fa9e2 | 623 | dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); |
6e2387e8 | 624 | dma_map_sg_failed: |
625 | kfree(scl); | |
626 | return err; | |
627 | } | |
628 | ||
629 | /* Create a frame descriptor based on a linear skb */ | |
630 | static int build_single_fd(struct dpaa2_eth_priv *priv, | |
631 | struct sk_buff *skb, | |
632 | struct dpaa2_fd *fd) | |
633 | { | |
634 | struct device *dev = priv->net_dev->dev.parent; | |
c163685f | 635 | u8 *buffer_start, *aligned_start; |
e3fdf6ba | 636 | struct dpaa2_eth_swa *swa; |
6e2387e8 | 637 | dma_addr_t addr; |
638 | ||
c163685f | 639 | buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); |
640 | ||
641 | /* If there's enough room to align the FD address, do it. | |
642 | * It will help hardware optimize accesses. | |
643 | */ | |
644 | aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, | |
645 | DPAA2_ETH_TX_BUF_ALIGN); | |
646 | if (aligned_start >= skb->head) | |
647 | buffer_start = aligned_start; | |
6e2387e8 | 648 | |
6e2387e8 | 649 | /* Store a backpointer to the skb at the beginning of the buffer |
650 | * (in the private data area) such that we can release it | |
651 | * on Tx confirm | |
652 | */ | |
e3fdf6ba | 653 | swa = (struct dpaa2_eth_swa *)buffer_start; |
654 | swa->type = DPAA2_ETH_SWA_SINGLE; | |
655 | swa->single.skb = skb; | |
6e2387e8 | 656 | |
657 | addr = dma_map_single(dev, buffer_start, | |
658 | skb_tail_pointer(skb) - buffer_start, | |
1e5fa9e2 | 659 | DMA_BIDIRECTIONAL); |
6e2387e8 | 660 | if (unlikely(dma_mapping_error(dev, addr))) |
661 | return -ENOMEM; | |
662 | ||
663 | dpaa2_fd_set_addr(fd, addr); | |
664 | dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); | |
665 | dpaa2_fd_set_len(fd, skb->len); | |
666 | dpaa2_fd_set_format(fd, dpaa2_fd_single); | |
b948c8c6 | 667 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
6e2387e8 | 668 | |
859f998e | 669 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) |
670 | enable_tx_tstamp(fd, buffer_start); | |
671 | ||
6e2387e8 | 672 | return 0; |
673 | } | |
674 | ||
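The aligned_start computation in build_single_fd() leans on PTR_ALIGN rounding up: subtracting the alignment first and then rounding up yields an aligned address at or below buffer_start, which is then used only if it still fits in the skb headroom. A userspace sketch of the same arithmetic; the 64-byte alignment is an assumption for illustration, standing in for DPAA2_ETH_TX_BUF_ALIGN:

```c
#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define TX_BUF_ALIGN 64	/* assumed alignment, for illustration */

int main(void)
{
	uintptr_t buffer_start = 0x1234;	/* unaligned start */
	uintptr_t aligned = ALIGN_UP(buffer_start - TX_BUF_ALIGN,
				     TX_BUF_ALIGN);

	/* 0x1234 - 64 = 0x11f4, rounded up to 0x1200: aligned and
	 * no higher than the original pointer
	 */
	printf("0x%lx -> 0x%lx\n", (unsigned long)buffer_start,
	       (unsigned long)aligned);
	return 0;
}
```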
675 | /* FD freeing routine on the Tx path | |
676 | * | |
677 | * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb | |
678 | * back-pointed to is also freed. | |
679 | * This can be called either from dpaa2_eth_tx_conf() or on the error path of | |
680 | * dpaa2_eth_tx(). | |
6e2387e8 | 681 | */ |
682 | static void free_tx_fd(const struct dpaa2_eth_priv *priv, | |
d678be1d | 683 | struct dpaa2_eth_fq *fq, |
0723a3ae | 684 | const struct dpaa2_fd *fd, bool in_napi) |
6e2387e8 | 685 | { |
686 | struct device *dev = priv->net_dev->dev.parent; | |
687 | dma_addr_t fd_addr; | |
d678be1d | 688 | struct sk_buff *skb = NULL; |
6e2387e8 | 689 | unsigned char *buffer_start; |
6e2387e8 | 690 | struct dpaa2_eth_swa *swa; |
691 | u8 fd_format = dpaa2_fd_get_format(fd); | |
d678be1d | 692 | u32 fd_len = dpaa2_fd_get_len(fd); |
6e2387e8 | 693 | |
694 | fd_addr = dpaa2_fd_get_addr(fd); | |
e3fdf6ba | 695 | buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); |
696 | swa = (struct dpaa2_eth_swa *)buffer_start; | |
6e2387e8 | 697 | |
698 | if (fd_format == dpaa2_fd_single) { | |
d678be1d | 699 | if (swa->type == DPAA2_ETH_SWA_SINGLE) { |
700 | skb = swa->single.skb; | |
701 | /* Accessing the skb buffer is safe before dma unmap, | |
702 | * because we didn't map the actual skb shell. | |
703 | */ | |
704 | dma_unmap_single(dev, fd_addr, | |
705 | skb_tail_pointer(skb) - buffer_start, | |
706 | DMA_BIDIRECTIONAL); | |
707 | } else { | |
708 | WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type"); | |
709 | dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, | |
710 | DMA_BIDIRECTIONAL); | |
711 | } | |
6e2387e8 | 712 | } else if (fd_format == dpaa2_fd_sg) { |
e3fdf6ba | 713 | skb = swa->sg.skb; |
6e2387e8 | 714 | |
715 | /* Unmap the scatterlist */ | |
e3fdf6ba | 716 | dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, |
717 | DMA_BIDIRECTIONAL); | |
718 | kfree(swa->sg.scl); | |
6e2387e8 | 719 | |
720 | /* Unmap the SGT buffer */ | |
e3fdf6ba | 721 | dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, |
b2718e6f | 722 | DMA_BIDIRECTIONAL); |
6e2387e8 | 723 | } else { |
2b7c86eb | 724 | netdev_dbg(priv->net_dev, "Invalid FD format\n"); |
6e2387e8 | 725 | return; |
726 | } | |
727 | ||
d678be1d | 728 | if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { |
729 | fq->dq_frames++; | |
730 | fq->dq_bytes += fd_len; | |
731 | } | |
732 | ||
733 | if (swa->type == DPAA2_ETH_SWA_XDP) { | |
734 | xdp_return_frame(swa->xdp.xdpf); | |
735 | return; | |
736 | } | |
737 | ||
859f998e | 738 | /* Get the timestamp value */ |
739 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { | |
740 | struct skb_shared_hwtstamps shhwtstamps; | |
e3fdf6ba | 741 | __le64 *ts = dpaa2_get_ts(buffer_start, true); |
859f998e | 742 | u64 ns; |
743 | ||
744 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | |
745 | ||
746 | ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); | |
747 | shhwtstamps.hwtstamp = ns_to_ktime(ns); | |
748 | skb_tstamp_tx(skb, &shhwtstamps); | |
749 | } | |
750 | ||
6a9bbe53 | 751 | /* Free SGT buffer allocated on tx */ |
6e2387e8 | 752 | if (fd_format != dpaa2_fd_single) |
e3fdf6ba | 753 | skb_free_frag(buffer_start); |
6e2387e8 | 754 | |
755 | /* Move on with skb release */ | |
0723a3ae | 756 | napi_consume_skb(skb, in_napi); |
6e2387e8 | 757 | } |
758 | ||
c433db40 | 759 | static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) |
6e2387e8 | 760 | { |
761 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
762 | struct dpaa2_fd fd; | |
763 | struct rtnl_link_stats64 *percpu_stats; | |
85047abd | 764 | struct dpaa2_eth_drv_stats *percpu_extras; |
6e2387e8 | 765 | struct dpaa2_eth_fq *fq; |
569dac6a | 766 | struct netdev_queue *nq; |
6e2387e8 | 767 | u16 queue_mapping; |
18c21467 | 768 | unsigned int needed_headroom; |
569dac6a | 769 | u32 fd_len; |
ab1e6de2 | 770 | u8 prio = 0; |
6e2387e8 | 771 | int err, i; |
772 | ||
773 | percpu_stats = this_cpu_ptr(priv->percpu_stats); | |
85047abd | 774 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
6e2387e8 | 775 | |
18c21467 | 776 | needed_headroom = dpaa2_eth_needed_headroom(priv, skb); |
777 | if (skb_headroom(skb) < needed_headroom) { | |
6e2387e8 | 778 | struct sk_buff *ns; |
779 | ||
18c21467 | 780 | ns = skb_realloc_headroom(skb, needed_headroom); |
6e2387e8 | 781 | if (unlikely(!ns)) { |
782 | percpu_stats->tx_dropped++; | |
783 | goto err_alloc_headroom; | |
784 | } | |
6662b5e4 | 785 | percpu_extras->tx_reallocs++; |
859f998e | 786 | |
787 | if (skb->sk) | |
788 | skb_set_owner_w(ns, skb->sk); | |
789 | ||
6e2387e8 | 790 | dev_kfree_skb(skb); |
791 | skb = ns; | |
792 | } | |
793 | ||
794 | /* We'll be holding a back-reference to the skb until Tx Confirmation; | |
795 | * we don't want that overwritten by a concurrent Tx with a cloned skb. | |
796 | */ | |
797 | skb = skb_unshare(skb, GFP_ATOMIC); | |
798 | if (unlikely(!skb)) { | |
799 | /* skb_unshare() has already freed the skb */ | |
800 | percpu_stats->tx_dropped++; | |
801 | return NETDEV_TX_OK; | |
802 | } | |
803 | ||
804 | /* Setup the FD fields */ | |
805 | memset(&fd, 0, sizeof(fd)); | |
806 | ||
85047abd | 807 | if (skb_is_nonlinear(skb)) { |
6e2387e8 | 808 | err = build_sg_fd(priv, skb, &fd); |
85047abd | 809 | percpu_extras->tx_sg_frames++; |
810 | percpu_extras->tx_sg_bytes += skb->len; | |
811 | } else { | |
6e2387e8 | 812 | err = build_single_fd(priv, skb, &fd); |
85047abd | 813 | } |
814 | ||
6e2387e8 | 815 | if (unlikely(err)) { |
816 | percpu_stats->tx_dropped++; | |
817 | goto err_build_fd; | |
818 | } | |
819 | ||
5636187b | 820 | /* Tracing point */ |
821 | trace_dpaa2_tx_fd(net_dev, &fd); | |
822 | ||
537336ce | 823 | /* TxConf FQ selection relies on queue id from the stack. |
824 | * In case of a forwarded frame from another DPNI interface, we choose | |
825 | * a queue affined to the same core that processed the Rx frame | |
6e2387e8 | 826 | */ |
537336ce | 827 | queue_mapping = skb_get_queue_mapping(skb); |
ab1e6de2 | 828 | |
829 | if (net_dev->num_tc) { | |
830 | prio = netdev_txq_to_tc(net_dev, queue_mapping); | |
831 | /* Hardware interprets priority level 0 as being the highest, | |
832 | * so we need to do a reverse mapping to the netdev tc index | |
833 | */ | |
834 | prio = net_dev->num_tc - prio - 1; | |
835 | /* We have only one FQ array entry for all Tx hardware queues | |
836 | * with the same flow id (but different priority levels) | |
837 | */ | |
838 | queue_mapping %= dpaa2_eth_queue_count(priv); | |
839 | } | |
6e2387e8 | 840 | fq = &priv->fq[queue_mapping]; |
8c838f53 | 841 | |
842 | fd_len = dpaa2_fd_get_len(&fd); | |
843 | nq = netdev_get_tx_queue(net_dev, queue_mapping); | |
844 | netdev_tx_sent_queue(nq, fd_len); | |
845 | ||
846 | /* Everything that happens after this enqueue might race with |
847 | * the Tx confirmation callback for this frame | |
848 | */ | |
6e2387e8 | 849 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
ab1e6de2 | 850 | err = priv->enqueue(priv, fq, &fd, prio); |
6e2387e8 | 851 | if (err != -EBUSY) |
852 | break; | |
853 | } | |
85047abd | 854 | percpu_extras->tx_portal_busy += i; |
6e2387e8 | 855 | if (unlikely(err < 0)) { |
856 | percpu_stats->tx_errors++; | |
857 | /* Clean up everything, including freeing the skb */ | |
d678be1d | 858 | free_tx_fd(priv, fq, &fd, false); |
8c838f53 | 859 | netdev_tx_completed_queue(nq, 1, fd_len); |
6e2387e8 | 860 | } else { |
861 | percpu_stats->tx_packets++; | |
569dac6a | 862 | percpu_stats->tx_bytes += fd_len; |
6e2387e8 | 863 | } |
864 | ||
865 | return NETDEV_TX_OK; | |
866 | ||
867 | err_build_fd: | |
868 | err_alloc_headroom: | |
869 | dev_kfree_skb(skb); | |
870 | ||
871 | return NETDEV_TX_OK; | |
872 | } | |
873 | ||
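The traffic-class handling in dpaa2_eth_tx() packs two mappings into queue_mapping: the netdev tc index is reverse-mapped because the hardware treats priority 0 as highest, and the flow id is recovered with a modulo since all tc queues with the same flow id share one FQ array entry. A worked example of that math; deriving tc as queue/num_queues assumes the equal-sized tc ranges this driver sets up (what netdev_txq_to_tc() would compute here):

```c
#include <stdio.h>

int main(void)
{
	int num_tc = 4, num_queues = 8;	/* illustrative sizes */

	for (int queue_mapping = 0; queue_mapping < num_tc * num_queues;
	     queue_mapping += 9) {
		int tc = queue_mapping / num_queues; /* netdev tc index */
		int prio = num_tc - tc - 1;          /* reverse mapping */
		int flowid = queue_mapping % num_queues;

		printf("txq %2d -> tc %d, hw prio %d, flow %d\n",
		       queue_mapping, tc, prio, flowid);
	}
	return 0;
}
```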
874 | /* Tx confirmation frame processing routine */ | |
875 | static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, | |
b00c898c | 876 | struct dpaa2_eth_channel *ch __always_unused, |
6e2387e8 | 877 | const struct dpaa2_fd *fd, |
569dac6a | 878 | struct dpaa2_eth_fq *fq) |
6e2387e8 | 879 | { |
880 | struct rtnl_link_stats64 *percpu_stats; | |
85047abd | 881 | struct dpaa2_eth_drv_stats *percpu_extras; |
569dac6a | 882 | u32 fd_len = dpaa2_fd_get_len(fd); |
39163c0c | 883 | u32 fd_errors; |
6e2387e8 | 884 | |
5636187b | 885 | /* Tracing point */ |
886 | trace_dpaa2_tx_conf_fd(priv->net_dev, fd); | |
887 | ||
85047abd | 888 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
889 | percpu_extras->tx_conf_frames++; | |
569dac6a | 890 | percpu_extras->tx_conf_bytes += fd_len; |
891 | ||
39163c0c | 892 | /* Check frame errors in the FD field */ |
893 | fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; | |
d678be1d | 894 | free_tx_fd(priv, fq, fd, true); |
39163c0c | 895 | |
896 | if (likely(!fd_errors)) | |
897 | return; | |
898 | ||
2b7c86eb | 899 | if (net_ratelimit()) |
900 | netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", | |
901 | fd_errors); | |
902 | ||
39163c0c | 903 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
904 | /* Tx-conf logically pertains to the egress path. */ | |
905 | percpu_stats->tx_errors++; | |
6e2387e8 | 906 | } |
907 | ||
908 | static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) | |
909 | { | |
910 | int err; | |
911 | ||
912 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, | |
913 | DPNI_OFF_RX_L3_CSUM, enable); | |
914 | if (err) { | |
915 | netdev_err(priv->net_dev, | |
916 | "dpni_set_offload(RX_L3_CSUM) failed\n"); | |
917 | return err; | |
918 | } | |
919 | ||
920 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, | |
921 | DPNI_OFF_RX_L4_CSUM, enable); | |
922 | if (err) { | |
923 | netdev_err(priv->net_dev, | |
924 | "dpni_set_offload(RX_L4_CSUM) failed\n"); | |
925 | return err; | |
926 | } | |
927 | ||
928 | return 0; | |
929 | } | |
930 | ||
931 | static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) | |
932 | { | |
933 | int err; | |
934 | ||
935 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, | |
936 | DPNI_OFF_TX_L3_CSUM, enable); | |
937 | if (err) { | |
938 | netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); | |
939 | return err; | |
940 | } | |
941 | ||
942 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, | |
943 | DPNI_OFF_TX_L4_CSUM, enable); | |
944 | if (err) { | |
945 | netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); | |
946 | return err; | |
947 | } | |
948 | ||
949 | return 0; | |
950 | } | |
951 | ||
952 | /* Perform a single release command to add buffers | |
953 | * to the specified buffer pool | |
954 | */ | |
7ec0596f | 955 | static int add_bufs(struct dpaa2_eth_priv *priv, |
956 | struct dpaa2_eth_channel *ch, u16 bpid) | |
6e2387e8 | 957 | { |
958 | struct device *dev = priv->net_dev->dev.parent; | |
959 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; | |
27c87486 | 960 | struct page *page; |
6e2387e8 | 961 | dma_addr_t addr; |
ef17bd7c | 962 | int retries = 0; |
87eb55e4 | 963 | int i, err; |
6e2387e8 | 964 | |
965 | for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { | |
966 | /* Allocate buffer visible to WRIOP + skb shared info + | |
967 | * alignment padding | |
968 | */ | |
27c87486 | 969 | /* allocate one page for each Rx buffer. WRIOP sees |
970 | * the entire page except for a tailroom reserved for | |
971 | * skb shared info | |
972 | */ | |
973 | page = dev_alloc_pages(0); | |
974 | if (!page) | |
6e2387e8 | 975 | goto err_alloc; |
976 | ||
efa6a7d0 | 977 | addr = dma_map_page(dev, page, 0, priv->rx_buf_size, |
27c87486 | 978 | DMA_BIDIRECTIONAL); |
6e2387e8 | 979 | if (unlikely(dma_mapping_error(dev, addr))) |
980 | goto err_map; | |
981 | ||
982 | buf_array[i] = addr; | |
5636187b | 983 | |
984 | /* tracing point */ | |
985 | trace_dpaa2_eth_buf_seed(priv->net_dev, | |
27c87486 | 986 | page, DPAA2_ETH_RX_BUF_RAW_SIZE, |
efa6a7d0 | 987 | addr, priv->rx_buf_size, |
5636187b | 988 | bpid); |
6e2387e8 | 989 | } |
990 | ||
991 | release_bufs: | |
87eb55e4 | 992 | /* In case the portal is busy, retry until successful */ |
7ec0596f | 993 | while ((err = dpaa2_io_service_release(ch->dpio, bpid, |
ef17bd7c | 994 | buf_array, i)) == -EBUSY) { |
995 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) | |
996 | break; | |
6e2387e8 | 997 | cpu_relax(); |
ef17bd7c | 998 | } |
87eb55e4 | 999 | |
1000 | /* If release command failed, clean up and bail out; | |
1001 | * not much else we can do about it | |
1002 | */ | |
1003 | if (err) { | |
1004 | free_bufs(priv, buf_array, i); | |
1005 | return 0; | |
1006 | } | |
1007 | ||
6e2387e8 | 1008 | return i; |
1009 | ||
1010 | err_map: | |
27c87486 | 1011 | __free_pages(page, 0); |
6e2387e8 | 1012 | err_alloc: |
87eb55e4 | 1013 | /* If we managed to allocate at least some buffers, |
1014 | * release them to hardware | |
1015 | */ | |
6e2387e8 | 1016 | if (i) |
1017 | goto release_bufs; | |
1018 | ||
1019 | return 0; | |
1020 | } | |
1021 | ||
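add_bufs() above uses the bounded busy-retry idiom that recurs throughout this file (buffer release, CDAN rearm, channel pull): retry only while the portal reports busy, and give up after a fixed number of spins. A standalone model of the pattern; do_release() stands in for the dpaa2_io_service_*() calls and the retry bound is illustrative:

```c
#include <errno.h>
#include <stdio.h>

#define SWP_BUSY_RETRIES 1000	/* illustrative bound */

/* stand-in for a portal call: pretend it frees up on attempt 3 */
static int do_release(int attempt)
{
	return attempt < 3 ? -EBUSY : 0;
}

int main(void)
{
	int retries = 0;
	int err;

	while ((err = do_release(retries)) == -EBUSY) {
		if (retries++ >= SWP_BUSY_RETRIES)
			break;	/* don't spin forever on a stuck portal */
	}

	printf("err = %d after %d retries\n", err, retries);
	return 0;
}
```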
1022 | static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) | |
1023 | { | |
1024 | int i, j; | |
1025 | int new_count; | |
1026 | ||
6e2387e8 | 1027 | for (j = 0; j < priv->num_channels; j++) { |
1028 | for (i = 0; i < DPAA2_ETH_NUM_BUFS; | |
1029 | i += DPAA2_ETH_BUFS_PER_CMD) { | |
7ec0596f | 1030 | new_count = add_bufs(priv, priv->channel[j], bpid); |
6e2387e8 IR |
1031 | priv->channel[j]->buf_count += new_count; |
1032 | ||
1033 | if (new_count < DPAA2_ETH_BUFS_PER_CMD) { | |
6e2387e8 | 1034 | return -ENOMEM; |
1035 | } | |
1036 | } | |
1037 | } | |
6e2387e8 | 1038 | |
1039 | return 0; | |
1040 | } | |
1041 | ||
1042 | /** | |
1043 | * Drain the specified number of buffers from the DPNI's private buffer pool. | |
1044 | * @count must not exceed DPAA2_ETH_BUFS_PER_CMD |
1045 | */ | |
1046 | static void drain_bufs(struct dpaa2_eth_priv *priv, int count) | |
1047 | { | |
6e2387e8 | 1048 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
ef17bd7c | 1049 | int retries = 0; |
87eb55e4 | 1050 | int ret; |
6e2387e8 | 1051 | |
1052 | do { | |
05fa39c6 | 1053 | ret = dpaa2_io_service_acquire(NULL, priv->bpid, |
6e2387e8 | 1054 | buf_array, count); |
1055 | if (ret < 0) { | |
ef17bd7c | 1056 | if (ret == -EBUSY && |
1057 | retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) | |
1058 | continue; | |
6e2387e8 | 1059 | netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); |
1060 | return; | |
1061 | } | |
87eb55e4 | 1062 | free_bufs(priv, buf_array, ret); |
ef17bd7c | 1063 | retries = 0; |
6e2387e8 | 1064 | } while (ret); |
1065 | } | |
1066 | ||
1067 | static void drain_pool(struct dpaa2_eth_priv *priv) | |
1068 | { | |
1069 | int i; | |
1070 | ||
1071 | drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); | |
1072 | drain_bufs(priv, 1); | |
1073 | ||
1074 | for (i = 0; i < priv->num_channels; i++) | |
1075 | priv->channel[i]->buf_count = 0; | |
1076 | } | |
1077 | ||
1078 | /* Function is called from softirq context only, so we don't need to guard | |
1079 | * the access to percpu count | |
1080 | */ | |
1081 | static int refill_pool(struct dpaa2_eth_priv *priv, | |
1082 | struct dpaa2_eth_channel *ch, | |
1083 | u16 bpid) | |
1084 | { | |
1085 | int new_count; | |
1086 | ||
1087 | if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) | |
1088 | return 0; | |
1089 | ||
1090 | do { | |
7ec0596f | 1091 | new_count = add_bufs(priv, ch, bpid); |
6e2387e8 | 1092 | if (unlikely(!new_count)) { |
1093 | /* Out of memory; abort for now, we'll try later on */ | |
1094 | break; | |
1095 | } | |
1096 | ch->buf_count += new_count; | |
1097 | } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); | |
1098 | ||
1099 | if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) | |
1100 | return -ENOMEM; | |
1101 | ||
1102 | return 0; | |
1103 | } | |
1104 | ||
1105 | static int pull_channel(struct dpaa2_eth_channel *ch) | |
1106 | { | |
1107 | int err; | |
85047abd | 1108 | int dequeues = -1; |
6e2387e8 | 1109 | |
1110 | /* Retry while portal is busy */ | |
1111 | do { | |
7ec0596f | 1112 | err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, |
1113 | ch->store); | |
85047abd | 1114 | dequeues++; |
6e2387e8 | 1115 | cpu_relax(); |
ef17bd7c | 1116 | } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); |
6e2387e8 | 1117 | |
85047abd | 1118 | ch->stats.dequeue_portal_busy += dequeues; |
1119 | if (unlikely(err)) | |
1120 | ch->stats.pull_err++; | |
1121 | ||
6e2387e8 | 1122 | return err; |
1123 | } | |
1124 | ||
1125 | /* NAPI poll routine | |
1126 | * | |
1127 | * Frames are dequeued from the QMan channel associated with this NAPI context. | |
1128 | * Rx, Tx confirmation and (if configured) Rx error frames all count | |
1129 | * towards the NAPI budget. | |
1130 | */ | |
1131 | static int dpaa2_eth_poll(struct napi_struct *napi, int budget) | |
1132 | { | |
1133 | struct dpaa2_eth_channel *ch; | |
6e2387e8 | 1134 | struct dpaa2_eth_priv *priv; |
68049a5f | 1135 | int rx_cleaned = 0, txconf_cleaned = 0; |
569dac6a | 1136 | struct dpaa2_eth_fq *fq, *txc_fq = NULL; |
1137 | struct netdev_queue *nq; | |
1138 | int store_cleaned, work_done; | |
0a25d92c | 1139 | struct list_head rx_list; |
ef17bd7c | 1140 | int retries = 0; |
6e2387e8 | 1141 | int err; |
1142 | ||
1143 | ch = container_of(napi, struct dpaa2_eth_channel, napi); | |
d678be1d | 1144 | ch->xdp.res = 0; |
6e2387e8 | 1145 | priv = ch->priv; |
1146 | ||
0a25d92c | 1147 | INIT_LIST_HEAD(&rx_list); |
1148 | ch->rx_list = &rx_list; | |
1149 | ||
68049a5f | 1150 | do { |
6e2387e8 | 1151 | err = pull_channel(ch); |
1152 | if (unlikely(err)) | |
1153 | break; | |
1154 | ||
1155 | /* Refill pool if appropriate */ | |
05fa39c6 | 1156 | refill_pool(priv, ch, priv->bpid); |
6e2387e8 | 1157 | |
569dac6a | 1158 | store_cleaned = consume_frames(ch, &fq); |
ef17bd7c | 1159 | if (store_cleaned <= 0) |
569dac6a | 1160 | break; |
1161 | if (fq->type == DPAA2_RX_FQ) { | |
68049a5f | 1162 | rx_cleaned += store_cleaned; |
569dac6a | 1163 | } else { |
68049a5f | 1164 | txconf_cleaned += store_cleaned; |
569dac6a | 1165 | /* We have a single Tx conf FQ on this channel */ |
1166 | txc_fq = fq; | |
1167 | } | |
6e2387e8 | 1168 | |
68049a5f | 1169 | /* If we either consumed the whole NAPI budget with Rx frames |
1170 | * or we reached the Tx confirmations threshold, we're done. | |
6e2387e8 | 1171 | */ |
68049a5f | 1172 | if (rx_cleaned >= budget || |
569dac6a | 1173 | txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { |
1174 | work_done = budget; | |
1175 | goto out; | |
1176 | } | |
68049a5f | 1177 | } while (store_cleaned); |
6e2387e8 | 1178 | |
68049a5f | 1179 | /* We didn't consume the entire budget, so finish napi and |
1180 | * re-enable data availability notifications | |
1181 | */ | |
1182 | napi_complete_done(napi, rx_cleaned); | |
1183 | do { | |
1184 | err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); | |
1185 | cpu_relax(); | |
ef17bd7c | 1186 | } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); |
68049a5f | 1187 | WARN_ONCE(err, "CDAN notifications rearm failed on core %d", |
1188 | ch->nctx.desired_cpu); | |
85047abd | 1189 | |
569dac6a | 1190 | work_done = max(rx_cleaned, 1); |
1191 | ||
1192 | out: | |
0a25d92c | 1193 | netif_receive_skb_list(ch->rx_list); |
1194 | ||
d678be1d | 1195 | if (txc_fq && txc_fq->dq_frames) { |
569dac6a | 1196 | nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); |
1197 | netdev_tx_completed_queue(nq, txc_fq->dq_frames, | |
1198 | txc_fq->dq_bytes); | |
1199 | txc_fq->dq_frames = 0; | |
1200 | txc_fq->dq_bytes = 0; | |
1201 | } | |
1202 | ||
d678be1d | 1203 | if (ch->xdp.res & XDP_REDIRECT) |
1204 | xdp_do_flush_map(); | |
1205 | ||
569dac6a | 1206 | return work_done; |
6e2387e8 | 1207 | } |
1208 | ||
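The poll routine above follows the standard NAPI contract: return the full budget to stay scheduled when there may be more work, otherwise call napi_complete_done() and report a nonzero amount of work done. A compact model of just that accounting, mirroring the work_done = max(rx_cleaned, 1) at the end of dpaa2_eth_poll(); the numbers are illustrative:

```c
#include <stdio.h>

static int poll_once(int frames_available, int budget)
{
	int rx_cleaned = frames_available < budget ?
			 frames_available : budget;

	if (rx_cleaned >= budget)
		return budget;	/* more work may remain: stay scheduled */

	/* napi_complete_done(napi, rx_cleaned) would go here */
	return rx_cleaned > 0 ? rx_cleaned : 1; /* max(rx_cleaned, 1) */
}

int main(void)
{
	printf("%d\n", poll_once(3, 64));	/* under budget -> 3  */
	printf("%d\n", poll_once(200, 64));	/* full budget  -> 64 */
	printf("%d\n", poll_once(0, 64));	/* nothing      -> 1  */
	return 0;
}
```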
1209 | static void enable_ch_napi(struct dpaa2_eth_priv *priv) | |
1210 | { | |
1211 | struct dpaa2_eth_channel *ch; | |
1212 | int i; | |
1213 | ||
1214 | for (i = 0; i < priv->num_channels; i++) { | |
1215 | ch = priv->channel[i]; | |
1216 | napi_enable(&ch->napi); | |
1217 | } | |
1218 | } | |
1219 | ||
1220 | static void disable_ch_napi(struct dpaa2_eth_priv *priv) | |
1221 | { | |
1222 | struct dpaa2_eth_channel *ch; | |
1223 | int i; | |
1224 | ||
1225 | for (i = 0; i < priv->num_channels; i++) { | |
1226 | ch = priv->channel[i]; | |
1227 | napi_disable(&ch->napi); | |
1228 | } | |
1229 | } | |
1230 | ||
8eb3cef8 | 1231 | static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable) |
1232 | { | |
1233 | struct dpni_taildrop td = {0}; | |
1234 | int i, err; | |
1235 | ||
1236 | if (priv->rx_td_enabled == enable) | |
1237 | return; | |
1238 | ||
1239 | td.enable = enable; | |
1240 | td.threshold = DPAA2_ETH_TAILDROP_THRESH; | |
1241 | ||
1242 | for (i = 0; i < priv->num_fqs; i++) { | |
1243 | if (priv->fq[i].type != DPAA2_RX_FQ) | |
1244 | continue; | |
1245 | err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, | |
1246 | DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0, | |
1247 | priv->fq[i].flowid, &td); | |
1248 | if (err) { | |
1249 | netdev_err(priv->net_dev, | |
1250 | "dpni_set_taildrop() failed\n"); | |
1251 | break; | |
1252 | } | |
1253 | } | |
1254 | ||
1255 | priv->rx_td_enabled = enable; | |
1256 | } | |
1257 | ||
6e2387e8 | 1258 | static int link_state_update(struct dpaa2_eth_priv *priv) |
1259 | { | |
85b7a342 | 1260 | struct dpni_link_state state = {0}; |
8eb3cef8 | 1261 | bool tx_pause; |
6e2387e8 | 1262 | int err; |
1263 | ||
1264 | err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); | |
1265 | if (unlikely(err)) { | |
1266 | netdev_err(priv->net_dev, | |
1267 | "dpni_get_link_state() failed\n"); | |
1268 | return err; | |
1269 | } | |
1270 | ||
8eb3cef8 | 1271 | /* If Tx pause frame settings have changed, we need to update |
1272 | * Rx FQ taildrop configuration as well. We configure taildrop | |
1273 | * only when pause frame generation is disabled. | |
1274 | */ | |
1275 | tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^ | |
1276 | !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE); | |
1277 | dpaa2_eth_set_rx_taildrop(priv, !tx_pause); | |
1278 | ||
71947923 | 1279 | /* When we manage the MAC/PHY using phylink there is no need |
1280 | * to manually update the netif_carrier. | |
1281 | */ | |
1282 | if (priv->mac) | |
1283 | goto out; | |
1284 | ||
6e2387e8 | 1285 | /* Check link state; speed / duplex changes are not treated yet */ |
1286 | if (priv->link_state.up == state.up) | |
cce62943 | 1287 | goto out; |
6e2387e8 | 1288 | |
6e2387e8 | 1289 | if (state.up) { |
1290 | netif_carrier_on(priv->net_dev); | |
1291 | netif_tx_start_all_queues(priv->net_dev); | |
1292 | } else { | |
1293 | netif_tx_stop_all_queues(priv->net_dev); | |
1294 | netif_carrier_off(priv->net_dev); | |
1295 | } | |
1296 | ||
77160af3 | 1297 | netdev_info(priv->net_dev, "Link Event: state %s\n", |
6e2387e8 | 1298 | state.up ? "up" : "down"); |
1299 | ||
cce62943 | 1300 | out: |
1301 | priv->link_state = state; | |
1302 | ||
6e2387e8 | 1303 | return 0; |
1304 | } | |
1305 | ||
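The tx_pause XOR in link_state_update() encodes the DPAA2 pause-option convention (PAUSE alone means symmetric pause, PAUSE plus ASYM_PAUSE means Rx-only pause, ASYM_PAUSE alone means Tx-only pause). A small sketch printing the resulting truth table; taildrop is wanted exactly when we do not generate pause frames ourselves:

```c
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int pause = 0; pause <= 1; pause++)
		for (int asym = 0; asym <= 1; asym++) {
			bool tx_pause = (bool)pause ^ (bool)asym;

			/* taildrop only when no pause frame generation */
			printf("PAUSE=%d ASYM=%d -> tx_pause=%d taildrop=%d\n",
			       pause, asym, tx_pause, !tx_pause);
		}
	return 0;
}
```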
1306 | static int dpaa2_eth_open(struct net_device *net_dev) | |
1307 | { | |
1308 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
1309 | int err; | |
1310 | ||
05fa39c6 | 1311 | err = seed_pool(priv, priv->bpid); |
6e2387e8 | 1312 | if (err) { |
1313 | /* Not much to do; the buffer pool, though not filled up, | |
1314 | * may still contain some buffers which would enable us | |
1315 | * to limp on. | |
1316 | */ | |
1317 | netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", | |
05fa39c6 | 1318 | priv->dpbp_dev->obj_desc.id, priv->bpid); |
6e2387e8 | 1319 | } |
1320 | ||
71947923 | 1321 | if (!priv->mac) { |
1322 | /* We'll only start the txqs when the link is actually ready; | |
1323 | * make sure we don't race against the link up notification, | |
1324 | * which may come immediately after dpni_enable(); | |
1325 | */ | |
1326 | netif_tx_stop_all_queues(net_dev); | |
1327 | ||
1328 | /* Also, explicitly set carrier off, otherwise | |
1329 | * netif_carrier_ok() will return true and cause 'ip link show' | |
1330 | * to report the LOWER_UP flag, even though the link | |
1331 | * notification wasn't even received. | |
1332 | */ | |
1333 | netif_carrier_off(net_dev); | |
1334 | } | |
6e2387e8 | 1335 | enable_ch_napi(priv); |
6e2387e8 | 1336 | |
1337 | err = dpni_enable(priv->mc_io, 0, priv->mc_token); | |
1338 | if (err < 0) { | |
1339 | netdev_err(net_dev, "dpni_enable() failed\n"); | |
1340 | goto enable_err; | |
1341 | } | |
1342 | ||
71947923 | 1343 | if (!priv->mac) { |
1344 | /* If the DPMAC object has already processed the link up | |
1345 | * interrupt, we have to learn the link state ourselves. | |
1346 | */ | |
1347 | err = link_state_update(priv); | |
1348 | if (err < 0) { | |
1349 | netdev_err(net_dev, "Can't update link state\n"); | |
1350 | goto link_state_err; | |
1351 | } | |
1352 | } else { | |
1353 | phylink_start(priv->mac->phylink); | |
6e2387e8 | 1354 | } |
1355 | ||
1356 | return 0; | |
1357 | ||
1358 | link_state_err: | |
1359 | enable_err: | |
1360 | disable_ch_napi(priv); | |
1361 | drain_pool(priv); | |
1362 | return err; | |
1363 | } | |
1364 | ||
68d74315 | 1365 | /* Total number of in-flight frames on ingress queues */ |
1366 | static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) | |
6e2387e8 | 1367 | { |
68d74315 | 1368 | struct dpaa2_eth_fq *fq; |
1369 | u32 fcnt = 0, bcnt = 0, total = 0; | |
1370 | int i, err; | |
6e2387e8 | 1371 | |
68d74315 | 1372 | for (i = 0; i < priv->num_fqs; i++) { |
1373 | fq = &priv->fq[i]; | |
1374 | err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); | |
1375 | if (err) { | |
1376 | netdev_warn(priv->net_dev, "query_fq_count failed"); | |
1377 | break; | |
1378 | } | |
1379 | total += fcnt; | |
1380 | } | |
6e2387e8 | 1381 | |
1382 | return total; | |
1383 | } | |
1384 | ||
52b6a4ff | 1385 | static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) |
6e2387e8 | 1386 | { |
68d74315 ICR |
1387 | int retries = 10; |
1388 | u32 pending; | |
6e2387e8 | 1389 | |
68d74315 | 1390 | do { |
1391 | pending = ingress_fq_count(priv); | |
1392 | if (pending) | |
1393 | msleep(100); | |
1394 | } while (pending && --retries); | |
6e2387e8 | 1395 | } |
1396 | ||
52b6a4ff | 1397 | #define DPNI_TX_PENDING_VER_MAJOR 7 |
1398 | #define DPNI_TX_PENDING_VER_MINOR 13 | |
1399 | static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) | |
1400 | { | |
1401 | union dpni_statistics stats; | |
1402 | int retries = 10; | |
1403 | int err; | |
1404 | ||
1405 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, | |
1406 | DPNI_TX_PENDING_VER_MINOR) < 0) | |
1407 | goto out; | |
1408 | ||
1409 | do { | |
1410 | err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, | |
1411 | &stats); | |
1412 | if (err) | |
1413 | goto out; | |
1414 | if (stats.page_6.tx_pending_frames == 0) | |
1415 | return; | |
1416 | } while (--retries); | |
1417 | ||
1418 | out: | |
1419 | msleep(500); | |
1420 | } | |
1421 | ||
6e2387e8 | 1422 | static int dpaa2_eth_stop(struct net_device *net_dev) |
1423 | { | |
1424 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
85b7a342 | 1425 | int dpni_enabled = 0; |
6e2387e8 | 1426 | int retries = 10; |
6e2387e8 | 1427 | |
71947923 | 1428 | if (!priv->mac) { |
1429 | netif_tx_stop_all_queues(net_dev); | |
1430 | netif_carrier_off(net_dev); | |
1431 | } else { | |
1432 | phylink_stop(priv->mac->phylink); | |
1433 | } | |
6e2387e8 | 1434 | |
68d74315 | 1435 | /* On dpni_disable(), the MC firmware will: |
1436 | * - stop MAC Rx and wait for all Rx frames to be enqueued to software | |
1437 | * - cut off WRIOP dequeues from egress FQs and wait until transmission | |
1438 | * of all in flight Tx frames is finished (and corresponding Tx conf | |
1439 | * frames are enqueued back to software) | |
1440 | * | |
1441 | * Before calling dpni_disable(), we wait for all Tx frames to arrive | |
1442 | * on WRIOP. After it finishes, wait until all remaining frames on Rx | |
1443 | * and Tx conf queues are consumed on NAPI poll. | |
6e2387e8 | 1444 | */ |
52b6a4ff | 1445 | wait_for_egress_fq_empty(priv); |
68d74315 | 1446 | |
6e2387e8 | 1447 | do { |
1448 | dpni_disable(priv->mc_io, 0, priv->mc_token); | |
1449 | dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); | |
1450 | if (dpni_enabled) | |
1451 | /* Allow the hardware some slack */ | |
1452 | msleep(100); | |
1453 | } while (dpni_enabled && --retries); | |
1454 | if (!retries) { | |
1455 | netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); | |
1456 | /* Must go on and disable NAPI nonetheless, so we don't crash at | |
1457 | * the next "ifconfig up" | |
1458 | */ | |
1459 | } | |
1460 | ||
52b6a4ff | 1461 | wait_for_ingress_fq_empty(priv); |
6e2387e8 | 1462 | disable_ch_napi(priv); |
1463 | ||
6e2387e8 | 1464 | /* Empty the buffer pool */ |
1465 | drain_pool(priv); | |
1466 | ||
1467 | return 0; | |
1468 | } | |
1469 | ||
6e2387e8 | 1470 | static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) |
1471 | { | |
1472 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
1473 | struct device *dev = net_dev->dev.parent; | |
1474 | int err; | |
1475 | ||
1476 | err = eth_mac_addr(net_dev, addr); | |
1477 | if (err < 0) { | |
1478 | dev_err(dev, "eth_mac_addr() failed (%d)\n", err); | |
1479 | return err; | |
1480 | } | |
1481 | ||
1482 | err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, | |
1483 | net_dev->dev_addr); | |
1484 | if (err) { | |
1485 | dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); | |
1486 | return err; | |
1487 | } | |
1488 | ||
1489 | return 0; | |
1490 | } | |
1491 | ||
1492 | /** Fill in counters maintained by the GPP driver. These may be different from | |
1493 | * the hardware counters obtained by ethtool. | |
1494 | */ | |
acbff8e3 | 1495 | static void dpaa2_eth_get_stats(struct net_device *net_dev, |
1496 | struct rtnl_link_stats64 *stats) | |
6e2387e8 | 1497 | { |
1498 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
1499 | struct rtnl_link_stats64 *percpu_stats; | |
1500 | u64 *cpustats; | |
1501 | u64 *netstats = (u64 *)stats; | |
1502 | int i, j; | |
1503 | int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); | |
1504 | ||
1505 | for_each_possible_cpu(i) { | |
1506 | percpu_stats = per_cpu_ptr(priv->percpu_stats, i); | |
1507 | cpustats = (u64 *)percpu_stats; | |
1508 | for (j = 0; j < num; j++) | |
1509 | netstats[j] += cpustats[j]; | |
1510 | } | |
1511 | } | |
1512 | ||
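dpaa2_eth_get_stats() sums the per-CPU copies by treating both structures as flat u64 arrays, which is safe only because every member of rtnl_link_stats64 is a u64. A standalone model of the same aggregation using a hypothetical four-field stand-in struct:

```c
#include <stdint.h>
#include <stdio.h>

struct stats {	/* stand-in for rtnl_link_stats64: all-u64 members */
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_bytes;
	uint64_t tx_bytes;
};

int main(void)
{
	struct stats percpu[2] = {
		{ 1, 2, 100, 200 },	/* CPU 0 */
		{ 3, 4, 300, 400 },	/* CPU 1 */
	};
	struct stats total = { 0 };
	uint64_t *out = (uint64_t *)&total;
	int num = sizeof(struct stats) / sizeof(uint64_t);

	for (int cpu = 0; cpu < 2; cpu++) {
		uint64_t *in = (uint64_t *)&percpu[cpu];

		for (int j = 0; j < num; j++)	/* field-by-field sum */
			out[j] += in[j];
	}

	printf("rx_packets=%llu rx_bytes=%llu\n",	/* 4 and 400 */
	       (unsigned long long)total.rx_packets,
	       (unsigned long long)total.rx_bytes);
	return 0;
}
```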
6e2387e8 | 1513 | /* Copy mac unicast addresses from @net_dev to @priv. |
1514 | * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. | |
1515 | */ | |
1516 | static void add_uc_hw_addr(const struct net_device *net_dev, | |
1517 | struct dpaa2_eth_priv *priv) | |
1518 | { | |
1519 | struct netdev_hw_addr *ha; | |
1520 | int err; | |
1521 | ||
1522 | netdev_for_each_uc_addr(ha, net_dev) { | |
1523 | err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, | |
1524 | ha->addr); | |
1525 | if (err) | |
1526 | netdev_warn(priv->net_dev, | |
1527 | "Could not add ucast MAC %pM to the filtering table (err %d)\n", | |
1528 | ha->addr, err); | |
1529 | } | |
1530 | } | |
1531 | ||
1532 | /* Copy mac multicast addresses from @net_dev to @priv. | |
1533 | * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. | |
1534 | */ | |
1535 | static void add_mc_hw_addr(const struct net_device *net_dev, | |
1536 | struct dpaa2_eth_priv *priv) | |
1537 | { | |
1538 | struct netdev_hw_addr *ha; | |
1539 | int err; | |
1540 | ||
1541 | netdev_for_each_mc_addr(ha, net_dev) { | |
1542 | err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, | |
1543 | ha->addr); | |
1544 | if (err) | |
1545 | netdev_warn(priv->net_dev, | |
1546 | "Could not add mcast MAC %pM to the filtering table (err %d)\n", | |
1547 | ha->addr, err); | |
1548 | } | |
1549 | } | |
1550 | ||
1551 | static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) | |
1552 | { | |
1553 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
1554 | int uc_count = netdev_uc_count(net_dev); | |
1555 | int mc_count = netdev_mc_count(net_dev); | |
1556 | u8 max_mac = priv->dpni_attrs.mac_filter_entries; | |
1557 | u32 options = priv->dpni_attrs.options; | |
1558 | u16 mc_token = priv->mc_token; | |
1559 | struct fsl_mc_io *mc_io = priv->mc_io; | |
1560 | int err; | |
1561 | ||
1562 | /* Basic sanity checks; these probably indicate a misconfiguration */ | |
1563 | if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) | |
1564 | netdev_info(net_dev, | |
1565 | "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", | |
1566 | max_mac); | |
1567 | ||
1568 | /* Force promiscuous if the uc or mc counts exceed our capabilities. */ | |
1569 | if (uc_count > max_mac) { | |
1570 | netdev_info(net_dev, | |
1571 | "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", | |
1572 | uc_count, max_mac); | |
1573 | goto force_promisc; | |
1574 | } | |
1575 | if (mc_count + uc_count > max_mac) { | |
1576 | netdev_info(net_dev, | |
1577 | "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", | |
1578 | uc_count + mc_count, max_mac); | |
1579 | goto force_mc_promisc; | |
1580 | } | |
1581 | ||
1582 | /* Adjust promisc settings due to flag combinations */ | |
1583 | if (net_dev->flags & IFF_PROMISC) | |
1584 | goto force_promisc; | |
1585 | if (net_dev->flags & IFF_ALLMULTI) { | |
1586 | /* First, rebuild unicast filtering table. This should be done | |
1587 | * in promisc mode, in order to avoid frame loss while we | |
1588 | * progressively add entries to the table. | |
1589 | * We don't know whether we had been in promisc already, and | |
1590 | * making an MC call to find out is expensive; so set uc promisc | |
1591 | * nonetheless. | |
1592 | */ | |
1593 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); | |
1594 | if (err) | |
1595 | netdev_warn(net_dev, "Can't set uc promisc\n"); | |
1596 | ||
1597 | /* Actual uc table reconstruction. */ | |
1598 | err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); | |
1599 | if (err) | |
1600 | netdev_warn(net_dev, "Can't clear uc filters\n"); | |
1601 | add_uc_hw_addr(net_dev, priv); | |
1602 | ||
1603 | /* Finally, clear uc promisc and set mc promisc as requested. */ | |
1604 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); | |
1605 | if (err) | |
1606 | netdev_warn(net_dev, "Can't clear uc promisc\n"); | |
1607 | goto force_mc_promisc; | |
1608 | } | |
1609 | ||
1610 | /* Neither unicast nor multicast promisc will be on... eventually. | |
1611 | * For now, rebuild mac filtering tables while forcing both of them on. | |
1612 | */ | |
1613 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); | |
1614 | if (err) | |
1615 | netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); | |
1616 | err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); | |
1617 | if (err) | |
1618 | netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); | |
1619 | ||
1620 | /* Actual mac filtering tables reconstruction */ | |
1621 | err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); | |
1622 | if (err) | |
1623 | netdev_warn(net_dev, "Can't clear mac filters\n"); | |
1624 | add_mc_hw_addr(net_dev, priv); | |
1625 | add_uc_hw_addr(net_dev, priv); | |
1626 | ||
1627 | /* Now we can clear both ucast and mcast promisc, without risking | |
1628 | * to drop legitimate frames anymore. | |
1629 | */ | |
1630 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); | |
1631 | if (err) | |
1632 | netdev_warn(net_dev, "Can't clear ucast promisc\n"); | |
1633 | err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); | |
1634 | if (err) | |
1635 | netdev_warn(net_dev, "Can't clear mcast promisc\n"); | |
1636 | ||
1637 | return; | |
1638 | ||
1639 | force_promisc: | |
1640 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); | |
1641 | if (err) | |
1642 | netdev_warn(net_dev, "Can't set ucast promisc\n"); | |
1643 | force_mc_promisc: | |
1644 | err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); | |
1645 | if (err) | |
1646 | netdev_warn(net_dev, "Can't set mcast promisc\n"); | |
1647 | } | |
1648 | ||
1649 | static int dpaa2_eth_set_features(struct net_device *net_dev, | |
1650 | netdev_features_t features) | |
1651 | { | |
1652 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
1653 | netdev_features_t changed = features ^ net_dev->features; | |
1654 | bool enable; | |
1655 | int err; | |
1656 | ||
1657 | if (changed & NETIF_F_RXCSUM) { | |
1658 | enable = !!(features & NETIF_F_RXCSUM); | |
1659 | err = set_rx_csum(priv, enable); | |
1660 | if (err) | |
1661 | return err; | |
1662 | } | |
1663 | ||
1664 | if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { | |
1665 | enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); | |
1666 | err = set_tx_csum(priv, enable); | |
1667 | if (err) | |
1668 | return err; | |
1669 | } | |
1670 | ||
1671 | return 0; | |
1672 | } | |
1673 | ||
859f998e IR |
1674 | static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1675 | { | |
1676 | struct dpaa2_eth_priv *priv = netdev_priv(dev); | |
1677 | struct hwtstamp_config config; | |
1678 | ||
1679 | if (copy_from_user(&config, rq->ifr_data, sizeof(config))) | |
1680 | return -EFAULT; | |
1681 | ||
1682 | switch (config.tx_type) { | |
1683 | case HWTSTAMP_TX_OFF: | |
1684 | priv->tx_tstamp = false; | |
1685 | break; | |
1686 | case HWTSTAMP_TX_ON: | |
1687 | priv->tx_tstamp = true; | |
1688 | break; | |
1689 | default: | |
1690 | return -ERANGE; | |
1691 | } | |
1692 | ||
1693 | if (config.rx_filter == HWTSTAMP_FILTER_NONE) { | |
1694 | priv->rx_tstamp = false; | |
1695 | } else { | |
1696 | priv->rx_tstamp = true; | |
1697 | /* TS is set for all frame types, not only those requested */ | |
1698 | config.rx_filter = HWTSTAMP_FILTER_ALL; | |
1699 | } | |
1700 | ||
1701 | return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? | |
1702 | -EFAULT : 0; | |
1703 | } | |
1704 | ||
1705 | static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
1706 | { | |
4a84182a RK |
1707 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
1708 | ||
859f998e IR |
1709 | if (cmd == SIOCSHWTSTAMP) |
1710 | return dpaa2_eth_ts_ioctl(dev, rq, cmd); | |
1711 | ||
4a84182a RK |
1712 | if (priv->mac) |
1713 | return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); | |
1714 | ||
1715 | return -EOPNOTSUPP; | |
859f998e IR |
1716 | } |
1717 | ||
7e273a8e ICR |
1718 | static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) |
1719 | { | |
1720 | int mfl, linear_mfl; | |
1721 | ||
1722 | mfl = DPAA2_ETH_L2_MAX_FRM(mtu); | |
efa6a7d0 | 1723 | linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - |
7b1eea1a | 1724 | dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; |
7e273a8e ICR |
1725 | |
1726 | if (mfl > linear_mfl) { | |
1727 | netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", | |
1728 | linear_mfl - VLAN_ETH_HLEN); | |
1729 | return false; | |
1730 | } | |
1731 | ||
1732 | return true; | |
1733 | } | |
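/* Worked example with illustrative values: a 2048 byte Rx buffer,
 * DPAA2_ETH_RX_HWA_SIZE of 64, 128 bytes of Rx headroom and an
 * XDP_PACKET_HEADROOM of 256 give linear_mfl = 2048 - 64 - 128 - 256 =
 * 1600. With DPAA2_ETH_L2_MAX_FRM() adding the L2 overhead
 * (VLAN_ETH_HLEN = 18) on top of the MTU, the largest XDP-compatible
 * MTU here would be 1600 - 18 = 1582.
 */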
1734 | ||
1735 | static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) | |
1736 | { | |
1737 | int mfl, err; | |
1738 | ||
1739 | /* We enforce a maximum Rx frame length based on MTU only if we have | |
1740 | * an XDP program attached (in order to avoid Rx S/G frames). | |
1741 | * Otherwise, we accept all incoming frames as long as they are not | |
1742 | * larger than the maximum frame size supported in hardware. | |
1743 | */ | |
1744 | if (has_xdp) | |
1745 | mfl = DPAA2_ETH_L2_MAX_FRM(mtu); | |
1746 | else | |
1747 | mfl = DPAA2_ETH_MFL; | |
1748 | ||
1749 | err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); | |
1750 | if (err) { | |
1751 | netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); | |
1752 | return err; | |
1753 | } | |
1754 | ||
1755 | return 0; | |
1756 | } | |
1757 | ||
1758 | static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) | |
1759 | { | |
1760 | struct dpaa2_eth_priv *priv = netdev_priv(dev); | |
1761 | int err; | |
1762 | ||
1763 | if (!priv->xdp_prog) | |
1764 | goto out; | |
1765 | ||
1766 | if (!xdp_mtu_valid(priv, new_mtu)) | |
1767 | return -EINVAL; | |
1768 | ||
1769 | err = set_rx_mfl(priv, new_mtu, true); | |
1770 | if (err) | |
1771 | return err; | |
1772 | ||
1773 | out: | |
1774 | dev->mtu = new_mtu; | |
1775 | return 0; | |
1776 | } | |
1777 | ||
7b1eea1a ICR |
1778 | static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) |
1779 | { | |
1780 | struct dpni_buffer_layout buf_layout = {0}; | |
1781 | int err; | |
1782 | ||
1783 | err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, | |
1784 | DPNI_QUEUE_RX, &buf_layout); | |
1785 | if (err) { | |
1786 | netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); | |
1787 | return err; | |
1788 | } | |
1789 | ||
1790 | /* Reserve extra headroom for XDP header size changes */ | |
1791 | buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + | |
1792 | (has_xdp ? XDP_PACKET_HEADROOM : 0); | |
1793 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; | |
1794 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, | |
1795 | DPNI_QUEUE_RX, &buf_layout); | |
1796 | if (err) { | |
1797 | netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); | |
1798 | return err; | |
1799 | } | |
1800 | ||
1801 | return 0; | |
1802 | } | |
1803 | ||
7e273a8e ICR |
1804 | static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) |
1805 | { | |
1806 | struct dpaa2_eth_priv *priv = netdev_priv(dev); | |
1807 | struct dpaa2_eth_channel *ch; | |
1808 | struct bpf_prog *old; | |
1809 | bool up, need_update; | |
1810 | int i, err; | |
1811 | ||
1812 | if (prog && !xdp_mtu_valid(priv, dev->mtu)) | |
1813 | return -EINVAL; | |
1814 | ||
85192dbf AN |
1815 | if (prog) |
1816 | bpf_prog_add(prog, priv->num_channels); | |
7e273a8e ICR |
1817 | |
1818 | up = netif_running(dev); | |
1819 | need_update = (!!priv->xdp_prog != !!prog); | |
1820 | ||
1821 | if (up) | |
1822 | dpaa2_eth_stop(dev); | |
1823 | ||
7b1eea1a ICR |
1824 | /* While in xdp mode, enforce a maximum Rx frame size based on MTU. |
1825 | * Also, when switching between xdp/non-xdp modes we need to reconfigure | |
1826 | * our Rx buffer layout. The buffer pool was drained in dpaa2_eth_stop(), | |
1827 | * so we are sure no old-format buffers will be used from now on. | |
1828 | */ | |
7e273a8e ICR |
1829 | if (need_update) { |
1830 | err = set_rx_mfl(priv, dev->mtu, !!prog); | |
1831 | if (err) | |
1832 | goto out_err; | |
7b1eea1a ICR |
1833 | err = update_rx_buffer_headroom(priv, !!prog); |
1834 | if (err) | |
1835 | goto out_err; | |
7e273a8e ICR |
1836 | } |
1837 | ||
1838 | old = xchg(&priv->xdp_prog, prog); | |
1839 | if (old) | |
1840 | bpf_prog_put(old); | |
1841 | ||
1842 | for (i = 0; i < priv->num_channels; i++) { | |
1843 | ch = priv->channel[i]; | |
1844 | old = xchg(&ch->xdp.prog, prog); | |
1845 | if (old) | |
1846 | bpf_prog_put(old); | |
1847 | } | |
1848 | ||
1849 | if (up) { | |
1850 | err = dpaa2_eth_open(dev); | |
1851 | if (err) | |
1852 | return err; | |
1853 | } | |
1854 | ||
1855 | return 0; | |
1856 | ||
1857 | out_err: | |
1858 | if (prog) | |
1859 | bpf_prog_sub(prog, priv->num_channels); | |
1860 | if (up) | |
1861 | dpaa2_eth_open(dev); | |
1862 | ||
1863 | return err; | |
1864 | } | |
1865 | ||
1866 | static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |
1867 | { | |
1868 | struct dpaa2_eth_priv *priv = netdev_priv(dev); | |
1869 | ||
1870 | switch (xdp->command) { | |
1871 | case XDP_SETUP_PROG: | |
1872 | return setup_xdp(dev, xdp->prog); | |
1873 | case XDP_QUERY_PROG: | |
1874 | xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0; | |
1875 | break; | |
1876 | default: | |
1877 | return -EINVAL; | |
1878 | } | |
1879 | ||
1880 | return 0; | |
1881 | } | |
1882 | ||
d678be1d IR |
1883 | static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, |
1884 | struct xdp_frame *xdpf) | |
1885 | { | |
1886 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
1887 | struct device *dev = net_dev->dev.parent; | |
1888 | struct rtnl_link_stats64 *percpu_stats; | |
1889 | struct dpaa2_eth_drv_stats *percpu_extras; | |
1890 | unsigned int needed_headroom; | |
1891 | struct dpaa2_eth_swa *swa; | |
1892 | struct dpaa2_eth_fq *fq; | |
1893 | struct dpaa2_fd fd; | |
1894 | void *buffer_start, *aligned_start; | |
1895 | dma_addr_t addr; | |
1896 | int err, i; | |
1897 | ||
1898 | /* We require a minimum headroom to be able to transmit the frame. | |
1899 | * Otherwise return an error and let the original net_device handle it | |
1900 | */ | |
1901 | needed_headroom = dpaa2_eth_needed_headroom(priv, NULL); | |
1902 | if (xdpf->headroom < needed_headroom) | |
1903 | return -EINVAL; | |
1904 | ||
1905 | percpu_stats = this_cpu_ptr(priv->percpu_stats); | |
1906 | percpu_extras = this_cpu_ptr(priv->percpu_extras); | |
1907 | ||
1908 | /* Setup the FD fields */ | |
1909 | memset(&fd, 0, sizeof(fd)); | |
1910 | ||
1911 | /* Align FD address, if possible */ | |
1912 | buffer_start = xdpf->data - needed_headroom; | |
1913 | aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, | |
1914 | DPAA2_ETH_TX_BUF_ALIGN); | |
1915 | if (aligned_start >= xdpf->data - xdpf->headroom) | |
1916 | buffer_start = aligned_start; | |
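/* Worked alignment example (addresses illustrative): with
 * DPAA2_ETH_TX_BUF_ALIGN = 64 and buffer_start ending in 0x90,
 * PTR_ALIGN(0x90 - 64, 64) rounds 0x50 up to 0x80, i.e. the address is
 * effectively rounded *down* to a 64 byte boundary, and is used only
 * if it still falls within the frame's available headroom.
 */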
1917 | ||
1918 | swa = (struct dpaa2_eth_swa *)buffer_start; | |
1919 | /* Fill in the software annotation fields used on Tx confirmation */ | |
1920 | swa->type = DPAA2_ETH_SWA_XDP; | |
1921 | swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; | |
1922 | swa->xdp.xdpf = xdpf; | |
1923 | ||
1924 | addr = dma_map_single(dev, buffer_start, | |
1925 | swa->xdp.dma_size, | |
1926 | DMA_BIDIRECTIONAL); | |
1927 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1928 | percpu_stats->tx_dropped++; | |
1929 | return -ENOMEM; | |
1930 | } | |
1931 | ||
1932 | dpaa2_fd_set_addr(&fd, addr); | |
1933 | dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start); | |
1934 | dpaa2_fd_set_len(&fd, xdpf->len); | |
1935 | dpaa2_fd_set_format(&fd, dpaa2_fd_single); | |
1936 | dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); | |
1937 | ||
64447506 | 1938 | fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)]; |
d678be1d IR |
1939 | for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { |
1940 | err = priv->enqueue(priv, fq, &fd, 0); | |
1941 | if (err != -EBUSY) | |
1942 | break; | |
1943 | } | |
1944 | percpu_extras->tx_portal_busy += i; | |
1945 | if (unlikely(err < 0)) { | |
1946 | percpu_stats->tx_errors++; | |
1947 | /* let the Rx device handle the cleanup */ | |
1948 | return err; | |
1949 | } | |
1950 | ||
1951 | percpu_stats->tx_packets++; | |
1952 | percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); | |
1953 | ||
1954 | return 0; | |
1955 | } | |
1956 | ||
1957 | static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, | |
1958 | struct xdp_frame **frames, u32 flags) | |
1959 | { | |
1960 | int drops = 0; | |
1961 | int i, err; | |
1962 | ||
1963 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) | |
1964 | return -EINVAL; | |
1965 | ||
1966 | if (!netif_running(net_dev)) | |
1967 | return -ENETDOWN; | |
1968 | ||
1969 | for (i = 0; i < n; i++) { | |
1970 | struct xdp_frame *xdpf = frames[i]; | |
1971 | ||
1972 | err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf); | |
1973 | if (err) { | |
1974 | xdp_return_frame_rx_napi(xdpf); | |
1975 | drops++; | |
1976 | } | |
1977 | } | |
1978 | ||
1979 | return n - drops; | |
1980 | } | |
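/* Per the ndo_xdp_xmit contract, the value returned above is the number
 * of frames accepted for transmission; rejected frames have already
 * been handed back through xdp_return_frame_rx_napi().
 */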
1981 | ||
06d5b179 IR |
1982 | static int update_xps(struct dpaa2_eth_priv *priv) |
1983 | { | |
1984 | struct net_device *net_dev = priv->net_dev; | |
1985 | struct cpumask xps_mask; | |
1986 | struct dpaa2_eth_fq *fq; | |
ab1e6de2 | 1987 | int i, num_queues, netdev_queues; |
06d5b179 IR |
1988 | int err = 0; |
1989 | ||
1990 | num_queues = dpaa2_eth_queue_count(priv); | |
ab1e6de2 | 1991 | netdev_queues = (net_dev->num_tc ? : 1) * num_queues; |
06d5b179 IR |
1992 | |
1993 | /* The first <num_queues> entries in priv->fq array are Tx/Tx conf | |
1994 | * queues, so only process those | |
1995 | */ | |
ab1e6de2 IR |
1996 | for (i = 0; i < netdev_queues; i++) { |
1997 | fq = &priv->fq[i % num_queues]; | |
06d5b179 IR |
1998 | |
1999 | cpumask_clear(&xps_mask); | |
2000 | cpumask_set_cpu(fq->target_cpu, &xps_mask); | |
2001 | ||
2002 | err = netif_set_xps_queue(net_dev, &xps_mask, i); | |
2003 | if (err) { | |
2004 | netdev_warn_once(net_dev, "Error setting XPS queue\n"); | |
2005 | break; | |
2006 | } | |
2007 | } | |
2008 | ||
2009 | return err; | |
2010 | } | |
2011 | ||
ab1e6de2 IR |
2012 | static int dpaa2_eth_setup_tc(struct net_device *net_dev, |
2013 | enum tc_setup_type type, void *type_data) | |
2014 | { | |
2015 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
2016 | struct tc_mqprio_qopt *mqprio = type_data; | |
2017 | u8 num_tc, num_queues; | |
2018 | int i; | |
2019 | ||
2020 | if (type != TC_SETUP_QDISC_MQPRIO) | |
2021 | return -EINVAL; | |
2022 | ||
2023 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; | |
2024 | num_queues = dpaa2_eth_queue_count(priv); | |
2025 | num_tc = mqprio->num_tc; | |
2026 | ||
2027 | if (num_tc == net_dev->num_tc) | |
2028 | return 0; | |
2029 | ||
2030 | if (num_tc > dpaa2_eth_tc_count(priv)) { | |
2031 | netdev_err(net_dev, "Max %d traffic classes supported\n", | |
2032 | dpaa2_eth_tc_count(priv)); | |
2033 | return -EINVAL; | |
2034 | } | |
2035 | ||
2036 | if (!num_tc) { | |
2037 | netdev_reset_tc(net_dev); | |
2038 | netif_set_real_num_tx_queues(net_dev, num_queues); | |
2039 | goto out; | |
2040 | } | |
2041 | ||
2042 | netdev_set_num_tc(net_dev, num_tc); | |
2043 | netif_set_real_num_tx_queues(net_dev, num_tc * num_queues); | |
2044 | ||
2045 | for (i = 0; i < num_tc; i++) | |
2046 | netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues); | |
2047 | ||
2048 | out: | |
2049 | update_xps(priv); | |
2050 | ||
2051 | return 0; | |
2052 | } | |
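/* Worked example (illustrative): with num_queues = 8 and num_tc = 2 the
 * netdev exposes 16 Tx queues; netdev_set_tc_queue() maps tc 0 to
 * queues 0-7 and tc 1 to queues 8-15, and update_xps() then points
 * netdev queue i at the CPU affine to hardware queue i % 8.
 */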
2053 | ||
6e2387e8 IR |
2054 | static const struct net_device_ops dpaa2_eth_ops = { |
2055 | .ndo_open = dpaa2_eth_open, | |
2056 | .ndo_start_xmit = dpaa2_eth_tx, | |
2057 | .ndo_stop = dpaa2_eth_stop, | |
6e2387e8 IR |
2058 | .ndo_set_mac_address = dpaa2_eth_set_addr, |
2059 | .ndo_get_stats64 = dpaa2_eth_get_stats, | |
6e2387e8 IR |
2060 | .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, |
2061 | .ndo_set_features = dpaa2_eth_set_features, | |
859f998e | 2062 | .ndo_do_ioctl = dpaa2_eth_ioctl, |
7e273a8e ICR |
2063 | .ndo_change_mtu = dpaa2_eth_change_mtu, |
2064 | .ndo_bpf = dpaa2_eth_xdp, | |
d678be1d | 2065 | .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, |
ab1e6de2 | 2066 | .ndo_setup_tc = dpaa2_eth_setup_tc, |
6e2387e8 IR |
2067 | }; |
2068 | ||
2069 | static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) | |
2070 | { | |
2071 | struct dpaa2_eth_channel *ch; | |
2072 | ||
2073 | ch = container_of(ctx, struct dpaa2_eth_channel, nctx); | |
85047abd IR |
2074 | |
2075 | /* Update NAPI statistics */ | |
2076 | ch->stats.cdan++; | |
2077 | ||
6e2387e8 IR |
2078 | napi_schedule_irqoff(&ch->napi); |
2079 | } | |
2080 | ||
2081 | /* Allocate and configure a DPCON object */ | |
2082 | static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) | |
2083 | { | |
2084 | struct fsl_mc_device *dpcon; | |
2085 | struct device *dev = priv->net_dev->dev.parent; | |
6e2387e8 IR |
2086 | int err; |
2087 | ||
2088 | err = fsl_mc_object_allocate(to_fsl_mc_device(dev), | |
2089 | FSL_MC_POOL_DPCON, &dpcon); | |
2090 | if (err) { | |
d7f5a9d8 IC |
2091 | if (err == -ENXIO) |
2092 | err = -EPROBE_DEFER; | |
2093 | else | |
2094 | dev_info(dev, "Not enough DPCONs, will go on as-is\n"); | |
2095 | return ERR_PTR(err); | |
6e2387e8 IR |
2096 | } |
2097 | ||
2098 | err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); | |
2099 | if (err) { | |
2100 | dev_err(dev, "dpcon_open() failed\n"); | |
f6dda809 | 2101 | goto free; |
6e2387e8 IR |
2102 | } |
2103 | ||
2104 | err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); | |
2105 | if (err) { | |
2106 | dev_err(dev, "dpcon_reset() failed\n"); | |
f6dda809 | 2107 | goto close; |
6e2387e8 IR |
2108 | } |
2109 | ||
6e2387e8 IR |
2110 | err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); |
2111 | if (err) { | |
2112 | dev_err(dev, "dpcon_enable() failed\n"); | |
f6dda809 | 2113 | goto close; |
6e2387e8 IR |
2114 | } |
2115 | ||
2116 | return dpcon; | |
2117 | ||
f6dda809 | 2118 | close: |
6e2387e8 | 2119 | dpcon_close(priv->mc_io, 0, dpcon->mc_handle); |
f6dda809 | 2120 | free: |
6e2387e8 IR |
2121 | fsl_mc_object_free(dpcon); |
2122 | ||
2123 | return NULL; | |
2124 | } | |
2125 | ||
2126 | static void free_dpcon(struct dpaa2_eth_priv *priv, | |
2127 | struct fsl_mc_device *dpcon) | |
2128 | { | |
2129 | dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); | |
2130 | dpcon_close(priv->mc_io, 0, dpcon->mc_handle); | |
2131 | fsl_mc_object_free(dpcon); | |
2132 | } | |
2133 | ||
2134 | static struct dpaa2_eth_channel * | |
2135 | alloc_channel(struct dpaa2_eth_priv *priv) | |
2136 | { | |
2137 | struct dpaa2_eth_channel *channel; | |
2138 | struct dpcon_attr attr; | |
2139 | struct device *dev = priv->net_dev->dev.parent; | |
2140 | int err; | |
2141 | ||
2142 | channel = kzalloc(sizeof(*channel), GFP_KERNEL); | |
2143 | if (!channel) | |
2144 | return NULL; | |
2145 | ||
2146 | channel->dpcon = setup_dpcon(priv); | |
d7f5a9d8 | 2147 | if (IS_ERR_OR_NULL(channel->dpcon)) { |
bd8460fa | 2148 | err = PTR_ERR_OR_ZERO(channel->dpcon); |
6e2387e8 | 2149 | goto err_setup; |
d7f5a9d8 | 2150 | } |
6e2387e8 IR |
2151 | |
2152 | err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, | |
2153 | &attr); | |
2154 | if (err) { | |
2155 | dev_err(dev, "dpcon_get_attributes() failed\n"); | |
2156 | goto err_get_attr; | |
2157 | } | |
2158 | ||
2159 | channel->dpcon_id = attr.id; | |
2160 | channel->ch_id = attr.qbman_ch_id; | |
2161 | channel->priv = priv; | |
2162 | ||
2163 | return channel; | |
2164 | ||
2165 | err_get_attr: | |
2166 | free_dpcon(priv, channel->dpcon); | |
2167 | err_setup: | |
2168 | kfree(channel); | |
d7f5a9d8 | 2169 | return ERR_PTR(err); |
6e2387e8 IR |
2170 | } |
2171 | ||
2172 | static void free_channel(struct dpaa2_eth_priv *priv, | |
2173 | struct dpaa2_eth_channel *channel) | |
2174 | { | |
2175 | free_dpcon(priv, channel->dpcon); | |
2176 | kfree(channel); | |
2177 | } | |
2178 | ||
2179 | /* DPIO setup: allocate and configure QBMan channels, setup core affinity | |
2180 | * and register data availability notifications | |
2181 | */ | |
2182 | static int setup_dpio(struct dpaa2_eth_priv *priv) | |
2183 | { | |
2184 | struct dpaa2_io_notification_ctx *nctx; | |
2185 | struct dpaa2_eth_channel *channel; | |
2186 | struct dpcon_notification_cfg dpcon_notif_cfg; | |
2187 | struct device *dev = priv->net_dev->dev.parent; | |
2188 | int i, err; | |
2189 | ||
2190 | /* We want the ability to spread ingress traffic (RX, TX conf) to as | |
2191 | * many cores as possible, so we need one channel for each core | |
2192 | * (unless there are fewer queues than cores, in which case the extra | |
2193 | * channels would be wasted). | |
2194 | * Allocate one channel per core and register it to the core's | |
2195 | * affine DPIO. If not enough channels are available for all cores | |
2196 | * or if some cores don't have an affine DPIO, there will be no | |
2197 | * ingress frame processing on those cores. | |
2198 | */ | |
2199 | cpumask_clear(&priv->dpio_cpumask); | |
2200 | for_each_online_cpu(i) { | |
2201 | /* Try to allocate a channel */ | |
2202 | channel = alloc_channel(priv); | |
d7f5a9d8 | 2203 | if (IS_ERR_OR_NULL(channel)) { |
bd8460fa | 2204 | err = PTR_ERR_OR_ZERO(channel); |
d7f5a9d8 IC |
2205 | if (err != -EPROBE_DEFER) |
2206 | dev_info(dev, | |
2207 | "No affine channel for cpu %d and above\n", i); | |
6e2387e8 IR |
2208 | goto err_alloc_ch; |
2209 | } | |
2210 | ||
2211 | priv->channel[priv->num_channels] = channel; | |
2212 | ||
2213 | nctx = &channel->nctx; | |
2214 | nctx->is_cdan = 1; | |
2215 | nctx->cb = cdan_cb; | |
2216 | nctx->id = channel->ch_id; | |
2217 | nctx->desired_cpu = i; | |
2218 | ||
2219 | /* Register the new context */ | |
7ec0596f | 2220 | channel->dpio = dpaa2_io_service_select(i); |
47441f7f | 2221 | err = dpaa2_io_service_register(channel->dpio, nctx, dev); |
6e2387e8 | 2222 | if (err) { |
5206d8d1 | 2223 | dev_dbg(dev, "No affine DPIO for cpu %d\n", i); |
6e2387e8 | 2224 | /* If no affine DPIO for this core, there's probably |
5206d8d1 IR |
2225 | * none available for the next cores either. Signal we want | |
2226 | * to retry later, in case the DPIO devices weren't | |
2227 | * probed yet. | |
6e2387e8 | 2228 | */ |
5206d8d1 | 2229 | err = -EPROBE_DEFER; |
6e2387e8 IR |
2230 | goto err_service_reg; |
2231 | } | |
2232 | ||
2233 | /* Register DPCON notification with MC */ | |
2234 | dpcon_notif_cfg.dpio_id = nctx->dpio_id; | |
2235 | dpcon_notif_cfg.priority = 0; | |
2236 | dpcon_notif_cfg.user_ctx = nctx->qman64; | |
2237 | err = dpcon_set_notification(priv->mc_io, 0, | |
2238 | channel->dpcon->mc_handle, | |
2239 | &dpcon_notif_cfg); | |
2240 | if (err) { | |
2241 | dev_err(dev, "dpcon_set_notification failed()\n"); | |
2242 | goto err_set_cdan; | |
2243 | } | |
2244 | ||
2245 | /* If we managed to allocate a channel and also found an affine | |
2246 | * DPIO for this core, add it to the final mask | |
2247 | */ | |
2248 | cpumask_set_cpu(i, &priv->dpio_cpumask); | |
2249 | priv->num_channels++; | |
2250 | ||
2251 | /* Stop if we already have enough channels to accommodate all | |
2252 | * RX and TX conf queues | |
2253 | */ | |
b0e4f37b | 2254 | if (priv->num_channels == priv->dpni_attrs.num_queues) |
6e2387e8 IR |
2255 | break; |
2256 | } | |
2257 | ||
2258 | return 0; | |
2259 | ||
2260 | err_set_cdan: | |
47441f7f | 2261 | dpaa2_io_service_deregister(channel->dpio, nctx, dev); |
6e2387e8 IR |
2262 | err_service_reg: |
2263 | free_channel(priv, channel); | |
2264 | err_alloc_ch: | |
5aa4277d IC |
2265 | if (err == -EPROBE_DEFER) { |
2266 | for (i = 0; i < priv->num_channels; i++) { | |
2267 | channel = priv->channel[i]; | |
2268 | nctx = &channel->nctx; | |
2269 | dpaa2_io_service_deregister(channel->dpio, nctx, dev); | |
2270 | free_channel(priv, channel); | |
2271 | } | |
2272 | priv->num_channels = 0; | |
d7f5a9d8 | 2273 | return err; |
5aa4277d | 2274 | } |
d7f5a9d8 | 2275 | |
6e2387e8 IR |
2276 | if (cpumask_empty(&priv->dpio_cpumask)) { |
2277 | dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); | |
d7f5a9d8 | 2278 | return -ENODEV; |
6e2387e8 IR |
2279 | } |
2280 | ||
2281 | dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", | |
2282 | cpumask_pr_args(&priv->dpio_cpumask)); | |
2283 | ||
2284 | return 0; | |
2285 | } | |
2286 | ||
2287 | static void free_dpio(struct dpaa2_eth_priv *priv) | |
2288 | { | |
47441f7f | 2289 | struct device *dev = priv->net_dev->dev.parent; |
6e2387e8 | 2290 | struct dpaa2_eth_channel *ch; |
47441f7f | 2291 | int i; |
6e2387e8 IR |
2292 | |
2293 | /* deregister CDAN notifications and free channels */ | |
2294 | for (i = 0; i < priv->num_channels; i++) { | |
2295 | ch = priv->channel[i]; | |
47441f7f | 2296 | dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); |
6e2387e8 IR |
2297 | free_channel(priv, ch); |
2298 | } | |
2299 | } | |
2300 | ||
2301 | static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, | |
2302 | int cpu) | |
2303 | { | |
2304 | struct device *dev = priv->net_dev->dev.parent; | |
2305 | int i; | |
2306 | ||
2307 | for (i = 0; i < priv->num_channels; i++) | |
2308 | if (priv->channel[i]->nctx.desired_cpu == cpu) | |
2309 | return priv->channel[i]; | |
2310 | ||
2311 | /* We should never get here. Issue a warning and return | |
2312 | * the first channel, because it's still better than nothing | |
2313 | */ | |
2314 | dev_warn(dev, "No affine channel found for cpu %d\n", cpu); | |
2315 | ||
2316 | return priv->channel[0]; | |
2317 | } | |
2318 | ||
2319 | static void set_fq_affinity(struct dpaa2_eth_priv *priv) | |
2320 | { | |
2321 | struct device *dev = priv->net_dev->dev.parent; | |
2322 | struct dpaa2_eth_fq *fq; | |
2323 | int rx_cpu, txc_cpu; | |
06d5b179 | 2324 | int i; |
6e2387e8 IR |
2325 | |
2326 | /* For each FQ, pick one channel/CPU to deliver frames to. | |
2327 | * This may well change at runtime, either through irqbalance or | |
2328 | * through direct user intervention. | |
2329 | */ | |
2330 | rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); | |
2331 | ||
2332 | for (i = 0; i < priv->num_fqs; i++) { | |
2333 | fq = &priv->fq[i]; | |
2334 | switch (fq->type) { | |
2335 | case DPAA2_RX_FQ: | |
2336 | fq->target_cpu = rx_cpu; | |
2337 | rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); | |
2338 | if (rx_cpu >= nr_cpu_ids) | |
2339 | rx_cpu = cpumask_first(&priv->dpio_cpumask); | |
2340 | break; | |
2341 | case DPAA2_TX_CONF_FQ: | |
2342 | fq->target_cpu = txc_cpu; | |
2343 | txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); | |
2344 | if (txc_cpu >= nr_cpu_ids) | |
2345 | txc_cpu = cpumask_first(&priv->dpio_cpumask); | |
2346 | break; | |
2347 | default: | |
2348 | dev_err(dev, "Unknown FQ type: %d\n", fq->type); | |
2349 | } | |
2350 | fq->channel = get_affine_channel(priv, fq->target_cpu); | |
2351 | } | |
06d5b179 IR |
2352 | |
2353 | update_xps(priv); | |
6e2387e8 IR |
2354 | } |
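/* Worked example (illustrative): with dpio_cpumask = {0,1,2,3} and four
 * queue pairs, the Rx and Tx conf FQs are each dealt round-robin, so
 * flowids 0..3 of either type land on CPUs 0, 1, 2, 3 in turn.
 */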
2355 | ||
2356 | static void setup_fqs(struct dpaa2_eth_priv *priv) | |
2357 | { | |
2358 | int i; | |
2359 | ||
2360 | /* We have one TxConf FQ per Tx flow. | |
2361 | * The number of Tx and Rx queues is the same. | |
2362 | * Tx queues come first in the fq array. | |
2363 | */ | |
2364 | for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { | |
2365 | priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; | |
2366 | priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; | |
2367 | priv->fq[priv->num_fqs++].flowid = (u16)i; | |
2368 | } | |
2369 | ||
2370 | for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { | |
2371 | priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; | |
2372 | priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; | |
2373 | priv->fq[priv->num_fqs++].flowid = (u16)i; | |
2374 | } | |
2375 | ||
2376 | /* For each FQ, decide on which core to process incoming frames */ | |
2377 | set_fq_affinity(priv); | |
2378 | } | |
2379 | ||
2380 | /* Allocate and configure one buffer pool for each interface */ | |
2381 | static int setup_dpbp(struct dpaa2_eth_priv *priv) | |
2382 | { | |
2383 | int err; | |
2384 | struct fsl_mc_device *dpbp_dev; | |
2385 | struct device *dev = priv->net_dev->dev.parent; | |
05fa39c6 | 2386 | struct dpbp_attr dpbp_attrs; |
6e2387e8 IR |
2387 | |
2388 | err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, | |
2389 | &dpbp_dev); | |
2390 | if (err) { | |
d7f5a9d8 IC |
2391 | if (err == -ENXIO) |
2392 | err = -EPROBE_DEFER; | |
2393 | else | |
2394 | dev_err(dev, "DPBP device allocation failed\n"); | |
6e2387e8 IR |
2395 | return err; |
2396 | } | |
2397 | ||
2398 | priv->dpbp_dev = dpbp_dev; | |
2399 | ||
2400 | err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, | |
2401 | &dpbp_dev->mc_handle); | |
2402 | if (err) { | |
2403 | dev_err(dev, "dpbp_open() failed\n"); | |
2404 | goto err_open; | |
2405 | } | |
2406 | ||
d00defe3 IR |
2407 | err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); |
2408 | if (err) { | |
2409 | dev_err(dev, "dpbp_reset() failed\n"); | |
2410 | goto err_reset; | |
2411 | } | |
2412 | ||
6e2387e8 IR |
2413 | err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); |
2414 | if (err) { | |
2415 | dev_err(dev, "dpbp_enable() failed\n"); | |
2416 | goto err_enable; | |
2417 | } | |
2418 | ||
2419 | err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, | |
05fa39c6 | 2420 | &dpbp_attrs); |
6e2387e8 IR |
2421 | if (err) { |
2422 | dev_err(dev, "dpbp_get_attributes() failed\n"); | |
2423 | goto err_get_attr; | |
2424 | } | |
05fa39c6 | 2425 | priv->bpid = dpbp_attrs.bpid; |
6e2387e8 IR |
2426 | |
2427 | return 0; | |
2428 | ||
2429 | err_get_attr: | |
2430 | dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); | |
2431 | err_enable: | |
d00defe3 | 2432 | err_reset: |
6e2387e8 IR |
2433 | dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); |
2434 | err_open: | |
2435 | fsl_mc_object_free(dpbp_dev); | |
2436 | ||
2437 | return err; | |
2438 | } | |
2439 | ||
2440 | static void free_dpbp(struct dpaa2_eth_priv *priv) | |
2441 | { | |
2442 | drain_pool(priv); | |
2443 | dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); | |
2444 | dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); | |
2445 | fsl_mc_object_free(priv->dpbp_dev); | |
2446 | } | |
2447 | ||
308f64e7 | 2448 | static int set_buffer_layout(struct dpaa2_eth_priv *priv) |
6e2387e8 | 2449 | { |
308f64e7 | 2450 | struct device *dev = priv->net_dev->dev.parent; |
50eacbc8 | 2451 | struct dpni_buffer_layout buf_layout = {0}; |
27c87486 | 2452 | u16 rx_buf_align; |
6e2387e8 IR |
2453 | int err; |
2454 | ||
8a4fd877 BP |
2455 | /* We need to check for WRIOP version 1.0.0, but depending on the MC |
2456 | * version, this number may be reported as either 0.0.0 or 1.0.0 on | |
2457 | * rev1 hardware, so accept both alternatives here. | |
2458 | */ | |
2459 | if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || | |
2460 | priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) | |
27c87486 | 2461 | rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; |
8a4fd877 | 2462 | else |
27c87486 | 2463 | rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; |
8a4fd877 | 2464 | |
efa6a7d0 IC |
2465 | /* We need to ensure that the buffer size seen by WRIOP is a multiple |
2466 | * of 64 or 256 bytes depending on the WRIOP version. | |
2467 | */ | |
2468 | priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); | |
2469 | ||
4b2d9fe8 | 2470 | /* tx buffer */ |
50eacbc8 | 2471 | buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; |
859f998e IR |
2472 | buf_layout.pass_timestamp = true; |
2473 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | | |
2474 | DPNI_BUF_LAYOUT_OPT_TIMESTAMP; | |
6e2387e8 | 2475 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
50eacbc8 | 2476 | DPNI_QUEUE_TX, &buf_layout); |
6e2387e8 IR |
2477 | if (err) { |
2478 | dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); | |
308f64e7 | 2479 | return err; |
6e2387e8 IR |
2480 | } |
2481 | ||
2482 | /* tx-confirm buffer */ | |
859f998e | 2483 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
6e2387e8 | 2484 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
50eacbc8 | 2485 | DPNI_QUEUE_TX_CONFIRM, &buf_layout); |
6e2387e8 IR |
2486 | if (err) { |
2487 | dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); | |
308f64e7 IR |
2488 | return err; |
2489 | } | |
2490 | ||
4b2d9fe8 BP |
2491 | /* Now that we've set our tx buffer layout, retrieve the minimum |
2492 | * required tx data offset. | |
2493 | */ | |
2494 | err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, | |
2495 | &priv->tx_data_offset); | |
2496 | if (err) { | |
2497 | dev_err(dev, "dpni_get_tx_data_offset() failed\n"); | |
2498 | return err; | |
2499 | } | |
2500 | ||
2501 | if ((priv->tx_data_offset % 64) != 0) | |
2502 | dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", | |
2503 | priv->tx_data_offset); | |
2504 | ||
2505 | /* rx buffer */ | |
2b7c86eb | 2506 | buf_layout.pass_frame_status = true; |
4b2d9fe8 | 2507 | buf_layout.pass_parser_result = true; |
27c87486 | 2508 | buf_layout.data_align = rx_buf_align; |
4b2d9fe8 BP |
2509 | buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); |
2510 | buf_layout.private_data_size = 0; | |
2511 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | | |
2512 | DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | | |
2513 | DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | | |
859f998e IR |
2514 | DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | |
2515 | DPNI_BUF_LAYOUT_OPT_TIMESTAMP; | |
4b2d9fe8 BP |
2516 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
2517 | DPNI_QUEUE_RX, &buf_layout); | |
2518 | if (err) { | |
2519 | dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); | |
2520 | return err; | |
2521 | } | |
2522 | ||
308f64e7 IR |
2523 | return 0; |
2524 | } | |
2525 | ||
1fa0f68c ICR |
2526 | #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 |
2527 | #define DPNI_ENQUEUE_FQID_VER_MINOR 9 | |
2528 | ||
2529 | static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, | |
2530 | struct dpaa2_eth_fq *fq, | |
2531 | struct dpaa2_fd *fd, u8 prio) | |
2532 | { | |
2533 | return dpaa2_io_service_enqueue_qd(fq->channel->dpio, | |
2534 | priv->tx_qdid, prio, | |
2535 | fq->tx_qdbin, fd); | |
2536 | } | |
2537 | ||
2538 | static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv, | |
2539 | struct dpaa2_eth_fq *fq, | |
15c87f6b | 2540 | struct dpaa2_fd *fd, u8 prio) |
1fa0f68c ICR |
2541 | { |
2542 | return dpaa2_io_service_enqueue_fq(fq->channel->dpio, | |
15c87f6b | 2543 | fq->tx_fqid[prio], fd); |
1fa0f68c ICR |
2544 | } |
2545 | ||
2546 | static void set_enqueue_mode(struct dpaa2_eth_priv *priv) | |
2547 | { | |
2548 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, | |
2549 | DPNI_ENQUEUE_FQID_VER_MINOR) < 0) | |
2550 | priv->enqueue = dpaa2_eth_enqueue_qd; | |
2551 | else | |
2552 | priv->enqueue = dpaa2_eth_enqueue_fq; | |
2553 | } | |
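/* Example: a DPNI reporting API version 7.8 compares as older than
 * 7.9, so dpaa2_eth_cmp_dpni_ver() returns a negative value and the
 * QDID-based enqueue is chosen; 7.9 or newer selects the direct
 * FQID-based enqueue.
 */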
2554 | ||
8eb3cef8 IR |
2555 | static int set_pause(struct dpaa2_eth_priv *priv) |
2556 | { | |
2557 | struct device *dev = priv->net_dev->dev.parent; | |
2558 | struct dpni_link_cfg link_cfg = {0}; | |
2559 | int err; | |
2560 | ||
2561 | /* Get the default link options so we don't override other flags */ | |
2562 | err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); | |
2563 | if (err) { | |
2564 | dev_err(dev, "dpni_get_link_cfg() failed\n"); | |
2565 | return err; | |
2566 | } | |
2567 | ||
2568 | /* By default, enable both Rx and Tx pause frames */ | |
2569 | link_cfg.options |= DPNI_LINK_OPT_PAUSE; | |
2570 | link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; | |
2571 | err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); | |
2572 | if (err) { | |
2573 | dev_err(dev, "dpni_set_link_cfg() failed\n"); | |
2574 | return err; | |
2575 | } | |
2576 | ||
2577 | priv->link_state.options = link_cfg.options; | |
2578 | ||
2579 | return 0; | |
2580 | } | |
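/* Flag semantics, as interpreted by the driver's ethtool pause
 * handlers: DPNI_LINK_OPT_PAUSE controls Rx pause, while Tx pause
 * follows PAUSE XOR ASYM_PAUSE; setting PAUSE and clearing ASYM_PAUSE,
 * as done here, therefore enables pause frames in both directions.
 */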
2581 | ||
a690af4f IR |
2582 | static void update_tx_fqids(struct dpaa2_eth_priv *priv) |
2583 | { | |
2584 | struct dpni_queue_id qid = {0}; | |
2585 | struct dpaa2_eth_fq *fq; | |
2586 | struct dpni_queue queue; | |
2587 | int i, j, err; | |
2588 | ||
2589 | /* We only use Tx FQIDs for FQID-based enqueue, so check | |
2590 | * whether the DPNI version supports it before updating the FQIDs. | |
2591 | */ | |
2592 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, | |
2593 | DPNI_ENQUEUE_FQID_VER_MINOR) < 0) | |
2594 | return; | |
2595 | ||
2596 | for (i = 0; i < priv->num_fqs; i++) { | |
2597 | fq = &priv->fq[i]; | |
2598 | if (fq->type != DPAA2_TX_CONF_FQ) | |
2599 | continue; | |
2600 | for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { | |
2601 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, | |
2602 | DPNI_QUEUE_TX, j, fq->flowid, | |
2603 | &queue, &qid); | |
2604 | if (err) | |
2605 | goto out_err; | |
2606 | ||
2607 | fq->tx_fqid[j] = qid.fqid; | |
2608 | if (fq->tx_fqid[j] == 0) | |
2609 | goto out_err; | |
2610 | } | |
2611 | } | |
2612 | ||
2613 | priv->enqueue = dpaa2_eth_enqueue_fq; | |
2614 | ||
2615 | return; | |
2616 | ||
2617 | out_err: | |
2618 | netdev_info(priv->net_dev, | |
2619 | "Error reading Tx FQID, fallback to QDID-based enqueue\n"); | |
2620 | priv->enqueue = dpaa2_eth_enqueue_qd; | |
2621 | } | |
2622 | ||
308f64e7 IR |
2623 | /* Configure the DPNI object this interface is associated with */ |
2624 | static int setup_dpni(struct fsl_mc_device *ls_dev) | |
2625 | { | |
2626 | struct device *dev = &ls_dev->dev; | |
2627 | struct dpaa2_eth_priv *priv; | |
2628 | struct net_device *net_dev; | |
2629 | int err; | |
2630 | ||
2631 | net_dev = dev_get_drvdata(dev); | |
2632 | priv = netdev_priv(net_dev); | |
2633 | ||
2634 | /* get a handle for the DPNI object */ | |
2635 | err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); | |
2636 | if (err) { | |
2637 | dev_err(dev, "dpni_open() failed\n"); | |
2638 | return err; | |
2639 | } | |
2640 | ||
311cffa5 IR |
2641 | /* Check if we can work with this DPNI object */ |
2642 | err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, | |
2643 | &priv->dpni_ver_minor); | |
2644 | if (err) { | |
2645 | dev_err(dev, "dpni_get_api_version() failed\n"); | |
2646 | goto close; | |
2647 | } | |
2648 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { | |
2649 | dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", | |
2650 | priv->dpni_ver_major, priv->dpni_ver_minor, | |
2651 | DPNI_VER_MAJOR, DPNI_VER_MINOR); | |
2652 | err = -ENOTSUPP; | |
2653 | goto close; | |
2654 | } | |
2655 | ||
308f64e7 IR |
2656 | ls_dev->mc_io = priv->mc_io; |
2657 | ls_dev->mc_handle = priv->mc_token; | |
2658 | ||
2659 | err = dpni_reset(priv->mc_io, 0, priv->mc_token); | |
2660 | if (err) { | |
2661 | dev_err(dev, "dpni_reset() failed\n"); | |
f6dda809 | 2662 | goto close; |
6e2387e8 IR |
2663 | } |
2664 | ||
308f64e7 IR |
2665 | err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, |
2666 | &priv->dpni_attrs); | |
2667 | if (err) { | |
2668 | dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); | |
2669 | goto close; | |
2670 | } | |
2671 | ||
2672 | err = set_buffer_layout(priv); | |
2673 | if (err) | |
2674 | goto close; | |
2675 | ||
1fa0f68c ICR |
2676 | set_enqueue_mode(priv); |
2677 | ||
8eb3cef8 IR |
2678 | /* Enable pause frame support */ |
2679 | if (dpaa2_eth_has_pause_support(priv)) { | |
2680 | err = set_pause(priv); | |
2681 | if (err) | |
2682 | goto close; | |
2683 | } | |
2684 | ||
afb90dbb IR |
2685 | priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * |
2686 | dpaa2_eth_fs_count(priv), GFP_KERNEL); | |
2687 | if (!priv->cls_rules) { | |
2688 | err = -ENOMEM; | |
2688 | goto close; | |
2688 | } | |
2689 | ||
6e2387e8 IR |
2690 | return 0; |
2691 | ||
f6dda809 | 2692 | close: |
6e2387e8 | 2693 | dpni_close(priv->mc_io, 0, priv->mc_token); |
f6dda809 | 2694 | |
6e2387e8 IR |
2695 | return err; |
2696 | } | |
2697 | ||
2698 | static void free_dpni(struct dpaa2_eth_priv *priv) | |
2699 | { | |
2700 | int err; | |
2701 | ||
2702 | err = dpni_reset(priv->mc_io, 0, priv->mc_token); | |
2703 | if (err) | |
2704 | netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", | |
2705 | err); | |
2706 | ||
2707 | dpni_close(priv->mc_io, 0, priv->mc_token); | |
2708 | } | |
2709 | ||
2710 | static int setup_rx_flow(struct dpaa2_eth_priv *priv, | |
2711 | struct dpaa2_eth_fq *fq) | |
2712 | { | |
2713 | struct device *dev = priv->net_dev->dev.parent; | |
2714 | struct dpni_queue queue; | |
2715 | struct dpni_queue_id qid; | |
6e2387e8 IR |
2716 | int err; |
2717 | ||
2718 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, | |
2719 | DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); | |
2720 | if (err) { | |
2721 | dev_err(dev, "dpni_get_queue(RX) failed\n"); | |
2722 | return err; | |
2723 | } | |
2724 | ||
2725 | fq->fqid = qid.fqid; | |
2726 | ||
2727 | queue.destination.id = fq->channel->dpcon_id; | |
2728 | queue.destination.type = DPNI_DEST_DPCON; | |
2729 | queue.destination.priority = 1; | |
75c583ab | 2730 | queue.user_context = (u64)(uintptr_t)fq; |
6e2387e8 IR |
2731 | err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, |
2732 | DPNI_QUEUE_RX, 0, fq->flowid, | |
16fa1cf1 | 2733 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, |
6e2387e8 IR |
2734 | &queue); |
2735 | if (err) { | |
2736 | dev_err(dev, "dpni_set_queue(RX) failed\n"); | |
2737 | return err; | |
2738 | } | |
2739 | ||
d678be1d IR |
2740 | /* xdp_rxq setup */ |
2741 | err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, | |
2742 | fq->flowid); | |
2743 | if (err) { | |
2744 | dev_err(dev, "xdp_rxq_info_reg failed\n"); | |
2745 | return err; | |
2746 | } | |
2747 | ||
2748 | err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, | |
2749 | MEM_TYPE_PAGE_ORDER0, NULL); | |
2750 | if (err) { | |
2751 | dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); | |
2752 | return err; | |
2753 | } | |
2754 | ||
6e2387e8 IR |
2755 | return 0; |
2756 | } | |
2757 | ||
2758 | static int setup_tx_flow(struct dpaa2_eth_priv *priv, | |
2759 | struct dpaa2_eth_fq *fq) | |
2760 | { | |
2761 | struct device *dev = priv->net_dev->dev.parent; | |
2762 | struct dpni_queue queue; | |
2763 | struct dpni_queue_id qid; | |
15c87f6b | 2764 | int i, err; |
6e2387e8 | 2765 | |
15c87f6b IR |
2766 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
2767 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, | |
2768 | DPNI_QUEUE_TX, i, fq->flowid, | |
2769 | &queue, &qid); | |
2770 | if (err) { | |
2771 | dev_err(dev, "dpni_get_queue(TX) failed\n"); | |
2772 | return err; | |
2773 | } | |
2774 | fq->tx_fqid[i] = qid.fqid; | |
6e2387e8 IR |
2775 | } |
2776 | ||
15c87f6b | 2777 | /* All Tx queues belonging to the same flowid have the same qdbin */ |
6e2387e8 IR |
2778 | fq->tx_qdbin = qid.qdbin; |
2779 | ||
2780 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, | |
2781 | DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, | |
2782 | &queue, &qid); | |
2783 | if (err) { | |
2784 | dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); | |
2785 | return err; | |
2786 | } | |
2787 | ||
2788 | fq->fqid = qid.fqid; | |
2789 | ||
2790 | queue.destination.id = fq->channel->dpcon_id; | |
2791 | queue.destination.type = DPNI_DEST_DPCON; | |
2792 | queue.destination.priority = 0; | |
75c583ab | 2793 | queue.user_context = (u64)(uintptr_t)fq; |
6e2387e8 IR |
2794 | err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, |
2795 | DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, | |
2796 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, | |
2797 | &queue); | |
2798 | if (err) { | |
2799 | dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); | |
2800 | return err; | |
2801 | } | |
2802 | ||
2803 | return 0; | |
2804 | } | |
2805 | ||
edad8d26 | 2806 | /* Supported header fields for Rx hash distribution key */ |
f76c483a | 2807 | static const struct dpaa2_eth_dist_fields dist_fields[] = { |
6e2387e8 | 2808 | { |
edad8d26 ICR |
2809 | /* L2 header */ |
2810 | .rxnfc_field = RXH_L2DA, | |
2811 | .cls_prot = NET_PROT_ETH, | |
2812 | .cls_field = NH_FLD_ETH_DA, | |
3a1e6b84 | 2813 | .id = DPAA2_ETH_DIST_ETHDST, |
edad8d26 | 2814 | .size = 6, |
afb90dbb IR |
2815 | }, { |
2816 | .cls_prot = NET_PROT_ETH, | |
2817 | .cls_field = NH_FLD_ETH_SA, | |
3a1e6b84 | 2818 | .id = DPAA2_ETH_DIST_ETHSRC, |
afb90dbb IR |
2819 | .size = 6, |
2820 | }, { | |
2821 | /* This is the last ethertype field parsed: | |
2822 | * depending on frame format, it can be the MAC ethertype | |
2823 | * or the VLAN etype. | |
2824 | */ | |
2825 | .cls_prot = NET_PROT_ETH, | |
2826 | .cls_field = NH_FLD_ETH_TYPE, | |
3a1e6b84 | 2827 | .id = DPAA2_ETH_DIST_ETHTYPE, |
afb90dbb | 2828 | .size = 2, |
edad8d26 ICR |
2829 | }, { |
2830 | /* VLAN header */ | |
2831 | .rxnfc_field = RXH_VLAN, | |
2832 | .cls_prot = NET_PROT_VLAN, | |
2833 | .cls_field = NH_FLD_VLAN_TCI, | |
3a1e6b84 | 2834 | .id = DPAA2_ETH_DIST_VLAN, |
edad8d26 ICR |
2835 | .size = 2, |
2836 | }, { | |
6e2387e8 IR |
2837 | /* IP header */ |
2838 | .rxnfc_field = RXH_IP_SRC, | |
2839 | .cls_prot = NET_PROT_IP, | |
2840 | .cls_field = NH_FLD_IP_SRC, | |
3a1e6b84 | 2841 | .id = DPAA2_ETH_DIST_IPSRC, |
6e2387e8 IR |
2842 | .size = 4, |
2843 | }, { | |
2844 | .rxnfc_field = RXH_IP_DST, | |
2845 | .cls_prot = NET_PROT_IP, | |
2846 | .cls_field = NH_FLD_IP_DST, | |
3a1e6b84 | 2847 | .id = DPAA2_ETH_DIST_IPDST, |
6e2387e8 IR |
2848 | .size = 4, |
2849 | }, { | |
2850 | .rxnfc_field = RXH_L3_PROTO, | |
2851 | .cls_prot = NET_PROT_IP, | |
2852 | .cls_field = NH_FLD_IP_PROTO, | |
3a1e6b84 | 2853 | .id = DPAA2_ETH_DIST_IPPROTO, |
6e2387e8 IR |
2854 | .size = 1, |
2855 | }, { | |
2856 | /* Using UDP ports, this is functionally equivalent to raw | |
2857 | * byte pairs from the L4 header. | |
2858 | */ | |
2859 | .rxnfc_field = RXH_L4_B_0_1, | |
2860 | .cls_prot = NET_PROT_UDP, | |
2861 | .cls_field = NH_FLD_UDP_PORT_SRC, | |
3a1e6b84 | 2862 | .id = DPAA2_ETH_DIST_L4SRC, |
6e2387e8 IR |
2863 | .size = 2, |
2864 | }, { | |
2865 | .rxnfc_field = RXH_L4_B_2_3, | |
2866 | .cls_prot = NET_PROT_UDP, | |
2867 | .cls_field = NH_FLD_UDP_PORT_DST, | |
3a1e6b84 | 2868 | .id = DPAA2_ETH_DIST_L4DST, |
6e2387e8 IR |
2869 | .size = 2, |
2870 | }, | |
2871 | }; | |
2872 | ||
df85aeb9 IR |
2873 | /* Configure the Rx hash key using the legacy API */ |
2874 | static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) | |
2875 | { | |
2876 | struct device *dev = priv->net_dev->dev.parent; | |
2877 | struct dpni_rx_tc_dist_cfg dist_cfg; | |
2878 | int err; | |
2879 | ||
2880 | memset(&dist_cfg, 0, sizeof(dist_cfg)); | |
2881 | ||
2882 | dist_cfg.key_cfg_iova = key; | |
2883 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); | |
2884 | dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; | |
2885 | ||
2886 | err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); | |
2887 | if (err) | |
2888 | dev_err(dev, "dpni_set_rx_tc_dist failed\n"); | |
2889 | ||
2890 | return err; | |
2891 | } | |
2892 | ||
2893 | /* Configure the Rx hash key using the new API */ | |
2894 | static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) | |
2895 | { | |
2896 | struct device *dev = priv->net_dev->dev.parent; | |
2897 | struct dpni_rx_dist_cfg dist_cfg; | |
2898 | int err; | |
2899 | ||
2900 | memset(&dist_cfg, 0, sizeof(dist_cfg)); | |
2901 | ||
2902 | dist_cfg.key_cfg_iova = key; | |
2903 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); | |
2904 | dist_cfg.enable = 1; | |
2905 | ||
2906 | err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); | |
2907 | if (err) | |
2908 | dev_err(dev, "dpni_set_rx_hash_dist failed\n"); | |
2909 | ||
2910 | return err; | |
2911 | } | |
2912 | ||
4aaaf9b9 IR |
2913 | /* Configure the Rx flow classification key */ |
2914 | static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) | |
2915 | { | |
2916 | struct device *dev = priv->net_dev->dev.parent; | |
2917 | struct dpni_rx_dist_cfg dist_cfg; | |
2918 | int err; | |
2919 | ||
2920 | memset(&dist_cfg, 0, sizeof(dist_cfg)); | |
2921 | ||
2922 | dist_cfg.key_cfg_iova = key; | |
2923 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); | |
2924 | dist_cfg.enable = 1; | |
2925 | ||
2926 | err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); | |
2927 | if (err) | |
2928 | dev_err(dev, "dpni_set_rx_fs_dist failed\n"); | |
2929 | ||
2930 | return err; | |
2931 | } | |
2932 | ||
afb90dbb | 2933 | /* Size of the Rx flow classification key */ |
2d680237 | 2934 | int dpaa2_eth_cls_key_size(u64 fields) |
afb90dbb IR |
2935 | { |
2936 | int i, size = 0; | |
2937 | ||
2d680237 ICR |
2938 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
2939 | if (!(fields & dist_fields[i].id)) | |
2940 | continue; | |
afb90dbb | 2941 | size += dist_fields[i].size; |
2d680237 | 2942 | } |
afb90dbb IR |
2943 | |
2944 | return size; | |
2945 | } | |
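/* Worked example: for fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST,
 * only the two 4 byte IP address entries of dist_fields[] match, so
 * dpaa2_eth_cls_key_size() returns 8.
 */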
2946 | ||
2947 | /* Offset of header field in Rx classification key */ | |
2948 | int dpaa2_eth_cls_fld_off(int prot, int field) | |
2949 | { | |
2950 | int i, off = 0; | |
2951 | ||
2952 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { | |
2953 | if (dist_fields[i].cls_prot == prot && | |
2954 | dist_fields[i].cls_field == field) | |
2955 | return off; | |
2956 | off += dist_fields[i].size; | |
2957 | } | |
2958 | ||
2959 | WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); | |
2960 | return 0; | |
2961 | } | |
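/* Worked example: in dist_fields[], NH_FLD_IP_SRC is preceded by
 * ETH_DA (6), ETH_SA (6), ETH_TYPE (2) and VLAN_TCI (2), so
 * dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC) returns 16; the
 * offset is relative to the full key layout, independent of which
 * fields a particular rule actually uses.
 */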
2962 | ||
2d680237 ICR |
2963 | /* Prune unused fields from the classification rule. |
2964 | * Used when masking is not supported | |
2965 | */ | |
2966 | void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) | |
2967 | { | |
2968 | int off = 0, new_off = 0; | |
2969 | int i, size; | |
2970 | ||
2971 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { | |
2972 | size = dist_fields[i].size; | |
2973 | if (dist_fields[i].id & fields) { | |
2974 | memcpy(key_mem + new_off, key_mem + off, size); | |
2975 | new_off += size; | |
2976 | } | |
2977 | off += size; | |
2978 | } | |
2979 | } | |
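/* Worked example: for fields = DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST,
 * the loop copies the IP source field from key offset 16 to offset 0
 * and the IP destination field from offset 20 to offset 4, compacting
 * the rule to the reduced key layout used when masking is unsupported.
 */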
2980 | ||
4aaaf9b9 | 2981 | /* Set Rx distribution (hash or flow classification) key |
6e2387e8 IR |
2982 | * flags is a combination of RXH_ bits |
2983 | */ | |
3233c151 IC |
2984 | static int dpaa2_eth_set_dist_key(struct net_device *net_dev, |
2985 | enum dpaa2_eth_rx_dist type, u64 flags) | |
6e2387e8 IR |
2986 | { |
2987 | struct device *dev = net_dev->dev.parent; | |
2988 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); | |
2989 | struct dpkg_profile_cfg cls_cfg; | |
edad8d26 | 2990 | u32 rx_hash_fields = 0; |
df85aeb9 | 2991 | dma_addr_t key_iova; |
6e2387e8 IR |
2992 | u8 *dma_mem; |
2993 | int i; | |
2994 | int err = 0; | |
2995 | ||
6e2387e8 IR |
2996 | memset(&cls_cfg, 0, sizeof(cls_cfg)); |
2997 | ||
f76c483a | 2998 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
6e2387e8 IR |
2999 | struct dpkg_extract *key = |
3000 | &cls_cfg.extracts[cls_cfg.num_extracts]; | |
3001 | ||
2d680237 ICR |
3002 | /* For both Rx hashing and classification keys |
3003 | * we set only the selected fields. | |
4aaaf9b9 | 3004 | */ |
2d680237 ICR |
3005 | if (!(flags & dist_fields[i].id)) |
3006 | continue; | |
3007 | if (type == DPAA2_ETH_RX_DIST_HASH) | |
4aaaf9b9 | 3008 | rx_hash_fields |= dist_fields[i].rxnfc_field; |
6e2387e8 IR |
3009 | |
3010 | if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { | |
3011 | dev_err(dev, "error adding key extraction rule, too many rules?\n"); | |
3012 | return -E2BIG; | |
3013 | } | |
3014 | ||
3015 | key->type = DPKG_EXTRACT_FROM_HDR; | |
f76c483a | 3016 | key->extract.from_hdr.prot = dist_fields[i].cls_prot; |
6e2387e8 | 3017 | key->extract.from_hdr.type = DPKG_FULL_FIELD; |
f76c483a | 3018 | key->extract.from_hdr.field = dist_fields[i].cls_field; |
6e2387e8 IR |
3019 | cls_cfg.num_extracts++; |
3020 | } | |
3021 | ||
e40ef9e4 | 3022 | dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
6e2387e8 IR |
3023 | if (!dma_mem) |
3024 | return -ENOMEM; | |
3025 | ||
3026 | err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); | |
3027 | if (err) { | |
77160af3 | 3028 | dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); |
df85aeb9 | 3029 | goto free_key; |
6e2387e8 IR |
3030 | } |
3031 | ||
6e2387e8 | 3032 | /* Prepare for setting the rx dist */ |
df85aeb9 IR |
3033 | key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, |
3034 | DMA_TO_DEVICE); | |
3035 | if (dma_mapping_error(dev, key_iova)) { | |
6e2387e8 IR |
3036 | dev_err(dev, "DMA mapping failed\n"); |
3037 | err = -ENOMEM; | |
df85aeb9 | 3038 | goto free_key; |
6e2387e8 IR |
3039 | } |
3040 | ||
4aaaf9b9 IR |
3041 | if (type == DPAA2_ETH_RX_DIST_HASH) { |
3042 | if (dpaa2_eth_has_legacy_dist(priv)) | |
3043 | err = config_legacy_hash_key(priv, key_iova); | |
3044 | else | |
3045 | err = config_hash_key(priv, key_iova); | |
3046 | } else { | |
3047 | err = config_cls_key(priv, key_iova); | |
3048 | } | |
df85aeb9 IR |
3049 | |
3050 | dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, | |
3051 | DMA_TO_DEVICE); | |
4aaaf9b9 | 3052 | if (!err && type == DPAA2_ETH_RX_DIST_HASH) |
edad8d26 | 3053 | priv->rx_hash_fields = rx_hash_fields; |
6e2387e8 | 3054 | |
df85aeb9 | 3055 | free_key: |
6e2387e8 IR |
3056 | kfree(dma_mem); |
3057 | return err; | |
3058 | } | |
3059 | ||
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

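/* Illustration (not part of the driver): dpaa2_eth_set_hash() is typically
 * reached from ethtool's RXNFC path; the RXH_* bits below are the standard
 * flags from <linux/ethtool.h>. A minimal caller distributing flows on the
 * IP source and destination addresses could look like this:
 */
#if 0	/* example only, not compiled */
static int example_hash_on_ip_addrs(struct net_device *net_dev)
{
	return dpaa2_eth_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST);
}
#endif
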
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

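/* Illustration (not part of the driver): a narrower classification key
 * than DPAA2_ETH_DIST_ALL can be requested through the same call, assuming
 * the DPAA2_ETH_DIST_* flag definitions from dpaa2-eth.h:
 */
#if 0	/* example only, not compiled */
static int example_cls_on_ip_and_ports(struct net_device *net_dev)
{
	return dpaa2_eth_set_cls(net_dev, DPAA2_ETH_DIST_IPSRC |
					  DPAA2_ETH_DIST_IPDST |
					  DPAA2_ETH_DIST_L4SRC |
					  DPAA2_ETH_DIST_L4DST);
}
#endif
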
static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = setup_tx_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

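/* Illustration (not part of the driver): dpni_set_pools() can seed the
 * DPNI with more than one buffer pool, assuming the MC firmware supports
 * it; only the struct fields already used above are relied on here, and
 * the pool ids and the 512B buffer size are purely hypothetical:
 */
#if 0	/* example only, not compiled */
static int example_two_pool_setup(struct dpaa2_eth_priv *priv,
				  int small_dpbp_id, int large_dpbp_id)
{
	struct dpni_pools_cfg pools_params = { 0 };

	pools_params.num_dpbp = 2;
	pools_params.pools[0].dpbp_id = small_dpbp_id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = 512;
	pools_params.pools[1].dpbp_id = large_dpbp_id;
	pools_params.pools[1].backup_pool = 1;	/* flagged as the backup */
	pools_params.pools[1].buffer_size = priv->rx_buf_size;

	return dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
}
#endif
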
/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

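/* Illustration (not part of the driver): the per-channel store allocated
 * above is the ring that dequeue responses land in. The consumer side
 * (cf. the frame-consuming loop earlier in this file) walks it with
 * dpaa2_io_store_next() until the entry flagged as the last one:
 */
#if 0	/* example only, not compiled */
static void example_drain_store(struct dpaa2_eth_channel *ch)
{
	struct dpaa2_dq *dq;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (!dq)
			continue;	/* entry not yet delivered by HW */
		/* process the frame descriptor carried by @dq here */
	} while (!is_last);
}
#endif
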
static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}

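/* Address selection in set_mac_addr(), summarized:
 *   - firmware (port) address set     -> use it, sync the DPNI if needed
 *   - else DPNI attribute address set -> use it (NET_ADDR_PERM by default)
 *   - else                            -> generate a random one, push it to
 *                                        the DPNI and mark it permanent
 */
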
static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

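/* Note: hw_features mirrors features above, so every offload advertised
 * at probe time remains user-toggleable via ethtool -K.
 */
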
static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_connect(mac);
	if (err) {
		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
		kfree(mac);
		return err;
	}
	priv->mac = mac;

	return 0;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (!priv->mac)
		return;

	dpaa2_mac_disconnect(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

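/* priv->mac doubles as the "connected to a DPMAC" flag: the endpoint-change
 * interrupt below and dpaa2_eth_remove() test it (under rtnl_lock) to decide
 * between connecting and disconnecting, and dpaa2_eth_disconnect_mac()
 * clears it so that repeated calls are harmless.
 */
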
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		set_mac_addr(netdev_priv(net_dev));
		update_tx_fqids(priv);

		rtnl_lock();
		if (priv->mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

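/* Note: the DPNI interrupt is requested with a NULL primary handler and
 * IRQF_ONESHOT, so all servicing happens in dpni_irq0_handler_thread().
 * Thread context is needed because querying and reacting to the irq
 * status involves MC commands, which may sleep.
 */
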
static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

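/* The weight constraint noted in add_ch_napi() appears to stem from the
 * Rx poll routine pulling frames in DPAA2_ETH_STORE_SIZE-sized batches:
 * a NAPI weight that is a multiple of the store size lets the budget be
 * consumed in whole batches.
 */
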
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

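/* Note the symmetry between the probe error path above and
 * dpaa2_eth_remove() below: resources are released in the reverse order
 * of their acquisition, and each err_* label unwinds everything set up
 * before the failing step.
 */
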
static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif
	rtnl_lock();
	dpaa2_eth_disconnect_mac(priv);
	rtnl_unlock();

	unregister_netdev(net_dev);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);