// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause);
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
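/* Usage sketch (illustrative, not part of the driver): queue_mode is a
 * read-only (0444) module parameter, so the queue distribution mode is
 * chosen at load time, e.g.:
 *
 *	modprobe mvpp2 queue_mode=0
 *
 * or, when the driver is built in, via the kernel command line as
 * mvpp2.queue_mode=0. It cannot be changed at runtime through sysfs.
 */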
/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}
/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
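/* Illustrative sketch (not called by the driver): the rule described in the
 * comment block above. A "global" register such as MVPP2_BM_PHY_ALLOC_REG
 * must be read through the same thread window as its per-thread companion
 * MVPP2_BM_VIRT_ALLOC_REG, so both halves refer to the same buffer.
 */
static inline dma_addr_t mvpp2_example_bm_paired_read(struct mvpp2 *priv,
						      int pool_id)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
	dma_addr_t dma_addr;

	/* Both reads go through swth_base[thread]: the VIRT_ALLOC value is
	 * only meaningful relative to the PHY_ALLOC read on the same window.
	 */
	dma_addr = mvpp2_thread_read(priv, thread,
				     MVPP2_BM_PHY_ALLOC_REG(pool_id));
	mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
	put_cpu();

	return dma_addr;
}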
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}
static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;

	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of maximum RXQ */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
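/* Worked example (illustrative): on a 6-CPU system,
 * (6 + 3) & ~0x3 == 9 & ~0x3 == 8, i.e. the CPU count rounded up to the
 * next multiple of 4, which is then clamped to MVPP2_PORT_MAX_RXQ.
 */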
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
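/* Worked example (illustrative): each egress port owns MVPP2_MAX_TXQ
 * consecutive physical TXQs, so assuming MVPP2_MAX_TXQ == 8, logical txq 2
 * of port 1 maps to physical TXQ (MVPP2_MAX_TCONT + 1) * 8 + 2.
 */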
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;

	return 0;
}
static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
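/* Note (illustrative): because the physical address is written to
 * MVPP2_BM_VIRT_RLS_REG here, the "cookie" read back from an RX descriptor
 * via mvpp2_rxdesc_cookie_get() above is a physical address, which can be
 * turned back into a kernel pointer with phys_to_virt(), exactly as
 * mvpp2_bm_bufs_free() does.
 */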
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *p;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
					     mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i].port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
					     mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i,
					port->priv->bm_pools[i + port->nrxqs].id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 128.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}
static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current thread's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}
/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}
static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
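/* Illustrative sketch (not called by the driver): reading one indexed
 * counter, here the BM-drop counter of a given physical rxq. This mirrors
 * how mvpp2_read_stats() below walks the rxq counter table.
 */
static inline u32 mvpp2_example_rxq_bm_drops(struct mvpp2 *priv, u32 prxq)
{
	/* The index register selects which rxq the counter read refers to */
	return mvpp2_read_index(priv, prxq, MVPP2_RX_PKTS_BM_DROP_CTR);
}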
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}
}
static void mvpp2_read_stats(struct mvpp2_port *port)
{
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}
static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}
static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}
static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GBASER:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
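/* Note (illustrative): MVPP2_QUEUE_NEXT_DESC() wraps the index at the end
 * of the ring, so the prefetch above always targets the descriptor that
 * the next call will return, warming the cache one descriptor ahead of
 * the consumer.
 */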
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		unsigned int thread =
			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
		u32 val = mvpp2_read_relaxed(port->priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(thread));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}

	return 0;
}
/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2 *priv = port->priv;
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
1966 /* Check if there are enough reserved descriptors for transmission.
1967 * If not, request chunk of reserved descriptors and check again.
1969 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port
*port
,
1970 struct mvpp2_tx_queue
*txq
,
1971 struct mvpp2_txq_pcpu
*txq_pcpu
,
1974 int req
, desc_count
;
1975 unsigned int thread
;
1977 if (txq_pcpu
->reserved_num
>= num
)
1980 /* Not enough descriptors reserved! Update the reserved descriptor
1981 * count and check again.
1985 /* Compute total of used descriptors */
1986 for (thread
= 0; thread
< port
->priv
->nthreads
; thread
++) {
1987 struct mvpp2_txq_pcpu
*txq_pcpu_aux
;
1989 txq_pcpu_aux
= per_cpu_ptr(txq
->pcpu
, thread
);
1990 desc_count
+= txq_pcpu_aux
->count
;
1991 desc_count
+= txq_pcpu_aux
->reserved_num
;
1994 req
= max(MVPP2_CPU_DESC_CHUNK
, num
- txq_pcpu
->reserved_num
);
1998 (txq
->size
- (MVPP2_MAX_THREADS
* MVPP2_CPU_DESC_CHUNK
)))
2001 txq_pcpu
->reserved_num
+= mvpp2_txq_alloc_reserved_desc(port
, txq
, req
);
2003 /* OK, the descriptor could have been updated: check again. */
2004 if (txq_pcpu
->reserved_num
< num
)
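
/* Reservation arithmetic, by example: assuming a chunk size of 64
 * (MVPP2_CPU_DESC_CHUNK), a request for 3 descriptors when only 1 is
 * reserved yields req = max(64, 3 - 1) = 64, so descriptors are always
 * claimed from the shared ring in full chunks. The guard against
 * (txq->size - MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK) keeps every
 * other thread able to grab at least one chunk of its own.
 */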
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
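
/* Worked example: for a TCP-over-IPv4 frame with a 14-byte Ethernet
 * header and a 20-byte IP header (ihl = 5), the caller passes
 * l3_offs = 14, l3_proto = htons(ETH_P_IP), ip_hdr_len = 5 and
 * l4_proto = IPPROTO_TCP. Both "disable" bits end up cleared, so the
 * hardware generates the IPv4 header checksum and the TCP checksum.
 */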
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_thread_read_relaxed(port->priv,
					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_thread_read(port->priv,
				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

/* For some reason in the LSP this is done on each CPU. Why ? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
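
/* Both lookups decode a cause bitmap into the highest-numbered pending
 * queue: fls() returns the 1-based position of the most significant set
 * bit, so a cause of 0b0100 selects queue 2. The callers clear that bit
 * and loop, draining queues from the highest index down.
 */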
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  unsigned int thread)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       unsigned int thread, struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
					     &aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(thread));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma         = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	unsigned int thread;
	int desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma         = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);

	/* Set Tx descriptors queue starting address and size */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	u32 val;

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_thread_read(port->priv, thread,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err, cpu;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;

		/* Assign this queue to a CPU */
		cpu = queue % num_present_cpus();
		netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   phy_interface_mode_is_8023z(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		goto handled;
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct net_device *dev;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int tx_todo, cause;

	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
	dev = port_pcpu->dev;

	if (!netif_running(dev))
		return HRTIMER_NORESTART;

	port_pcpu->timer_scheduled = false;
	port = netdev_priv(dev);

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause,
				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));

	/* Set the timer in case not all the packets were processed */
	if (tx_todo && !port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		hrtimer_forward_now(&port_pcpu->tx_done_timer,
				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
	char *err_str = NULL;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		err_str = "crc";
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		err_str = "overrun";
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		err_str = "resource";
		break;
	}
	if (err_str && net_ratelimit())
		netdev_err(port->dev,
			   "bad rx status %08x (%s error), size=%zu\n",
			   status, err_str, sz);
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
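
/* The Rx descriptor already tells us whether the hardware validated the
 * L4 checksum: an IPv4 frame without header errors (or any IPv6 frame)
 * carrying TCP or UDP with MVPP2_RXD_L4_CSUM_OK set is marked
 * CHECKSUM_UNNECESSARY, so the stack skips its own verification;
 * everything else is handed up as CHECKSUM_NONE.
 */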
/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
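
/* Example flow: a CHECKSUM_PARTIAL TCP/IPv4 skb yields ip_hdr_len = ihl
 * (in 32-bit words) and l4_proto = IPPROTO_TCP, which
 * mvpp2_txq_desc_csum() turns into "generate IPv4 + TCP checksums".
 * Anything that is neither IPv4 nor IPv6, or not CHECKSUM_PARTIAL,
 * falls back to MVPP2_TXD_L4_CSUM_NOT so the hardware leaves the frame
 * untouched.
 */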
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
			goto err_drop_frame;

		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
					rx_bytes + MVPP2_MH_SIZE,
					DMA_FROM_DEVICE);
		prefetch(data);

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single_attrs(dev->dev.parent, dma_addr,
				       bm_pool->buf_size, DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
		continue;

err_drop_frame:
		dev->stats.rx_errors++;
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* last descriptor in the TCP packet */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}

static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
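
/* Segmentation walk, by example: a 9000-byte payload with gso_size 1400
 * is emitted as ceil(9000 / 1400) = 7 segments. Each outer-loop
 * iteration builds one replicated protocol header in the per-CPU
 * tso_headers area and queues it, then the inner loop queues the
 * payload slices for that segment; 'descs' counts every descriptor so
 * the release path can unwind them all on a DMA mapping failure.
 */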
/* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	unsigned long flags = 0;
	unsigned int thread;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->tx_lock[thread], flags);

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);

		if (!port_pcpu->timer_scheduled) {
			port_pcpu->timer_scheduled = true;
			hrtimer_start(&port_pcpu->tx_done_timer,
				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
				      HRTIMER_MODE_REL_PINNED_SOFT);
		}
	}

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->tx_lock[thread], flags);

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;

		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
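
/* Cause decoding example: a cause_rx_tx of 0x00010005 means "Tx queue 0
 * completed" (bit 16) plus "Rx queues 0 and 2 have packets" (bits 0 and
 * 2). The Tx part is shifted down by
 * MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET before mvpp2_tx_done(), and any
 * Rx bits left over when the budget runs out are parked in
 * pending_cause_rx for the next poll.
 */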
static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	/* Set the GMAC & XLG MAC in reset */
	mvpp2_mac_reset_assert(port);

	/* Set the MPCS and XPCS in reset */
	mvpp22_pcs_reset_assert(port);

	/* comphy reconfiguration */
	mvpp22_comphy_init(port);

	/* gop reconfiguration */
	mvpp22_gop_init(port);

	mvpp22_pcs_reset_deassert(port);

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mvpp2_is_xlg(port->phy_interface))
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all threads */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		phylink_start(port->phylink);
	} else {
		/* Phylink isn't used as of now for ACPI, so the MAC has to be
		 * configured manually when the interface is started. This will
		 * be removed as soon as the phylink ACPI support lands in.
		 */
		struct phylink_link_state state = {
			.interface = port->phy_interface,
		};
		mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
		mvpp2_mac_link_up(&port->phylink_config, NULL,
				  MLO_AN_INBAND, port->phy_interface,
				  SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
	}

	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	/* Disable interrupts on all threads */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
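
/* Rounding example: a requested rx_pending of 1000 is not a multiple of
 * 16, so it is rounded up to ALIGN(1000, 16) = 1008; a tx_pending of
 * 1000 rounds to ALIGN(1000, 32) = 1024. Both adjusted values are
 * reported back through the ethtool_ringparam fields rather than
 * rejected outright.
 */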
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
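
/* Address assembly example: with MVPP2_SRC_ADDR_HIGH reading 0x00010203,
 * MVPP2_SRC_ADDR_MIDDLE ending in 0x04 and the GMAC control register
 * holding 0x05 in its SA_LOW field, the reconstructed station address
 * is 00:01:02:03:04:05 - high register bytes first, most significant
 * byte in addr[0].
 */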
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!qv->mask) {
				err = -ENOMEM;
				goto err;
			}

			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
		}

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			unsigned int cpu;

			for_each_present_cpu(cpu) {
				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
				    qv->sw_thread_id)
					cpumask_set_cpu(cpu, qv->mask);
			}

			irq_set_affinity_hint(qv->irq, qv->mask);
		}
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		free_irq(qv->irq, qv);
	}

	return err;
}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}

static bool mvpp22_rss_is_supported(void)
{
	return queue_mode == MVPP2_QDIST_MULTI_MODE;
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Phylink isn't supported yet in ACPI mode */
	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version == MVPP22 && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* In default link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->link_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all threads */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}
static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}

static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}

/* Shut down all the ports, reconfigure the pools as percpu or shared,
 * then bring up again all ports.
 */
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
	int numbufs = MVPP2_BM_POOLS_NUM, i;
	struct mvpp2_port *port = NULL;
	bool status[MVPP2_MAX_PORTS];

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		status[i] = netif_running(port->dev);
		if (status[i])
			mvpp2_stop(port->dev);
	}

	/* nrxqs is the same for all ports */
	if (priv->percpu_pools)
		numbufs = port->nrxqs * 2;

	for (i = 0; i < numbufs; i++)
		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);

	devm_kfree(port->dev->dev.parent, priv->bm_pools);
	priv->percpu_pools = percpu;
	mvpp2_bm_init(port->dev->dev.parent, priv);

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		mvpp2_swf_bm_pool_init(port);
		if (status[i])
			mvpp2_open(port->dev);
	}

	return 0;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	struct mvpp2 *priv = port->priv;
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
		if (priv->percpu_pools) {
			netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
			mvpp2_bm_switch_buffers(priv, false);
		}
	} else {
		bool jumbo = false;
		int i;

		for (i = 0; i < priv->port_count; i++)
			if (priv->port_list[i] != port &&
			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
			    MVPP2_BM_LONG_PKT_SIZE) {
				jumbo = true;
				break;
			}

		/* No port is using jumbo frames */
		if (!jumbo) {
			dev_info(port->dev->dev.parent,
				 "all ports have a low MTU, switching to per-cpu buffers");
			mvpp2_bm_switch_buffers(priv, true);
		}
	}

	if (running)
		mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");
		/* Reconfigure BM to the original MTU */
		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}

	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}

static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}

static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}

static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (changed & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_port_rss_enable(port);
		else
			mvpp22_port_rss_disable(port);
	}

	return 0;
}
/* Ethtool methods */

static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs       = port->tx_time_coal;
	return 0;
}
4094 static void mvpp2_ethtool_get_drvinfo(struct net_device
*dev
,
4095 struct ethtool_drvinfo
*drvinfo
)
4097 strlcpy(drvinfo
->driver
, MVPP2_DRIVER_NAME
,
4098 sizeof(drvinfo
->driver
));
4099 strlcpy(drvinfo
->version
, MVPP2_DRIVER_VERSION
,
4100 sizeof(drvinfo
->version
));
4101 strlcpy(drvinfo
->bus_info
, dev_name(&dev
->dev
),
4102 sizeof(drvinfo
->bus_info
));
4105 static void mvpp2_ethtool_get_ringparam(struct net_device
*dev
,
4106 struct ethtool_ringparam
*ring
)
4108 struct mvpp2_port
*port
= netdev_priv(dev
);
4110 ring
->rx_max_pending
= MVPP2_MAX_RXD_MAX
;
4111 ring
->tx_max_pending
= MVPP2_MAX_TXD_MAX
;
4112 ring
->rx_pending
= port
->rx_ring_size
;
4113 ring
->tx_pending
= port
->tx_ring_size
;
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0, i, loc = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = port->n_rfs_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = mvpp2_ethtool_cls_rule_get(port, info);
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
			if (port->rfs_rules[i])
				rules[loc++] = i;
		}
		break;
	default:
		return -ENOTSUPP;
	}

	return ret;
}
static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = mvpp2_ethtool_cls_rule_ins(port, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = mvpp2_ethtool_cls_rule_del(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				  u8 *hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (indir)
		ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	return ret;
}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				  const u8 *key, const u8 hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir)
		ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);

	return ret;
}

static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
					  u8 *key, u8 *hfunc, u32 rss_context)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;
	if (rss_context >= MVPP22_N_RSS_TABLES)
		return -EINVAL;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	if (indir)
		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);

	return ret;
}

static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
					  const u32 *indir, const u8 *key,
					  const u8 hfunc, u32 *rss_context,
					  bool delete)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (delete)
		return mvpp22_port_rss_ctx_delete(port, *rss_context);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = mvpp22_port_rss_ctx_create(port, rss_context);
		if (ret)
			return ret;
	}

	return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
}
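/* The RSS code above only exposes an indirection table with a fixed CRC32
 * hash function; there is no programmable hash key, hence the -EOPNOTSUPP
 * whenever 'key' is set. Additional RSS contexts can be allocated from
 * userspace with, e.g.:
 *
 *   ethtool -X eth0 context new equal 4
 */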
/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset		= mvpp2_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
	.get_rxfh		= mvpp2_ethtool_get_rxfh,
	.set_rxfh		= mvpp2_ethtool_set_rxfh,
	.get_rxfh_context	= mvpp2_ethtool_get_rxfh_context,
	.set_rxfh_context	= mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_queue_vector *v;
	int i, ret;

	switch (queue_mode) {
	case MVPP2_QDIST_SINGLE_MODE:
		port->nqvecs = priv->nthreads + 1;
		break;
	case MVPP2_QDIST_MULTI_MODE:
		port->nqvecs = priv->nthreads;
		break;
	}

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		if (port->flags & MVPP2_F_DT_COMPAT)
			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
		else
			snprintf(irqname, sizeof(irqname), "hif%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i;
			v->nrxqs = 1;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;

			if (port->flags & MVPP2_F_DT_COMPAT)
				strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
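/* Resulting vector layout:
 * - MVPP2_QDIST_MULTI_MODE: one private vector per software thread, each
 *   owning a single Rx queue ("hifX", or "tx-cpuX" on old DTs).
 * - MVPP2_QDIST_SINGLE_MODE: one private vector per thread for Tx plus a
 *   final shared vector that owns all Rx queues ("rx-shared" on old DTs).
 */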
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
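/* On PPv2.2 the Rx queue group registers are indirect: the port/group is
 * first latched through MVPP22_ISR_RXQ_GROUP_INDEX_REG, then the subgroup
 * (first Rx queue + queue count) is written through
 * MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, one pair of writes per vector.
 */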
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;
	int queue, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for (thread = 0; thread < priv->nthreads; thread++) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
			txq_pcpu->thread = thread;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	if (mvpp22_rss_is_supported())
		mvpp22_port_rss_init(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	/* Clear all port stats */
	mvpp2_read_stats(port);
	memset(port->ethtool_stats, 0,
	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
					   unsigned long *flags)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
			  "tx-cpu3" };
	int i;

	for (i = 0; i < 5; i++)
		if (of_property_match_string(port_node, "interrupt-names",
					     irqs[i]) < 0)
			return false;

	*flags |= MVPP2_F_DT_COMPAT;
	return true;
}

/* Checks if the port dt description has the required Tx interrupts:
 * - PPv2.1: there are no such interrupts.
 * - PPv2.2:
 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
 *   - The new ones have: "hifX" with X in [0..8]
 *
 * All those variants are supported to keep the backward compatibility.
 */
static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
				struct device_node *port_node,
				unsigned long *flags)
{
	char name[5];
	int i;

	/* ACPI */
	if (!port_node)
		return true;

	if (priv->hw_version == MVPP21)
		return false;

	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
		return true;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		snprintf(name, 5, "hif%d", i);
		if (of_property_match_string(port_node, "interrupt-names",
					     name) < 0)
			return false;
	}

	return true;
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}
static void mvpp2_phylink_validate(struct phylink_config *config,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
					       phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Invalid combinations */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_XAUI:
		if (port->gop_id != 0)
			goto empty_set;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
			goto empty_set;
		break;
	default:
		break;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_NA:
		if (port->gop_id == 0) {
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 10000baseKR_Full);
		}
		if (state->interface != PHY_INTERFACE_MODE_NA)
			break;
		/* Fall-through */
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		if (state->interface != PHY_INTERFACE_MODE_NA)
			break;
		/* Fall-through */
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		if (port->comphy ||
		    state->interface != PHY_INTERFACE_MODE_2500BASEX) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
		}
		if (port->comphy ||
		    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
			phylink_set(mask, 2500baseT_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		break;
	default:
		goto empty_set;
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	phylink_helper_basex_speed(state);
	return;

empty_set:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
				     struct phylink_link_state *state)
{
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	state->pause = 0;
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}

static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
				     struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}

static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
					    struct phylink_link_state *state)
{
	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
					       phylink_config);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);

		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
			mvpp22_xlg_pcs_get_state(port, state);
			return;
		}
	}

	mvpp2_gmac_pcs_get_state(port, state);
}

static void mvpp2_mac_an_restart(struct phylink_config *config)
{
	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
					       phylink_config);
	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 old_ctrl0, ctrl0;
	u32 old_ctrl4, ctrl4;

	old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);

	ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;

	if (state->pause & MLO_PAUSE_TX)
		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
	else
		ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;

	if (state->pause & MLO_PAUSE_RX)
		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
	else
		ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

	ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
		   MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;

	if (old_ctrl0 != ctrl0)
		writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
	if (old_ctrl4 != ctrl4)
		writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);

	if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
		while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
			 MVPP22_XLG_CTRL0_MAC_RESET_DIS))
			continue;
	}
}
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 old_an, an;
	u32 old_ctrl0, ctrl0;
	u32 old_ctrl2, ctrl2;
	u32 old_ctrl4, ctrl4;

	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
		MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
		MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
		   MVPP2_GMAC_PCS_ENABLE_MASK);

	/* Configure port type */
	if (phy_interface_mode_is_8023z(state->interface)) {
		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	/* Configure advertisement bits */
	if (phylink_test(state->advertising, Pause))
		an |= MVPP2_GMAC_FC_ADV_EN;
	if (phylink_test(state->advertising, Asym_Pause))
		an |= MVPP2_GMAC_FC_ADV_ASM_EN;

	/* Configure negotiation style */
	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed - no in-band AN, nothing to do, leave the
		 * configured speed, duplex and flow control as-is.
		 */
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII in-band mode receives the speed and duplex from
		 * the PHY. Flow control information is not received. */
		an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
			MVPP2_GMAC_FORCE_LINK_PASS |
			MVPP2_GMAC_CONFIG_MII_SPEED |
			MVPP2_GMAC_CONFIG_GMII_SPEED |
			MVPP2_GMAC_CONFIG_FULL_DUPLEX);
		an |= MVPP2_GMAC_IN_BAND_AUTONEG |
		      MVPP2_GMAC_AN_SPEED_EN |
		      MVPP2_GMAC_AN_DUPLEX_EN;
	} else if (phy_interface_mode_is_8023z(state->interface)) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
		an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
			MVPP2_GMAC_FORCE_LINK_PASS |
			MVPP2_GMAC_CONFIG_MII_SPEED |
			MVPP2_GMAC_CONFIG_GMII_SPEED |
			MVPP2_GMAC_CONFIG_FULL_DUPLEX);
		an |= MVPP2_GMAC_IN_BAND_AUTONEG |
		      MVPP2_GMAC_CONFIG_GMII_SPEED |
		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;

		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
			an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
	}

	/* Some fields of the auto-negotiation register require the port to be
	 * down when their value is updated.
	 */
#define MVPP2_GMAC_AN_PORT_DOWN_MASK	\
	(MVPP2_GMAC_IN_BAND_AUTONEG | \
	 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
	 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
	 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
	 MVPP2_GMAC_AN_DUPLEX_EN)

	if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
	    (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
	    (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
		/* Force link down */
		old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

		/* Set the GMAC in a reset state - do this in a way that
		 * ensures we clear it below.
		 */
		old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
		writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	}

	if (old_ctrl0 != ctrl0)
		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	if (old_ctrl2 != ctrl2)
		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	if (old_ctrl4 != ctrl4)
		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
	if (old_an != an)
		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		       MVPP2_GMAC_PORT_RESET_MASK)
			continue;
	}
}
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state)
{
	struct net_device *dev = to_net_dev(config->dev);
	struct mvpp2_port *port = netdev_priv(dev);
	bool change_interface = port->phy_interface != state->interface;

	/* Check for invalid configuration */
	if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
		netdev_err(dev, "Invalid mode on %s\n", dev->name);
		return;
	}

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->priv->hw_version == MVPP22 && change_interface) {
		mvpp22_gop_mask_irq(port);

		port->phy_interface = state->interface;

		/* Reconfigure the serdes lanes */
		phy_power_off(port->comphy);
		mvpp22_mode_reconfigure(port);
	}

	/* mac (re)configuration */
	if (mvpp2_is_xlg(state->interface))
		mvpp2_xlg_config(port, mode, state);
	else if (phy_interface_mode_is_rgmii(state->interface) ||
		 phy_interface_mode_is_8023z(state->interface) ||
		 state->interface == PHY_INTERFACE_MODE_SGMII)
		mvpp2_gmac_config(port, mode, state);

	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port, state);

	if (port->priv->hw_version == MVPP22 && change_interface)
		mvpp22_gop_unmask_irq(port);

	mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause)
{
	struct net_device *dev = to_net_dev(config->dev);
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (mvpp2_is_xlg(interface)) {
		if (!phylink_autoneg_inband(mode)) {
			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
			val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
		}
	} else {
		if (!phylink_autoneg_inband(mode)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
				 MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX);
			val |= MVPP2_GMAC_FORCE_LINK_PASS;

			if (speed == SPEED_1000 || speed == SPEED_2500)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			if (duplex == DUPLEX_FULL)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		}

		/* We can always update the flow control enable bits;
		 * these will only be effective if flow control AN
		 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
		 */
		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
		val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
		if (tx_pause)
			val |= MVPP22_CTRL4_TX_FC_EN;
		if (rx_pause)
			val |= MVPP22_CTRL4_RX_FC_EN;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(dev);
}
static void mvpp2_mac_link_down(struct phylink_config *config,
				unsigned int mode, phy_interface_t interface)
{
	struct net_device *dev = to_net_dev(config->dev);
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface)) {
			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
		} else {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		}
	}

	netif_tx_stop_all_queues(dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	mvpp2_port_disable(port);
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.validate = mvpp2_phylink_validate,
	.mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
	.mac_an_restart = mvpp2_mac_an_restart,
	.mac_config = mvpp2_mac_config,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};
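/* phylink drives the MAC through the callbacks above: validate() trims the
 * supported/advertised link modes, mac_config() programs the XLG or GMAC
 * for the selected interface, and mac_link_up()/mac_link_down() force the
 * link state and gate the egress/ingress paths accordingly.
 */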
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	netdev_features_t features;
	struct net_device *dev;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs, thread;
	unsigned long flags = 0;
	bool has_tx_irqs;
	u32 id;
	int phy_mode;
	int err, i;

	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
		dev_err(&pdev->dev,
			"not enough IRQs to support multi queue mode\n");
		return -EINVAL;
	}

	ntxqs = MVPP2_MAX_TXQ;
	nrxqs = mvpp2_get_nrxqs(priv);

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	/*
	 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
	 * Existing usage of 10GBASE-KR is not correct; no backplane
	 * negotiation is done, and this driver does not actually support
	 * it.
	 */
	if (phy_mode == PHY_INTERFACE_MODE_10GKR)
		phy_mode = PHY_INTERFACE_MODE_10GBASER;

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->has_phy = !!of_find_property(port_node, "phy", NULL);
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;
	port->flags = flags;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for (thread = 0; thread < priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED_SOFT);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;
			port_pcpu->dev = dev;
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported()) {
		dev->hw_features |= NETIF_F_RXHASH;
		dev->features |= NETIF_F_NTUPLE;
	}

	if (!port->priv->percpu_pools)
		mvpp2_set_hw_csum(port, port->pool_long->id);

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
	dev->dev.of_node = port_node;

	/* Phylink isn't used w/ ACPI as of now */
	if (port_node) {
		port->phylink_config.dev = &dev->dev;
		port->phylink_config.type = PHYLINK_NETDEV;

		phylink = phylink_create(&port->phylink_config, port_fwnode,
					 phy_mode, &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		port->phylink = NULL;
	}

	/* Cycle the comphy to power it down, saving 270mW per port -
	 * don't worry about an error powering it up. When the comphy
	 * driver does this, we can remove this code.
	 */
	if (port->comphy) {
		err = mvpp22_comphy_init(port);
		if (err == 0)
			phy_power_off(port->comphy);
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}
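/* With the values above the Tx FIFO budget works out as
 * 10kB (port 0) + 3 * 3kB (ports 1-3) = 19kB, i.e. the whole FIFO is
 * statically partitioned at init time.
 */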
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(&pdev->dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* multi queue mode isn't supported on PPV2.1, fallback to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by second
			 * region of the network controller, make
			 * sure it is released, before requesting it again.
			 * The care is taken by mvpp2 driver to avoid
			 * concurrent access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	if (priv->hw_version == MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
	 */
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");