// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause);

/* Queue modes: in single mode each port uses a single RX queue, while in
 * multi mode the RX queues are distributed across the available CPUs.
 */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

/* Map a CPU to a software thread; several CPUs share a thread when there
 * are more CPUs than threads.
 */
static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

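/* The buffer address is stored split: an aligned base address in the
 * descriptor's address field and the low bits in packet_offset. On PPv2.2
 * the 40-bit address additionally shares the 64-bit buf_dma_addr_ptp field
 * with other bits, hence the masking below.
 */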
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

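/* The per-CPU TX queue bookkeeping below uses a pair of ring indices:
 * txq_put_index advances when a descriptor is queued for transmission and
 * txq_get_index when it is released, both wrapping at the ring size.
 */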
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

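/* For example, on a system with 6 possible CPUs, mvpp2_get_nrxqs() below
 * yields 8 RX queues in multi-queue mode: (6 + 3) & ~0x3 == 8.
 */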
/* Get the maximum number of RX queues */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to the next multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

/* Get the physical egress port number */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get the physical TXQ number */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

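/* RX buffers come from the page-frag allocator as long as they fit in a
 * page; larger (e.g. jumbo) buffers fall back to kmalloc().
 */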
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

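/* The hardware Buffer Manager (BM) maintains pools of preallocated RX
 * buffers: the driver fills the pools below and the hardware then hands
 * buffers out to the RX queues on its own.
 */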
/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

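/* Pop one buffer from a pool: the DMA address is read from the per-thread
 * PHY_ALLOC register and the matching cookie (used by this driver to carry
 * the buffer's physical address) from VIRT_ALLOC; on PPv2.2 the high bits
 * of both addresses come from a third register.
 */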
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask all BM interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

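/* Allocate one RX buffer and DMA-map it for the device; returns the buffer's
 * virtual address and fills in its DMA and physical addresses.
 */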
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	/* CPUs sharing a software thread must serialize their BM accesses */
	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that a BM pool is being used as a specific type and
 * return the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *p;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
					     mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i].port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
					     mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i,
					port->priv->bm_pools[i + port->nrxqs].id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

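/* Enable or disable TX checksum offload for the port, depending on whether
 * it is switching to or from the jumbo pool.
 */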
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

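/* MTU change handler: with shared pools, a port whose new packet size
 * crosses the 1518B boundary is moved between the long and jumbo pools;
 * with per-cpu pools only the MTU itself needs to be recorded.
 */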
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() >= port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() >= port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

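/* Mask or unmask the RX cause interrupts of every shared queue vector of
 * the port (PPv2.2 only); the private, per-CPU vectors are left untouched.
 */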
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

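/* One-time GoP (Group of Ports) initialization, performed through the
 * system controller regmap. When no system controller is available (e.g.
 * when booting via ACPI), the GoP is presumably already configured and is
 * left untouched.
 */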
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

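/* An ethtool counter is described by its register offset, the string shown
 * to userspace and whether the hardware counter is 64 bits wide (read as
 * two consecutive 32-bit registers, low word first).
 */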
struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. Depending on the register we access, the index can
 * represent various resources: a hit counter for some classification
 * tables, or a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}

/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

9bea6897 MC |
1461 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = { |
1462 | { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" }, | |
1463 | { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" }, | |
1464 | }; | |
1465 | ||
1466 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = { | |
1467 | { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" }, | |
1468 | { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" }, | |
1469 | { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" }, | |
1470 | { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" }, | |
1471 | { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" }, | |
1472 | { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" }, | |
1473 | { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" }, | |
1474 | { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" }, | |
1475 | { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" }, | |
1476 | }; | |
1477 | ||
1478 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { | |
1479 | { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" }, | |
1480 | { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, | |
1481 | { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, | |
1482 | { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, | |
1483 | }; | |
1484 | ||
1485 | #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ | |
1486 | ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ | |
1487 | (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ | |
1488 | (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs))) | |
1489 | ||
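As a worked example of the macro above: a port with (hypothetically) ntxqs = 8 and nrxqs = 4 exposes 27 MIB counters, 2 per-port counters, 9 * 8 = 72 txq counters and 4 * 4 = 16 rxq counters, so MVPP2_N_ETHTOOL_STATS(8, 4) = 27 + 2 + 72 + 16 = 117 ethtool statistics.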
118d6298 MR |
1490 | static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, |
1491 | u8 *data) | |
1492 | { | |
9bea6897 MC |
1493 | struct mvpp2_port *port = netdev_priv(netdev); |
1494 | int i, q; | |
118d6298 | 1495 | |
9bea6897 MC |
1496 | if (sset != ETH_SS_STATS) |
1497 | return; | |
1498 | ||
1499 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { | |
1500 | strscpy(data, mvpp2_ethtool_mib_regs[i].string, | |
1501 | ETH_GSTRING_LEN); | |
1502 | data += ETH_GSTRING_LEN; | |
1503 | } | |
1504 | ||
1505 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { | |
1506 | strscpy(data, mvpp2_ethtool_port_regs[i].string, | |
1507 | ETH_GSTRING_LEN); | |
1508 | data += ETH_GSTRING_LEN; | |
1509 | } | |
1510 | ||
1511 | for (q = 0; q < port->ntxqs; q++) { | |
1512 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { | |
1513 | snprintf(data, ETH_GSTRING_LEN, | |
1514 | mvpp2_ethtool_txq_regs[i].string, q); | |
1515 | data += ETH_GSTRING_LEN; | |
1516 | } | |
1517 | } | |
1518 | ||
1519 | for (q = 0; q < port->nrxqs; q++) { | |
1520 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { | |
1521 | snprintf(data, ETH_GSTRING_LEN, | |
1522 | mvpp2_ethtool_rxq_regs[i].string, | |
1523 | q); | |
1524 | data += ETH_GSTRING_LEN; | |
1525 | } | |
118d6298 MR |
1526 | } |
1527 | } | |
1528 | ||
9bea6897 MC |
1529 | static void mvpp2_read_stats(struct mvpp2_port *port) |
1530 | { | |
1531 | u64 *pstats; | |
1532 | int i, q; | |
1533 | ||
1534 | pstats = port->ethtool_stats; | |
1535 | ||
1536 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) | |
1537 | *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); | |
1538 | ||
1539 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) | |
1540 | *pstats++ += mvpp2_read(port->priv, | |
1541 | mvpp2_ethtool_port_regs[i].offset + | |
1542 | 4 * port->id); | |
1543 | ||
1544 | for (q = 0; q < port->ntxqs; q++) | |
1545 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) | |
1546 | *pstats++ += mvpp2_read_index(port->priv, | |
1547 | MVPP22_CTRS_TX_CTR(port->id, q), | |
1548 | mvpp2_ethtool_txq_regs[i].offset); | |
1549 | ||
1550 | /* Rxqs are numbered from 0 from the user standpoint, but not from the | |
1551 | * driver's. We need to add the port->first_rxq offset. | |
1552 | */ | |
1553 | for (q = 0; q < port->nrxqs; q++) | |
1554 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) | |
1555 | *pstats++ += mvpp2_read_index(port->priv, | |
1556 | port->first_rxq + q, | |
1557 | mvpp2_ethtool_rxq_regs[i].offset); | |
1558 | } | |
1559 | ||
118d6298 MR |
1560 | static void mvpp2_gather_hw_statistics(struct work_struct *work) |
1561 | { | |
1562 | struct delayed_work *del_work = to_delayed_work(work); | |
e5c500eb MR |
1563 | struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, |
1564 | stats_work); | |
118d6298 | 1565 | |
e5c500eb | 1566 | mutex_lock(&port->gather_stats_lock); |
118d6298 | 1567 | |
9bea6897 | 1568 | mvpp2_read_stats(port); |
118d6298 MR |
1569 | |
1570 | /* No need to read the counters again right after this function if it | |
1571 | * was called asynchronously by the user (i.e. via ethtool). | |
1572 | */ | |
e5c500eb MR |
1573 | cancel_delayed_work(&port->stats_work); |
1574 | queue_delayed_work(port->priv->stats_queue, &port->stats_work, | |
118d6298 MR |
1575 | MVPP2_MIB_COUNTERS_STATS_DELAY); |
1576 | ||
e5c500eb | 1577 | mutex_unlock(&port->gather_stats_lock); |
118d6298 MR |
1578 | } |
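The periodic rescheduling above matters because, apart from the two reg_is_64b octet counters, these hardware counters are only 32 bits wide. As a rough worked figure, at 10 Gb/s a 32-bit octet counter can wrap after 2^32 / 1.25e9 B/s, which is about 3.4 seconds, so snapshots must be folded into the 64-bit ethtool_stats array at least that often (hence MVPP2_MIB_COUNTERS_STATS_DELAY).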
1579 | ||
1580 | static void mvpp2_ethtool_get_stats(struct net_device *dev, | |
1581 | struct ethtool_stats *stats, u64 *data) | |
1582 | { | |
1583 | struct mvpp2_port *port = netdev_priv(dev); | |
1584 | ||
e5c500eb MR |
1585 | /* Update statistics for the given port, then take the lock to avoid |
1586 | * concurrent accesses to the ethtool_stats structure while it is copied. | |
1587 | */ | |
1588 | mvpp2_gather_hw_statistics(&port->stats_work.work); | |
118d6298 | 1589 | |
e5c500eb | 1590 | mutex_lock(&port->gather_stats_lock); |
118d6298 | 1591 | memcpy(data, port->ethtool_stats, |
9bea6897 | 1592 | sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); |
e5c500eb | 1593 | mutex_unlock(&port->gather_stats_lock); |
118d6298 MR |
1594 | } |
1595 | ||
1596 | static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) | |
1597 | { | |
9bea6897 MC |
1598 | struct mvpp2_port *port = netdev_priv(dev); |
1599 | ||
118d6298 | 1600 | if (sset == ETH_SS_STATS) |
9bea6897 | 1601 | return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); |
118d6298 MR |
1602 | |
1603 | return -EOPNOTSUPP; | |
1604 | } | |
1605 | ||
649e51d5 | 1606 | static void mvpp2_mac_reset_assert(struct mvpp2_port *port) |
3f518509 | 1607 | { |
649e51d5 | 1608 | u32 val; |
118d6298 | 1609 | |
316734fd RK |
1610 | val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | |
1611 | MVPP2_GMAC_PORT_RESET_MASK; | |
3f518509 | 1612 | writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); |
649e51d5 AT |
1613 | |
1614 | if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { | |
1615 | val = readl(port->base + MVPP22_XLG_CTRL0_REG) & | |
1616 | ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; | |
1617 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
1618 | } | |
3f518509 MW |
1619 | } |
1620 | ||
7409e66e AT |
1621 | static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) |
1622 | { | |
1623 | struct mvpp2 *priv = port->priv; | |
1624 | void __iomem *mpcs, *xpcs; | |
1625 | u32 val; | |
1626 | ||
1627 | if (port->priv->hw_version != MVPP22 || port->gop_id != 0) | |
1628 | return; | |
1629 | ||
1630 | mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); | |
1631 | xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); | |
1632 | ||
1633 | val = readl(mpcs + MVPP22_MPCS_CLK_RESET); | |
1634 | val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); | |
1635 | val |= MVPP22_MPCS_CLK_RESET_DIV_SET; | |
1636 | writel(val, mpcs + MVPP22_MPCS_CLK_RESET); | |
1637 | ||
1638 | val = readl(xpcs + MVPP22_XPCS_CFG0); | |
1639 | writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); | |
1640 | } | |
1641 | ||
1642 | static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port) | |
1643 | { | |
1644 | struct mvpp2 *priv = port->priv; | |
1645 | void __iomem *mpcs, *xpcs; | |
1646 | u32 val; | |
1647 | ||
1648 | if (port->priv->hw_version != MVPP22 || port->gop_id != 0) | |
1649 | return; | |
1650 | ||
1651 | mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); | |
1652 | xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); | |
1653 | ||
1654 | switch (port->phy_interface) { | |
e0f909bc | 1655 | case PHY_INTERFACE_MODE_10GBASER: |
7409e66e AT |
1656 | val = readl(mpcs + MVPP22_MPCS_CLK_RESET); |
1657 | val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | | |
1658 | MAC_CLK_RESET_SD_TX; | |
1659 | val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; | |
1660 | writel(val, mpcs + MVPP22_MPCS_CLK_RESET); | |
1661 | break; | |
1662 | case PHY_INTERFACE_MODE_XAUI: | |
1663 | case PHY_INTERFACE_MODE_RXAUI: | |
1664 | val = readl(xpcs + MVPP22_XPCS_CFG0); | |
1665 | writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); | |
1666 | break; | |
1667 | default: | |
1668 | break; | |
1669 | } | |
1670 | } | |
1671 | ||
3f518509 MW |
1672 | /* Change maximum receive size of the port */ |
1673 | static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) | |
1674 | { | |
1675 | u32 val; | |
1676 | ||
1677 | val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); | |
1678 | val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; | |
1679 | val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << | |
1680 | MVPP2_GMAC_MAX_RX_SIZE_OFFS); | |
1681 | writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); | |
1682 | } | |
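For a concrete value: assuming MVPP2_MH_SIZE is the 2-byte Marvell header, a pkt_size of 1632 bytes programs (1632 - 2) / 2 = 815 into the field, i.e. the maximum RX size is stored in 2-byte units after the Marvell header is accounted for.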
1683 | ||
76eb1b1d SC |
1684 | /* Change maximum receive size of the port */ |
1685 | static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) | |
1686 | { | |
1687 | u32 val; | |
1688 | ||
1689 | val = readl(port->base + MVPP22_XLG_CTRL1_REG); | |
1690 | val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; | |
1691 | val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << | |
ec15ecde | 1692 | MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; |
76eb1b1d SC |
1693 | writel(val, port->base + MVPP22_XLG_CTRL1_REG); |
1694 | } | |
1695 | ||
3f518509 MW |
1696 | /* Set defaults to the MVPP2 port */ |
1697 | static void mvpp2_defaults_set(struct mvpp2_port *port) | |
1698 | { | |
21808437 | 1699 | int tx_port_num, val, queue, lrxq; |
3f518509 | 1700 | |
3d9017d9 | 1701 | if (port->priv->hw_version == MVPP21) { |
3d9017d9 TP |
1702 | /* Update TX FIFO MIN Threshold */ |
1703 | val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); | |
1704 | val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; | |
1705 | /* Min. TX threshold must be less than minimal packet length */ | |
1706 | val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); | |
1707 | writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); | |
1708 | } | |
3f518509 MW |
1709 | |
1710 | /* Disable Legacy WRR, Disable EJP, Release from reset */ | |
1711 | tx_port_num = mvpp2_egress_port(port); | |
1712 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, | |
1713 | tx_port_num); | |
1714 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); | |
1715 | ||
4251ea5b MC |
1716 | /* Set TXQ scheduling to Round-Robin */ |
1717 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); | |
1718 | ||
3f518509 | 1719 | /* Close bandwidth for all queues */ |
21808437 | 1720 | for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) |
3f518509 | 1721 | mvpp2_write(port->priv, |
21808437 | 1722 | MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); |
3f518509 MW |
1723 | |
1724 | /* Set refill period to 1 usec, refill tokens | |
1725 | * and bucket size to maximum | |
1726 | */ | |
1727 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, | |
1728 | port->priv->tclk / USEC_PER_SEC); | |
1729 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); | |
1730 | val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; | |
1731 | val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); | |
1732 | val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; | |
1733 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); | |
1734 | val = MVPP2_TXP_TOKEN_SIZE_MAX; | |
1735 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); | |
1736 | ||
1737 | /* Set MaximumLowLatencyPacketSize value to 256 */ | |
1738 | mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), | |
1739 | MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | | |
1740 | MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); | |
1741 | ||
1742 | /* Enable Rx cache snoop */ | |
09f83975 | 1743 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
3f518509 MW |
1744 | queue = port->rxqs[lrxq]->id; |
1745 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
1746 | val |= MVPP2_SNOOP_PKT_SIZE_MASK | | |
1747 | MVPP2_SNOOP_BUF_HDR_MASK; | |
1748 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
1749 | } | |
1750 | ||
1751 | /* At default, mask all interrupts to all present cpus */ | |
1752 | mvpp2_interrupts_disable(port); | |
1753 | } | |
1754 | ||
1755 | /* Enable/disable receiving packets */ | |
1756 | static void mvpp2_ingress_enable(struct mvpp2_port *port) | |
1757 | { | |
1758 | u32 val; | |
1759 | int lrxq, queue; | |
1760 | ||
09f83975 | 1761 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
3f518509 MW |
1762 | queue = port->rxqs[lrxq]->id; |
1763 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
1764 | val &= ~MVPP2_RXQ_DISABLE_MASK; | |
1765 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
1766 | } | |
1767 | } | |
1768 | ||
1769 | static void mvpp2_ingress_disable(struct mvpp2_port *port) | |
1770 | { | |
1771 | u32 val; | |
1772 | int lrxq, queue; | |
1773 | ||
09f83975 | 1774 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
3f518509 MW |
1775 | queue = port->rxqs[lrxq]->id; |
1776 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
1777 | val |= MVPP2_RXQ_DISABLE_MASK; | |
1778 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
1779 | } | |
1780 | } | |
1781 | ||
1782 | /* Enable transmit via physical egress queue | |
1783 | * - HW starts taking descriptors from DRAM | |
1784 | */ | |
1785 | static void mvpp2_egress_enable(struct mvpp2_port *port) | |
1786 | { | |
1787 | u32 qmap; | |
1788 | int queue; | |
1789 | int tx_port_num = mvpp2_egress_port(port); | |
1790 | ||
1791 | /* Enable all initialized TXs. */ | |
1792 | qmap = 0; | |
09f83975 | 1793 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
1794 | struct mvpp2_tx_queue *txq = port->txqs[queue]; |
1795 | ||
dbbb2f03 | 1796 | if (txq->descs) |
3f518509 MW |
1797 | qmap |= (1 << queue); |
1798 | } | |
1799 | ||
1800 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
1801 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); | |
1802 | } | |
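For instance, if only txqs 0 and 2 have descriptor rings allocated (txq->descs non-NULL), qmap becomes 0b101 = 5 and exactly those two queues are enabled in MVPP2_TXP_SCHED_Q_CMD_REG.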
1803 | ||
1804 | /* Disable transmit via physical egress queue | |
1805 | * - HW doesn't take descriptors from DRAM | |
1806 | */ | |
1807 | static void mvpp2_egress_disable(struct mvpp2_port *port) | |
1808 | { | |
1809 | u32 reg_data; | |
1810 | int delay; | |
1811 | int tx_port_num = mvpp2_egress_port(port); | |
1812 | ||
1813 | /* Issue stop command for active channels only */ | |
1814 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
1815 | reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & | |
1816 | MVPP2_TXP_SCHED_ENQ_MASK; | |
1817 | if (reg_data != 0) | |
1818 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, | |
1819 | (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); | |
1820 | ||
1821 | /* Wait for all Tx activity to terminate. */ | |
1822 | delay = 0; | |
1823 | do { | |
1824 | if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { | |
1825 | netdev_warn(port->dev, | |
1826 | "Tx stop timed out, status=0x%08x\n", | |
1827 | reg_data); | |
1828 | break; | |
1829 | } | |
1830 | mdelay(1); | |
1831 | delay++; | |
1832 | ||
1833 | /* Check port TX Command register that all | |
1834 | * Tx queues are stopped | |
1835 | */ | |
1836 | reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); | |
1837 | } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); | |
1838 | } | |
1839 | ||
1840 | /* Rx descriptors helper methods */ | |
1841 | ||
1842 | /* Get number of Rx descriptors occupied by received packets */ | |
1843 | static inline int | |
1844 | mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) | |
1845 | { | |
1846 | u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); | |
1847 | ||
1848 | return val & MVPP2_RXQ_OCCUPIED_MASK; | |
1849 | } | |
1850 | ||
1851 | /* Update Rx queue status with the number of occupied and available | |
1852 | * Rx descriptor slots. | |
1853 | */ | |
1854 | static inline void | |
1855 | mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, | |
1856 | int used_count, int free_count) | |
1857 | { | |
1858 | /* Decrement the number of used descriptors and increment the | |
1859 | * number of free descriptors. | |
1860 | */ | |
1861 | u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); | |
1862 | ||
1863 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); | |
1864 | } | |
1865 | ||
1866 | /* Get pointer to next RX descriptor to be processed by SW */ | |
1867 | static inline struct mvpp2_rx_desc * | |
1868 | mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) | |
1869 | { | |
1870 | int rx_desc = rxq->next_desc_to_proc; | |
1871 | ||
1872 | rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); | |
1873 | prefetch(rxq->descs + rxq->next_desc_to_proc); | |
1874 | return rxq->descs + rx_desc; | |
1875 | } | |
1876 | ||
1877 | /* Set rx queue offset */ | |
1878 | static void mvpp2_rxq_offset_set(struct mvpp2_port *port, | |
1879 | int prxq, int offset) | |
1880 | { | |
1881 | u32 val; | |
1882 | ||
1883 | /* Convert offset from bytes to units of 32 bytes */ | |
1884 | offset = offset >> 5; | |
1885 | ||
1886 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); | |
1887 | val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; | |
1888 | ||
1889 | /* Offset is in units of 32 bytes */ | |
1890 | val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & | |
1891 | MVPP2_RXQ_PACKET_OFFSET_MASK); | |
1892 | ||
1893 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); | |
1894 | } | |
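For example, the NET_SKB_PAD offset that mvpp2_rxq_init() passes in further below (commonly 64 bytes on systems with 64-byte cache lines) becomes 64 >> 5 = 2, i.e. two 32-byte units in the MVPP2_RXQ_PACKET_OFFSET field.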
1895 | ||
3f518509 MW |
1896 | /* Tx descriptors helper methods */ |
1897 | ||
3f518509 MW |
1898 | /* Get pointer to next Tx descriptor to be processed (sent) by HW */ | |
1899 | static struct mvpp2_tx_desc * | |
1900 | mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) | |
1901 | { | |
1902 | int tx_desc = txq->next_desc_to_proc; | |
1903 | ||
1904 | txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); | |
1905 | return txq->descs + tx_desc; | |
1906 | } | |
1907 | ||
e0af22d9 TP |
1908 | /* Update HW with number of aggregated Tx descriptors to be sent |
1909 | * | |
1910 | * Called only from mvpp2_tx(), so migration is disabled, using | |
1911 | * smp_processor_id() is OK. | |
1912 | */ | |
3f518509 MW |
1913 | static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) |
1914 | { | |
1915 | /* aggregated access - relevant TXQ number is written in TX desc */ | |
1068549c | 1916 | mvpp2_thread_write(port->priv, |
e531f767 | 1917 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
a786841d | 1918 | MVPP2_AGGR_TXQ_UPDATE_REG, pending); |
3f518509 MW |
1919 | } |
1920 | ||
3f518509 MW |
1921 | /* Check if there are enough free descriptors in aggregated txq. |
1922 | * If not, update the number of occupied descriptors and repeat the check. | |
e0af22d9 TP |
1923 | * |
1924 | * Called only from mvpp2_tx(), so migration is disabled, using | |
1925 | * smp_processor_id() is OK. | |
3f518509 | 1926 | */ |
e531f767 | 1927 | static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, |
3f518509 MW |
1928 | struct mvpp2_tx_queue *aggr_txq, int num) |
1929 | { | |
02856a3b | 1930 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { |
3f518509 | 1931 | /* Update number of occupied aggregated Tx descriptors */ |
e531f767 AT |
1932 | unsigned int thread = |
1933 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()); | |
1934 | u32 val = mvpp2_read_relaxed(port->priv, | |
543ec376 | 1935 | MVPP2_AGGR_TXQ_STATUS_REG(thread)); |
3f518509 MW |
1936 | |
1937 | aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; | |
3f518509 | 1938 | |
914365f1 YM |
1939 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) |
1940 | return -ENOMEM; | |
1941 | } | |
3f518509 MW |
1942 | return 0; |
1943 | } | |
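Taken together, the two helpers above are used from the tx path in roughly the pattern sketched below. This is illustrative, not verbatim driver code; nr_descs is a hypothetical name (in mvpp2_tx() it is the skb fragment count plus one).

	/* Sketch of the caller pattern: reserve room in the per-thread
	 * aggregated queue, fill the descriptors, then tell the HW how
	 * many were queued.
	 */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, nr_descs))
		goto drop;	/* no room in the aggregated queue */
	/* ... fill nr_descs descriptors via mvpp2_txq_next_desc_get() ... */
	mvpp2_aggr_txq_pend_desc_add(port, nr_descs);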
1944 | ||
e0af22d9 TP |
1945 | /* Reserved Tx descriptors allocation request |
1946 | * | |
1947 | * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called | |
1948 | * only by mvpp2_tx(), so migration is disabled, using | |
1949 | * smp_processor_id() is OK. | |
1950 | */ | |
e531f767 | 1951 | static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, |
3f518509 MW |
1952 | struct mvpp2_tx_queue *txq, int num) |
1953 | { | |
e531f767 AT |
1954 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
1955 | struct mvpp2 *priv = port->priv; | |
3f518509 MW |
1956 | u32 val; |
1957 | ||
1958 | val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; | |
1068549c | 1959 | mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); |
3f518509 | 1960 | |
1068549c | 1961 | val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); |
3f518509 MW |
1962 | |
1963 | return val & MVPP2_TXQ_RSVD_RSLT_MASK; | |
1964 | } | |
1965 | ||
1966 | /* Check if there are enough reserved descriptors for transmission. | |
1967 | * If not, request chunk of reserved descriptors and check again. | |
1968 | */ | |
074c74df | 1969 | static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, |
3f518509 MW |
1970 | struct mvpp2_tx_queue *txq, |
1971 | struct mvpp2_txq_pcpu *txq_pcpu, | |
1972 | int num) | |
1973 | { | |
850623b3 | 1974 | int req, desc_count; |
074c74df | 1975 | unsigned int thread; |
3f518509 MW |
1976 | |
1977 | if (txq_pcpu->reserved_num >= num) | |
1978 | return 0; | |
1979 | ||
1980 | /* Not enough descriptors reserved! Update the reserved descriptor | |
1981 | * count and check again. | |
1982 | */ | |
1983 | ||
1984 | desc_count = 0; | |
1985 | /* Compute total of used descriptors */ | |
e531f767 | 1986 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
3f518509 MW |
1987 | struct mvpp2_txq_pcpu *txq_pcpu_aux; |
1988 | ||
074c74df | 1989 | txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); |
3f518509 MW |
1990 | desc_count += txq_pcpu_aux->count; |
1991 | desc_count += txq_pcpu_aux->reserved_num; | |
1992 | } | |
1993 | ||
1994 | req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); | |
1995 | desc_count += req; | |
1996 | ||
1997 | if (desc_count > | |
074c74df | 1998 | (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) |
3f518509 MW |
1999 | return -ENOMEM; |
2000 | ||
e531f767 | 2001 | txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); |
3f518509 | 2002 | |
a3302baa | 2003 | /* OK, the reserved descriptor count could have been updated: check again. */ | |
3f518509 MW |
2004 | if (txq_pcpu->reserved_num < num) |
2005 | return -ENOMEM; | |
2006 | return 0; | |
2007 | } | |
2008 | ||
2009 | /* Release the last allocated Tx descriptor. Useful to handle DMA | |
2010 | * mapping failures in the Tx path. | |
2011 | */ | |
2012 | static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) | |
2013 | { | |
2014 | if (txq->next_desc_to_proc == 0) | |
2015 | txq->next_desc_to_proc = txq->last_desc - 1; | |
2016 | else | |
2017 | txq->next_desc_to_proc--; | |
2018 | } | |
2019 | ||
2020 | /* Set Tx descriptors fields relevant for CSUM calculation */ | |
35f3625c | 2021 | static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, |
3f518509 MW |
2022 | int ip_hdr_len, int l4_proto) |
2023 | { | |
2024 | u32 command; | |
2025 | ||
2026 | /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, | |
2027 | * G_L4_chk, L4_type required only for checksum calculation | |
2028 | */ | |
2029 | command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); | |
2030 | command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); | |
2031 | command |= MVPP2_TXD_IP_CSUM_DISABLE; | |
2032 | ||
dc734dbe | 2033 | if (l3_proto == htons(ETH_P_IP)) { |
3f518509 MW |
2034 | command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ |
2035 | command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ | |
2036 | } else { | |
2037 | command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ | |
2038 | } | |
2039 | ||
2040 | if (l4_proto == IPPROTO_TCP) { | |
2041 | command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ | |
2042 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ | |
2043 | } else if (l4_proto == IPPROTO_UDP) { | |
2044 | command |= MVPP2_TXD_L4_UDP; /* enable UDP */ | |
2045 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ | |
2046 | } else { | |
2047 | command |= MVPP2_TXD_L4_CSUM_NOT; | |
2048 | } | |
2049 | ||
2050 | return command; | |
2051 | } | |
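A worked example (a sketch, not driver code): for a TCP-over-IPv4 frame with a 14-byte Ethernet header and a 20-byte IP header (ihl = 5, the 32-bit-word unit that mvpp2_skb_tx_csum() passes in further below), the command enabling IPv4 header and TCP checksum generation would be built as:

	u32 cmd = mvpp2_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP);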
2052 | ||
2053 | /* Get number of sent descriptors and decrement counter. | |
2054 | * The number of sent descriptors is returned. | |
543ec376 | 2055 | * Per-thread access |
e0af22d9 TP |
2056 | * |
2057 | * Called only from mvpp2_txq_done(), called from mvpp2_tx() | |
2058 | * (migration disabled) and from the TX completion hrtimer (migration | |
2059 | * disabled) so using smp_processor_id() is OK. | |
3f518509 MW |
2060 | */ |
2061 | static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, | |
2062 | struct mvpp2_tx_queue *txq) | |
2063 | { | |
2064 | u32 val; | |
2065 | ||
2066 | /* Reading status reg resets transmitted descriptor counter */ | |
1068549c | 2067 | val = mvpp2_thread_read_relaxed(port->priv, |
e531f767 | 2068 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
cdcfeb0f | 2069 | MVPP2_TXQ_SENT_REG(txq->id)); |
3f518509 MW |
2070 | |
2071 | return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> | |
2072 | MVPP2_TRANSMITTED_COUNT_OFFSET; | |
2073 | } | |
2074 | ||
e0af22d9 TP |
2075 | /* Called through on_each_cpu(), so runs on all CPUs, with migration |
2076 | * disabled, therefore using smp_processor_id() is OK. | |
2077 | */ | |
3f518509 MW |
2078 | static void mvpp2_txq_sent_counter_clear(void *arg) |
2079 | { | |
2080 | struct mvpp2_port *port = arg; | |
2081 | int queue; | |
2082 | ||
e531f767 AT |
2083 | /* If the thread isn't used, don't do anything */ |
2084 | if (smp_processor_id() >= port->priv->nthreads) | |
2085 | return; | |
2086 | ||
09f83975 | 2087 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
2088 | int id = port->txqs[queue]->id; |
2089 | ||
1068549c | 2090 | mvpp2_thread_read(port->priv, |
e531f767 | 2091 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
a786841d | 2092 | MVPP2_TXQ_SENT_REG(id)); |
3f518509 MW |
2093 | } |
2094 | } | |
2095 | ||
2096 | /* Set max sizes for Tx queues */ | |
2097 | static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) | |
2098 | { | |
2099 | u32 val, size, mtu; | |
2100 | int txq, tx_port_num; | |
2101 | ||
2102 | mtu = port->pkt_size * 8; | |
2103 | if (mtu > MVPP2_TXP_MTU_MAX) | |
2104 | mtu = MVPP2_TXP_MTU_MAX; | |
2105 | ||
2106 | /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ | |
2107 | mtu = 3 * mtu; | |
2108 | ||
2109 | /* Indirect access to registers */ | |
2110 | tx_port_num = mvpp2_egress_port(port); | |
2111 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
2112 | ||
2113 | /* Set MTU */ | |
2114 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); | |
2115 | val &= ~MVPP2_TXP_MTU_MAX; | |
2116 | val |= mtu; | |
2117 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); | |
2118 | ||
2119 | /* TXP token size and all TXQs token size must be larger than the MTU */ | |
2120 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); | |
2121 | size = val & MVPP2_TXP_TOKEN_SIZE_MAX; | |
2122 | if (size < mtu) { | |
2123 | size = mtu; | |
2124 | val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; | |
2125 | val |= size; | |
2126 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); | |
2127 | } | |
2128 | ||
09f83975 | 2129 | for (txq = 0; txq < port->ntxqs; txq++) { |
3f518509 MW |
2130 | val = mvpp2_read(port->priv, |
2131 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); | |
2132 | size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; | |
2133 | ||
2134 | if (size < mtu) { | |
2135 | size = mtu; | |
2136 | val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; | |
2137 | val |= size; | |
2138 | mvpp2_write(port->priv, | |
2139 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), | |
2140 | val); | |
2141 | } | |
2142 | } | |
2143 | } | |
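To make the token arithmetic concrete: a pkt_size of 1632 bytes, say, gives mtu = 8 * 1632 = 13056 token units (the * 8 suggests tokens are counted in bits), clamped to MVPP2_TXP_MTU_MAX and then tripled by the workaround to 39168 before being programmed into MVPP2_TXP_SCHED_MTU_REG.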
2144 | ||
2145 | /* Set the number of packets that will be received before an Rx | |
2146 | * interrupt is generated by the HW. | |
2147 | */ | |
2148 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | |
d63f9e41 | 2149 | struct mvpp2_rx_queue *rxq) |
3f518509 | 2150 | { |
e531f767 | 2151 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
a786841d | 2152 | |
f8b0d5f8 TP |
2153 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
2154 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; | |
3f518509 | 2155 | |
1068549c AT |
2156 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
2157 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, | |
a786841d | 2158 | rxq->pkts_coal); |
a704bb5c TP |
2159 | |
2160 | put_cpu(); | |
3f518509 MW |
2161 | } |
2162 | ||
213f428f TP |
2163 | /* For some reason in the LSP this is done on each CPU. Why? */ | |
2164 | static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, | |
2165 | struct mvpp2_tx_queue *txq) | |
2166 | { | |
e531f767 | 2167 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
213f428f TP |
2168 | u32 val; |
2169 | ||
2170 | if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) | |
2171 | txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; | |
2172 | ||
2173 | val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); | |
1068549c AT |
2174 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
2175 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); | |
213f428f TP |
2176 | |
2177 | put_cpu(); | |
2178 | } | |
2179 | ||
ab42676a TP |
2180 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
2181 | { | |
2182 | u64 tmp = (u64)clk_hz * usec; | |
2183 | ||
2184 | do_div(tmp, USEC_PER_SEC); | |
2185 | ||
2186 | return tmp > U32_MAX ? U32_MAX : tmp; | |
2187 | } | |
2188 | ||
2189 | static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) | |
2190 | { | |
2191 | u64 tmp = (u64)cycles * USEC_PER_SEC; | |
2192 | ||
2193 | do_div(tmp, clk_hz); | |
2194 | ||
2195 | return tmp > U32_MAX ? U32_MAX : tmp; | |
2196 | } | |
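For instance, with a (typical) tclk of 250 MHz, mvpp2_usec_to_cycles(100, 250000000) computes 250000000 * 100 / 1000000 = 25000 clock cycles, and mvpp2_cycles_to_usec() performs the inverse conversion; both saturate at U32_MAX rather than overflowing.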
2197 | ||
3f518509 MW |
2198 | /* Set the time delay in usec before Rx interrupt */ |
2199 | static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, | |
d63f9e41 | 2200 | struct mvpp2_rx_queue *rxq) |
3f518509 | 2201 | { |
ab42676a TP |
2202 | unsigned long freq = port->priv->tclk; |
2203 | u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); | |
2204 | ||
2205 | if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { | |
2206 | rxq->time_coal = | |
2207 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); | |
2208 | ||
2209 | /* re-evaluate to get actual register value */ | |
2210 | val = mvpp2_usec_to_cycles(rxq->time_coal, freq); | |
2211 | } | |
3f518509 | 2212 | |
3f518509 | 2213 | mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); |
3f518509 MW |
2214 | } |
2215 | ||
213f428f TP |
2216 | static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) |
2217 | { | |
2218 | unsigned long freq = port->priv->tclk; | |
2219 | u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); | |
2220 | ||
2221 | if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { | |
2222 | port->tx_time_coal = | |
2223 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); | |
2224 | ||
2225 | /* re-evaluate to get actual register value */ | |
2226 | val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); | |
2227 | } | |
2228 | ||
2229 | mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); | |
2230 | } | |
2231 | ||
3f518509 MW |
2232 | /* Free Tx queue skbuffs */ |
2233 | static void mvpp2_txq_bufs_free(struct mvpp2_port *port, | |
2234 | struct mvpp2_tx_queue *txq, | |
2235 | struct mvpp2_txq_pcpu *txq_pcpu, int num) | |
2236 | { | |
2237 | int i; | |
2238 | ||
2239 | for (i = 0; i < num; i++) { | |
8354491c TP |
2240 | struct mvpp2_txq_pcpu_buf *tx_buf = |
2241 | txq_pcpu->buffs + txq_pcpu->txq_get_index; | |
3f518509 | 2242 | |
20920267 AT |
2243 | if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) |
2244 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, | |
2245 | tx_buf->size, DMA_TO_DEVICE); | |
36fb7435 TP |
2246 | if (tx_buf->skb) |
2247 | dev_kfree_skb_any(tx_buf->skb); | |
2248 | ||
2249 | mvpp2_txq_inc_get(txq_pcpu); | |
3f518509 MW |
2250 | } |
2251 | } | |
2252 | ||
2253 | static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, | |
2254 | u32 cause) | |
2255 | { | |
2256 | int queue = fls(cause) - 1; | |
2257 | ||
2258 | return port->rxqs[queue]; | |
2259 | } | |
2260 | ||
2261 | static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, | |
2262 | u32 cause) | |
2263 | { | |
edc660fa | 2264 | int queue = fls(cause) - 1; |
3f518509 MW |
2265 | |
2266 | return port->txqs[queue]; | |
2267 | } | |
2268 | ||
2269 | /* Handle end of transmission */ | |
2270 | static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, | |
2271 | struct mvpp2_txq_pcpu *txq_pcpu) | |
2272 | { | |
2273 | struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); | |
2274 | int tx_done; | |
2275 | ||
e531f767 | 2276 | if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) |
3f518509 MW |
2277 | netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); |
2278 | ||
2279 | tx_done = mvpp2_txq_sent_desc_proc(port, txq); | |
2280 | if (!tx_done) | |
2281 | return; | |
2282 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); | |
2283 | ||
2284 | txq_pcpu->count -= tx_done; | |
2285 | ||
2286 | if (netif_tx_queue_stopped(nq)) | |
1d17db08 | 2287 | if (txq_pcpu->count <= txq_pcpu->wake_threshold) |
3f518509 MW |
2288 | netif_tx_wake_queue(nq); |
2289 | } | |
2290 | ||
213f428f | 2291 | static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, |
543ec376 | 2292 | unsigned int thread) |
edc660fa MW |
2293 | { |
2294 | struct mvpp2_tx_queue *txq; | |
2295 | struct mvpp2_txq_pcpu *txq_pcpu; | |
2296 | unsigned int tx_todo = 0; | |
2297 | ||
2298 | while (cause) { | |
2299 | txq = mvpp2_get_tx_queue(port, cause); | |
2300 | if (!txq) | |
2301 | break; | |
2302 | ||
543ec376 | 2303 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
edc660fa MW |
2304 | |
2305 | if (txq_pcpu->count) { | |
2306 | mvpp2_txq_done(port, txq, txq_pcpu); | |
2307 | tx_todo += txq_pcpu->count; | |
2308 | } | |
2309 | ||
2310 | cause &= ~(1 << txq->log_id); | |
2311 | } | |
2312 | return tx_todo; | |
2313 | } | |
2314 | ||
3f518509 MW |
2315 | /* Rx/Tx queue initialization/cleanup methods */ |
2316 | ||
2317 | /* Allocate and initialize descriptors for aggr TXQ */ | |
2318 | static int mvpp2_aggr_txq_init(struct platform_device *pdev, | |
850623b3 | 2319 | struct mvpp2_tx_queue *aggr_txq, |
543ec376 | 2320 | unsigned int thread, struct mvpp2 *priv) |
3f518509 | 2321 | { |
b02f31fb TP |
2322 | u32 txq_dma; |
2323 | ||
3f518509 | 2324 | /* Allocate memory for TX descriptors */ |
750afb08 LC |
2325 | aggr_txq->descs = dma_alloc_coherent(&pdev->dev, |
2326 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | |
2327 | &aggr_txq->descs_dma, GFP_KERNEL); | |
3f518509 MW |
2328 | if (!aggr_txq->descs) |
2329 | return -ENOMEM; | |
2330 | ||
02856a3b | 2331 | aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; |
3f518509 MW |
2332 | |
2333 | /* Aggr TXQ no reset WA */ | |
2334 | aggr_txq->next_desc_to_proc = mvpp2_read(priv, | |
543ec376 | 2335 | MVPP2_AGGR_TXQ_INDEX_REG(thread)); |
3f518509 | 2336 | |
b02f31fb TP |
2337 | /* Set Tx descriptors queue starting address indirect |
2338 | * access | |
2339 | */ | |
2340 | if (priv->hw_version == MVPP21) | |
2341 | txq_dma = aggr_txq->descs_dma; | |
2342 | else | |
2343 | txq_dma = aggr_txq->descs_dma >> | |
2344 | MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; | |
2345 | ||
543ec376 AT |
2346 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); |
2347 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), | |
85affd7e | 2348 | MVPP2_AGGR_TXQ_SIZE); |
3f518509 MW |
2349 | |
2350 | return 0; | |
2351 | } | |
2352 | ||
2353 | /* Create a specified Rx queue */ | |
2354 | static int mvpp2_rxq_init(struct mvpp2_port *port, | |
2355 | struct mvpp2_rx_queue *rxq) | |
2356 | ||
2357 | { | |
543ec376 | 2358 | unsigned int thread; |
b02f31fb TP |
2359 | u32 rxq_dma; |
2360 | ||
3f518509 MW |
2361 | rxq->size = port->rx_ring_size; |
2362 | ||
2363 | /* Allocate memory for RX descriptors */ | |
2364 | rxq->descs = dma_alloc_coherent(port->dev->dev.parent, | |
2365 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 2366 | &rxq->descs_dma, GFP_KERNEL); |
3f518509 MW |
2367 | if (!rxq->descs) |
2368 | return -ENOMEM; | |
2369 | ||
3f518509 MW |
2370 | rxq->last_desc = rxq->size - 1; |
2371 | ||
2372 | /* Zero occupied and non-occupied counters - direct access */ | |
2373 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | |
2374 | ||
2375 | /* Set Rx descriptors queue starting address - indirect access */ | |
e531f767 | 2376 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c | 2377 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
b02f31fb TP |
2378 | if (port->priv->hw_version == MVPP21) |
2379 | rxq_dma = rxq->descs_dma; | |
2380 | else | |
2381 | rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; | |
1068549c AT |
2382 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
2383 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); | |
2384 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); | |
a704bb5c | 2385 | put_cpu(); |
3f518509 MW |
2386 | |
2387 | /* Set Offset */ | |
2388 | mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); | |
2389 | ||
2390 | /* Set coalescing pkts and time */ | |
d63f9e41 TP |
2391 | mvpp2_rx_pkts_coal_set(port, rxq); |
2392 | mvpp2_rx_time_coal_set(port, rxq); | |
3f518509 MW |
2393 | |
2394 | /* Add number of descriptors ready for receiving packets */ | |
2395 | mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); | |
2396 | ||
2397 | return 0; | |
2398 | } | |
2399 | ||
2400 | /* Push packets received by the RXQ to BM pool */ | |
2401 | static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, | |
2402 | struct mvpp2_rx_queue *rxq) | |
2403 | { | |
2404 | int rx_received, i; | |
2405 | ||
2406 | rx_received = mvpp2_rxq_received(port, rxq->id); | |
2407 | if (!rx_received) | |
2408 | return; | |
2409 | ||
2410 | for (i = 0; i < rx_received; i++) { | |
2411 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | |
56b8aae9 TP |
2412 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
2413 | int pool; | |
2414 | ||
2415 | pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> | |
2416 | MVPP2_RXD_BM_POOL_ID_OFFS; | |
3f518509 | 2417 | |
7d7627ba | 2418 | mvpp2_bm_pool_put(port, pool, |
ac3dd277 TP |
2419 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), |
2420 | mvpp2_rxdesc_cookie_get(port, rx_desc)); | |
3f518509 MW |
2421 | } |
2422 | mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); | |
2423 | } | |
2424 | ||
2425 | /* Cleanup Rx queue */ | |
2426 | static void mvpp2_rxq_deinit(struct mvpp2_port *port, | |
2427 | struct mvpp2_rx_queue *rxq) | |
2428 | { | |
543ec376 | 2429 | unsigned int thread; |
a786841d | 2430 | |
3f518509 MW |
2431 | mvpp2_rxq_drop_pkts(port, rxq); |
2432 | ||
2433 | if (rxq->descs) | |
2434 | dma_free_coherent(port->dev->dev.parent, | |
2435 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
2436 | rxq->descs, | |
20396136 | 2437 | rxq->descs_dma); |
3f518509 MW |
2438 | |
2439 | rxq->descs = NULL; | |
2440 | rxq->last_desc = 0; | |
2441 | rxq->next_desc_to_proc = 0; | |
20396136 | 2442 | rxq->descs_dma = 0; |
3f518509 MW |
2443 | |
2444 | /* Clear Rx descriptors queue starting address and size; | |
2445 | * free descriptor number | |
2446 | */ | |
2447 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | |
e531f767 | 2448 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c AT |
2449 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
2450 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); | |
2451 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); | |
a704bb5c | 2452 | put_cpu(); |
3f518509 MW |
2453 | } |
2454 | ||
2455 | /* Create and initialize a Tx queue */ | |
2456 | static int mvpp2_txq_init(struct mvpp2_port *port, | |
2457 | struct mvpp2_tx_queue *txq) | |
2458 | { | |
2459 | u32 val; | |
074c74df | 2460 | unsigned int thread; |
850623b3 | 2461 | int desc, desc_per_txq, tx_port_num; |
3f518509 MW |
2462 | struct mvpp2_txq_pcpu *txq_pcpu; |
2463 | ||
2464 | txq->size = port->tx_ring_size; | |
2465 | ||
2466 | /* Allocate memory for Tx descriptors */ | |
2467 | txq->descs = dma_alloc_coherent(port->dev->dev.parent, | |
2468 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 2469 | &txq->descs_dma, GFP_KERNEL); |
3f518509 MW |
2470 | if (!txq->descs) |
2471 | return -ENOMEM; | |
2472 | ||
3f518509 MW |
2473 | txq->last_desc = txq->size - 1; |
2474 | ||
2475 | /* Set Tx descriptors queue starting address - indirect access */ | |
e531f767 | 2476 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c AT |
2477 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
2478 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, | |
a786841d | 2479 | txq->descs_dma); |
1068549c | 2480 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, |
a786841d | 2481 | txq->size & MVPP2_TXQ_DESC_SIZE_MASK); |
1068549c AT |
2482 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); |
2483 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, | |
a786841d | 2484 | txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); |
1068549c | 2485 | val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); |
3f518509 | 2486 | val &= ~MVPP2_TXQ_PENDING_MASK; |
1068549c | 2487 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); |
3f518509 MW |
2488 | |
2489 | /* Calculate base address in prefetch buffer. We reserve 16 descriptors | |
2490 | * for each existing TXQ. | |
2491 | * TCONTS for PON port must be contiguous from 0 to MVPP2_MAX_TCONT | |
a3302baa | 2492 | * GBE ports assumed to be contiguous from 0 to MVPP2_MAX_PORTS | |
3f518509 MW |
2493 | */ |
2494 | desc_per_txq = 16; | |
2495 | desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + | |
2496 | (txq->log_id * desc_per_txq); | |
2497 | ||
1068549c | 2498 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, |
a786841d TP |
2499 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | |
2500 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); | |
a704bb5c | 2501 | put_cpu(); |
3f518509 MW |
2502 | |
2503 | /* WRR / EJP configuration - indirect access */ | |
2504 | tx_port_num = mvpp2_egress_port(port); | |
2505 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
2506 | ||
2507 | val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); | |
2508 | val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; | |
2509 | val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); | |
2510 | val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; | |
2511 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); | |
2512 | ||
2513 | val = MVPP2_TXQ_TOKEN_SIZE_MAX; | |
2514 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), | |
2515 | val); | |
2516 | ||
e531f767 | 2517 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 2518 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3f518509 | 2519 | txq_pcpu->size = txq->size; |
02c91ece ME |
2520 | txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, |
2521 | sizeof(*txq_pcpu->buffs), | |
2522 | GFP_KERNEL); | |
8354491c | 2523 | if (!txq_pcpu->buffs) |
ba2d8d88 | 2524 | return -ENOMEM; |
3f518509 MW |
2525 | |
2526 | txq_pcpu->count = 0; | |
2527 | txq_pcpu->reserved_num = 0; | |
2528 | txq_pcpu->txq_put_index = 0; | |
2529 | txq_pcpu->txq_get_index = 0; | |
b70d4a51 | 2530 | txq_pcpu->tso_headers = NULL; |
186cd4d4 | 2531 | |
1d17db08 AT |
2532 | txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; |
2533 | txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; | |
2534 | ||
186cd4d4 AT |
2535 | txq_pcpu->tso_headers = |
2536 | dma_alloc_coherent(port->dev->dev.parent, | |
822eaf7c | 2537 | txq_pcpu->size * TSO_HEADER_SIZE, |
186cd4d4 AT |
2538 | &txq_pcpu->tso_headers_dma, |
2539 | GFP_KERNEL); | |
2540 | if (!txq_pcpu->tso_headers) | |
ba2d8d88 | 2541 | return -ENOMEM; |
3f518509 MW |
2542 | } |
2543 | ||
2544 | return 0; | |
2545 | } | |
2546 | ||
2547 | /* Free allocated TXQ resources */ | |
2548 | static void mvpp2_txq_deinit(struct mvpp2_port *port, | |
2549 | struct mvpp2_tx_queue *txq) | |
2550 | { | |
2551 | struct mvpp2_txq_pcpu *txq_pcpu; | |
074c74df | 2552 | unsigned int thread; |
3f518509 | 2553 | |
e531f767 | 2554 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 2555 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
8354491c | 2556 | kfree(txq_pcpu->buffs); |
186cd4d4 | 2557 | |
b70d4a51 AT |
2558 | if (txq_pcpu->tso_headers) |
2559 | dma_free_coherent(port->dev->dev.parent, | |
2560 | txq_pcpu->size * TSO_HEADER_SIZE, | |
2561 | txq_pcpu->tso_headers, | |
2562 | txq_pcpu->tso_headers_dma); | |
2563 | ||
2564 | txq_pcpu->tso_headers = NULL; | |
3f518509 MW |
2565 | } |
2566 | ||
2567 | if (txq->descs) | |
2568 | dma_free_coherent(port->dev->dev.parent, | |
2569 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 2570 | txq->descs, txq->descs_dma); |
3f518509 MW |
2571 | |
2572 | txq->descs = NULL; | |
2573 | txq->last_desc = 0; | |
2574 | txq->next_desc_to_proc = 0; | |
20396136 | 2575 | txq->descs_dma = 0; |
3f518509 MW |
2576 | |
2577 | /* Set minimum bandwidth for disabled TXQs */ | |
21808437 | 2578 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); |
3f518509 MW |
2579 | |
2580 | /* Set Tx descriptors queue starting address and size */ | |
e531f767 | 2581 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c AT |
2582 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
2583 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); | |
2584 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); | |
a704bb5c | 2585 | put_cpu(); |
3f518509 MW |
2586 | } |
2587 | ||
2588 | /* Drain a Tx queue and free its pending buffers */ | |
2589 | static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |
2590 | { | |
2591 | struct mvpp2_txq_pcpu *txq_pcpu; | |
850623b3 | 2592 | int delay, pending; |
e531f767 | 2593 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
3f518509 MW |
2594 | u32 val; |
2595 | ||
1068549c AT |
2596 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
2597 | val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); | |
3f518509 | 2598 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
1068549c | 2599 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); |
3f518509 MW |
2600 | |
2601 | /* The napi queue has been stopped so wait for all packets | |
2602 | * to be transmitted. | |
2603 | */ | |
2604 | delay = 0; | |
2605 | do { | |
2606 | if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { | |
2607 | netdev_warn(port->dev, | |
2608 | "port %d: cleaning queue %d timed out\n", | |
2609 | port->id, txq->log_id); | |
2610 | break; | |
2611 | } | |
2612 | mdelay(1); | |
2613 | delay++; | |
2614 | ||
1068549c | 2615 | pending = mvpp2_thread_read(port->priv, thread, |
a786841d TP |
2616 | MVPP2_TXQ_PENDING_REG); |
2617 | pending &= MVPP2_TXQ_PENDING_MASK; | |
3f518509 MW |
2618 | } while (pending); |
2619 | ||
2620 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; | |
1068549c | 2621 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); |
a704bb5c | 2622 | put_cpu(); |
3f518509 | 2623 | |
e531f767 | 2624 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 2625 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3f518509 MW |
2626 | |
2627 | /* Release all packets */ | |
2628 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); | |
2629 | ||
2630 | /* Reset queue */ | |
2631 | txq_pcpu->count = 0; | |
2632 | txq_pcpu->txq_put_index = 0; | |
2633 | txq_pcpu->txq_get_index = 0; | |
2634 | } | |
2635 | } | |
2636 | ||
2637 | /* Cleanup all Tx queues */ | |
2638 | static void mvpp2_cleanup_txqs(struct mvpp2_port *port) | |
2639 | { | |
2640 | struct mvpp2_tx_queue *txq; | |
2641 | int queue; | |
2642 | u32 val; | |
2643 | ||
2644 | val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); | |
2645 | ||
2646 | /* Reset Tx ports and delete Tx queues */ | |
2647 | val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); | |
2648 | mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); | |
2649 | ||
09f83975 | 2650 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
2651 | txq = port->txqs[queue]; |
2652 | mvpp2_txq_clean(port, txq); | |
2653 | mvpp2_txq_deinit(port, txq); | |
2654 | } | |
2655 | ||
2656 | on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); | |
2657 | ||
2658 | val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); | |
2659 | mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); | |
2660 | } | |
2661 | ||
2662 | /* Cleanup all Rx queues */ | |
2663 | static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) | |
2664 | { | |
2665 | int queue; | |
2666 | ||
09f83975 | 2667 | for (queue = 0; queue < port->nrxqs; queue++) |
3f518509 MW |
2668 | mvpp2_rxq_deinit(port, port->rxqs[queue]); |
2669 | } | |
2670 | ||
2671 | /* Init all Rx queues for port */ | |
2672 | static int mvpp2_setup_rxqs(struct mvpp2_port *port) | |
2673 | { | |
2674 | int queue, err; | |
2675 | ||
09f83975 | 2676 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
2677 | err = mvpp2_rxq_init(port, port->rxqs[queue]); |
2678 | if (err) | |
2679 | goto err_cleanup; | |
2680 | } | |
2681 | return 0; | |
2682 | ||
2683 | err_cleanup: | |
2684 | mvpp2_cleanup_rxqs(port); | |
2685 | return err; | |
2686 | } | |
2687 | ||
2688 | /* Init all tx queues for port */ | |
2689 | static int mvpp2_setup_txqs(struct mvpp2_port *port) | |
2690 | { | |
2691 | struct mvpp2_tx_queue *txq; | |
0d283ab5 | 2692 | int queue, err, cpu; |
3f518509 | 2693 | |
09f83975 | 2694 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
2695 | txq = port->txqs[queue]; |
2696 | err = mvpp2_txq_init(port, txq); | |
2697 | if (err) | |
2698 | goto err_cleanup; | |
0d283ab5 MC |
2699 | |
2700 | /* Assign this queue to a CPU */ | |
2701 | cpu = queue % num_present_cpus(); | |
2702 | netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); | |
3f518509 MW |
2703 | } |
2704 | ||
213f428f TP |
2705 | if (port->has_tx_irqs) { |
2706 | mvpp2_tx_time_coal_set(port); | |
2707 | for (queue = 0; queue < port->ntxqs; queue++) { | |
2708 | txq = port->txqs[queue]; | |
2709 | mvpp2_tx_pkts_coal_set(port, txq); | |
2710 | } | |
2711 | } | |
2712 | ||
3f518509 MW |
2713 | on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); |
2714 | return 0; | |
2715 | ||
2716 | err_cleanup: | |
2717 | mvpp2_cleanup_txqs(port); | |
2718 | return err; | |
2719 | } | |
2720 | ||
2721 | /* The callback for per-port interrupt */ | |
2722 | static irqreturn_t mvpp2_isr(int irq, void *dev_id) | |
2723 | { | |
591f4cfa | 2724 | struct mvpp2_queue_vector *qv = dev_id; |
3f518509 | 2725 | |
591f4cfa | 2726 | mvpp2_qvec_interrupt_disable(qv); |
3f518509 | 2727 | |
591f4cfa | 2728 | napi_schedule(&qv->napi); |
3f518509 MW |
2729 | |
2730 | return IRQ_HANDLED; | |
2731 | } | |
2732 | ||
fd3651b2 AT |
2733 | /* Per-port interrupt for link status changes */ |
2734 | static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) | |
2735 | { | |
2736 | struct mvpp2_port *port = (struct mvpp2_port *)dev_id; | |
2737 | struct net_device *dev = port->dev; | |
2738 | bool event = false, link = false; | |
2739 | u32 val; | |
2740 | ||
2741 | mvpp22_gop_mask_irq(port); | |
2742 | ||
1d9b041e | 2743 | if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { |
fd3651b2 AT |
2744 | val = readl(port->base + MVPP22_XLG_INT_STAT); |
2745 | if (val & MVPP22_XLG_INT_STAT_LINK) { | |
2746 | event = true; | |
2747 | val = readl(port->base + MVPP22_XLG_STATUS); | |
2748 | if (val & MVPP22_XLG_STATUS_LINK_UP) | |
2749 | link = true; | |
2750 | } | |
2751 | } else if (phy_interface_mode_is_rgmii(port->phy_interface) || | |
4a4cec72 RK |
2752 | phy_interface_mode_is_8023z(port->phy_interface) || |
2753 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { | |
fd3651b2 AT |
2754 | val = readl(port->base + MVPP22_GMAC_INT_STAT); |
2755 | if (val & MVPP22_GMAC_INT_STAT_LINK) { | |
2756 | event = true; | |
2757 | val = readl(port->base + MVPP2_GMAC_STATUS0); | |
2758 | if (val & MVPP2_GMAC_STATUS0_LINK_UP) | |
2759 | link = true; | |
2760 | } | |
2761 | } | |
2762 | ||
4bb04326 AT |
2763 | if (port->phylink) { |
2764 | phylink_mac_change(port->phylink, link); | |
2765 | goto handled; | |
2766 | } | |
2767 | ||
fd3651b2 AT |
2768 | if (!netif_running(dev) || !event) |
2769 | goto handled; | |
2770 | ||
2771 | if (link) { | |
2772 | mvpp2_interrupts_enable(port); | |
2773 | ||
2774 | mvpp2_egress_enable(port); | |
2775 | mvpp2_ingress_enable(port); | |
2776 | netif_carrier_on(dev); | |
2777 | netif_tx_wake_all_queues(dev); | |
2778 | } else { | |
2779 | netif_tx_stop_all_queues(dev); | |
2780 | netif_carrier_off(dev); | |
2781 | mvpp2_ingress_disable(port); | |
2782 | mvpp2_egress_disable(port); | |
2783 | ||
2784 | mvpp2_interrupts_disable(port); | |
2785 | } | |
2786 | ||
2787 | handled: | |
2788 | mvpp22_gop_unmask_irq(port); | |
2789 | return IRQ_HANDLED; | |
2790 | } | |
2791 | ||
ecb9f80d | 2792 | static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) |
edc660fa | 2793 | { |
ecb9f80d TG |
2794 | struct net_device *dev; |
2795 | struct mvpp2_port *port; | |
074c74df | 2796 | struct mvpp2_port_pcpu *port_pcpu; |
edc660fa MW |
2797 | unsigned int tx_todo, cause; |
2798 | ||
ecb9f80d TG |
2799 | port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); |
2800 | dev = port_pcpu->dev; | |
074c74df | 2801 | |
edc660fa | 2802 | if (!netif_running(dev)) |
ecb9f80d TG |
2803 | return HRTIMER_NORESTART; |
2804 | ||
edc660fa | 2805 | port_pcpu->timer_scheduled = false; |
ecb9f80d | 2806 | port = netdev_priv(dev); |
edc660fa MW |
2807 | |
2808 | /* Process all the Tx queues */ | |
09f83975 | 2809 | cause = (1 << port->ntxqs) - 1; |
074c74df | 2810 | tx_todo = mvpp2_tx_done(port, cause, |
e531f767 | 2811 | mvpp2_cpu_to_thread(port->priv, smp_processor_id())); |
edc660fa MW |
2812 | |
2813 | /* Set the timer in case not all the packets were processed */ | |
ecb9f80d TG |
2814 | if (tx_todo && !port_pcpu->timer_scheduled) { |
2815 | port_pcpu->timer_scheduled = true; | |
2816 | hrtimer_forward_now(&port_pcpu->tx_done_timer, | |
2817 | MVPP2_TXDONE_HRTIMER_PERIOD_NS); | |
edc660fa | 2818 | |
ecb9f80d TG |
2819 | return HRTIMER_RESTART; |
2820 | } | |
edc660fa MW |
2821 | return HRTIMER_NORESTART; |
2822 | } | |
2823 | ||
3f518509 MW |
2824 | /* Main RX/TX processing routines */ |
2825 | ||
2826 | /* Display more error info */ | |
2827 | static void mvpp2_rx_error(struct mvpp2_port *port, | |
2828 | struct mvpp2_rx_desc *rx_desc) | |
2829 | { | |
ac3dd277 TP | 2830 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2831 | size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); |
934e0f83 | 2832 | char *err_str = NULL;
3f518509 MW | 2833 |
2834 | switch (status & MVPP2_RXD_ERR_CODE_MASK) {
2835 | case MVPP2_RXD_ERR_CRC:
934e0f83 | 2836 | err_str = "crc";
3f518509 MW | 2837 | break;
2838 | case MVPP2_RXD_ERR_OVERRUN:
934e0f83 | 2839 | err_str = "overrun";
3f518509 MW | 2840 | break;
2841 | case MVPP2_RXD_ERR_RESOURCE:
934e0f83 | 2842 | err_str = "resource";
3f518509 MW | 2843 | break;
2844 | }
934e0f83 YM | 2845 | if (err_str && net_ratelimit())
2846 | netdev_err(port->dev, | |
2847 | "bad rx status %08x (%s error), size=%zu\n", | |
2848 | status, err_str, sz); | |
3f518509 MW |
2849 | } |
2850 | ||
2851 | /* Handle RX checksum offload */ | |
2852 | static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, | |
2853 | struct sk_buff *skb) | |
2854 | { | |
2855 | if (((status & MVPP2_RXD_L3_IP4) && | |
2856 | !(status & MVPP2_RXD_IP4_HEADER_ERR)) || | |
2857 | (status & MVPP2_RXD_L3_IP6)) | |
2858 | if (((status & MVPP2_RXD_L4_UDP) || | |
2859 | (status & MVPP2_RXD_L4_TCP)) && | |
2860 | (status & MVPP2_RXD_L4_CSUM_OK)) { | |
2861 | skb->csum = 0; | |
2862 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2863 | return; | |
2864 | } | |
2865 | ||
2866 | skb->ip_summed = CHECKSUM_NONE; | |
2867 | } | |
2868 | ||
80f60a91 | 2869 | /* Allocate a new skb and add it to BM pool */ |
3f518509 | 2870 | static int mvpp2_rx_refill(struct mvpp2_port *port, |
56b8aae9 | 2871 | struct mvpp2_bm_pool *bm_pool, int pool) |
3f518509 | 2872 | { |
20396136 | 2873 | dma_addr_t dma_addr; |
4e4a105f | 2874 | phys_addr_t phys_addr; |
0e037281 | 2875 | void *buf; |
3f518509 | 2876 | |
4e4a105f TP |
2877 | buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, |
2878 | GFP_ATOMIC); | |
0e037281 | 2879 | if (!buf) |
3f518509 MW |
2880 | return -ENOMEM; |
2881 | ||
7d7627ba | 2882 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
7ef7e1d9 | 2883 | |
3f518509 MW |
2884 | return 0; |
2885 | } | |
2886 | ||
2887 | /* Handle tx checksum */ | |
2888 | static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) | |
2889 | { | |
2890 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
2891 | int ip_hdr_len = 0; | |
2892 | u8 l4_proto; | |
35f3625c | 2893 | __be16 l3_proto = vlan_get_protocol(skb); |
3f518509 | 2894 | |
35f3625c | 2895 | if (l3_proto == htons(ETH_P_IP)) { |
3f518509 MW |
2896 | struct iphdr *ip4h = ip_hdr(skb); |
2897 | ||
2898 | /* Calculate IPv4 checksum and L4 checksum */ | |
2899 | ip_hdr_len = ip4h->ihl; | |
2900 | l4_proto = ip4h->protocol; | |
35f3625c | 2901 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
3f518509 MW |
2902 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
2903 | ||
2904 | /* Read l4_protocol from one of IPv6 extra headers */ | |
2905 | if (skb_network_header_len(skb) > 0) | |
2906 | ip_hdr_len = (skb_network_header_len(skb) >> 2); | |
2907 | l4_proto = ip6h->nexthdr; | |
2908 | } else { | |
2909 | return MVPP2_TXD_L4_CSUM_NOT; | |
2910 | } | |
2911 | ||
2912 | return mvpp2_txq_desc_csum(skb_network_offset(skb), | |
35f3625c | 2913 | l3_proto, ip_hdr_len, l4_proto); |
3f518509 MW |
2914 | } |
2915 | ||
2916 | return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; | |
2917 | } | |
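
/* For instance, a CHECKSUM_PARTIAL TCP/IPv4 skb with a 20-byte IP
 * header (ihl == 5) results in a descriptor command that asks the
 * hardware to generate both the IPv4 and the TCP checksums. Anything
 * that cannot be classified here is sent with MVPP2_TXD_L4_CSUM_NOT,
 * i.e. unmodified.
 */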
2918 | ||
3f518509 | 2919 | /* Main rx processing */ |
591f4cfa TP |
2920 | static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, |
2921 | int rx_todo, struct mvpp2_rx_queue *rxq) | |
3f518509 MW |
2922 | { |
2923 | struct net_device *dev = port->dev; | |
b5015854 MW |
2924 | int rx_received; |
2925 | int rx_done = 0; | |
3f518509 MW |
2926 | u32 rcvd_pkts = 0; |
2927 | u32 rcvd_bytes = 0; | |
2928 | ||
2929 | /* Get the number of received packets and clamp rx_todo to it */ |
2930 | rx_received = mvpp2_rxq_received(port, rxq->id); | |
2931 | if (rx_todo > rx_received) | |
2932 | rx_todo = rx_received; | |
2933 | ||
b5015854 | 2934 | while (rx_done < rx_todo) { |
3f518509 MW |
2935 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
2936 | struct mvpp2_bm_pool *bm_pool; | |
2937 | struct sk_buff *skb; | |
0e037281 | 2938 | unsigned int frag_size; |
20396136 | 2939 | dma_addr_t dma_addr; |
ac3dd277 | 2940 | phys_addr_t phys_addr; |
56b8aae9 | 2941 | u32 rx_status; |
3f518509 | 2942 | int pool, rx_bytes, err; |
0e037281 | 2943 | void *data; |
3f518509 | 2944 | |
b5015854 | 2945 | rx_done++; |
ac3dd277 TP | 2946 | rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
2947 | rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); | |
2948 | rx_bytes -= MVPP2_MH_SIZE; | |
2949 | dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); | |
2950 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); | |
2951 | data = (void *)phys_to_virt(phys_addr); | |
2952 | ||
56b8aae9 TP | 2953 | pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2954 | MVPP2_RXD_BM_POOL_ID_OFFS; | |
3f518509 | 2955 | bm_pool = &port->priv->bm_pools[pool]; |
3f518509 MW | 2956 |
2957 | /* In case of an error, release the buffer back to the Buffer
2958 | * Manager. The release is handled by the hardware, and all the
2959 | * information it needs about the buffer is carried in the RX
2960 | * descriptor.
2961 | */ | |
7f7183af MC |
2962 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) |
2963 | goto err_drop_frame; | |
3f518509 | 2964 | |
e1921168 MC |
2965 | dma_sync_single_for_cpu(dev->dev.parent, dma_addr, |
2966 | rx_bytes + MVPP2_MH_SIZE, | |
2967 | DMA_FROM_DEVICE); | |
a0c78337 | 2968 | prefetch(data); |
e1921168 | 2969 | |
0e037281 TP |
2970 | if (bm_pool->frag_size > PAGE_SIZE) |
2971 | frag_size = 0; | |
2972 | else | |
2973 | frag_size = bm_pool->frag_size; | |
2974 | ||
2975 | skb = build_skb(data, frag_size); | |
2976 | if (!skb) { | |
2977 | netdev_warn(port->dev, "skb build failed\n"); | |
2978 | goto err_drop_frame; | |
2979 | } | |
3f518509 | 2980 | |
56b8aae9 | 2981 | err = mvpp2_rx_refill(port, bm_pool, pool); |
b5015854 MW |
2982 | if (err) { |
2983 | netdev_err(port->dev, "failed to refill BM pools\n"); | |
2984 | goto err_drop_frame; | |
2985 | } | |
2986 | ||
e1921168 MC |
2987 | dma_unmap_single_attrs(dev->dev.parent, dma_addr, |
2988 | bm_pool->buf_size, DMA_FROM_DEVICE, | |
2989 | DMA_ATTR_SKIP_CPU_SYNC); | |
4229d502 | 2990 | |
3f518509 MW |
2991 | rcvd_pkts++; |
2992 | rcvd_bytes += rx_bytes; | |
3f518509 | 2993 | |
0e037281 | 2994 | skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); |
3f518509 MW |
2995 | skb_put(skb, rx_bytes); |
2996 | skb->protocol = eth_type_trans(skb, dev); | |
2997 | mvpp2_rx_csum(port, rx_status, skb); | |
2998 | ||
591f4cfa | 2999 | napi_gro_receive(napi, skb); |
7f7183af MC |
3000 | continue; |
3001 | ||
3002 | err_drop_frame: | |
3003 | dev->stats.rx_errors++; | |
3004 | mvpp2_rx_error(port, rx_desc); | |
3005 | /* Return the buffer to the pool */ | |
3006 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); | |
3f518509 MW |
3007 | } |
3008 | ||
3009 | if (rcvd_pkts) { | |
3010 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); | |
3011 | ||
3012 | u64_stats_update_begin(&stats->syncp); | |
3013 | stats->rx_packets += rcvd_pkts; | |
3014 | stats->rx_bytes += rcvd_bytes; | |
3015 | u64_stats_update_end(&stats->syncp); | |
3016 | } | |
3017 | ||
3018 | /* Update Rx queue management counters */ | |
3019 | wmb(); | |
b5015854 | 3020 | mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); |
3f518509 MW |
3021 | |
3022 | return rx_todo; | |
3023 | } | |
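
/* Two details of the receive loop above are easy to miss: only the
 * received length (rx_bytes plus the Marvell header) is synced for the
 * CPU before build_skb(), and the later dma_unmap_single_attrs()
 * passes DMA_ATTR_SKIP_CPU_SYNC so the full buf_size is not synced a
 * second time. The refill is also attempted *before* the unmap, so
 * that a failed allocation can still hand the original buffer back to
 * the pool via the err_drop_frame path.
 */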
3024 | ||
3025 | static inline void | |
ac3dd277 | 3026 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
3f518509 MW |
3027 | struct mvpp2_tx_desc *desc) |
3028 | { | |
e531f767 | 3029 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
074c74df | 3030 | struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
20920267 | 3031 | |
ac3dd277 TP | 3032 | dma_addr_t buf_dma_addr =
3033 | mvpp2_txdesc_dma_addr_get(port, desc);
3034 | size_t buf_sz =
3035 | mvpp2_txdesc_size_get(port, desc);
20920267 AT | 3036 | if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
3037 | dma_unmap_single(port->dev->dev.parent, buf_dma_addr, | |
3038 | buf_sz, DMA_TO_DEVICE); | |
3f518509 MW |
3039 | mvpp2_txq_desc_put(txq); |
3040 | } | |
3041 | ||
3042 | /* Handle tx fragmentation processing */ | |
3043 | static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, | |
3044 | struct mvpp2_tx_queue *aggr_txq, | |
3045 | struct mvpp2_tx_queue *txq) | |
3046 | { | |
e531f767 | 3047 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
074c74df | 3048 | struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3f518509 MW |
3049 | struct mvpp2_tx_desc *tx_desc; |
3050 | int i; | |
20396136 | 3051 | dma_addr_t buf_dma_addr; |
3f518509 MW |
3052 | |
3053 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
3054 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
d7840976 | 3055 | void *addr = skb_frag_address(frag); |
3f518509 MW |
3056 | |
3057 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
ac3dd277 | 3058 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
d7840976 | 3059 | mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); |
3f518509 | 3060 | |
20396136 | 3061 | buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, |
d7840976 MWO |
3062 | skb_frag_size(frag), |
3063 | DMA_TO_DEVICE); | |
20396136 | 3064 | if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { |
3f518509 | 3065 | mvpp2_txq_desc_put(txq); |
32bae631 | 3066 | goto cleanup; |
3f518509 MW |
3067 | } |
3068 | ||
6eb5d375 | 3069 | mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); |
3f518509 MW |
3070 | |
3071 | if (i == (skb_shinfo(skb)->nr_frags - 1)) { | |
3072 | /* Last descriptor */ | |
ac3dd277 TP |
3073 | mvpp2_txdesc_cmd_set(port, tx_desc, |
3074 | MVPP2_TXD_L_DESC); | |
3075 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); | |
3f518509 MW |
3076 | } else { |
3077 | /* Descriptor in the middle: Not First, Not Last */ | |
ac3dd277 TP |
3078 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); |
3079 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); | |
3f518509 MW |
3080 | } |
3081 | } | |
3082 | ||
3083 | return 0; | |
32bae631 | 3084 | cleanup: |
3f518509 MW |
3085 | /* Release all descriptors that were used to map fragments of |
3086 | * this packet, as well as the corresponding DMA mappings | |
3087 | */ | |
3088 | for (i = i - 1; i >= 0; i--) { | |
3089 | tx_desc = txq->descs + i; | |
ac3dd277 | 3090 | tx_desc_unmap_put(port, txq, tx_desc); |
3f518509 MW |
3091 | } |
3092 | ||
3093 | return -ENOMEM; | |
3094 | } | |
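
/* Unwind example (hypothetical failure): if dma_map_single() fails on
 * the third fragment (i == 2), the cleanup loop above walks back over
 * fragments 1 and 0 in reverse order, unmapping each buffer and
 * returning its descriptor before the caller drops the whole skb.
 */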
3095 | ||
186cd4d4 AT | 3096 | static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
3097 | struct net_device *dev, | |
3098 | struct mvpp2_tx_queue *txq, | |
3099 | struct mvpp2_tx_queue *aggr_txq, | |
3100 | struct mvpp2_txq_pcpu *txq_pcpu, | |
3101 | int hdr_sz) | |
3102 | { | |
3103 | struct mvpp2_port *port = netdev_priv(dev); | |
3104 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
3105 | dma_addr_t addr; | |
3106 | ||
3107 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); | |
3108 | mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); | |
3109 | ||
3110 | addr = txq_pcpu->tso_headers_dma + | |
3111 | txq_pcpu->txq_put_index * TSO_HEADER_SIZE; | |
6eb5d375 | 3112 | mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); |
186cd4d4 AT |
3113 | |
3114 | mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | | |
3115 | MVPP2_TXD_F_DESC | | |
3116 | MVPP2_TXD_PADDING_DISABLE); | |
3117 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); | |
3118 | } | |
3119 | ||
3120 | static inline int mvpp2_tso_put_data(struct sk_buff *skb, | |
3121 | struct net_device *dev, struct tso_t *tso, | |
3122 | struct mvpp2_tx_queue *txq, | |
3123 | struct mvpp2_tx_queue *aggr_txq, | |
3124 | struct mvpp2_txq_pcpu *txq_pcpu, | |
3125 | int sz, bool left, bool last) | |
3126 | { | |
3127 | struct mvpp2_port *port = netdev_priv(dev); | |
3128 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
3129 | dma_addr_t buf_dma_addr; | |
3130 | ||
3131 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); | |
3132 | mvpp2_txdesc_size_set(port, tx_desc, sz); | |
3133 | ||
3134 | buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, | |
3135 | DMA_TO_DEVICE); | |
3136 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { | |
3137 | mvpp2_txq_desc_put(txq); | |
3138 | return -ENOMEM; | |
3139 | } | |
3140 | ||
6eb5d375 | 3141 | mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); |
186cd4d4 AT |
3142 | |
3143 | if (!left) { | |
3144 | mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); | |
3145 | if (last) { | |
3146 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); | |
3147 | return 0; | |
3148 | } | |
3149 | } else { | |
3150 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); | |
3151 | } | |
3152 | ||
3153 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); | |
3154 | return 0; | |
3155 | } | |
3156 | ||
3157 | static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, | |
3158 | struct mvpp2_tx_queue *txq, | |
3159 | struct mvpp2_tx_queue *aggr_txq, | |
3160 | struct mvpp2_txq_pcpu *txq_pcpu) | |
3161 | { | |
3162 | struct mvpp2_port *port = netdev_priv(dev); | |
3163 | struct tso_t tso; | |
3164 | int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
3165 | int i, len, descs = 0; | |
3166 | ||
3167 | /* Check number of available descriptors */ | |
e531f767 | 3168 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || |
074c74df | 3169 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, |
186cd4d4 AT |
3170 | tso_count_descs(skb))) |
3171 | return 0; | |
3172 | ||
3173 | tso_start(skb, &tso); | |
3174 | len = skb->len - hdr_sz; | |
3175 | while (len > 0) { | |
3176 | int left = min_t(int, skb_shinfo(skb)->gso_size, len); | |
3177 | char *hdr = txq_pcpu->tso_headers + | |
3178 | txq_pcpu->txq_put_index * TSO_HEADER_SIZE; | |
3179 | ||
3180 | len -= left; | |
3181 | descs++; | |
3182 | ||
3183 | tso_build_hdr(skb, hdr, &tso, left, len == 0); | |
3184 | mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); | |
3185 | ||
3186 | while (left > 0) { | |
3187 | int sz = min_t(int, tso.size, left); | |
3188 | left -= sz; | |
3189 | descs++; | |
3190 | ||
3191 | if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, | |
3192 | txq_pcpu, sz, left, len == 0)) | |
3193 | goto release; | |
3194 | tso_build_data(skb, &tso, sz); | |
3195 | } | |
3196 | } | |
3197 | ||
3198 | return descs; | |
3199 | ||
3200 | release: | |
3201 | for (i = descs - 1; i >= 0; i--) { | |
3202 | struct mvpp2_tx_desc *tx_desc = txq->descs + i; | |
3203 | tx_desc_unmap_put(port, txq, tx_desc); | |
3204 | } | |
3205 | return 0; | |
3206 | } | |
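
/* Worked example with illustrative numbers: a 7240-byte TCP payload
 * and gso_size 1448 split into exactly five segments. Each segment
 * needs one header descriptor (pointing into the per-thread
 * tso_headers area) plus at least one data descriptor, so
 * tso_count_descs() reserves at least ten descriptors up front before
 * any of them is filled in.
 */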
3207 | ||
3f518509 | 3208 | /* Main tx processing */ |
f03508ce | 3209 | static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) |
3f518509 MW |
3210 | { |
3211 | struct mvpp2_port *port = netdev_priv(dev); | |
3212 | struct mvpp2_tx_queue *txq, *aggr_txq; | |
3213 | struct mvpp2_txq_pcpu *txq_pcpu; | |
3214 | struct mvpp2_tx_desc *tx_desc; | |
20396136 | 3215 | dma_addr_t buf_dma_addr; |
e531f767 | 3216 | unsigned long flags = 0; |
074c74df | 3217 | unsigned int thread; |
3f518509 MW |
3218 | int frags = 0; |
3219 | u16 txq_id; | |
3220 | u32 tx_cmd; | |
3221 | ||
e531f767 | 3222 | thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
074c74df | 3223 | |
3f518509 MW |
3224 | txq_id = skb_get_queue_mapping(skb); |
3225 | txq = port->txqs[txq_id]; | |
074c74df AT |
3226 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3227 | aggr_txq = &port->priv->aggr_txqs[thread]; | |
3f518509 | 3228 | |
e531f767 AT |
3229 | if (test_bit(thread, &port->priv->lock_map)) |
3230 | spin_lock_irqsave(&port->tx_lock[thread], flags); | |
3231 | ||
186cd4d4 AT |
3232 | if (skb_is_gso(skb)) { |
3233 | frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); | |
3234 | goto out; | |
3235 | } | |
3f518509 MW |
3236 | frags = skb_shinfo(skb)->nr_frags + 1; |
3237 | ||
3238 | /* Check number of available descriptors */ | |
e531f767 | 3239 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || |
074c74df | 3240 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { |
3f518509 MW |
3241 | frags = 0; |
3242 | goto out; | |
3243 | } | |
3244 | ||
3245 | /* Get a descriptor for the first part of the packet */ | |
3246 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
ac3dd277 TP |
3247 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
3248 | mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); | |
3f518509 | 3249 | |
20396136 | 3250 | buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, |
ac3dd277 | 3251 | skb_headlen(skb), DMA_TO_DEVICE); |
20396136 | 3252 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { |
3f518509 MW |
3253 | mvpp2_txq_desc_put(txq); |
3254 | frags = 0; | |
3255 | goto out; | |
3256 | } | |
ac3dd277 | 3257 | |
6eb5d375 | 3258 | mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); |
3f518509 MW |
3259 | |
3260 | tx_cmd = mvpp2_skb_tx_csum(port, skb); | |
3261 | ||
3262 | if (frags == 1) { | |
3263 | /* First and Last descriptor */ | |
3264 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; | |
ac3dd277 TP |
3265 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
3266 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); | |
3f518509 MW |
3267 | } else { |
3268 | /* First but not Last */ | |
3269 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; | |
ac3dd277 TP |
3270 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
3271 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); | |
3f518509 MW |
3272 | |
3273 | /* Continue with other skb fragments */ | |
3274 | if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { | |
ac3dd277 | 3275 | tx_desc_unmap_put(port, txq, tx_desc); |
3f518509 | 3276 | frags = 0; |
3f518509 MW |
3277 | } |
3278 | } | |
3279 | ||
3f518509 MW | 3280 | out:
3281 | if (frags > 0) {
074c74df | 3282 | struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
186cd4d4 AT | 3283 | struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
3284 | ||
3285 | txq_pcpu->reserved_num -= frags; | |
3286 | txq_pcpu->count += frags; | |
3287 | aggr_txq->count += frags; | |
3288 | ||
3289 | /* Enable transmit */ | |
3290 | wmb(); | |
3291 | mvpp2_aggr_txq_pend_desc_add(port, frags); | |
3292 | ||
1d17db08 | 3293 | if (txq_pcpu->count >= txq_pcpu->stop_threshold) |
186cd4d4 | 3294 | netif_tx_stop_queue(nq); |
3f518509 MW |
3295 | |
3296 | u64_stats_update_begin(&stats->syncp); | |
3297 | stats->tx_packets++; | |
3298 | stats->tx_bytes += skb->len; | |
3299 | u64_stats_update_end(&stats->syncp); | |
3300 | } else { | |
3301 | dev->stats.tx_dropped++; | |
3302 | dev_kfree_skb_any(skb); | |
3303 | } | |
3304 | ||
edc660fa | 3305 | /* Finalize TX processing */ |
082297e6 | 3306 | if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) |
edc660fa MW |
3307 | mvpp2_txq_done(port, txq, txq_pcpu); |
3308 | ||
3309 | /* Set the timer in case not all frags were processed */ | |
213f428f TP |
3310 | if (!port->has_tx_irqs && txq_pcpu->count <= frags && |
3311 | txq_pcpu->count > 0) { | |
074c74df | 3312 | struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); |
edc660fa | 3313 | |
ecb9f80d TG |
3314 | if (!port_pcpu->timer_scheduled) { |
3315 | port_pcpu->timer_scheduled = true; | |
3316 | hrtimer_start(&port_pcpu->tx_done_timer, | |
3317 | MVPP2_TXDONE_HRTIMER_PERIOD_NS, | |
3318 | HRTIMER_MODE_REL_PINNED_SOFT); | |
3319 | } | |
edc660fa MW |
3320 | } |
3321 | ||
e531f767 AT |
3322 | if (test_bit(thread, &port->priv->lock_map)) |
3323 | spin_unlock_irqrestore(&port->tx_lock[thread], flags); | |
3324 | ||
3f518509 MW |
3325 | return NETDEV_TX_OK; |
3326 | } | |
3327 | ||
3328 | static inline void mvpp2_cause_error(struct net_device *dev, int cause) | |
3329 | { | |
3330 | if (cause & MVPP2_CAUSE_FCS_ERR_MASK) | |
3331 | netdev_err(dev, "FCS error\n"); | |
3332 | if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) | |
3333 | netdev_err(dev, "rx fifo overrun error\n"); | |
3334 | if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) | |
3335 | netdev_err(dev, "tx fifo underrun error\n"); | |
3336 | } | |
3337 | ||
edc660fa | 3338 | static int mvpp2_poll(struct napi_struct *napi, int budget) |
3f518509 | 3339 | { |
213f428f | 3340 | u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; |
edc660fa MW |
3341 | int rx_done = 0; |
3342 | struct mvpp2_port *port = netdev_priv(napi->dev); | |
591f4cfa | 3343 | struct mvpp2_queue_vector *qv; |
e531f767 | 3344 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
3f518509 | 3345 | |
591f4cfa TP |
3346 | qv = container_of(napi, struct mvpp2_queue_vector, napi); |
3347 | ||
3f518509 MW |
3348 | /* Rx/Tx cause register |
3349 | * | |
3350 | * Bits 0-15: each bit indicates received packets on the Rx queue | |
3351 | * (bit 0 is for Rx queue 0). | |
3352 | * | |
3353 | * Bits 16-23: each bit indicates transmitted packets on the Tx queue | |
3354 | * (bit 16 is for Tx queue 0). | |
3355 | * | |
3356 | * Each CPU has its own Rx/Tx cause register | |
3357 | */ | |
1068549c | 3358 | cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, |
cdcfeb0f | 3359 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); |
3f518509 | 3360 | |
213f428f | 3361 | cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; |
3f518509 MW |
3362 | if (cause_misc) { |
3363 | mvpp2_cause_error(port->dev, cause_misc); | |
3364 | ||
3365 | /* Clear the cause register */ | |
3366 | mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); | |
1068549c | 3367 | mvpp2_thread_write(port->priv, thread, |
a786841d TP |
3368 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id), |
3369 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); | |
3f518509 MW |
3370 | } |
3371 | ||
774268f3 AT |
3372 | if (port->has_tx_irqs) { |
3373 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; | |
3374 | if (cause_tx) { | |
3375 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; | |
3376 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | |
3377 | } | |
213f428f | 3378 | } |
3f518509 MW |
3379 | |
3380 | /* Process RX packets */ | |
70afb58e AT |
3381 | cause_rx = cause_rx_tx & |
3382 | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); | |
213f428f | 3383 | cause_rx <<= qv->first_rxq; |
591f4cfa | 3384 | cause_rx |= qv->pending_cause_rx; |
3f518509 MW |
3385 | while (cause_rx && budget > 0) { |
3386 | int count; | |
3387 | struct mvpp2_rx_queue *rxq; | |
3388 | ||
3389 | rxq = mvpp2_get_rx_queue(port, cause_rx); | |
3390 | if (!rxq) | |
3391 | break; | |
3392 | ||
591f4cfa | 3393 | count = mvpp2_rx(port, napi, budget, rxq); |
3f518509 MW |
3394 | rx_done += count; |
3395 | budget -= count; | |
3396 | if (budget > 0) { | |
3397 | /* Clear the bit associated to this Rx queue | |
3398 | * so that next iteration will continue from | |
3399 | * the next Rx queue. | |
3400 | */ | |
3401 | cause_rx &= ~(1 << rxq->logic_rxq); | |
3402 | } | |
3403 | } | |
3404 | ||
3405 | if (budget > 0) { | |
3406 | cause_rx = 0; | |
6ad20165 | 3407 | napi_complete_done(napi, rx_done); |
3f518509 | 3408 | |
591f4cfa | 3409 | mvpp2_qvec_interrupt_enable(qv); |
3f518509 | 3410 | } |
591f4cfa | 3411 | qv->pending_cause_rx = cause_rx; |
3f518509 MW |
3412 | return rx_done; |
3413 | } | |
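
/* To illustrate the cause register decoding above (made-up value): a
 * read of 0x00050003 means TX queues 0 and 2 have work (bits 16 and
 * 18) and RX queues 0 and 1 have packets pending (bits 0 and 1). The
 * RX part is shifted by the vector's first_rxq and then walked one
 * queue at a time until the NAPI budget runs out; whatever is left is
 * parked in pending_cause_rx for the next poll.
 */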
3414 | ||
4bb04326 | 3415 | static void mvpp22_mode_reconfigure(struct mvpp2_port *port) |
3f518509 | 3416 | { |
4bb04326 AT | 3417 | u32 ctrl3;
3418 | ||
5434e8fa AT | 3419 | /* Set the GMAC & XLG MAC in reset */
3420 | mvpp2_mac_reset_assert(port);
3421 | ||
7409e66e AT | 3422 | /* Set the MPCS and XPCS in reset */
3423 | mvpp22_pcs_reset_assert(port);
3424 | ||
4bb04326 AT | 3425 | /* comphy reconfiguration */
3426 | mvpp22_comphy_init(port); | |
3427 | ||
3428 | /* gop reconfiguration */ | |
3429 | mvpp22_gop_init(port); | |
3430 | ||
7409e66e AT |
3431 | mvpp22_pcs_reset_deassert(port); |
3432 | ||
4bb04326 AT |
3433 | /* Only GOP port 0 has an XLG MAC */ |
3434 | if (port->gop_id == 0) { | |
3435 | ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); | |
3436 | ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; | |
3437 | ||
b7d286f0 | 3438 | if (mvpp2_is_xlg(port->phy_interface)) |
4bb04326 AT |
3439 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; |
3440 | else | |
3441 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; | |
3442 | ||
3443 | writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); | |
3444 | } | |
8e07269d | 3445 | |
b7d286f0 | 3446 | if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) |
76eb1b1d SC |
3447 | mvpp2_xlg_max_rx_size_set(port); |
3448 | else | |
3449 | mvpp2_gmac_max_rx_size_set(port); | |
4bb04326 AT |
3450 | } |
3451 | ||
3452 | /* Set hw internals when starting port */ | |
3453 | static void mvpp2_start_dev(struct mvpp2_port *port) | |
3454 | { | |
3455 | int i; | |
76eb1b1d | 3456 | |
3f518509 MW |
3457 | mvpp2_txp_max_tx_size_set(port); |
3458 | ||
591f4cfa TP |
3459 | for (i = 0; i < port->nqvecs; i++) |
3460 | napi_enable(&port->qvecs[i].napi); | |
3f518509 | 3461 | |
543ec376 | 3462 | /* Enable interrupts on all threads */ |
3f518509 MW |
3463 | mvpp2_interrupts_enable(port); |
3464 | ||
4bb04326 AT |
3465 | if (port->priv->hw_version == MVPP22) |
3466 | mvpp22_mode_reconfigure(port); | |
3467 | ||
3468 | if (port->phylink) { | |
3469 | phylink_start(port->phylink); | |
3470 | } else { | |
3471 | /* Phylink isn't used for ACPI yet, so the MAC has to be |
3472 | * configured manually when the interface is started. This will |
3473 | * be removed once phylink gains ACPI support. |
3474 | */ | |
3475 | struct phylink_link_state state = { | |
3476 | .interface = port->phy_interface, | |
4bb04326 | 3477 | }; |
44cc27e4 | 3478 | mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); |
91a208f2 RK | 3479 | mvpp2_mac_link_up(&port->phylink_config, NULL, |
3480 | MLO_AN_INBAND, port->phy_interface, | |
3481 | SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); | |
542897d9 | 3482 | } |
f84bf386 | 3483 | |
3f518509 MW |
3484 | netif_tx_start_all_queues(port->dev); |
3485 | } | |
3486 | ||
3487 | /* Set hw internals when stopping port */ | |
3488 | static void mvpp2_stop_dev(struct mvpp2_port *port) | |
3489 | { | |
591f4cfa | 3490 | int i; |
8e07269d | 3491 | |
543ec376 | 3492 | /* Disable interrupts on all threads */ |
3f518509 MW |
3493 | mvpp2_interrupts_disable(port); |
3494 | ||
591f4cfa TP |
3495 | for (i = 0; i < port->nqvecs; i++) |
3496 | napi_disable(&port->qvecs[i].napi); | |
3f518509 | 3497 | |
4bb04326 AT |
3498 | if (port->phylink) |
3499 | phylink_stop(port->phylink); | |
542897d9 | 3500 | phy_power_off(port->comphy); |
3f518509 MW |
3501 | } |
3502 | ||
3f518509 MW |
3503 | static int mvpp2_check_ringparam_valid(struct net_device *dev, |
3504 | struct ethtool_ringparam *ring) | |
3505 | { | |
3506 | u16 new_rx_pending = ring->rx_pending; | |
3507 | u16 new_tx_pending = ring->tx_pending; | |
3508 | ||
3509 | if (ring->rx_pending == 0 || ring->tx_pending == 0) | |
3510 | return -EINVAL; | |
3511 | ||
7cf87e4a YM |
3512 | if (ring->rx_pending > MVPP2_MAX_RXD_MAX) |
3513 | new_rx_pending = MVPP2_MAX_RXD_MAX; | |
3f518509 MW |
3514 | else if (!IS_ALIGNED(ring->rx_pending, 16)) |
3515 | new_rx_pending = ALIGN(ring->rx_pending, 16); | |
3516 | ||
7cf87e4a YM |
3517 | if (ring->tx_pending > MVPP2_MAX_TXD_MAX) |
3518 | new_tx_pending = MVPP2_MAX_TXD_MAX; | |
3f518509 MW |
3519 | else if (!IS_ALIGNED(ring->tx_pending, 32)) |
3520 | new_tx_pending = ALIGN(ring->tx_pending, 32); | |
3521 | ||
76e583c5 AT |
3522 | /* The Tx ring size cannot be smaller than the minimum number of |
3523 | * descriptors needed for TSO. | |
3524 | */ | |
3525 | if (new_tx_pending < MVPP2_MAX_SKB_DESCS) | |
3526 | new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); | |
3527 | ||
3f518509 MW |
3528 | if (ring->rx_pending != new_rx_pending) { |
3529 | netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", | |
3530 | ring->rx_pending, new_rx_pending); | |
3531 | ring->rx_pending = new_rx_pending; | |
3532 | } | |
3533 | ||
3534 | if (ring->tx_pending != new_tx_pending) { | |
3535 | netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", | |
3536 | ring->tx_pending, new_tx_pending); | |
3537 | ring->tx_pending = new_tx_pending; | |
3538 | } | |
3539 | ||
3540 | return 0; | |
3541 | } | |
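
/* Example with illustrative values: a request for rx_pending = 100 is
 * rounded up to 112 (the next multiple of 16), and any tx_pending
 * below MVPP2_MAX_SKB_DESCS is bumped to ALIGN(MVPP2_MAX_SKB_DESCS, 32)
 * so the TX ring can always hold a maximally-fragmented TSO skb.
 */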
3542 | ||
26975821 | 3543 | static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) |
3f518509 MW |
3544 | { |
3545 | u32 mac_addr_l, mac_addr_m, mac_addr_h; | |
3546 | ||
3547 | mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); | |
3548 | mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); | |
3549 | mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); | |
3550 | addr[0] = (mac_addr_h >> 24) & 0xFF; | |
3551 | addr[1] = (mac_addr_h >> 16) & 0xFF; | |
3552 | addr[2] = (mac_addr_h >> 8) & 0xFF; | |
3553 | addr[3] = mac_addr_h & 0xFF; | |
3554 | addr[4] = mac_addr_m & 0xFF; | |
3555 | addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; | |
3556 | } | |
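
/* Byte-order example (illustrative register values): mac_addr_h ==
 * 0x0050432a contributes addr[0..3] = 00:50:43:2a, the low byte of
 * mac_addr_m becomes addr[4], and addr[5] comes from the GMAC
 * source-address-low field shifted by MVPP2_GMAC_SA_LOW_OFFS.
 */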
3557 | ||
591f4cfa TP |
3558 | static int mvpp2_irqs_init(struct mvpp2_port *port) |
3559 | { | |
3560 | int err, i; | |
3561 | ||
3562 | for (i = 0; i < port->nqvecs; i++) { | |
3563 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
3564 | ||
a6b3a3fa MZ |
3565 | if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
3566 | qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); | |
3567 | if (!qv->mask) { | |
3568 | err = -ENOMEM; | |
3569 | goto err; | |
3570 | } | |
3571 | ||
13c249a9 | 3572 | irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); |
a6b3a3fa | 3573 | } |
13c249a9 | 3574 | |
591f4cfa TP |
3575 | err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); |
3576 | if (err) | |
3577 | goto err; | |
213f428f | 3578 | |
e531f767 | 3579 | if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
e531f767 AT |
3580 | unsigned int cpu; |
3581 | ||
3582 | for_each_present_cpu(cpu) { | |
3583 | if (mvpp2_cpu_to_thread(port->priv, cpu) == | |
3584 | qv->sw_thread_id) | |
a6b3a3fa | 3585 | cpumask_set_cpu(cpu, qv->mask); |
e531f767 AT |
3586 | } |
3587 | ||
a6b3a3fa | 3588 | irq_set_affinity_hint(qv->irq, qv->mask); |
e531f767 | 3589 | } |
591f4cfa TP |
3590 | } |
3591 | ||
3592 | return 0; | |
3593 | err: | |
3594 | for (i = 0; i < port->nqvecs; i++) { | |
3595 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
3596 | ||
213f428f | 3597 | irq_set_affinity_hint(qv->irq, NULL); |
a6b3a3fa MZ |
3598 | kfree(qv->mask); |
3599 | qv->mask = NULL; | |
591f4cfa TP |
3600 | free_irq(qv->irq, qv); |
3601 | } | |
3602 | ||
3603 | return err; | |
3604 | } | |
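
/* Private (per-CPU) vectors are deliberately pinned: IRQ balancing is
 * disabled and the affinity hint is set to exactly the CPUs that map
 * to the vector's software thread, so RX processing stays on the
 * thread that owns the queues. Shared vectors keep the default
 * affinity.
 */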
3605 | ||
3606 | static void mvpp2_irqs_deinit(struct mvpp2_port *port) | |
3607 | { | |
3608 | int i; | |
3609 | ||
3610 | for (i = 0; i < port->nqvecs; i++) { | |
3611 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
3612 | ||
213f428f | 3613 | irq_set_affinity_hint(qv->irq, NULL); |
a6b3a3fa MZ |
3614 | kfree(qv->mask); |
3615 | qv->mask = NULL; | |
13c249a9 | 3616 | irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); |
591f4cfa TP |
3617 | free_irq(qv->irq, qv); |
3618 | } | |
3619 | } | |
3620 | ||
4c4a5686 YM |
3621 | static bool mvpp22_rss_is_supported(void) |
3622 | { | |
3623 | return queue_mode == MVPP2_QDIST_MULTI_MODE; | |
3624 | } | |
3625 | ||
3f518509 MW |
3626 | static int mvpp2_open(struct net_device *dev) |
3627 | { | |
3628 | struct mvpp2_port *port = netdev_priv(dev); | |
fd3651b2 | 3629 | struct mvpp2 *priv = port->priv; |
3f518509 MW |
3630 | unsigned char mac_bcast[ETH_ALEN] = { |
3631 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | |
4bb04326 | 3632 | bool valid = false; |
3f518509 MW |
3633 | int err; |
3634 | ||
ce2a27c7 | 3635 | err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); |
3f518509 MW |
3636 | if (err) { |
3637 | netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); | |
3638 | return err; | |
3639 | } | |
ce2a27c7 | 3640 | err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); |
3f518509 | 3641 | if (err) { |
ce2a27c7 | 3642 | netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); |
3f518509 MW |
3643 | return err; |
3644 | } | |
3645 | err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); | |
3646 | if (err) { | |
3647 | netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); | |
3648 | return err; | |
3649 | } | |
3650 | err = mvpp2_prs_def_flow(port); | |
3651 | if (err) { | |
3652 | netdev_err(dev, "mvpp2_prs_def_flow failed\n"); | |
3653 | return err; | |
3654 | } | |
3655 | ||
3656 | /* Allocate the Rx/Tx queues */ | |
3657 | err = mvpp2_setup_rxqs(port); | |
3658 | if (err) { | |
3659 | netdev_err(port->dev, "cannot allocate Rx queues\n"); | |
3660 | return err; | |
3661 | } | |
3662 | ||
3663 | err = mvpp2_setup_txqs(port); | |
3664 | if (err) { | |
3665 | netdev_err(port->dev, "cannot allocate Tx queues\n"); | |
3666 | goto err_cleanup_rxqs; | |
3667 | } | |
3668 | ||
591f4cfa | 3669 | err = mvpp2_irqs_init(port); |
3f518509 | 3670 | if (err) { |
591f4cfa | 3671 | netdev_err(port->dev, "cannot init IRQs\n"); |
3f518509 MW |
3672 | goto err_cleanup_txqs; |
3673 | } | |
3674 | ||
4bb04326 AT |
3675 | /* Phylink isn't supported yet in ACPI mode */ |
3676 | if (port->of_node) { | |
3677 | err = phylink_of_phy_connect(port->phylink, port->of_node, 0); | |
3678 | if (err) { | |
3679 | netdev_err(port->dev, "could not attach PHY (%d)\n", | |
3680 | err); | |
3681 | goto err_free_irq; | |
3682 | } | |
3683 | ||
3684 | valid = true; | |
3685 | } | |
3686 | ||
f3f2364e | 3687 | if (priv->hw_version == MVPP22 && port->link_irq) { |
fd3651b2 AT |
3688 | err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, |
3689 | dev->name, port); | |
3690 | if (err) { | |
3691 | netdev_err(port->dev, "cannot request link IRQ %d\n", | |
3692 | port->link_irq); | |
3693 | goto err_free_irq; | |
3694 | } | |
3695 | ||
3696 | mvpp22_gop_setup_irq(port); | |
fd3651b2 | 3697 | |
4bb04326 AT | 3698 | /* By default, the link is down */
3699 | netif_carrier_off(port->dev); | |
3f518509 | 3700 | |
4bb04326 AT |
3701 | valid = true; |
3702 | } else { | |
3703 | port->link_irq = 0; | |
3704 | } | |
3705 | ||
3706 | if (!valid) { |
3707 | netdev_err(port->dev, |
3708 | "invalid configuration: no dt or link IRQ\n"); |
| err = -ENOENT; |
3709 | goto err_free_irq; |
3710 | } |
3f518509 MW |
3711 | |
3712 | /* Unmask interrupts on all CPUs */ | |
3713 | on_each_cpu(mvpp2_interrupts_unmask, port, 1); | |
213f428f | 3714 | mvpp2_shared_interrupt_mask_unmask(port, false); |
3f518509 MW |
3715 | |
3716 | mvpp2_start_dev(port); | |
3717 | ||
118d6298 | 3718 | /* Start hardware statistics gathering */ |
e5c500eb | 3719 | queue_delayed_work(priv->stats_queue, &port->stats_work, |
118d6298 MR |
3720 | MVPP2_MIB_COUNTERS_STATS_DELAY); |
3721 | ||
3f518509 MW |
3722 | return 0; |
3723 | ||
3724 | err_free_irq: | |
591f4cfa | 3725 | mvpp2_irqs_deinit(port); |
3f518509 MW |
3726 | err_cleanup_txqs: |
3727 | mvpp2_cleanup_txqs(port); | |
3728 | err_cleanup_rxqs: | |
3729 | mvpp2_cleanup_rxqs(port); | |
3730 | return err; | |
3731 | } | |
3732 | ||
3733 | static int mvpp2_stop(struct net_device *dev) | |
3734 | { | |
3735 | struct mvpp2_port *port = netdev_priv(dev); | |
edc660fa | 3736 | struct mvpp2_port_pcpu *port_pcpu; |
074c74df | 3737 | unsigned int thread; |
3f518509 MW |
3738 | |
3739 | mvpp2_stop_dev(port); | |
3f518509 | 3740 | |
e531f767 | 3741 | /* Mask interrupts on all threads */ |
3f518509 | 3742 | on_each_cpu(mvpp2_interrupts_mask, port, 1); |
213f428f | 3743 | mvpp2_shared_interrupt_mask_unmask(port, true); |
3f518509 | 3744 | |
4bb04326 AT |
3745 | if (port->phylink) |
3746 | phylink_disconnect_phy(port->phylink); | |
3747 | if (port->link_irq) | |
fd3651b2 AT |
3748 | free_irq(port->link_irq, port); |
3749 | ||
591f4cfa | 3750 | mvpp2_irqs_deinit(port); |
213f428f | 3751 | if (!port->has_tx_irqs) { |
e531f767 | 3752 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 3753 | port_pcpu = per_cpu_ptr(port->pcpu, thread); |
edc660fa | 3754 | |
213f428f TP |
3755 | hrtimer_cancel(&port_pcpu->tx_done_timer); |
3756 | port_pcpu->timer_scheduled = false; | |
213f428f | 3757 | } |
edc660fa | 3758 | } |
3f518509 MW |
3759 | mvpp2_cleanup_rxqs(port); |
3760 | mvpp2_cleanup_txqs(port); | |
3761 | ||
e5c500eb | 3762 | cancel_delayed_work_sync(&port->stats_work); |
118d6298 | 3763 | |
1f69afce AT |
3764 | mvpp2_mac_reset_assert(port); |
3765 | mvpp22_pcs_reset_assert(port); | |
3766 | ||
3f518509 MW |
3767 | return 0; |
3768 | } | |
3769 | ||
10fea26c MC |
3770 | static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, |
3771 | struct netdev_hw_addr_list *list) | |
3f518509 | 3772 | { |
3f518509 | 3773 | struct netdev_hw_addr *ha; |
10fea26c MC |
3774 | int ret; |
3775 | ||
3776 | netdev_hw_addr_list_for_each(ha, list) { | |
3777 | ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); | |
3778 | if (ret) | |
3779 | return ret; | |
3f518509 | 3780 | } |
56beda3d | 3781 | |
10fea26c MC |
3782 | return 0; |
3783 | } | |
3784 | ||
3785 | static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) | |
3786 | { | |
3787 | if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) | |
56beda3d | 3788 | mvpp2_prs_vid_enable_filtering(port); |
10fea26c MC |
3789 | else |
3790 | mvpp2_prs_vid_disable_filtering(port); | |
3791 | ||
3792 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
3793 | MVPP2_PRS_L2_UNI_CAST, enable); | |
3794 | ||
3795 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
3796 | MVPP2_PRS_L2_MULTI_CAST, enable); | |
3797 | } | |
3798 | ||
3799 | static void mvpp2_set_rx_mode(struct net_device *dev) | |
3800 | { | |
3801 | struct mvpp2_port *port = netdev_priv(dev); | |
3802 | ||
3803 | /* Clear the whole UC and MC list */ | |
3804 | mvpp2_prs_mac_del_all(port); | |
3805 | ||
3806 | if (dev->flags & IFF_PROMISC) { | |
3807 | mvpp2_set_rx_promisc(port, true); | |
3808 | return; | |
3809 | } | |
3810 | ||
3811 | mvpp2_set_rx_promisc(port, false); | |
3812 | ||
3813 | if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || | |
3814 | mvpp2_prs_mac_da_accept_list(port, &dev->uc)) | |
3815 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
3816 | MVPP2_PRS_L2_UNI_CAST, true); | |
3817 | ||
3818 | if (dev->flags & IFF_ALLMULTI) { | |
3819 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
3820 | MVPP2_PRS_L2_MULTI_CAST, true); | |
3821 | return; | |
3822 | } | |
3823 | ||
3824 | if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || | |
3825 | mvpp2_prs_mac_da_accept_list(port, &dev->mc)) | |
3826 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
3827 | MVPP2_PRS_L2_MULTI_CAST, true); | |
3f518509 MW |
3828 | } |
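
/* The filtering strategy above is all-or-nothing per cast type: the
 * parser holds at most MVPP2_PRS_MAC_UC_FILT_MAX unicast and
 * MVPP2_PRS_MAC_MC_FILT_MAX multicast entries, and as soon as a list
 * overflows (or an entry cannot be installed) the port falls back to
 * unicast or multicast promiscuous mode instead of silently dropping
 * addresses.
 */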
3829 | ||
3830 | static int mvpp2_set_mac_address(struct net_device *dev, void *p) | |
3831 | { | |
3f518509 MW |
3832 | const struct sockaddr *addr = p; |
3833 | int err; | |
3834 | ||
5b0ab2f4 YM |
3835 | if (!is_valid_ether_addr(addr->sa_data)) |
3836 | return -EADDRNOTAVAIL; | |
3f518509 MW |
3837 | |
3838 | err = mvpp2_prs_update_mac_da(dev, addr->sa_data); | |
5b0ab2f4 YM | 3839 | if (err) {
3840 | /* Reconfigure the parser to accept the original MAC address */
3841 | mvpp2_prs_update_mac_da(dev, dev->dev_addr); | |
3842 | netdev_err(dev, "failed to change MAC address\n"); | |
3843 | } | |
3f518509 MW |
3844 | return err; |
3845 | } | |
3846 | ||
7d04b0b1 MC |
3847 | /* Shut down all the ports, reconfigure the pools as percpu or shared, |
3848 | * then bring up again all ports. | |
3849 | */ | |
3850 | static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) | |
3851 | { | |
3852 | int numbufs = MVPP2_BM_POOLS_NUM, i; | |
3853 | struct mvpp2_port *port = NULL; | |
3854 | bool status[MVPP2_MAX_PORTS]; | |
3855 | ||
3856 | for (i = 0; i < priv->port_count; i++) { | |
3857 | port = priv->port_list[i]; | |
3858 | status[i] = netif_running(port->dev); | |
3859 | if (status[i]) | |
3860 | mvpp2_stop(port->dev); | |
3861 | } | |
3862 | ||
3863 | /* nrxqs is the same for all ports */ | |
3864 | if (priv->percpu_pools) | |
3865 | numbufs = port->nrxqs * 2; | |
3866 | ||
3867 | for (i = 0; i < numbufs; i++) | |
3868 | mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); | |
3869 | ||
3870 | devm_kfree(port->dev->dev.parent, priv->bm_pools); | |
3871 | priv->percpu_pools = percpu; | |
3872 | mvpp2_bm_init(port->dev->dev.parent, priv); | |
3873 | ||
3874 | for (i = 0; i < priv->port_count; i++) { | |
3875 | port = priv->port_list[i]; | |
3876 | mvpp2_swf_bm_pool_init(port); | |
3877 | if (status[i]) | |
3878 | mvpp2_open(port->dev); | |
3879 | } | |
3880 | ||
3881 | return 0; | |
3882 | } | |
3883 | ||
3f518509 MW |
3884 | static int mvpp2_change_mtu(struct net_device *dev, int mtu) |
3885 | { | |
3886 | struct mvpp2_port *port = netdev_priv(dev); | |
230bd958 | 3887 | bool running = netif_running(dev); |
7d04b0b1 | 3888 | struct mvpp2 *priv = port->priv; |
3f518509 MW |
3889 | int err; |
3890 | ||
5777987e JW |
3891 | if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { |
3892 | netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, | |
3893 | ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); | |
3894 | mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); | |
3f518509 MW |
3895 | } |
3896 | ||
7d04b0b1 MC |
3897 | if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { |
3898 | if (priv->percpu_pools) { | |
3899 | netdev_warn(dev, "mtu %d too high, switching to shared buffers\n", mtu); |
3900 | mvpp2_bm_switch_buffers(priv, false); | |
3901 | } | |
3902 | } else { | |
3903 | bool jumbo = false; | |
3904 | int i; | |
3905 | ||
3906 | for (i = 0; i < priv->port_count; i++) | |
3907 | if (priv->port_list[i] != port && | |
3908 | MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > | |
3909 | MVPP2_BM_LONG_PKT_SIZE) { | |
3910 | jumbo = true; | |
3911 | break; | |
3912 | } | |
3913 | ||
3914 | /* No port is using jumbo frames */ | |
3915 | if (!jumbo) { | |
3916 | dev_info(port->dev->dev.parent, | |
3917 | "all ports have a low MTU, switching to per-cpu buffers"); | |
3918 | mvpp2_bm_switch_buffers(priv, true); | |
3919 | } | |
3920 | } | |
3921 | ||
230bd958 MC |
3922 | if (running) |
3923 | mvpp2_stop_dev(port); | |
3f518509 MW |
3924 | |
3925 | err = mvpp2_bm_update_mtu(dev, mtu); | |
230bd958 MC |
3926 | if (err) { |
3927 | netdev_err(dev, "failed to change MTU\n"); | |
3928 | /* Reconfigure BM to the original MTU */ | |
3929 | mvpp2_bm_update_mtu(dev, dev->mtu); | |
3930 | } else { | |
3f518509 | 3931 | port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
3f518509 MW |
3932 | } |
3933 | ||
230bd958 MC |
3934 | if (running) { |
3935 | mvpp2_start_dev(port); | |
3936 | mvpp2_egress_enable(port); | |
3937 | mvpp2_ingress_enable(port); | |
3938 | } | |
3f518509 | 3939 | |
3f518509 MW |
3940 | return err; |
3941 | } | |
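
/* MTU changes can migrate the buffer layout: a jumbo MTU on any port
 * forces the shared pools (the per-CPU layout has no jumbo pool), and
 * once every port is back below MVPP2_BM_LONG_PKT_SIZE the driver
 * returns to per-CPU pools. The MTU itself is first rounded so that
 * MVPP2_RX_PKT_SIZE(mtu) stays a multiple of 8.
 */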
3942 | ||
bc1f4470 | 3943 | static void |
3f518509 MW |
3944 | mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
3945 | { | |
3946 | struct mvpp2_port *port = netdev_priv(dev); | |
3947 | unsigned int start; | |
850623b3 | 3948 | unsigned int cpu; |
3f518509 MW |
3949 | |
3950 | for_each_possible_cpu(cpu) { | |
3951 | struct mvpp2_pcpu_stats *cpu_stats; | |
3952 | u64 rx_packets; | |
3953 | u64 rx_bytes; | |
3954 | u64 tx_packets; | |
3955 | u64 tx_bytes; | |
3956 | ||
3957 | cpu_stats = per_cpu_ptr(port->stats, cpu); | |
3958 | do { | |
3959 | start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); | |
3960 | rx_packets = cpu_stats->rx_packets; | |
3961 | rx_bytes = cpu_stats->rx_bytes; | |
3962 | tx_packets = cpu_stats->tx_packets; | |
3963 | tx_bytes = cpu_stats->tx_bytes; | |
3964 | } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); | |
3965 | ||
3966 | stats->rx_packets += rx_packets; | |
3967 | stats->rx_bytes += rx_bytes; | |
3968 | stats->tx_packets += tx_packets; | |
3969 | stats->tx_bytes += tx_bytes; | |
3970 | } | |
3971 | ||
3972 | stats->rx_errors = dev->stats.rx_errors; | |
3973 | stats->rx_dropped = dev->stats.rx_dropped; | |
3974 | stats->tx_dropped = dev->stats.tx_dropped; | |
3f518509 MW |
3975 | } |
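
/* The begin/retry pair above is the usual u64_stats seqcount pattern:
 * on 32-bit systems a writer updating the 64-bit per-CPU counters can
 * race the reader, so the snapshot is retried until a consistent
 * begin/end sequence is observed (on 64-bit it compiles away). Only
 * packet and byte counts are kept per-CPU; the error and drop counters
 * come straight from dev->stats.
 */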
3976 | ||
bd695a5f TP |
3977 | static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
3978 | { | |
4bb04326 | 3979 | struct mvpp2_port *port = netdev_priv(dev); |
bd695a5f | 3980 | |
4bb04326 | 3981 | if (!port->phylink) |
bd695a5f TP |
3982 | return -ENOTSUPP; |
3983 | ||
4bb04326 | 3984 | return phylink_mii_ioctl(port->phylink, ifr, cmd); |
bd695a5f TP |
3985 | } |
3986 | ||
56beda3d MC |
3987 | static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
3988 | { | |
3989 | struct mvpp2_port *port = netdev_priv(dev); | |
3990 | int ret; | |
3991 | ||
3992 | ret = mvpp2_prs_vid_entry_add(port, vid); | |
3993 | if (ret) | |
3994 | netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", | |
3995 | MVPP2_PRS_VLAN_FILT_MAX - 1); | |
3996 | return ret; | |
3997 | } | |
3998 | ||
3999 | static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) | |
4000 | { | |
4001 | struct mvpp2_port *port = netdev_priv(dev); | |
4002 | ||
4003 | mvpp2_prs_vid_entry_remove(port, vid); | |
4004 | return 0; | |
4005 | } | |
4006 | ||
4007 | static int mvpp2_set_features(struct net_device *dev, | |
4008 | netdev_features_t features) | |
4009 | { | |
4010 | netdev_features_t changed = dev->features ^ features; | |
4011 | struct mvpp2_port *port = netdev_priv(dev); | |
4012 | ||
4013 | if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { | |
4014 | if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { | |
4015 | mvpp2_prs_vid_enable_filtering(port); | |
4016 | } else { | |
4017 | /* Invalidate all registered VID filters for this | |
4018 | * port | |
4019 | */ | |
4020 | mvpp2_prs_vid_remove_all(port); | |
4021 | ||
4022 | mvpp2_prs_vid_disable_filtering(port); | |
4023 | } | |
4024 | } | |
4025 | ||
d33ec452 MC |
4026 | if (changed & NETIF_F_RXHASH) { |
4027 | if (features & NETIF_F_RXHASH) | |
6310f77d | 4028 | mvpp22_port_rss_enable(port); |
d33ec452 | 4029 | else |
6310f77d | 4030 | mvpp22_port_rss_disable(port); |
d33ec452 MC |
4031 | } |
4032 | ||
56beda3d MC |
4033 | return 0; |
4034 | } | |
4035 | ||
3f518509 MW |
4036 | /* Ethtool methods */ |
4037 | ||
4bb04326 AT |
4038 | static int mvpp2_ethtool_nway_reset(struct net_device *dev) |
4039 | { | |
4040 | struct mvpp2_port *port = netdev_priv(dev); | |
4041 | ||
4042 | if (!port->phylink) | |
4043 | return -ENOTSUPP; | |
4044 | ||
4045 | return phylink_ethtool_nway_reset(port->phylink); | |
4046 | } | |
4047 | ||
3f518509 MW |
4048 | /* Set interrupt coalescing for ethtools */ |
4049 | static int mvpp2_ethtool_set_coalesce(struct net_device *dev, | |
4050 | struct ethtool_coalesce *c) | |
4051 | { | |
4052 | struct mvpp2_port *port = netdev_priv(dev); | |
4053 | int queue; | |
4054 | ||
09f83975 | 4055 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
4056 | struct mvpp2_rx_queue *rxq = port->rxqs[queue]; |
4057 | ||
4058 | rxq->time_coal = c->rx_coalesce_usecs; | |
4059 | rxq->pkts_coal = c->rx_max_coalesced_frames; | |
d63f9e41 TP |
4060 | mvpp2_rx_pkts_coal_set(port, rxq); |
4061 | mvpp2_rx_time_coal_set(port, rxq); | |
3f518509 MW |
4062 | } |
4063 | ||
213f428f TP |
4064 | if (port->has_tx_irqs) { |
4065 | port->tx_time_coal = c->tx_coalesce_usecs; | |
4066 | mvpp2_tx_time_coal_set(port); | |
4067 | } | |
4068 | ||
09f83975 | 4069 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
4070 | struct mvpp2_tx_queue *txq = port->txqs[queue]; |
4071 | ||
4072 | txq->done_pkts_coal = c->tx_max_coalesced_frames; | |
213f428f TP |
4073 | |
4074 | if (port->has_tx_irqs) | |
4075 | mvpp2_tx_pkts_coal_set(port, txq); | |
3f518509 MW |
4076 | } |
4077 | ||
3f518509 MW |
4078 | return 0; |
4079 | } | |
4080 | ||
4081 | /* get coalescing for ethtools */ | |
4082 | static int mvpp2_ethtool_get_coalesce(struct net_device *dev, | |
4083 | struct ethtool_coalesce *c) | |
4084 | { | |
4085 | struct mvpp2_port *port = netdev_priv(dev); | |
4086 | ||
385c284f AT |
4087 | c->rx_coalesce_usecs = port->rxqs[0]->time_coal; |
4088 | c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; | |
4089 | c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; | |
24b28ccb | 4090 | c->tx_coalesce_usecs = port->tx_time_coal; |
3f518509 MW |
4091 | return 0; |
4092 | } | |
4093 | ||
4094 | static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, | |
4095 | struct ethtool_drvinfo *drvinfo) | |
4096 | { | |
4097 | strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, | |
4098 | sizeof(drvinfo->driver)); | |
4099 | strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, | |
4100 | sizeof(drvinfo->version)); | |
4101 | strlcpy(drvinfo->bus_info, dev_name(&dev->dev), | |
4102 | sizeof(drvinfo->bus_info)); | |
4103 | } | |
4104 | ||
4105 | static void mvpp2_ethtool_get_ringparam(struct net_device *dev, | |
4106 | struct ethtool_ringparam *ring) | |
4107 | { | |
4108 | struct mvpp2_port *port = netdev_priv(dev); | |
4109 | ||
7cf87e4a YM |
4110 | ring->rx_max_pending = MVPP2_MAX_RXD_MAX; |
4111 | ring->tx_max_pending = MVPP2_MAX_TXD_MAX; | |
3f518509 MW |
4112 | ring->rx_pending = port->rx_ring_size; |
4113 | ring->tx_pending = port->tx_ring_size; | |
4114 | } | |
4115 | ||
4116 | static int mvpp2_ethtool_set_ringparam(struct net_device *dev, | |
4117 | struct ethtool_ringparam *ring) | |
4118 | { | |
4119 | struct mvpp2_port *port = netdev_priv(dev); | |
4120 | u16 prev_rx_ring_size = port->rx_ring_size; | |
4121 | u16 prev_tx_ring_size = port->tx_ring_size; | |
4122 | int err; | |
4123 | ||
4124 | err = mvpp2_check_ringparam_valid(dev, ring); | |
4125 | if (err) | |
4126 | return err; | |
4127 | ||
4128 | if (!netif_running(dev)) { | |
4129 | port->rx_ring_size = ring->rx_pending; | |
4130 | port->tx_ring_size = ring->tx_pending; | |
4131 | return 0; | |
4132 | } | |
4133 | ||
4134 | /* The interface is running, so we have to force a | |
4135 | * reallocation of the queues | |
4136 | */ | |
4137 | mvpp2_stop_dev(port); | |
4138 | mvpp2_cleanup_rxqs(port); | |
4139 | mvpp2_cleanup_txqs(port); | |
4140 | ||
4141 | port->rx_ring_size = ring->rx_pending; | |
4142 | port->tx_ring_size = ring->tx_pending; | |
4143 | ||
4144 | err = mvpp2_setup_rxqs(port); | |
4145 | if (err) { | |
4146 | /* Reallocate Rx queues with the original ring size */ | |
4147 | port->rx_ring_size = prev_rx_ring_size; | |
4148 | ring->rx_pending = prev_rx_ring_size; | |
4149 | err = mvpp2_setup_rxqs(port); | |
4150 | if (err) | |
4151 | goto err_out; | |
4152 | } | |
4153 | err = mvpp2_setup_txqs(port); | |
4154 | if (err) { | |
4155 | /* Reallocate Tx queues with the original ring size */ | |
4156 | port->tx_ring_size = prev_tx_ring_size; | |
4157 | ring->tx_pending = prev_tx_ring_size; | |
4158 | err = mvpp2_setup_txqs(port); | |
4159 | if (err) | |
4160 | goto err_clean_rxqs; | |
4161 | } | |
4162 | ||
4163 | mvpp2_start_dev(port); | |
4164 | mvpp2_egress_enable(port); | |
4165 | mvpp2_ingress_enable(port); | |
4166 | ||
4167 | return 0; | |
4168 | ||
4169 | err_clean_rxqs: | |
4170 | mvpp2_cleanup_rxqs(port); | |
4171 | err_out: | |
dfd4240a | 4172 | netdev_err(dev, "failed to change ring parameters\n"); |
3f518509 MW |
4173 | return err; |
4174 | } | |
4175 | ||
4bb04326 AT |
4176 | static void mvpp2_ethtool_get_pause_param(struct net_device *dev, |
4177 | struct ethtool_pauseparam *pause) | |
4178 | { | |
4179 | struct mvpp2_port *port = netdev_priv(dev); | |
4180 | ||
4181 | if (!port->phylink) | |
4182 | return; | |
4183 | ||
4184 | phylink_ethtool_get_pauseparam(port->phylink, pause); | |
4185 | } | |
4186 | ||
4187 | static int mvpp2_ethtool_set_pause_param(struct net_device *dev, | |
4188 | struct ethtool_pauseparam *pause) | |
4189 | { | |
4190 | struct mvpp2_port *port = netdev_priv(dev); | |
4191 | ||
4192 | if (!port->phylink) | |
4193 | return -ENOTSUPP; | |
4194 | ||
4195 | return phylink_ethtool_set_pauseparam(port->phylink, pause); | |
4196 | } | |
4197 | ||
4198 | static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, | |
4199 | struct ethtool_link_ksettings *cmd) | |
4200 | { | |
4201 | struct mvpp2_port *port = netdev_priv(dev); | |
4202 | ||
4203 | if (!port->phylink) | |
4204 | return -ENOTSUPP; | |
4205 | ||
4206 | return phylink_ethtool_ksettings_get(port->phylink, cmd); | |
4207 | } | |
4208 | ||
4209 | static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, | |
4210 | const struct ethtool_link_ksettings *cmd) | |
4211 | { | |
4212 | struct mvpp2_port *port = netdev_priv(dev); | |
4213 | ||
4214 | if (!port->phylink) | |
4215 | return -ENOTSUPP; | |
4216 | ||
4217 | return phylink_ethtool_ksettings_set(port->phylink, cmd); | |
4218 | } | |
4219 | ||
8179642b AT |
4220 | static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, |
4221 | struct ethtool_rxnfc *info, u32 *rules) | |
4222 | { | |
4223 | struct mvpp2_port *port = netdev_priv(dev); | |
90b509b3 | 4224 | int ret = 0, i, loc = 0; |
8179642b AT |
4225 | |
4226 | if (!mvpp22_rss_is_supported()) | |
4227 | return -EOPNOTSUPP; | |
4228 | ||
4229 | switch (info->cmd) { | |
436d4fdb MC |
4230 | case ETHTOOL_GRXFH: |
4231 | ret = mvpp2_ethtool_rxfh_get(port, info); | |
4232 | break; | |
8179642b AT |
4233 | case ETHTOOL_GRXRINGS: |
4234 | info->data = port->nrxqs; | |
4235 | break; | |
90b509b3 MC |
4236 | case ETHTOOL_GRXCLSRLCNT: |
4237 | info->rule_cnt = port->n_rfs_rules; | |
4238 | break; | |
4239 | case ETHTOOL_GRXCLSRULE: | |
4240 | ret = mvpp2_ethtool_cls_rule_get(port, info); | |
4241 | break; | |
4242 | case ETHTOOL_GRXCLSRLALL: | |
ae8e1d5e | 4243 | for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { |
90b509b3 MC |
4244 | if (port->rfs_rules[i]) |
4245 | rules[loc++] = i; | |
4246 | } | |
4247 | break; | |
8179642b AT |
4248 | default: |
4249 | return -ENOTSUPP; | |
4250 | } | |
4251 | ||
436d4fdb MC |
4252 | return ret; |
4253 | } | |
4254 | ||
4255 | static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, | |
4256 | struct ethtool_rxnfc *info) | |
4257 | { | |
4258 | struct mvpp2_port *port = netdev_priv(dev); | |
4259 | int ret = 0; | |
4260 | ||
4261 | if (!mvpp22_rss_is_supported()) | |
4262 | return -EOPNOTSUPP; | |
4263 | ||
4264 | switch (info->cmd) { | |
4265 | case ETHTOOL_SRXFH: | |
4266 | ret = mvpp2_ethtool_rxfh_set(port, info); | |
4267 | break; | |
90b509b3 MC |
4268 | case ETHTOOL_SRXCLSRLINS: |
4269 | ret = mvpp2_ethtool_cls_rule_ins(port, info); | |
4270 | break; | |
4271 | case ETHTOOL_SRXCLSRLDEL: | |
4272 | ret = mvpp2_ethtool_cls_rule_del(port, info); | |
4273 | break; | |
436d4fdb MC |
4274 | default: |
4275 | return -EOPNOTSUPP; | |
4276 | } | |
4277 | return ret; | |
8179642b AT |
4278 | } |
4279 | ||
4280 | static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) | |
4281 | { | |
4282 | return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0; | |
4283 | } | |
4284 | ||
4285 | static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, | |
4286 | u8 *hfunc) | |
4287 | { | |
4288 | struct mvpp2_port *port = netdev_priv(dev); | |
895586d5 | 4289 | int ret = 0; |
8179642b AT |
4290 | |
4291 | if (!mvpp22_rss_is_supported()) | |
4292 | return -EOPNOTSUPP; | |
4293 | ||
4294 | if (indir) | |
895586d5 | 4295 | ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir); |
8179642b AT |
4296 | |
4297 | if (hfunc) | |
4298 | *hfunc = ETH_RSS_HASH_CRC32; | |
4299 | ||
895586d5 | 4300 | return ret; |
8179642b AT |
4301 | } |
4302 | ||
4303 | static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, | |
4304 | const u8 *key, const u8 hfunc) | |
4305 | { | |
4306 | struct mvpp2_port *port = netdev_priv(dev); | |
895586d5 | 4307 | int ret = 0; |
8179642b AT |
4308 | |
4309 | if (!mvpp22_rss_is_supported()) | |
4310 | return -EOPNOTSUPP; | |
4311 | ||
4312 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) | |
4313 | return -EOPNOTSUPP; | |
4314 | ||
4315 | if (key) | |
4316 | return -EOPNOTSUPP; | |
4317 | ||
895586d5 MC |
4318 | if (indir) |
4319 | ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir); | |
8179642b | 4320 | |
895586d5 | 4321 | return ret; |
8179642b AT |
4322 | } |
4323 | ||
895586d5 MC |
4324 | static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, |
4325 | u8 *key, u8 *hfunc, u32 rss_context) | |
4326 | { | |
4327 | struct mvpp2_port *port = netdev_priv(dev); | |
4328 | int ret = 0; | |
4329 | ||
4330 | if (!mvpp22_rss_is_supported()) | |
4331 | return -EOPNOTSUPP; | |
39bd16df DC |
4332 | if (rss_context >= MVPP22_N_RSS_TABLES) |
4333 | return -EINVAL; | |
895586d5 MC |
4334 | |
4335 | if (hfunc) | |
4336 | *hfunc = ETH_RSS_HASH_CRC32; | |
4337 | ||
4338 | if (indir) | |
4339 | ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir); | |
4340 | ||
4341 | return ret; | |
4342 | } | |
4343 | ||
4344 | static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, | |
4345 | const u32 *indir, const u8 *key, | |
4346 | const u8 hfunc, u32 *rss_context, | |
4347 | bool delete) | |
4348 | { | |
4349 | struct mvpp2_port *port = netdev_priv(dev); | |
4350 | int ret; | |
4351 | ||
4352 | if (!mvpp22_rss_is_supported()) | |
4353 | return -EOPNOTSUPP; | |
4354 | ||
4355 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) | |
4356 | return -EOPNOTSUPP; | |
4357 | ||
4358 | if (key) | |
4359 | return -EOPNOTSUPP; | |
4360 | ||
4361 | if (delete) | |
4362 | return mvpp22_port_rss_ctx_delete(port, *rss_context); | |
4363 | ||
4364 | if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { | |
4365 | ret = mvpp22_port_rss_ctx_create(port, rss_context); | |
4366 | if (ret) | |
4367 | return ret; | |
4368 | } | |
4369 | ||
4370 | return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir); | |
4371 | } | |
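
The callback above implements the ethtool RSS-context contract: when the core passes ETH_RXFH_CONTEXT_ALLOC, the driver must allocate a fresh context and write the assigned id back through *rss_context. A minimal sketch of a caller honouring that contract (hypothetical helper, for illustration only, not part of the driver):

	static int example_rss_ctx_roundtrip(struct net_device *dev, const u32 *indir)
	{
		const struct ethtool_ops *ops = dev->ethtool_ops;
		u32 ctx = ETH_RXFH_CONTEXT_ALLOC;
		int ret;

		/* Allocate a context and program its indirection table */
		ret = ops->set_rxfh_context(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE,
					    &ctx, false);
		if (ret)
			return ret;

		/* ctx now holds a driver-assigned id (< MVPP22_N_RSS_TABLES) */

		/* Tear the context down again; indir/key are ignored on delete */
		return ops->set_rxfh_context(dev, NULL, NULL, ETH_RSS_HASH_NO_CHANGE,
					     &ctx, true);
	}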
3f518509 MW |
4372 | /* Device ops */ |
4373 | ||
4374 | static const struct net_device_ops mvpp2_netdev_ops = { | |
4375 | .ndo_open = mvpp2_open, | |
4376 | .ndo_stop = mvpp2_stop, | |
4377 | .ndo_start_xmit = mvpp2_tx, | |
4378 | .ndo_set_rx_mode = mvpp2_set_rx_mode, | |
4379 | .ndo_set_mac_address = mvpp2_set_mac_address, | |
4380 | .ndo_change_mtu = mvpp2_change_mtu, | |
4381 | .ndo_get_stats64 = mvpp2_get_stats64, | |
bd695a5f | 4382 | .ndo_do_ioctl = mvpp2_ioctl, |
56beda3d MC |
4383 | .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, |
4384 | .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, | |
4385 | .ndo_set_features = mvpp2_set_features, | |
3f518509 MW |
4386 | }; |
4387 | ||
4388 | static const struct ethtool_ops mvpp2_eth_tool_ops = { | |
078db9a3 JK |
4389 | .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
4390 | ETHTOOL_COALESCE_MAX_FRAMES, | |
4bb04326 | 4391 | .nway_reset = mvpp2_ethtool_nway_reset, |
dcd3e73a AT |
4392 | .get_link = ethtool_op_get_link, |
4393 | .set_coalesce = mvpp2_ethtool_set_coalesce, | |
4394 | .get_coalesce = mvpp2_ethtool_get_coalesce, | |
4395 | .get_drvinfo = mvpp2_ethtool_get_drvinfo, | |
4396 | .get_ringparam = mvpp2_ethtool_get_ringparam, | |
4397 | .set_ringparam = mvpp2_ethtool_set_ringparam, | |
4398 | .get_strings = mvpp2_ethtool_get_strings, | |
4399 | .get_ethtool_stats = mvpp2_ethtool_get_stats, | |
4400 | .get_sset_count = mvpp2_ethtool_get_sset_count, | |
4bb04326 AT |
4401 | .get_pauseparam = mvpp2_ethtool_get_pause_param, |
4402 | .set_pauseparam = mvpp2_ethtool_set_pause_param, | |
4403 | .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, | |
4404 | .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, | |
8179642b | 4405 | .get_rxnfc = mvpp2_ethtool_get_rxnfc, |
436d4fdb | 4406 | .set_rxnfc = mvpp2_ethtool_set_rxnfc, |
8179642b AT |
4407 | .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, |
4408 | .get_rxfh = mvpp2_ethtool_get_rxfh, | |
4409 | .set_rxfh = mvpp2_ethtool_set_rxfh, | |
895586d5 MC |
4410 | .get_rxfh_context = mvpp2_ethtool_get_rxfh_context, |
4411 | .set_rxfh_context = mvpp2_ethtool_set_rxfh_context, | |
3f518509 MW |
4412 | }; |
4413 | ||
213f428f TP |
4414 | /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that |
4415 | * had a single IRQ defined per port. |
4416 | */ | |
4417 | static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, | |
4418 | struct device_node *port_node) | |
591f4cfa TP |
4419 | { |
4420 | struct mvpp2_queue_vector *v = &port->qvecs[0]; | |
4421 | ||
4422 | v->first_rxq = 0; | |
4423 | v->nrxqs = port->nrxqs; | |
4424 | v->type = MVPP2_QUEUE_VECTOR_SHARED; | |
4425 | v->sw_thread_id = 0; | |
4426 | v->sw_thread_mask = *cpumask_bits(cpu_online_mask); | |
4427 | v->port = port; | |
4428 | v->irq = irq_of_parse_and_map(port_node, 0); | |
4429 | if (v->irq <= 0) | |
4430 | return -EINVAL; | |
4431 | netif_napi_add(port->dev, &v->napi, mvpp2_poll, | |
4432 | NAPI_POLL_WEIGHT); | |
4433 | ||
4434 | port->nqvecs = 1; | |
4435 | ||
4436 | return 0; | |
4437 | } | |
4438 | ||
213f428f TP |
4439 | static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, |
4440 | struct device_node *port_node) | |
4441 | { | |
e531f767 | 4442 | struct mvpp2 *priv = port->priv; |
213f428f TP |
4443 | struct mvpp2_queue_vector *v; |
4444 | int i, ret; | |
4445 | ||
e531f767 AT |
4446 | switch (queue_mode) { |
4447 | case MVPP2_QDIST_SINGLE_MODE: | |
4448 | port->nqvecs = priv->nthreads + 1; | |
4449 | break; | |
4450 | case MVPP2_QDIST_MULTI_MODE: | |
4451 | port->nqvecs = priv->nthreads; | |
4452 | break; | |
4453 | } | |
213f428f TP |
4454 | |
4455 | for (i = 0; i < port->nqvecs; i++) { | |
4456 | char irqname[16]; | |
4457 | ||
4458 | v = port->qvecs + i; | |
4459 | ||
4460 | v->port = port; | |
4461 | v->type = MVPP2_QUEUE_VECTOR_PRIVATE; | |
4462 | v->sw_thread_id = i; | |
4463 | v->sw_thread_mask = BIT(i); | |
4464 | ||
a9aac385 AT |
4465 | if (port->flags & MVPP2_F_DT_COMPAT) |
4466 | snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); | |
4467 | else | |
4468 | snprintf(irqname, sizeof(irqname), "hif%d", i); | |
213f428f TP |
4469 | |
4470 | if (queue_mode == MVPP2_QDIST_MULTI_MODE) { | |
3f136849 AT |
4471 | v->first_rxq = i; |
4472 | v->nrxqs = 1; | |
213f428f TP |
4473 | } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && |
4474 | i == (port->nqvecs - 1)) { | |
4475 | v->first_rxq = 0; | |
4476 | v->nrxqs = port->nrxqs; | |
4477 | v->type = MVPP2_QUEUE_VECTOR_SHARED; | |
a9aac385 AT |
4478 | |
4479 | if (port->flags & MVPP2_F_DT_COMPAT) | |
4480 | strncpy(irqname, "rx-shared", sizeof(irqname)); | |
213f428f TP |
4481 | } |
4482 | ||
a75edc7c MW |
4483 | if (port_node) |
4484 | v->irq = of_irq_get_byname(port_node, irqname); | |
4485 | else | |
4486 | v->irq = fwnode_irq_get(port->fwnode, i); | |
213f428f TP |
4487 | if (v->irq <= 0) { |
4488 | ret = -EINVAL; | |
4489 | goto err; | |
4490 | } | |
4491 | ||
4492 | netif_napi_add(port->dev, &v->napi, mvpp2_poll, | |
4493 | NAPI_POLL_WEIGHT); | |
4494 | } | |
4495 | ||
4496 | return 0; | |
4497 | ||
4498 | err: | |
4499 | for (i = 0; i < port->nqvecs; i++) | |
4500 | irq_dispose_mapping(port->qvecs[i].irq); | |
4501 | return ret; | |
4502 | } | |
4503 | ||
4504 | static int mvpp2_queue_vectors_init(struct mvpp2_port *port, | |
4505 | struct device_node *port_node) | |
4506 | { | |
4507 | if (port->has_tx_irqs) | |
4508 | return mvpp2_multi_queue_vectors_init(port, port_node); | |
4509 | else | |
4510 | return mvpp2_simple_queue_vectors_init(port, port_node); | |
4511 | } | |
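
For concreteness, with priv->nthreads == 4 the two queue modes above produce the following vector layouts (an illustration derived from the init code, not extra driver code):

	/* MVPP2_QDIST_MULTI_MODE: nqvecs = 4, all PRIVATE
	 *   qvecs[i]: irq "hif<i>", first_rxq = i, nrxqs = 1
	 *
	 * MVPP2_QDIST_SINGLE_MODE: nqvecs = 5
	 *   qvecs[0..3]: PRIVATE, Tx-done handling only (nrxqs = 0)
	 *   qvecs[4]:    SHARED, first_rxq = 0, nrxqs = port->nrxqs
	 */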
4512 | ||
591f4cfa TP |
4513 | static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) |
4514 | { | |
4515 | int i; | |
4516 | ||
4517 | for (i = 0; i < port->nqvecs; i++) | |
4518 | irq_dispose_mapping(port->qvecs[i].irq); | |
4519 | } | |
4520 | ||
4521 | /* Configure Rx queue group interrupt for this port */ | |
4522 | static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) | |
4523 | { | |
4524 | struct mvpp2 *priv = port->priv; | |
4525 | u32 val; | |
4526 | int i; | |
4527 | ||
4528 | if (priv->hw_version == MVPP21) { | |
4529 | mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), | |
4530 | port->nrxqs); | |
4531 | return; | |
4532 | } | |
4533 | ||
4534 | /* Handle the more complicated PPv2.2 case */ | |
4535 | for (i = 0; i < port->nqvecs; i++) { | |
4536 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
4537 | ||
4538 | if (!qv->nrxqs) | |
4539 | continue; | |
4540 | ||
4541 | val = qv->sw_thread_id; | |
4542 | val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; | |
4543 | mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); | |
4544 | ||
4545 | val = qv->first_rxq; | |
4546 | val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; | |
4547 | mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); | |
4548 | } | |
4549 | } | |
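
Each loop iteration above programs one (port, sw_thread) pair: the first write selects the group through the index register, the second describes the Rx queue span owned by that vector. For a PRIVATE vector i in multi-queue mode this reduces to (symbolic illustration, following the vector init code earlier):

	/* index  = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET) | i
	 * config = i | (1 << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET)
	 *   i.e. first_rxq = i, nrxqs = 1
	 */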
4550 | ||
3f518509 MW |
4551 | /* Initialize port HW */ |
4552 | static int mvpp2_port_init(struct mvpp2_port *port) | |
4553 | { | |
4554 | struct device *dev = port->dev->dev.parent; | |
4555 | struct mvpp2 *priv = port->priv; | |
4556 | struct mvpp2_txq_pcpu *txq_pcpu; | |
074c74df | 4557 | unsigned int thread; |
9bea6897 | 4558 | int queue, err; |
3f518509 | 4559 | |
09f83975 TP |
4560 | /* Check for hardware constraints */ |
4561 | if (port->first_rxq + port->nrxqs > | |
59b9a31e | 4562 | MVPP2_MAX_PORTS * priv->max_port_rxqs) |
3f518509 MW |
4563 | return -EINVAL; |
4564 | ||
3f136849 | 4565 | if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) |
09f83975 TP |
4566 | return -EINVAL; |
4567 | ||
3f518509 MW |
4568 | /* Disable port */ |
4569 | mvpp2_egress_disable(port); | |
4570 | mvpp2_port_disable(port); | |
4571 | ||
213f428f TP |
4572 | port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; |
4573 | ||
09f83975 | 4574 | port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), |
3f518509 MW |
4575 | GFP_KERNEL); |
4576 | if (!port->txqs) | |
4577 | return -ENOMEM; | |
4578 | ||
4579 | /* Associate physical Tx queues with this port and initialize them. |
4580 | * The mapping is predefined. | |
4581 | */ | |
09f83975 | 4582 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
4583 | int queue_phy_id = mvpp2_txq_phys(port->id, queue); |
4584 | struct mvpp2_tx_queue *txq; | |
4585 | ||
4586 | txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); | |
177c8d1c CJ |
4587 | if (!txq) { |
4588 | err = -ENOMEM; | |
4589 | goto err_free_percpu; | |
4590 | } | |
3f518509 MW |
4591 | |
4592 | txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); | |
4593 | if (!txq->pcpu) { | |
4594 | err = -ENOMEM; | |
4595 | goto err_free_percpu; | |
4596 | } | |
4597 | ||
4598 | txq->id = queue_phy_id; | |
4599 | txq->log_id = queue; | |
4600 | txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; | |
e531f767 | 4601 | for (thread = 0; thread < priv->nthreads; thread++) { |
074c74df AT |
4602 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
4603 | txq_pcpu->thread = thread; | |
3f518509 MW |
4604 | } |
4605 | ||
4606 | port->txqs[queue] = txq; | |
4607 | } | |
4608 | ||
09f83975 | 4609 | port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), |
3f518509 MW |
4610 | GFP_KERNEL); |
4611 | if (!port->rxqs) { | |
4612 | err = -ENOMEM; | |
4613 | goto err_free_percpu; | |
4614 | } | |
4615 | ||
4616 | /* Allocate and initialize Rx queues for this port */ |
09f83975 | 4617 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
4618 | struct mvpp2_rx_queue *rxq; |
4619 | ||
4620 | /* Map physical Rx queue to port's logical Rx queue */ | |
4621 | rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); | |
d82b0c21 JZ |
4622 | if (!rxq) { |
4623 | err = -ENOMEM; | |
3f518509 | 4624 | goto err_free_percpu; |
d82b0c21 | 4625 | } |
3f518509 MW |
4626 | /* Map this Rx queue to a physical queue */ |
4627 | rxq->id = port->first_rxq + queue; | |
4628 | rxq->port = port->id; | |
4629 | rxq->logic_rxq = queue; | |
4630 | ||
4631 | port->rxqs[queue] = rxq; | |
4632 | } | |
4633 | ||
591f4cfa | 4634 | mvpp2_rx_irqs_setup(port); |
3f518509 MW |
4635 | |
4636 | /* Create Rx descriptor rings */ | |
09f83975 | 4637 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
4638 | struct mvpp2_rx_queue *rxq = port->rxqs[queue]; |
4639 | ||
4640 | rxq->size = port->rx_ring_size; | |
4641 | rxq->pkts_coal = MVPP2_RX_COAL_PKTS; | |
4642 | rxq->time_coal = MVPP2_RX_COAL_USEC; | |
4643 | } | |
4644 | ||
4645 | mvpp2_ingress_disable(port); | |
4646 | ||
4647 | /* Port default configuration */ | |
4648 | mvpp2_defaults_set(port); | |
4649 | ||
4650 | /* Port's classifier configuration */ | |
4651 | mvpp2_cls_oversize_rxq_set(port); | |
4652 | mvpp2_cls_port_config(port); | |
4653 | ||
e6e21c02 | 4654 | if (mvpp22_rss_is_supported()) |
6310f77d | 4655 | mvpp22_port_rss_init(port); |
e6e21c02 | 4656 | |
3f518509 MW |
4657 | /* Provide an initial Rx packet size */ |
4658 | port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); | |
4659 | ||
4660 | /* Initialize pools for swf */ | |
4661 | err = mvpp2_swf_bm_pool_init(port); | |
4662 | if (err) | |
4663 | goto err_free_percpu; | |
4664 | ||
9bea6897 MC |
4665 | /* Clear all port stats */ |
4666 | mvpp2_read_stats(port); | |
4667 | memset(port->ethtool_stats, 0, | |
4668 | MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); | |
6410c139 | 4669 | |
3f518509 MW |
4670 | return 0; |
4671 | ||
4672 | err_free_percpu: | |
09f83975 | 4673 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
4674 | if (!port->txqs[queue]) |
4675 | continue; | |
4676 | free_percpu(port->txqs[queue]->pcpu); | |
4677 | } | |
4678 | return err; | |
4679 | } | |
4680 | ||
a9aac385 AT |
4681 | static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, |
4682 | unsigned long *flags) | |
4683 | { | |
4684 | char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", | |
4685 | "tx-cpu3" }; | |
4686 | int i; | |
4687 | ||
4688 | for (i = 0; i < 5; i++) | |
4689 | if (of_property_match_string(port_node, "interrupt-names", | |
4690 | irqs[i]) < 0) | |
4691 | return false; | |
4692 | ||
4693 | *flags |= MVPP2_F_DT_COMPAT; | |
4694 | return true; | |
4695 | } | |
4696 | ||
4697 | /* Check if the port DT description has the required Tx interrupts: |
4698 | * - PPv2.1: there are no such interrupts. |
4699 | * - PPv2.2: |
4700 | * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0..3] |
4701 | * - The new ones have: "hifX" with X in [0..8] |
4702 | * |
4703 | * All these variants are supported to keep backward compatibility. |
213f428f | 4704 | */ |
a9aac385 AT |
4705 | static bool mvpp2_port_has_irqs(struct mvpp2 *priv, |
4706 | struct device_node *port_node, | |
4707 | unsigned long *flags) | |
213f428f | 4708 | { |
a9aac385 AT |
4709 | char name[5]; |
4710 | int i; | |
213f428f | 4711 | |
fd4a1056 AT |
4712 | /* ACPI */ |
4713 | if (!port_node) | |
4714 | return true; | |
4715 | ||
213f428f TP |
4716 | if (priv->hw_version == MVPP21) |
4717 | return false; | |
4718 | ||
a9aac385 AT |
4719 | if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) |
4720 | return true; | |
4721 | ||
4722 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { | |
4723 | snprintf(name, 5, "hif%d", i); | |
4724 | if (of_property_match_string(port_node, "interrupt-names", | |
4725 | name) < 0) | |
213f428f TP |
4726 | return false; |
4727 | } | |
4728 | ||
4729 | return true; | |
4730 | } | |
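
As an illustration of the two bindings probed above (hypothetical DT fragments, not taken from a real board file):

	/* Legacy PPv2.2 binding:
	 *   interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
	 *                     "tx-cpu2", "tx-cpu3";
	 * Current binding:
	 *   interrupt-names = "hif0", "hif1", ..., "hif8";
	 */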
4731 | ||
3ba8c81e | 4732 | static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, |
24812221 | 4733 | struct fwnode_handle *fwnode, |
3ba8c81e AT |
4734 | char **mac_from) |
4735 | { | |
4736 | struct mvpp2_port *port = netdev_priv(dev); | |
4737 | char hw_mac_addr[ETH_ALEN] = {0}; | |
24812221 | 4738 | char fw_mac_addr[ETH_ALEN]; |
3ba8c81e | 4739 | |
24812221 MW |
4740 | if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { |
4741 | *mac_from = "firmware node"; | |
4742 | ether_addr_copy(dev->dev_addr, fw_mac_addr); | |
688cbaf2 AT |
4743 | return; |
4744 | } | |
d2a6e48e | 4745 | |
688cbaf2 AT |
4746 | if (priv->hw_version == MVPP21) { |
4747 | mvpp21_get_mac_address(port, hw_mac_addr); | |
4748 | if (is_valid_ether_addr(hw_mac_addr)) { | |
4749 | *mac_from = "hardware"; | |
4750 | ether_addr_copy(dev->dev_addr, hw_mac_addr); | |
4751 | return; | |
4752 | } | |
3ba8c81e | 4753 | } |
688cbaf2 AT |
4754 | |
4755 | *mac_from = "random"; | |
4756 | eth_hw_addr_random(dev); | |
3ba8c81e AT |
4757 | } |
4758 | ||
44cc27e4 | 4759 | static void mvpp2_phylink_validate(struct phylink_config *config, |
4bb04326 AT |
4760 | unsigned long *supported, |
4761 | struct phylink_link_state *state) | |
4762 | { | |
44cc27e4 IC |
4763 | struct mvpp2_port *port = container_of(config, struct mvpp2_port, |
4764 | phylink_config); | |
4bb04326 AT |
4765 | __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
4766 | ||
0fb628f0 BS |
4767 | /* Invalid combinations */ |
4768 | switch (state->interface) { | |
e0f909bc | 4769 | case PHY_INTERFACE_MODE_10GBASER: |
0fb628f0 BS |
4770 | case PHY_INTERFACE_MODE_XAUI: |
4771 | if (port->gop_id != 0) | |
4772 | goto empty_set; | |
4773 | break; | |
4774 | case PHY_INTERFACE_MODE_RGMII: | |
4775 | case PHY_INTERFACE_MODE_RGMII_ID: | |
4776 | case PHY_INTERFACE_MODE_RGMII_RXID: | |
4777 | case PHY_INTERFACE_MODE_RGMII_TXID: | |
8b318f30 | 4778 | if (port->priv->hw_version == MVPP22 && port->gop_id == 0) |
0fb628f0 BS |
4779 | goto empty_set; |
4780 | break; | |
4781 | default: | |
4782 | break; | |
4783 | } | |
4784 | ||
4bb04326 AT |
4785 | phylink_set(mask, Autoneg); |
4786 | phylink_set_port_modes(mask); | |
4787 | phylink_set(mask, Pause); | |
4788 | phylink_set(mask, Asym_Pause); | |
4789 | ||
d97c9f4a | 4790 | switch (state->interface) { |
e0f909bc | 4791 | case PHY_INTERFACE_MODE_10GBASER: |
0fb628f0 | 4792 | case PHY_INTERFACE_MODE_XAUI: |
01b3fd5a | 4793 | case PHY_INTERFACE_MODE_NA: |
00679177 | 4794 | if (port->gop_id == 0) { |
1b451fb2 | 4795 | phylink_set(mask, 10000baseT_Full); |
00679177 AT |
4796 | phylink_set(mask, 10000baseCR_Full); |
4797 | phylink_set(mask, 10000baseSR_Full); | |
4798 | phylink_set(mask, 10000baseLR_Full); | |
4799 | phylink_set(mask, 10000baseLRM_Full); | |
4800 | phylink_set(mask, 10000baseER_Full); | |
4801 | phylink_set(mask, 10000baseKR_Full); | |
4802 | } | |
ef8e0b80 RK |
4803 | if (state->interface != PHY_INTERFACE_MODE_NA) |
4804 | break; | |
d97c9f4a | 4805 | /* Fall-through */ |
0fb628f0 BS |
4806 | case PHY_INTERFACE_MODE_RGMII: |
4807 | case PHY_INTERFACE_MODE_RGMII_ID: | |
4808 | case PHY_INTERFACE_MODE_RGMII_RXID: | |
4809 | case PHY_INTERFACE_MODE_RGMII_TXID: | |
4810 | case PHY_INTERFACE_MODE_SGMII: | |
d97c9f4a AT |
4811 | phylink_set(mask, 10baseT_Half); |
4812 | phylink_set(mask, 10baseT_Full); | |
4813 | phylink_set(mask, 100baseT_Half); | |
4814 | phylink_set(mask, 100baseT_Full); | |
ef8e0b80 RK |
4815 | phylink_set(mask, 1000baseT_Full); |
4816 | phylink_set(mask, 1000baseX_Full); | |
4817 | if (state->interface != PHY_INTERFACE_MODE_NA) | |
4818 | break; | |
d97c9f4a AT |
4819 | /* Fall-through */ |
4820 | case PHY_INTERFACE_MODE_1000BASEX: | |
a6fe31de | 4821 | case PHY_INTERFACE_MODE_2500BASEX: |
ef8e0b80 RK |
4822 | if (port->comphy || |
4823 | state->interface != PHY_INTERFACE_MODE_2500BASEX) { | |
4824 | phylink_set(mask, 1000baseT_Full); | |
4825 | phylink_set(mask, 1000baseX_Full); | |
4826 | } | |
4827 | if (port->comphy || | |
4828 | state->interface == PHY_INTERFACE_MODE_2500BASEX) { | |
4829 | phylink_set(mask, 2500baseT_Full); | |
4830 | phylink_set(mask, 2500baseX_Full); | |
4831 | } | |
0fb628f0 BS |
4832 | break; |
4833 | default: | |
4834 | goto empty_set; | |
4bb04326 AT |
4835 | } |
4836 | ||
4837 | bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); | |
4838 | bitmap_and(state->advertising, state->advertising, mask, | |
4839 | __ETHTOOL_LINK_MODE_MASK_NBITS); | |
ef8e0b80 RK |
4840 | |
4841 | phylink_helper_basex_speed(state); | |
0fb628f0 BS |
4842 | return; |
4843 | ||
4844 | empty_set: | |
4845 | bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); | |
4bb04326 AT |
4846 | } |
4847 | ||
d46b7e4f RK |
4848 | static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port, |
4849 | struct phylink_link_state *state) | |
4bb04326 AT |
4850 | { |
4851 | u32 val; | |
4852 | ||
4853 | state->speed = SPEED_10000; | |
4854 | state->duplex = DUPLEX_FULL; |
4855 | state->an_complete = 1; | |
4856 | ||
4857 | val = readl(port->base + MVPP22_XLG_STATUS); | |
4858 | state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); | |
4859 | ||
4860 | state->pause = 0; | |
4861 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); | |
4862 | if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) | |
4863 | state->pause |= MLO_PAUSE_TX; | |
4864 | if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) | |
4865 | state->pause |= MLO_PAUSE_RX; | |
4866 | } | |
4867 | ||
d46b7e4f RK |
4868 | static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port, |
4869 | struct phylink_link_state *state) | |
4bb04326 AT |
4870 | { |
4871 | u32 val; | |
4872 | ||
4873 | val = readl(port->base + MVPP2_GMAC_STATUS0); | |
4874 | ||
4875 | state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); | |
4876 | state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); | |
4877 | state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); | |
4878 | ||
d97c9f4a AT |
4879 | switch (port->phy_interface) { |
4880 | case PHY_INTERFACE_MODE_1000BASEX: | |
4bb04326 | 4881 | state->speed = SPEED_1000; |
d97c9f4a | 4882 | break; |
a6fe31de AT |
4883 | case PHY_INTERFACE_MODE_2500BASEX: |
4884 | state->speed = SPEED_2500; | |
4885 | break; | |
d97c9f4a AT |
4886 | default: |
4887 | if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) | |
4888 | state->speed = SPEED_1000; | |
4889 | else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) | |
4890 | state->speed = SPEED_100; | |
4891 | else | |
4892 | state->speed = SPEED_10; | |
4893 | } | |
4bb04326 AT |
4894 | |
4895 | state->pause = 0; | |
4896 | if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) | |
4897 | state->pause |= MLO_PAUSE_RX; | |
4898 | if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) | |
4899 | state->pause |= MLO_PAUSE_TX; | |
4900 | } | |
4901 | ||
d46b7e4f RK |
4902 | static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config, |
4903 | struct phylink_link_state *state) | |
4bb04326 | 4904 | { |
44cc27e4 IC |
4905 | struct mvpp2_port *port = container_of(config, struct mvpp2_port, |
4906 | phylink_config); | |
4bb04326 AT |
4907 | |
4908 | if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { | |
4909 | u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); | |
4910 | mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; | |
4911 | ||
4912 | if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { | |
d46b7e4f RK |
4913 | mvpp22_xlg_pcs_get_state(port, state); |
4914 | return; | |
4bb04326 AT |
4915 | } |
4916 | } | |
4917 | ||
d46b7e4f | 4918 | mvpp2_gmac_pcs_get_state(port, state); |
4bb04326 AT |
4919 | } |
4920 | ||
44cc27e4 | 4921 | static void mvpp2_mac_an_restart(struct phylink_config *config) |
4bb04326 | 4922 | { |
44cc27e4 IC |
4923 | struct mvpp2_port *port = container_of(config, struct mvpp2_port, |
4924 | phylink_config); | |
a4650477 | 4925 | u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
4bb04326 | 4926 | |
a4650477 RK |
4927 | writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, |
4928 | port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
4929 | writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, | |
4930 | port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
4bb04326 AT |
4931 | } |
4932 | ||
4933 | static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, | |
4934 | const struct phylink_link_state *state) | |
4935 | { | |
f17e70d2 AT |
4936 | u32 old_ctrl0, ctrl0; |
4937 | u32 old_ctrl4, ctrl4; | |
4bb04326 | 4938 | |
f17e70d2 AT |
4939 | old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); |
4940 | old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); | |
4bb04326 | 4941 | |
649e51d5 AT |
4942 | ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS; |
4943 | ||
4bb04326 AT |
4944 | if (state->pause & MLO_PAUSE_TX) |
4945 | ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; | |
e240b7db RK |
4946 | else |
4947 | ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; | |
4948 | ||
4bb04326 AT |
4949 | if (state->pause & MLO_PAUSE_RX) |
4950 | ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; | |
e240b7db RK |
4951 | else |
4952 | ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; | |
4bb04326 | 4953 | |
bba18318 MC |
4954 | ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | |
4955 | MVPP22_XLG_CTRL4_EN_IDLE_CHECK); | |
4956 | ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; | |
4bb04326 | 4957 | |
f17e70d2 AT |
4958 | if (old_ctrl0 != ctrl0) |
4959 | writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); | |
4960 | if (old_ctrl4 != ctrl4) | |
4961 | writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); | |
649e51d5 AT |
4962 | |
4963 | if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) { | |
4964 | while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) & | |
4965 | MVPP22_XLG_CTRL0_MAC_RESET_DIS)) | |
4966 | continue; | |
4967 | } | |
4bb04326 AT |
4968 | } |
4969 | ||
4970 | static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, | |
4971 | const struct phylink_link_state *state) | |
4972 | { | |
d14e078f RK |
4973 | u32 old_an, an; |
4974 | u32 old_ctrl0, ctrl0; | |
4975 | u32 old_ctrl2, ctrl2; | |
4976 | u32 old_ctrl4, ctrl4; | |
4bb04326 | 4977 | |
d14e078f RK |
4978 | old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
4979 | old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); | |
4980 | old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); | |
4981 | old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); | |
4bb04326 | 4982 | |
24cb72df | 4983 | an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | |
4bb04326 | 4984 | MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | |
24cb72df RK |
4985 | MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG | |
4986 | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS); | |
4bb04326 | 4987 | ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; |
388ca27f RK |
4988 | ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK | |
4989 | MVPP2_GMAC_PCS_ENABLE_MASK); | |
4bb04326 | 4990 | |
388ca27f | 4991 | /* Configure port type */ |
4a4cec72 | 4992 | if (phy_interface_mode_is_8023z(state->interface)) { |
388ca27f RK |
4993 | ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; |
4994 | ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; | |
4995 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | | |
4996 | MVPP22_CTRL4_DP_CLK_SEL | | |
4997 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; | |
4998 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { | |
4999 | ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; | |
5000 | ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; | |
5001 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | | |
5002 | MVPP22_CTRL4_DP_CLK_SEL | | |
5003 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; | |
5004 | } else if (phy_interface_mode_is_rgmii(state->interface)) { | |
5005 | ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; | |
5006 | ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | | |
5007 | MVPP22_CTRL4_SYNC_BYPASS_DIS | | |
5008 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; | |
d97c9f4a | 5009 | } |
4bb04326 | 5010 | |
388ca27f | 5011 | /* Configure advertisement bits */ |
4bb04326 AT |
5012 | if (phylink_test(state->advertising, Pause)) |
5013 | an |= MVPP2_GMAC_FC_ADV_EN; | |
5014 | if (phylink_test(state->advertising, Asym_Pause)) | |
5015 | an |= MVPP2_GMAC_FC_ADV_ASM_EN; | |
5016 | ||
388ca27f RK |
5017 | /* Configure negotiation style */ |
5018 | if (!phylink_autoneg_inband(mode)) { | |
24cb72df RK |
5019 | /* Phy or fixed speed - no in-band AN, nothing to do, leave the |
5020 | * configured speed, duplex and flow control as-is. | |
5021 | */ | |
388ca27f RK |
5022 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
5023 | /* SGMII in-band mode receives the speed and duplex from | |
5024 | * the PHY. Flow control information is not received. */ | |
24cb72df RK |
5025 | an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | |
5026 | MVPP2_GMAC_FORCE_LINK_PASS | | |
5027 | MVPP2_GMAC_CONFIG_MII_SPEED | | |
5028 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
5029 | MVPP2_GMAC_CONFIG_FULL_DUPLEX); | |
388ca27f RK |
5030 | an |= MVPP2_GMAC_IN_BAND_AUTONEG | |
5031 | MVPP2_GMAC_AN_SPEED_EN | | |
5032 | MVPP2_GMAC_AN_DUPLEX_EN; | |
388ca27f RK |
5033 | } else if (phy_interface_mode_is_8023z(state->interface)) { |
5034 | /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can | |
5035 | * they negotiate duplex: they are always operating with a fixed | |
5036 | * speed of 1000/2500Mbps in full duplex, so force 1000/2500 | |
5037 | * speed and full duplex here. | |
5038 | */ | |
5039 | ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; | |
24cb72df RK |
5040 | an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | |
5041 | MVPP2_GMAC_FORCE_LINK_PASS | | |
5042 | MVPP2_GMAC_CONFIG_MII_SPEED | | |
5043 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
5044 | MVPP2_GMAC_CONFIG_FULL_DUPLEX); | |
d14e078f RK |
5045 | an |= MVPP2_GMAC_IN_BAND_AUTONEG | |
5046 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
388ca27f | 5047 | MVPP2_GMAC_CONFIG_FULL_DUPLEX; |
4bb04326 | 5048 | |
24cb72df | 5049 | if (state->pause & MLO_PAUSE_AN && state->an_enabled) |
388ca27f | 5050 | an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; |
4bb04326 AT |
5051 | } |
5052 | ||
9a490e34 AT |
5053 | /* Some fields of the auto-negotiation register require the port to be down when |
5054 | * their value is updated. | |
5055 | */ | |
5056 | #define MVPP2_GMAC_AN_PORT_DOWN_MASK \ | |
5057 | (MVPP2_GMAC_IN_BAND_AUTONEG | \ | |
5058 | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \ | |
5059 | MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \ | |
5060 | MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \ | |
5061 | MVPP2_GMAC_AN_DUPLEX_EN) | |
5062 | ||
d14e078f RK |
5063 | if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK || |
5064 | (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK || | |
9a490e34 | 5065 | (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) { |
d14e078f RK |
5066 | /* Force link down */ |
5067 | old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS; | |
5068 | old_an |= MVPP2_GMAC_FORCE_LINK_DOWN; | |
5069 | writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5070 | ||
5071 | /* Set the GMAC in a reset state - do this in a way that | |
5072 | * ensures we clear it below. | |
5073 | */ | |
5074 | old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; | |
5075 | writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); | |
5076 | } | |
5077 | ||
5078 | if (old_ctrl0 != ctrl0) | |
5079 | writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); | |
5080 | if (old_ctrl2 != ctrl2) | |
5081 | writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); | |
5082 | if (old_ctrl4 != ctrl4) | |
5083 | writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); | |
5084 | if (old_an != an) | |
5085 | writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
316734fd RK |
5086 | |
5087 | if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { | |
5088 | while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & | |
5089 | MVPP2_GMAC_PORT_RESET_MASK) | |
5090 | continue; | |
5091 | } | |
4bb04326 AT |
5092 | } |
5093 | ||
44cc27e4 | 5094 | static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, |
4bb04326 AT |
5095 | const struct phylink_link_state *state) |
5096 | { | |
44cc27e4 | 5097 | struct net_device *dev = to_net_dev(config->dev); |
4bb04326 | 5098 | struct mvpp2_port *port = netdev_priv(dev); |
bf2fa125 | 5099 | bool change_interface = port->phy_interface != state->interface; |
4bb04326 AT |
5100 | |
5101 | /* Check for invalid configuration */ | |
1d9b041e | 5102 | if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { |
4bb04326 AT |
5103 | netdev_err(dev, "Invalid mode on %s\n", dev->name); |
5104 | return; | |
5105 | } | |
5106 | ||
4bb04326 AT |
5107 | /* Make sure the port is disabled when reconfiguring the mode */ |
5108 | mvpp2_port_disable(port); | |
1970ee96 | 5109 | |
d78a1809 | 5110 | if (port->priv->hw_version == MVPP22 && change_interface) { |
bf2fa125 | 5111 | mvpp22_gop_mask_irq(port); |
4bb04326 | 5112 | |
d78a1809 | 5113 | port->phy_interface = state->interface; |
4bb04326 | 5114 | |
d78a1809 AT |
5115 | /* Reconfigure the serdes lanes */ |
5116 | phy_power_off(port->comphy); | |
5117 | mvpp22_mode_reconfigure(port); | |
4bb04326 AT |
5118 | } |
5119 | ||
5120 | /* mac (re)configuration */ | |
1d9b041e | 5121 | if (mvpp2_is_xlg(state->interface)) |
4bb04326 AT |
5122 | mvpp2_xlg_config(port, mode, state); |
5123 | else if (phy_interface_mode_is_rgmii(state->interface) || | |
4a4cec72 RK |
5124 | phy_interface_mode_is_8023z(state->interface) || |
5125 | state->interface == PHY_INTERFACE_MODE_SGMII) | |
4bb04326 AT |
5126 | mvpp2_gmac_config(port, mode, state); |
5127 | ||
5128 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) | |
5129 | mvpp2_port_loopback_set(port, state); | |
5130 | ||
d78a1809 | 5131 | if (port->priv->hw_version == MVPP22 && change_interface) |
bf2fa125 RK |
5132 | mvpp22_gop_unmask_irq(port); |
5133 | ||
41948ccb | 5134 | mvpp2_port_enable(port); |
4bb04326 AT |
5135 | } |
5136 | ||
91a208f2 RK |
5137 | static void mvpp2_mac_link_up(struct phylink_config *config, |
5138 | struct phy_device *phy, | |
5139 | unsigned int mode, phy_interface_t interface, | |
5140 | int speed, int duplex, | |
5141 | bool tx_pause, bool rx_pause) | |
4bb04326 | 5142 | { |
44cc27e4 | 5143 | struct net_device *dev = to_net_dev(config->dev); |
4bb04326 AT |
5144 | struct mvpp2_port *port = netdev_priv(dev); |
5145 | u32 val; | |
5146 | ||
24cb72df RK |
5147 | if (mvpp2_is_xlg(interface)) { |
5148 | if (!phylink_autoneg_inband(mode)) { | |
1970ee96 AT |
5149 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
5150 | val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; | |
5151 | val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS; | |
5152 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
24cb72df RK |
5153 | } |
5154 | } else { | |
5155 | if (!phylink_autoneg_inband(mode)) { | |
1970ee96 | 5156 | val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
24cb72df RK |
5157 | val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | |
5158 | MVPP2_GMAC_CONFIG_MII_SPEED | | |
5159 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
5160 | MVPP2_GMAC_CONFIG_FULL_DUPLEX); | |
1970ee96 | 5161 | val |= MVPP2_GMAC_FORCE_LINK_PASS; |
24cb72df RK |
5162 | |
5163 | if (speed == SPEED_1000 || speed == SPEED_2500) | |
5164 | val |= MVPP2_GMAC_CONFIG_GMII_SPEED; | |
5165 | else if (speed == SPEED_100) | |
5166 | val |= MVPP2_GMAC_CONFIG_MII_SPEED; | |
5167 | ||
5168 | if (duplex == DUPLEX_FULL) | |
5169 | val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; | |
5170 | ||
1970ee96 AT |
5171 | writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
5172 | } | |
24cb72df RK |
5173 | |
5174 | /* We can always update the flow control enable bits; | |
5175 | * these will only be effective if flow control AN | |
5176 | * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. | |
5177 | */ | |
5178 | val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); | |
5179 | val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); | |
5180 | if (tx_pause) | |
5181 | val |= MVPP22_CTRL4_TX_FC_EN; | |
5182 | if (rx_pause) | |
5183 | val |= MVPP22_CTRL4_RX_FC_EN; | |
5184 | writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); | |
4bb04326 AT |
5185 | } |
5186 | ||
5187 | mvpp2_port_enable(port); | |
5188 | ||
5189 | mvpp2_egress_enable(port); | |
5190 | mvpp2_ingress_enable(port); | |
5191 | netif_tx_wake_all_queues(dev); | |
5192 | } | |
5193 | ||
44cc27e4 IC |
5194 | static void mvpp2_mac_link_down(struct phylink_config *config, |
5195 | unsigned int mode, phy_interface_t interface) | |
4bb04326 | 5196 | { |
44cc27e4 | 5197 | struct net_device *dev = to_net_dev(config->dev); |
4bb04326 AT |
5198 | struct mvpp2_port *port = netdev_priv(dev); |
5199 | u32 val; | |
5200 | ||
1970ee96 AT |
5201 | if (!phylink_autoneg_inband(mode)) { |
5202 | if (mvpp2_is_xlg(interface)) { | |
5203 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); | |
5204 | val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; | |
5205 | val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; | |
5206 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
5207 | } else { | |
5208 | val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5209 | val &= ~MVPP2_GMAC_FORCE_LINK_PASS; | |
5210 | val |= MVPP2_GMAC_FORCE_LINK_DOWN; | |
5211 | writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5212 | } | |
4bb04326 AT |
5213 | } |
5214 | ||
5215 | netif_tx_stop_all_queues(dev); | |
5216 | mvpp2_egress_disable(port); | |
5217 | mvpp2_ingress_disable(port); | |
5218 | ||
4bb04326 AT |
5219 | mvpp2_port_disable(port); |
5220 | } | |
5221 | ||
5222 | static const struct phylink_mac_ops mvpp2_phylink_ops = { | |
5223 | .validate = mvpp2_phylink_validate, | |
d46b7e4f | 5224 | .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state, |
4bb04326 AT |
5225 | .mac_an_restart = mvpp2_mac_an_restart, |
5226 | .mac_config = mvpp2_mac_config, | |
5227 | .mac_link_up = mvpp2_mac_link_up, | |
5228 | .mac_link_down = mvpp2_mac_link_down, | |
5229 | }; | |
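
/* Note on call order (a summary of the phylink contract, not extra driver
 * code): phylink calls .validate to narrow the supported link modes,
 * .mac_config (with the MAC quiesced) whenever the mode or interface
 * changes, and .mac_link_up/.mac_link_down to toggle the datapath;
 * .mac_pcs_get_state and .mac_an_restart back the in-band
 * autonegotiation path.
 */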
5230 | ||
3f518509 MW |
5231 | /* Ports initialization */ |
5232 | static int mvpp2_port_probe(struct platform_device *pdev, | |
24812221 | 5233 | struct fwnode_handle *port_fwnode, |
bf147153 | 5234 | struct mvpp2 *priv) |
3f518509 | 5235 | { |
a75edc7c | 5236 | struct phy *comphy = NULL; |
3f518509 | 5237 | struct mvpp2_port *port; |
edc660fa | 5238 | struct mvpp2_port_pcpu *port_pcpu; |
24812221 | 5239 | struct device_node *port_node = to_of_node(port_fwnode); |
c9dbb6cf | 5240 | netdev_features_t features; |
3f518509 | 5241 | struct net_device *dev; |
4bb04326 | 5242 | struct phylink *phylink; |
3ba8c81e | 5243 | char *mac_from = ""; |
074c74df | 5244 | unsigned int ntxqs, nrxqs, thread; |
a9aac385 | 5245 | unsigned long flags = 0; |
213f428f | 5246 | bool has_tx_irqs; |
3f518509 | 5247 | u32 id; |
3f518509 | 5248 | int phy_mode; |
850623b3 | 5249 | int err, i; |
3f518509 | 5250 | |
fd4a1056 AT |
5251 | has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); |
5252 | if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { | |
5253 | dev_err(&pdev->dev, | |
5254 | "not enough IRQs to support multi queue mode\n"); | |
5255 | return -EINVAL; | |
a75edc7c | 5256 | } |
213f428f | 5257 | |
09f83975 | 5258 | ntxqs = MVPP2_MAX_TXQ; |
7d04b0b1 | 5259 | nrxqs = mvpp2_get_nrxqs(priv); |
09f83975 TP |
5260 | |
5261 | dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); | |
3f518509 MW |
5262 | if (!dev) |
5263 | return -ENOMEM; | |
5264 | ||
24812221 | 5265 | phy_mode = fwnode_get_phy_mode(port_fwnode); |
3f518509 MW |
5266 | if (phy_mode < 0) { |
5267 | dev_err(&pdev->dev, "incorrect phy mode\n"); | |
5268 | err = phy_mode; | |
5269 | goto err_free_netdev; | |
5270 | } | |
5271 | ||
e0f909bc RK |
5272 | /* |
5273 | * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. | |
5274 | * Existing usage of 10GBASE-KR is not correct; no backplane | |
5275 | * negotiation is done, and this driver does not actually support | |
5276 | * 10GBASE-KR. | |
5277 | */ | |
5278 | if (phy_mode == PHY_INTERFACE_MODE_10GKR) | |
5279 | phy_mode = PHY_INTERFACE_MODE_10GBASER; | |
5280 | ||
a75edc7c MW |
5281 | if (port_node) { |
5282 | comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); | |
5283 | if (IS_ERR(comphy)) { | |
5284 | if (PTR_ERR(comphy) == -EPROBE_DEFER) { | |
5285 | err = -EPROBE_DEFER; | |
5286 | goto err_free_netdev; | |
5287 | } | |
5288 | comphy = NULL; | |
542897d9 | 5289 | } |
542897d9 AT |
5290 | } |
5291 | ||
24812221 | 5292 | if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { |
3f518509 MW |
5293 | err = -EINVAL; |
5294 | dev_err(&pdev->dev, "missing port-id value\n"); | |
5295 | goto err_free_netdev; | |
5296 | } | |
5297 | ||
7cf87e4a | 5298 | dev->tx_queue_len = MVPP2_MAX_TXD_MAX; |
3f518509 MW |
5299 | dev->watchdog_timeo = 5 * HZ; |
5300 | dev->netdev_ops = &mvpp2_netdev_ops; | |
5301 | dev->ethtool_ops = &mvpp2_eth_tool_ops; | |
5302 | ||
5303 | port = netdev_priv(dev); | |
591f4cfa | 5304 | port->dev = dev; |
a75edc7c | 5305 | port->fwnode = port_fwnode; |
4bb04326 | 5306 | port->has_phy = !!of_find_property(port_node, "phy", NULL); |
09f83975 TP |
5307 | port->ntxqs = ntxqs; |
5308 | port->nrxqs = nrxqs; | |
213f428f TP |
5309 | port->priv = priv; |
5310 | port->has_tx_irqs = has_tx_irqs; | |
a9aac385 | 5311 | port->flags = flags; |
3f518509 | 5312 | |
591f4cfa TP |
5313 | err = mvpp2_queue_vectors_init(port, port_node); |
5314 | if (err) | |
3f518509 | 5315 | goto err_free_netdev; |
3f518509 | 5316 | |
a75edc7c MW |
5317 | if (port_node) |
5318 | port->link_irq = of_irq_get_byname(port_node, "link"); | |
5319 | else | |
5320 | port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); | |
fd3651b2 AT |
5321 | if (port->link_irq == -EPROBE_DEFER) { |
5322 | err = -EPROBE_DEFER; | |
5323 | goto err_deinit_qvecs; | |
5324 | } | |
5325 | if (port->link_irq <= 0) | |
5326 | /* the link irq is optional */ | |
5327 | port->link_irq = 0; | |
5328 | ||
24812221 | 5329 | if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) |
3f518509 MW |
5330 | port->flags |= MVPP2_F_LOOPBACK; |
5331 | ||
3f518509 | 5332 | port->id = id; |
59b9a31e | 5333 | if (priv->hw_version == MVPP21) |
09f83975 | 5334 | port->first_rxq = port->id * port->nrxqs; |
59b9a31e TP |
5335 | else |
5336 | port->first_rxq = port->id * priv->max_port_rxqs; | |
5337 | ||
4bb04326 | 5338 | port->of_node = port_node; |
3f518509 | 5339 | port->phy_interface = phy_mode; |
542897d9 | 5340 | port->comphy = comphy; |
3f518509 | 5341 | |
a786841d | 5342 | if (priv->hw_version == MVPP21) { |
3230a55b | 5343 | port->base = devm_platform_ioremap_resource(pdev, 2 + id); |
a786841d TP |
5344 | if (IS_ERR(port->base)) { |
5345 | err = PTR_ERR(port->base); | |
fd3651b2 | 5346 | goto err_free_irq; |
a786841d | 5347 | } |
118d6298 MR |
5348 | |
5349 | port->stats_base = port->priv->lms_base + | |
5350 | MVPP21_MIB_COUNTERS_OFFSET + | |
5351 | port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; | |
a786841d | 5352 | } else { |
24812221 MW |
5353 | if (fwnode_property_read_u32(port_fwnode, "gop-port-id", |
5354 | &port->gop_id)) { | |
a786841d TP |
5355 | err = -EINVAL; |
5356 | dev_err(&pdev->dev, "missing gop-port-id value\n"); | |
591f4cfa | 5357 | goto err_deinit_qvecs; |
a786841d TP |
5358 | } |
5359 | ||
5360 | port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); | |
118d6298 MR |
5361 | port->stats_base = port->priv->iface_base + |
5362 | MVPP22_MIB_COUNTERS_OFFSET + | |
5363 | port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; | |
3f518509 MW |
5364 | } |
5365 | ||
118d6298 | 5366 | /* Alloc per-cpu and ethtool stats */ |
3f518509 MW |
5367 | port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); |
5368 | if (!port->stats) { | |
5369 | err = -ENOMEM; | |
fd3651b2 | 5370 | goto err_free_irq; |
3f518509 MW |
5371 | } |
5372 | ||
118d6298 | 5373 | port->ethtool_stats = devm_kcalloc(&pdev->dev, |
9bea6897 | 5374 | MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), |
118d6298 MR |
5375 | sizeof(u64), GFP_KERNEL); |
5376 | if (!port->ethtool_stats) { | |
5377 | err = -ENOMEM; | |
5378 | goto err_free_stats; | |
5379 | } | |
5380 | ||
e5c500eb MR |
5381 | mutex_init(&port->gather_stats_lock); |
5382 | INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); | |
5383 | ||
24812221 | 5384 | mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); |
3f518509 | 5385 | |
7cf87e4a YM |
5386 | port->tx_ring_size = MVPP2_MAX_TXD_DFLT; |
5387 | port->rx_ring_size = MVPP2_MAX_RXD_DFLT; | |
3f518509 MW |
5388 | SET_NETDEV_DEV(dev, &pdev->dev); |
5389 | ||
5390 | err = mvpp2_port_init(port); | |
5391 | if (err < 0) { | |
5392 | dev_err(&pdev->dev, "failed to init port %d\n", id); | |
5393 | goto err_free_stats; | |
5394 | } | |
26975821 | 5395 | |
26975821 TP |
5396 | mvpp2_port_periodic_xon_disable(port); |
5397 | ||
649e51d5 | 5398 | mvpp2_mac_reset_assert(port); |
7409e66e | 5399 | mvpp22_pcs_reset_assert(port); |
3f518509 | 5400 | |
edc660fa MW |
5401 | port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); |
5402 | if (!port->pcpu) { | |
5403 | err = -ENOMEM; | |
5404 | goto err_free_txq_pcpu; | |
5405 | } | |
5406 | ||
213f428f | 5407 | if (!port->has_tx_irqs) { |
e531f767 | 5408 | for (thread = 0; thread < priv->nthreads; thread++) { |
074c74df | 5409 | port_pcpu = per_cpu_ptr(port->pcpu, thread); |
edc660fa | 5410 | |
213f428f | 5411 | hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, |
ecb9f80d | 5412 | HRTIMER_MODE_REL_PINNED_SOFT); |
213f428f TP |
5413 | port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; |
5414 | port_pcpu->timer_scheduled = false; | |
ecb9f80d | 5415 | port_pcpu->dev = dev; |
213f428f | 5416 | } |
edc660fa MW |
5417 | } |
5418 | ||
381c5671 AT |
5419 | features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
5420 | NETIF_F_TSO; | |
3f518509 | 5421 | dev->features = features | NETIF_F_RXCSUM; |
56beda3d MC |
5422 | dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | |
5423 | NETIF_F_HW_VLAN_CTAG_FILTER; | |
576193f2 | 5424 | |
da86f59f | 5425 | if (mvpp22_rss_is_supported()) { |
d33ec452 | 5426 | dev->hw_features |= NETIF_F_RXHASH; |
da86f59f MC |
5427 | dev->features |= NETIF_F_NTUPLE; |
5428 | } | |
d33ec452 | 5429 | |
7d04b0b1 MC |
5430 | if (!port->priv->percpu_pools) |
5431 | mvpp2_set_hw_csum(port, port->pool_long->id); | |
576193f2 | 5432 | |
3f518509 | 5433 | dev->vlan_features |= features; |
1d17db08 | 5434 | dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; |
10fea26c | 5435 | dev->priv_flags |= IFF_UNICAST_FLT; |
3f518509 | 5436 | |
576193f2 | 5437 | /* MTU range: 68 - 9704 */ |
5777987e | 5438 | dev->min_mtu = ETH_MIN_MTU; |
576193f2 SC |
5439 | /* 9704 == 9728 - 20, rounded down to a multiple of 8 */ |
5440 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; | |
c4053ef3 | 5441 | dev->dev.of_node = port_node; |
5777987e | 5442 | |
4bb04326 AT |
5443 | /* Phylink isn't used with ACPI as of now */ |
5444 | if (port_node) { | |
44cc27e4 IC |
5445 | port->phylink_config.dev = &dev->dev; |
5446 | port->phylink_config.type = PHYLINK_NETDEV; | |
5447 | ||
5448 | phylink = phylink_create(&port->phylink_config, port_fwnode, | |
5449 | phy_mode, &mvpp2_phylink_ops); | |
4bb04326 AT |
5450 | if (IS_ERR(phylink)) { |
5451 | err = PTR_ERR(phylink); | |
5452 | goto err_free_port_pcpu; | |
5453 | } | |
5454 | port->phylink = phylink; | |
5455 | } else { | |
5456 | port->phylink = NULL; | |
5457 | } | |
5458 | ||
6791c102 RK |
5459 | /* Cycle the comphy to power it down, saving 270mW per port - |
5460 | * don't worry about an error powering it up. When the comphy | |
5461 | * driver does this, we can remove this code. | |
5462 | */ | |
5463 | if (port->comphy) { | |
5464 | err = mvpp22_comphy_init(port); | |
5465 | if (err == 0) | |
5466 | phy_power_off(port->comphy); | |
5467 | } | |
5468 | ||
3f518509 MW |
5469 | err = register_netdev(dev); |
5470 | if (err < 0) { | |
5471 | dev_err(&pdev->dev, "failed to register netdev\n"); | |
4bb04326 | 5472 | goto err_phylink; |
3f518509 MW |
5473 | } |
5474 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); | |
5475 | ||
bf147153 MW |
5476 | priv->port_list[priv->port_count++] = port; |
5477 | ||
3f518509 MW |
5478 | return 0; |
5479 | ||
4bb04326 AT |
5480 | err_phylink: |
5481 | if (port->phylink) | |
5482 | phylink_destroy(port->phylink); | |
edc660fa MW |
5483 | err_free_port_pcpu: |
5484 | free_percpu(port->pcpu); | |
3f518509 | 5485 | err_free_txq_pcpu: |
09f83975 | 5486 | for (i = 0; i < port->ntxqs; i++) |
3f518509 MW |
5487 | free_percpu(port->txqs[i]->pcpu); |
5488 | err_free_stats: | |
5489 | free_percpu(port->stats); | |
fd3651b2 AT |
5490 | err_free_irq: |
5491 | if (port->link_irq) | |
5492 | irq_dispose_mapping(port->link_irq); | |
591f4cfa TP |
5493 | err_deinit_qvecs: |
5494 | mvpp2_queue_vectors_deinit(port); | |
3f518509 MW |
5495 | err_free_netdev: |
5496 | free_netdev(dev); | |
5497 | return err; | |
5498 | } | |
5499 | ||
5500 | /* Ports removal routine */ | |
5501 | static void mvpp2_port_remove(struct mvpp2_port *port) | |
5502 | { | |
5503 | int i; | |
5504 | ||
5505 | unregister_netdev(port->dev); | |
4bb04326 AT |
5506 | if (port->phylink) |
5507 | phylink_destroy(port->phylink); | |
edc660fa | 5508 | free_percpu(port->pcpu); |
3f518509 | 5509 | free_percpu(port->stats); |
09f83975 | 5510 | for (i = 0; i < port->ntxqs; i++) |
3f518509 | 5511 | free_percpu(port->txqs[i]->pcpu); |
591f4cfa | 5512 | mvpp2_queue_vectors_deinit(port); |
fd3651b2 AT |
5513 | if (port->link_irq) |
5514 | irq_dispose_mapping(port->link_irq); | |
3f518509 MW |
5515 | free_netdev(port->dev); |
5516 | } | |
5517 | ||
5518 | /* Initialize decoding windows */ | |
5519 | static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, | |
5520 | struct mvpp2 *priv) | |
5521 | { | |
5522 | u32 win_enable; | |
5523 | int i; | |
5524 | ||
5525 | for (i = 0; i < 6; i++) { | |
5526 | mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); | |
5527 | mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); | |
5528 | ||
5529 | if (i < 4) | |
5530 | mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); | |
5531 | } | |
5532 | ||
5533 | win_enable = 0; | |
5534 | ||
5535 | for (i = 0; i < dram->num_cs; i++) { | |
5536 | const struct mbus_dram_window *cs = dram->cs + i; | |
5537 | ||
5538 | mvpp2_write(priv, MVPP2_WIN_BASE(i), | |
5539 | (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | | |
5540 | dram->mbus_dram_target_id); | |
5541 | ||
5542 | mvpp2_write(priv, MVPP2_WIN_SIZE(i), | |
5543 | (cs->size - 1) & 0xffff0000); | |
5544 | ||
5545 | win_enable |= (1 << i); | |
5546 | } | |
5547 | ||
5548 | mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); | |
5549 | } | |
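
A worked example of the window programming above, assuming a hypothetical single DRAM chip-select at base 0x00000000, size 256 MiB, attribute 0x0e and target id 0 (illustrative numbers only):

	/* MVPP2_WIN_BASE(0) = (0x00000000 & 0xffff0000) | (0x0e << 8) | 0
	 *                   = 0x00000e00
	 * MVPP2_WIN_SIZE(0) = (0x10000000 - 1) & 0xffff0000 = 0x0fff0000
	 * MVPP2_BASE_ADDR_ENABLE gets win_enable = BIT(0)
	 */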
5550 | ||
5551 | /* Initialize Rx FIFOs */ |
5552 | static void mvpp2_rx_fifo_init(struct mvpp2 *priv) | |
5553 | { | |
5554 | int port; | |
5555 | ||
5556 | for (port = 0; port < MVPP2_MAX_PORTS; port++) { | |
5557 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), | |
2d1d7df8 | 5558 | MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); |
3f518509 | 5559 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), |
2d1d7df8 AT |
5560 | MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); |
5561 | } | |
5562 | ||
5563 | mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, | |
5564 | MVPP2_RX_FIFO_PORT_MIN_PKT); | |
5565 | mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); | |
5566 | } | |
5567 | ||
5568 | static void mvpp22_rx_fifo_init(struct mvpp2 *priv) | |
5569 | { | |
5570 | int port; | |
5571 | ||
5572 | /* The FIFO size parameters are set depending on the maximum speed a | |
5573 | * given port can handle: | |
5574 | * - Port 0: 10Gbps | |
5575 | * - Port 1: 2.5Gbps | |
5576 | * - Ports 2 and 3: 1Gbps | |
5577 | */ | |
5578 | ||
5579 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), | |
5580 | MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); | |
5581 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), | |
5582 | MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); | |
5583 | ||
5584 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), | |
5585 | MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); | |
5586 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), | |
5587 | MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); | |
5588 | ||
5589 | for (port = 2; port < MVPP2_MAX_PORTS; port++) { | |
5590 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), | |
5591 | MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); | |
5592 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), | |
5593 | MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); | |
3f518509 MW |
5594 | } |
5595 | ||
5596 | mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, | |
5597 | MVPP2_RX_FIFO_PORT_MIN_PKT); | |
5598 | mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); | |
5599 | } | |
5600 | ||
93ff130f YM |
5601 | /* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2, and 10G |
5602 | * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, |
5603 | * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB. |
5604 | */ | |
7c10f974 AT |
5605 | static void mvpp22_tx_fifo_init(struct mvpp2 *priv) |
5606 | { | |
93ff130f | 5607 | int port, size, thrs; |
7c10f974 | 5608 | |
93ff130f YM |
5609 | for (port = 0; port < MVPP2_MAX_PORTS; port++) { |
5610 | if (port == 0) { | |
5611 | size = MVPP22_TX_FIFO_DATA_SIZE_10KB; | |
5612 | thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; | |
5613 | } else { | |
5614 | size = MVPP22_TX_FIFO_DATA_SIZE_3KB; | |
5615 | thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; | |
5616 | } | |
5617 | mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); | |
5618 | mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); | |
5619 | } | |
7c10f974 AT |
5620 | } |
5621 | ||
6763ce31 TP |
5622 | static void mvpp2_axi_init(struct mvpp2 *priv) |
5623 | { | |
5624 | u32 val, rdval, wrval; | |
5625 | ||
5626 | mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); | |
5627 | ||
5628 | /* AXI Bridge Configuration */ | |
5629 | ||
5630 | rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE | |
5631 | << MVPP22_AXI_ATTR_CACHE_OFFS; | |
5632 | rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
5633 | << MVPP22_AXI_ATTR_DOMAIN_OFFS; | |
5634 | ||
5635 | wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE | |
5636 | << MVPP22_AXI_ATTR_CACHE_OFFS; | |
5637 | wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
5638 | << MVPP22_AXI_ATTR_DOMAIN_OFFS; | |
5639 | ||
5640 | /* BM */ | |
5641 | mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); | |
5642 | mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); | |
5643 | ||
5644 | /* Descriptors */ | |
5645 | mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); | |
5646 | mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); | |
5647 | mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); | |
5648 | mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); | |
5649 | ||
5650 | /* Buffer Data */ | |
5651 | mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); | |
5652 | mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); | |
5653 | ||
5654 | val = MVPP22_AXI_CODE_CACHE_NON_CACHE | |
5655 | << MVPP22_AXI_CODE_CACHE_OFFS; | |
5656 | val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM | |
5657 | << MVPP22_AXI_CODE_DOMAIN_OFFS; | |
5658 | mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); | |
5659 | mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); | |
5660 | ||
5661 | val = MVPP22_AXI_CODE_CACHE_RD_CACHE | |
5662 | << MVPP22_AXI_CODE_CACHE_OFFS; | |
5663 | val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
5664 | << MVPP22_AXI_CODE_DOMAIN_OFFS; | |
5665 | ||
5666 | mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); | |
5667 | ||
5668 | val = MVPP22_AXI_CODE_CACHE_WR_CACHE | |
5669 | << MVPP22_AXI_CODE_CACHE_OFFS; | |
5670 | val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
5671 | << MVPP22_AXI_CODE_DOMAIN_OFFS; | |
5672 | ||
5673 | mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); | |
5674 | } | |
5675 | ||
3f518509 MW |
5676 | /* Initialize network controller common part HW */ |
5677 | static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) | |
5678 | { | |
5679 | const struct mbus_dram_target_info *dram_target_info; | |
5680 | int err, i; | |
08a23755 | 5681 | u32 val; |
3f518509 | 5682 | |
3f518509 MW |
5683 | /* MBUS windows configuration */ |
5684 | dram_target_info = mv_mbus_dram_info(); | |
5685 | if (dram_target_info) | |
5686 | mvpp2_conf_mbus_windows(dram_target_info, priv); | |
5687 | ||
6763ce31 TP |
5688 | if (priv->hw_version == MVPP22) |
5689 | mvpp2_axi_init(priv); | |
5690 | ||
08a23755 | 5691 | /* Disable HW PHY polling */ |
26975821 TP |
5692 | if (priv->hw_version == MVPP21) { |
5693 | val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); | |
5694 | val |= MVPP2_PHY_AN_STOP_SMI0_MASK; | |
5695 | writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); | |
5696 | } else { | |
5697 | val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); | |
5698 | val &= ~MVPP22_SMI_POLLING_EN; | |
5699 | writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); | |
5700 | } | |
08a23755 | 5701 | |
3f518509 | 5702 | /* Allocate and initialize aggregated TXQs */ |
074c74df | 5703 | priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, |
d7ce3cec | 5704 | sizeof(*priv->aggr_txqs), |
3f518509 MW |
5705 | GFP_KERNEL); |
5706 | if (!priv->aggr_txqs) | |
5707 | return -ENOMEM; | |
5708 | ||
074c74df | 5709 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
3f518509 MW |
5710 | priv->aggr_txqs[i].id = i; |
5711 | priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; | |
85affd7e | 5712 | err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); |
3f518509 MW |
5713 | if (err < 0) |
5714 | return err; | |
5715 | } | |
5716 | ||
7c10f974 AT |
5717 | /* FIFO init */ |
5718 | if (priv->hw_version == MVPP21) { | |
2d1d7df8 | 5719 | mvpp2_rx_fifo_init(priv); |
7c10f974 | 5720 | } else { |
2d1d7df8 | 5721 | mvpp22_rx_fifo_init(priv); |
7c10f974 AT |
5722 | mvpp22_tx_fifo_init(priv); |
5723 | } | |
3f518509 | 5724 | |
26975821 TP |
5725 | if (priv->hw_version == MVPP21) |
5726 | writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, | |
5727 | priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); | |
3f518509 MW |
5728 | |
5729 | /* Allow cache snoop when transmitting packets */ |
5730 | mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); | |
5731 | ||
5732 | /* Buffer Manager initialization */ | |
13616361 | 5733 | err = mvpp2_bm_init(&pdev->dev, priv); |
3f518509 MW |
5734 | if (err < 0) |
5735 | return err; | |
5736 | ||
5737 | /* Parser default initialization */ | |
5738 | err = mvpp2_prs_default_init(pdev, priv); | |
5739 | if (err < 0) | |
5740 | return err; | |
5741 | ||
5742 | /* Classifier default initialization */ | |
5743 | mvpp2_cls_init(priv); | |
5744 | ||
5745 | return 0; | |
5746 | } | |
5747 | ||
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* Multi-queue mode isn't supported on PPv2.1, fall back to
	 * single-queue mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * ACPI, it may already appear as 'in-use' in the
			 * OS. Because it overlaps the second region of
			 * the network controller, make sure it is
			 * released before requesting it again. The mvpp2
			 * driver takes care to avoid concurrent access to
			 * this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	if (priv->hw_version == MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));

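	/* CPUs beyond priv->nthreads have no private register window and
	 * must share one; the bits set in priv->lock_map mark the threads
	 * whose register accesses need to be serialized with a lock.
	 */
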
	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}
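
	/* The controller duplicates its register file once per software
	 * thread; swth_base[i] is the base of thread i's window, which
	 * the driver's per-thread register helpers dereference.
	 */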

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}
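
	/* In the ACPI case no clocks are claimed: firmware is presumably
	 * expected to have enabled them already, and only the tclk rate
	 * is needed, taken from the "clock-frequency" property above.
	 */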

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}
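
	/* Note: MVPP2_DESC_DMA_MASK (defined in mvpp2.h; a 40-bit mask
	 * there) governs streaming mappings only, while coherent
	 * allocations stay below 4 GB for the reason given above.
	 */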

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Hence, use a workqueue to fill 64-bit counters.
	 */
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

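	/* Error unwind mirrors the acquisition order above; the MG clocks
	 * only exist on PPv2.2, hence the hw_version guards below.
	 */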
err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

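	/* On ACPI platforms the clocks were never claimed in probe, so
	 * there is nothing to release; bail out before the teardown below.
	 */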
	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
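
/* Only the PPv2.2 flavour is reachable via ACPI (_HID MRVL0110);
 * PPv2.1 on Armada 375 remains DT-only.
 */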

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");