From f9246c9597e89510ae016c33ffa3b367ed83cf2d Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Wed, 28 Feb 2024 11:25:15 +0530
Subject: [PATCH 38/50] net: ethernet: qualcomm: Add EDMA support for QCOM
 IPQ9574 chipset.

Add the EDMA infrastructure: the Makefile changes, EDMA hardware
configuration, and clock and IRQ initialization.

Change-Id: I64f65e554e70e9095b0cf3636fec421569ae6895
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Co-developed-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
---
 drivers/net/ethernet/qualcomm/ppe/Makefile   |   3 +
 drivers/net/ethernet/qualcomm/ppe/edma.c     | 456 +++++++++++++++++++
 drivers/net/ethernet/qualcomm/ppe/edma.h     |  99 ++++
 drivers/net/ethernet/qualcomm/ppe/ppe.c      |  10 +-
 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 253 ++++++++++
 5 files changed, 820 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.c
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.h

--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -5,3 +5,6 @@
 
 obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
 qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+
+# EDMA
+qcom-ppe-objs += edma.o
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* Qualcomm Ethernet DMA driver setup, HW configuration, clocks and
+ * interrupt initializations.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_irq.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "edma.h"
+#include "ppe_regs.h"
+
+#define EDMA_IRQ_NAME_SIZE		32
+
+/* Global EDMA context. */
+struct edma_context *edma_ctx;
+
+/* Priority to multi-queue mapping. */
+static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
+	0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
+
+enum edma_clk_id {
+	EDMA_CLK,
+	EDMA_CFG_CLK,
+	EDMA_CLK_MAX
+};
+
+static const char * const clock_name[EDMA_CLK_MAX] = {
+	[EDMA_CLK] = "edma",
+	[EDMA_CFG_CLK] = "edma-cfg",
+};
+
+/* Rx Fill ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_rxfill_ring_info = {
+	.max_rings = 8,
+	.ring_start = 4,
+	.num_rings = 4,
+};
+
+/* Rx ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_rx_ring_info = {
+	.max_rings = 24,
+	.ring_start = 20,
+	.num_rings = 4,
+};
+
+/* Tx ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_tx_ring_info = {
+	.max_rings = 32,
+	.ring_start = 8,
+	.num_rings = 24,
+};
+
+/* Tx complete ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_txcmpl_ring_info = {
+	.max_rings = 32,
+	.ring_start = 8,
+	.num_rings = 24,
+};
+
+/* HW info for IPQ9574. */
+static struct edma_hw_info ipq9574_hw_info = {
+	.rxfill = &ipq9574_rxfill_ring_info,
+	.rx = &ipq9574_rx_ring_info,
+	.tx = &ipq9574_tx_ring_info,
+	.txcmpl = &ipq9574_txcmpl_ring_info,
+	.max_ports = 6,
+	.napi_budget_rx = 128,
+	.napi_budget_tx = 512,
+};
+
+static int edma_clock_set_and_enable(struct device *dev,
+				     const char *id, unsigned long rate)
+{
+	struct device_node *edma_np;
+	struct clk *clk;
+	int ret;
+
+	edma_np = of_get_child_by_name(dev->of_node, "edma");
+
+	clk = devm_get_clk_from_child(dev, edma_np, id);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "clk %s get failed\n", id);
+		of_node_put(edma_np);
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_set_rate(clk, rate);
+	if (ret) {
+		dev_err(dev, "set %lu rate for %s failed\n", rate, id);
+		of_node_put(edma_np);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "clk %s enable failed\n", id);
+		of_node_put(edma_np);
+		return ret;
+	}
+
+	of_node_put(edma_np);
+
+	dev_dbg(dev, "set %lu rate for %s\n", rate, id);
+
+	return 0;
+}
+
+static int edma_clock_init(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct device *dev = ppe_dev->dev;
+	unsigned long ppe_rate;
+	int ret;
+
+	ppe_rate = ppe_dev->clk_rate;
+
+	ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CLK],
+					ppe_rate);
+	if (ret)
+		return ret;
+
+	ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CFG_CLK],
+					ppe_rate);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
+ *
+ * Map int_priority values to priority class and initialize
+ * unicast priority map table for default profile_id.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int edma_configure_ucast_prio_map_tbl(void)
+{
+	u8 pri_class, int_pri;
+	int ret = 0;
+
+	/* Set the priority class value for every possible priority. */
+	for (int_pri = 0; int_pri < PPE_QUEUE_INTER_PRI_NUM; int_pri++) {
+		pri_class = edma_pri_map[int_pri];
+
+		/* Priority offset should be less than the maximum supported
+		 * queue priority.
+		 */
+		if (pri_class > EDMA_PRI_MAX_PER_CORE - 1) {
+			pr_err("Configured incorrect priority offset: %d\n",
+			       pri_class);
+			return -EINVAL;
+		}
+
+		ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
+						   PPE_QUEUE_CLASS_PRIORITY, int_pri, pri_class);
+		if (ret) {
+			pr_err("Failed with error: %d to set queue priority class for int_pri: %d for profile_id: %d\n",
+			       ret, int_pri, 0);
+			return ret;
+		}
+
+		pr_debug("profile_id: %d, int_priority: %d, pri_class: %d\n",
+			 0, int_pri, pri_class);
+	}
+
+	return ret;
+}
+
+static int edma_irq_init(void)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct edma_ring_info *txcmpl = hw_info->txcmpl;
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct edma_ring_info *rx = hw_info->rx;
+	char edma_irq_name[EDMA_IRQ_NAME_SIZE];
+	struct device *dev = ppe_dev->dev;
+	struct device_node *edma_np;
+	int ret;
+	u32 i;
+
+	edma_np = of_get_child_by_name(dev->of_node, "edma");
+	edma_ctx->intr_info.intr_txcmpl = kcalloc(txcmpl->num_rings,
+						  sizeof(*edma_ctx->intr_info.intr_txcmpl),
+						  GFP_KERNEL);
+	if (!edma_ctx->intr_info.intr_txcmpl) {
+		of_node_put(edma_np);
+		return -ENOMEM;
+	}
+
+	/* Get TXCMPL rings IRQ numbers. */
+	for (i = 0; i < txcmpl->num_rings; i++) {
+		snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_txcmpl_%d",
+			 txcmpl->ring_start + i);
+		edma_ctx->intr_info.intr_txcmpl[i] = of_irq_get_byname(edma_np, edma_irq_name);
+		if (edma_ctx->intr_info.intr_txcmpl[i] < 0) {
+			dev_err(dev, "%s: txcmpl_info.intr[%u] irq get failed\n",
+				edma_np->name, i);
+			ret = edma_ctx->intr_info.intr_txcmpl[i];
+			of_node_put(edma_np);
+			kfree(edma_ctx->intr_info.intr_txcmpl);
+			return ret;
+		}
+
+		dev_dbg(dev, "%s: intr_info.intr_txcmpl[%u] = %d\n",
+			edma_np->name, i, edma_ctx->intr_info.intr_txcmpl[i]);
+	}
+
+	edma_ctx->intr_info.intr_rx = kcalloc(rx->num_rings,
+					      sizeof(*edma_ctx->intr_info.intr_rx),
+					      GFP_KERNEL);
+	if (!edma_ctx->intr_info.intr_rx) {
+		of_node_put(edma_np);
+		kfree(edma_ctx->intr_info.intr_txcmpl);
+		return -ENOMEM;
+	}
+
+	/* Get RXDESC rings IRQ numbers. */
+	for (i = 0; i < rx->num_rings; i++) {
+		snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_rxdesc_%d",
+			 rx->ring_start + i);
+		edma_ctx->intr_info.intr_rx[i] = of_irq_get_byname(edma_np, edma_irq_name);
+		if (edma_ctx->intr_info.intr_rx[i] < 0) {
+			dev_err(dev, "%s: rx_queue_map_info.intr[%u] irq get failed\n",
+				edma_np->name, i);
+			ret = edma_ctx->intr_info.intr_rx[i];
+			of_node_put(edma_np);
+			kfree(edma_ctx->intr_info.intr_rx);
+			kfree(edma_ctx->intr_info.intr_txcmpl);
+			return ret;
+		}
+
+		dev_dbg(dev, "%s: intr_info.intr_rx[%u] = %d\n",
+			edma_np->name, i, edma_ctx->intr_info.intr_rx[i]);
+	}
+
+	/* Get misc IRQ number. */
+	edma_ctx->intr_info.intr_misc = of_irq_get_byname(edma_np, "edma_misc");
+	if (edma_ctx->intr_info.intr_misc < 0) {
+		dev_err(dev, "%s: misc_intr irq get failed\n", edma_np->name);
+		of_node_put(edma_np);
+		kfree(edma_ctx->intr_info.intr_rx);
+		kfree(edma_ctx->intr_info.intr_txcmpl);
+		return edma_ctx->intr_info.intr_misc;
+	}
+
+	dev_dbg(dev, "%s: misc IRQ:%d\n", edma_np->name,
+		edma_ctx->intr_info.intr_misc);
+
+	of_node_put(edma_np);
+
+	return 0;
+}
+
+static int edma_hw_reset(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct device *dev = ppe_dev->dev;
+	struct reset_control *edma_hw_rst;
+	struct device_node *edma_np;
+	const char *reset_string;
+	int count, i;
+	int ret;
+
+	/* Count and parse reset names from DTSI. */
+	edma_np = of_get_child_by_name(dev->of_node, "edma");
+	count = of_property_count_strings(edma_np, "reset-names");
+	if (count < 0) {
+		dev_err(dev, "EDMA reset entry not found\n");
+		of_node_put(edma_np);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = of_property_read_string_index(edma_np, "reset-names",
+						    i, &reset_string);
+		if (ret) {
+			dev_err(dev, "Error reading reset-names\n");
+			of_node_put(edma_np);
+			return -EINVAL;
+		}
+
+		edma_hw_rst = of_reset_control_get_exclusive(edma_np, reset_string);
+		if (IS_ERR(edma_hw_rst)) {
+			of_node_put(edma_np);
+			return PTR_ERR(edma_hw_rst);
+		}
+
+		/* 100ms delay is required by hardware to reset EDMA. */
+		reset_control_assert(edma_hw_rst);
+		fsleep(100 * USEC_PER_MSEC);
+
+		reset_control_deassert(edma_hw_rst);
+		fsleep(100 * USEC_PER_MSEC);
+
+		reset_control_put(edma_hw_rst);
+		dev_dbg(dev, "EDMA HW reset, i:%d reset_string:%s\n", i, reset_string);
+	}
+
+	of_node_put(edma_np);
+
+	return 0;
+}
+
+static int edma_hw_configure(void)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 data, reg;
+	int ret;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
+	ret = regmap_read(regmap, reg, &data);
+	if (ret)
+		return ret;
+
+	pr_debug("EDMA ver %u hw init\n", data);
+
+	/* Setup private data structure. */
+	edma_ctx->intr_info.intr_mask_rx = EDMA_RXDESC_INT_MASK_PKT_INT;
+	edma_ctx->intr_info.intr_mask_txcmpl = EDMA_TX_INT_MASK_PKT_INT;
+
+	/* Reset EDMA. */
+	ret = edma_hw_reset();
+	if (ret) {
+		pr_err("Error in resetting the hardware. ret: %d\n", ret);
+		return ret;
+	}
+
+	/* Allocate memory for the netdevice pointers, one per EDMA port. */
+	edma_ctx->netdev_arr = kcalloc(hw_info->max_ports,
+				       sizeof(*edma_ctx->netdev_arr),
+				       GFP_KERNEL);
+	if (!edma_ctx->netdev_arr)
+		return -ENOMEM;
+
+	/* Configure DMA request priority, DMA read burst length,
+	 * and AXI write size.
+	 */
+	data = FIELD_PREP(EDMA_DMAR_BURST_LEN_MASK, EDMA_BURST_LEN_ENABLE);
+	data |= FIELD_PREP(EDMA_DMAR_REQ_PRI_MASK, 0);
+	data |= FIELD_PREP(EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK, 31);
+	data |= FIELD_PREP(EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK, 7);
+	data |= FIELD_PREP(EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK, 7);
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_DMAR_CTRL_ADDR;
+	ret = regmap_write(regmap, reg, data);
+	if (ret)
+		goto err_free_netdev;
+
+	/* Configure Tx Timeout Threshold. */
+	data = EDMA_TX_TIMEOUT_THRESH_VAL;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_TX_TIMEOUT_THRESH_ADDR;
+	ret = regmap_write(regmap, reg, data);
+	if (ret)
+		goto err_free_netdev;
+
+	/* Set Miscellaneous error mask. */
+	data = EDMA_MISC_AXI_RD_ERR_MASK |
+	       EDMA_MISC_AXI_WR_ERR_MASK |
+	       EDMA_MISC_RX_DESC_FIFO_FULL_MASK |
+	       EDMA_MISC_RX_ERR_BUF_SIZE_MASK |
+	       EDMA_MISC_TX_SRAM_FULL_MASK |
+	       EDMA_MISC_TX_CMPL_BUF_FULL_MASK |
+	       EDMA_MISC_DATA_LEN_ERR_MASK;
+	data |= EDMA_MISC_TX_TIMEOUT_MASK;
+	edma_ctx->intr_info.intr_mask_misc = data;
+
+	/* Global EDMA enable and padding enable. */
+	data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_PORT_CTRL_ADDR;
+	ret = regmap_write(regmap, reg, data);
+	if (ret)
+		goto err_free_netdev;
+
+	/* Initialize unicast priority map table. */
+	ret = edma_configure_ucast_prio_map_tbl();
+	if (ret) {
+		pr_err("Failed to initialize unicast priority map table: %d\n",
+		       ret);
+		goto err_free_netdev;
+	}
+
+	return 0;
+
+err_free_netdev:
+	kfree(edma_ctx->netdev_arr);
+	edma_ctx->netdev_arr = NULL;
+
+	return ret;
+}
+
+/**
+ * edma_destroy - EDMA Destroy.
+ * @ppe_dev: PPE device
+ *
+ * Free the memory allocated during setup.
+ */
+void edma_destroy(struct ppe_device *ppe_dev)
+{
+	kfree(edma_ctx->intr_info.intr_rx);
+	kfree(edma_ctx->intr_info.intr_txcmpl);
+	kfree(edma_ctx->netdev_arr);
+}
+
+/**
+ * edma_setup - EDMA Setup.
+ * @ppe_dev: PPE device
+ *
+ * Configure the Ethernet global context, clocks, hardware and interrupts.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int edma_setup(struct ppe_device *ppe_dev)
+{
+	struct device *dev = ppe_dev->dev;
+	int ret;
+
+	edma_ctx = devm_kzalloc(dev, sizeof(*edma_ctx), GFP_KERNEL);
+	if (!edma_ctx)
+		return -ENOMEM;
+
+	edma_ctx->hw_info = &ipq9574_hw_info;
+	edma_ctx->ppe_dev = ppe_dev;
+
+	/* Configure the EDMA common clocks. */
+	ret = edma_clock_init();
+	if (ret) {
+		dev_err(dev, "Error in configuring the EDMA clocks\n");
+		return ret;
+	}
+
+	dev_dbg(dev, "QCOM EDMA common clocks are configured\n");
+
+	ret = edma_hw_configure();
+	if (ret) {
+		dev_err(dev, "Error in edma configuration\n");
+		return ret;
+	}
+
+	ret = edma_irq_init();
+	if (ret) {
+		dev_err(dev, "Error in irq initialization\n");
+		kfree(edma_ctx->netdev_arr);
+		edma_ctx->netdev_arr = NULL;
+		return ret;
+	}
+
+	dev_info(dev, "EDMA configuration successful\n");
+
+	return 0;
+}
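
Note that edma_irq_init() above only looks up and stores IRQ numbers; nothing in this patch requests them. A minimal sketch of how a follow-up change might attach a handler to the stored Rx IRQs (the handler, its name string, and the use of edma_ctx as the cookie are illustrative assumptions, not part of this series):

	/* Illustrative only; requires <linux/interrupt.h>. */
	static irqreturn_t edma_rx_irq_handler(int irq, void *data)
	{
		/* A real handler would ack the ring and schedule NAPI. */
		return IRQ_HANDLED;
	}

	static int edma_request_rx_irqs(void)
	{
		struct edma_ring_info *rx = edma_ctx->hw_info->rx;
		u32 i;
		int ret;

		for (i = 0; i < rx->num_rings; i++) {
			ret = request_irq(edma_ctx->intr_info.intr_rx[i],
					  edma_rx_irq_handler, 0,
					  "edma_rxdesc", edma_ctx);
			if (ret)
				return ret;
		}

		return 0;
	}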
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __EDMA_MAIN__
+#define __EDMA_MAIN__
+
+#include "ppe_api.h"
+
+/* One clock cycle = 1/(EDMA clock frequency in MHz) microseconds.
+ *
+ * One timer unit is 128 clock cycles.
+ *
+ * The microsecond to timer unit conversion is therefore:
+ * timer units = time in microseconds / (one clock cycle in microseconds *
+ *               cycles per timer unit)
+ *             = 'x' microseconds * EDMA clock frequency in MHz ('y') / 128
+ */
+#define EDMA_CYCLE_PER_TIMER_UNIT	128
+#define EDMA_MICROSEC_TO_TIMER_UNIT(x, y)	((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
+#define MHZ	1000000UL
+
+/* EDMA profile ID. */
+#define EDMA_CPU_PORT_PROFILE_ID	0
+
+/* Number of PPE queue priorities supported per ARM core. */
+#define EDMA_PRI_MAX_PER_CORE		8
+
+/**
+ * struct edma_ring_info - EDMA ring data structure.
+ * @max_rings: Maximum number of rings
+ * @ring_start: Ring start ID
+ * @num_rings: Number of rings
+ */
+struct edma_ring_info {
+	u32 max_rings;
+	u32 ring_start;
+	u32 num_rings;
+};
+
+/**
+ * struct edma_hw_info - EDMA hardware data structure.
+ * @rxfill: Rx Fill ring information
+ * @rx: Rx Desc ring information
+ * @tx: Tx Desc ring information
+ * @txcmpl: Tx complete ring information
+ * @max_ports: Maximum number of ports
+ * @napi_budget_rx: Rx NAPI budget
+ * @napi_budget_tx: Tx NAPI budget
+ */
+struct edma_hw_info {
+	struct edma_ring_info *rxfill;
+	struct edma_ring_info *rx;
+	struct edma_ring_info *tx;
+	struct edma_ring_info *txcmpl;
+	u32 max_ports;
+	u32 napi_budget_rx;
+	u32 napi_budget_tx;
+};
+
+/**
+ * struct edma_intr_info - EDMA interrupt data structure.
+ * @intr_mask_rx: RX interrupt mask
+ * @intr_rx: Rx ring IRQ numbers
+ * @intr_mask_txcmpl: Tx completion interrupt mask
+ * @intr_txcmpl: Tx completion ring IRQ numbers
+ * @intr_mask_misc: Miscellaneous interrupt mask
+ * @intr_misc: Miscellaneous IRQ number
+ */
+struct edma_intr_info {
+	u32 intr_mask_rx;
+	int *intr_rx;
+	u32 intr_mask_txcmpl;
+	int *intr_txcmpl;
+	u32 intr_mask_misc;
+	int intr_misc;
+};
+
+/**
+ * struct edma_context - EDMA context.
+ * @netdev_arr: Net device for each EDMA port
+ * @ppe_dev: PPE device
+ * @hw_info: EDMA Hardware info
+ * @intr_info: EDMA Interrupt info
+ */
+struct edma_context {
+	struct net_device **netdev_arr;
+	struct ppe_device *ppe_dev;
+	struct edma_hw_info *hw_info;
+	struct edma_intr_info intr_info;
+};
+
+/* Global EDMA context. */
+extern struct edma_context *edma_ctx;
+
+void edma_destroy(struct ppe_device *ppe_dev);
+int edma_setup(struct ppe_device *ppe_dev);
+
+#endif
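
To make the timer-unit conversion above concrete, a small usage sketch (the helper name is hypothetical; ppe_dev->clk_rate is the rate the driver already uses in edma_clock_init()):

	/* Illustrative only: convert microseconds to EDMA timer units. */
	static u32 edma_usec_to_timer_units(struct ppe_device *ppe_dev, u32 usec)
	{
		u32 clock_mhz = ppe_dev->clk_rate / MHZ;

		/* e.g. 150 us at an assumed 350 MHz: 150 * 350 / 128 = 410 units */
		return EDMA_MICROSEC_TO_TIMER_UNIT(usec, clock_mhz);
	}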
--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -14,6 +14,7 @@
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
+#include "edma.h"
 #include "ppe.h"
 #include "ppe_config.h"
 #include "ppe_debugfs.h"
@@ -208,10 +209,16 @@ static int qcom_ppe_probe(struct platfor
 	if (ret)
 		return dev_err_probe(dev, ret, "PPE HW config failed\n");
 
-	ret = ppe_port_mac_init(ppe_dev);
+	ret = edma_setup(ppe_dev);
 	if (ret)
+		return dev_err_probe(dev, ret, "EDMA setup failed\n");
+
+	ret = ppe_port_mac_init(ppe_dev);
+	if (ret) {
+		edma_destroy(ppe_dev);
 		return dev_err_probe(dev, ret,
 				     "PPE Port MAC initialization failed\n");
+	}
 
 	ppe_debugfs_setup(ppe_dev);
 	platform_set_drvdata(pdev, ppe_dev);
@@ -226,6 +233,7 @@ static void qcom_ppe_remove(struct platf
 	ppe_dev = platform_get_drvdata(pdev);
 	ppe_debugfs_teardown(ppe_dev);
 	ppe_port_mac_deinit(ppe_dev);
+	edma_destroy(ppe_dev);
 
 	platform_set_drvdata(pdev, NULL);
 }
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -788,4 +788,257 @@
 #define XGMAC_RXDISCARD_GB_ADDR		0x9AC
 #define XGMAC_RXDISCARDBYTE_GB_ADDR	0x9B4
 
+#define EDMA_BASE_OFFSET			0xb00000
+
+/* EDMA register offsets */
+#define EDMA_REG_MAS_CTRL_ADDR			0x0
+#define EDMA_REG_PORT_CTRL_ADDR			0x4
+#define EDMA_REG_VLAN_CTRL_ADDR			0x8
+#define EDMA_REG_RXDESC2FILL_MAP_0_ADDR		0x14
+#define EDMA_REG_RXDESC2FILL_MAP_1_ADDR		0x18
+#define EDMA_REG_RXDESC2FILL_MAP_2_ADDR		0x1c
+#define EDMA_REG_TXQ_CTRL_ADDR			0x20
+#define EDMA_REG_TXQ_CTRL_2_ADDR		0x24
+#define EDMA_REG_TXQ_FC_0_ADDR			0x28
+#define EDMA_REG_TXQ_FC_1_ADDR			0x30
+#define EDMA_REG_TXQ_FC_2_ADDR			0x34
+#define EDMA_REG_TXQ_FC_3_ADDR			0x38
+#define EDMA_REG_RXQ_CTRL_ADDR			0x3c
+#define EDMA_REG_MISC_ERR_QID_ADDR		0x40
+#define EDMA_REG_RXQ_FC_THRE_ADDR		0x44
+#define EDMA_REG_DMAR_CTRL_ADDR			0x48
+#define EDMA_REG_AXIR_CTRL_ADDR			0x4c
+#define EDMA_REG_AXIW_CTRL_ADDR			0x50
+#define EDMA_REG_MIN_MSS_ADDR			0x54
+#define EDMA_REG_LOOPBACK_CTRL_ADDR		0x58
+#define EDMA_REG_MISC_INT_STAT_ADDR		0x5c
+#define EDMA_REG_MISC_INT_MASK_ADDR		0x60
+#define EDMA_REG_DBG_CTRL_ADDR			0x64
+#define EDMA_REG_DBG_DATA_ADDR			0x68
+#define EDMA_REG_TX_TIMEOUT_THRESH_ADDR		0x6c
+#define EDMA_REG_REQ0_FIFO_THRESH_ADDR		0x80
+#define EDMA_REG_WB_OS_THRESH_ADDR		0x84
+#define EDMA_REG_MISC_ERR_QID_REG2_ADDR		0x88
+#define EDMA_REG_TXDESC2CMPL_MAP_0_ADDR		0x8c
+#define EDMA_REG_TXDESC2CMPL_MAP_1_ADDR		0x90
+#define EDMA_REG_TXDESC2CMPL_MAP_2_ADDR		0x94
+#define EDMA_REG_TXDESC2CMPL_MAP_3_ADDR		0x98
+#define EDMA_REG_TXDESC2CMPL_MAP_4_ADDR		0x9c
+#define EDMA_REG_TXDESC2CMPL_MAP_5_ADDR		0xa0
+
+/* Tx descriptor ring configuration register addresses */
+#define EDMA_REG_TXDESC_BA(n)		(0x1000 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_PROD_IDX(n)	(0x1004 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_CONS_IDX(n)	(0x1008 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_RING_SIZE(n)	(0x100c + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_CTRL(n)		(0x1010 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_BA2(n)		(0x1014 + (0x1000 * (n)))
+
+/* RxFill ring configuration register addresses */
+#define EDMA_REG_RXFILL_BA(n)			(0x29000 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_PROD_IDX(n)		(0x29004 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_CONS_IDX(n)		(0x29008 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_RING_SIZE(n)		(0x2900c + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_BUFFER1_SIZE(n)		(0x29010 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_FC_THRE(n)		(0x29014 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_UGT_THRE(n)		(0x29018 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_RING_EN(n)		(0x2901c + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_DISABLE(n)		(0x29020 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_DISABLE_DONE(n)		(0x29024 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_INT_STAT(n)		(0x31000 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_INT_MASK(n)		(0x31004 + (0x1000 * (n)))
+
+/* Rx descriptor ring configuration register addresses */
+#define EDMA_REG_RXDESC_BA(n)			(0x39000 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_PROD_IDX(n)		(0x39004 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_CONS_IDX(n)		(0x39008 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_RING_SIZE(n)		(0x3900c + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_FC_THRE(n)		(0x39010 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_UGT_THRE(n)		(0x39014 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_CTRL(n)			(0x39018 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_BPC(n)			(0x3901c + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_DISABLE(n)		(0x39020 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_DISABLE_DONE(n)		(0x39024 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_PREHEADER_BA(n)		(0x39028 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_INT_STAT(n)		(0x59000 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_INT_MASK(n)		(0x59004 + (0x1000 * (n)))
+
+#define EDMA_REG_RX_MOD_TIMER(n)	(0x59008 + (0x1000 * (n)))
+#define EDMA_REG_RX_INT_CTRL(n)		(0x5900c + (0x1000 * (n)))
+
+/* Tx completion ring configuration register addresses */
+#define EDMA_REG_TXCMPL_BA(n)			(0x79000 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_PROD_IDX(n)		(0x79004 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_CONS_IDX(n)		(0x79008 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_RING_SIZE(n)		(0x7900c + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_UGT_THRE(n)		(0x79010 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_CTRL(n)			(0x79014 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_BPC(n)			(0x79018 + (0x1000 * (n)))
+
+#define EDMA_REG_TX_INT_STAT(n)		(0x99000 + (0x1000 * (n)))
+#define EDMA_REG_TX_INT_MASK(n)		(0x99004 + (0x1000 * (n)))
+#define EDMA_REG_TX_MOD_TIMER(n)	(0x99008 + (0x1000 * (n)))
+#define EDMA_REG_TX_INT_CTRL(n)		(0x9900c + (0x1000 * (n)))
+
+/* EDMA_QID2RID_TABLE_MEM register field masks */
+#define EDMA_RX_RING_ID_QUEUE0_MASK	GENMASK(7, 0)
+#define EDMA_RX_RING_ID_QUEUE1_MASK	GENMASK(15, 8)
+#define EDMA_RX_RING_ID_QUEUE2_MASK	GENMASK(23, 16)
+#define EDMA_RX_RING_ID_QUEUE3_MASK	GENMASK(31, 24)
+
+/* EDMA_REG_PORT_CTRL register bit definitions */
+#define EDMA_PORT_PAD_EN		0x1
+#define EDMA_PORT_EDMA_EN		0x2
+
+/* EDMA_REG_DMAR_CTRL register field masks */
+#define EDMA_DMAR_REQ_PRI_MASK			GENMASK(2, 0)
+#define EDMA_DMAR_BURST_LEN_MASK		BIT(3)
+#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK	GENMASK(8, 4)
+#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK	GENMASK(11, 9)
+#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK	GENMASK(14, 12)
+
+#define EDMA_BURST_LEN_ENABLE		0
+
+/* Tx timeout threshold */
+#define EDMA_TX_TIMEOUT_THRESH_VAL	0xffff
+
+/* Rx descriptor ring base address mask */
+#define EDMA_RXDESC_BA_MASK		0xffffffff
+
+/* Rx descriptor ring pre-header base address mask */
+#define EDMA_RXDESC_PREHEADER_BA_MASK	0xffffffff
+
+/* Tx descriptor prod ring index mask */
+#define EDMA_TXDESC_PROD_IDX_MASK	0xffff
+
+/* Tx descriptor consumer ring index mask */
+#define EDMA_TXDESC_CONS_IDX_MASK	0xffff
+
+/* Tx descriptor ring size mask */
+#define EDMA_TXDESC_RING_SIZE_MASK	0xffff
+
+/* Tx descriptor ring enable */
+#define EDMA_TXDESC_TX_ENABLE		0x1
+
+#define EDMA_TXDESC_CTRL_TXEN_MASK	BIT(0)
+#define EDMA_TXDESC_CTRL_FC_GRP_ID_MASK	GENMASK(3, 1)
+
+/* Tx completion ring prod index mask */
+#define EDMA_TXCMPL_PROD_IDX_MASK	0xffff
+
+/* Tx completion ring urgent threshold mask */
+#define EDMA_TXCMPL_LOW_THRE_MASK	0xffff
+#define EDMA_TXCMPL_LOW_THRE_SHIFT	0
+
+/* EDMA_REG_TX_MOD_TIMER mask */
+#define EDMA_TX_MOD_TIMER_INIT_MASK	0xffff
+#define EDMA_TX_MOD_TIMER_INIT_SHIFT	0
+
+/* Rx fill ring prod index mask */
+#define EDMA_RXFILL_PROD_IDX_MASK	0xffff
+
+/* Rx fill ring consumer index mask */
+#define EDMA_RXFILL_CONS_IDX_MASK	0xffff
+
+/* Rx fill ring size mask */
+#define EDMA_RXFILL_RING_SIZE_MASK	0xffff
+
+/* Rx fill ring flow control threshold masks */
+#define EDMA_RXFILL_FC_XON_THRE_MASK	0x7ff
+#define EDMA_RXFILL_FC_XON_THRE_SHIFT	12
+#define EDMA_RXFILL_FC_XOFF_THRE_MASK	0x7ff
+#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT	0
+
+/* Rx fill ring enable bit */
+#define EDMA_RXFILL_RING_EN		0x1
+
+/* Rx desc ring prod index mask */
+#define EDMA_RXDESC_PROD_IDX_MASK	0xffff
+
+/* Rx descriptor ring cons index mask */
+#define EDMA_RXDESC_CONS_IDX_MASK	0xffff
+
+/* Rx descriptor ring size masks */
+#define EDMA_RXDESC_RING_SIZE_MASK	0xffff
+#define EDMA_RXDESC_PL_OFFSET_MASK	0x1ff
+#define EDMA_RXDESC_PL_OFFSET_SHIFT	16
+#define EDMA_RXDESC_PL_DEFAULT_VALUE	0
+
+/* Rx descriptor ring flow control threshold masks */
+#define EDMA_RXDESC_FC_XON_THRE_MASK	0x7ff
+#define EDMA_RXDESC_FC_XON_THRE_SHIFT	12
+#define EDMA_RXDESC_FC_XOFF_THRE_MASK	0x7ff
+#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT	0
+
+/* Rx descriptor ring urgent threshold mask */
+#define EDMA_RXDESC_LOW_THRE_MASK	0xffff
+#define EDMA_RXDESC_LOW_THRE_SHIFT	0
+
+/* Rx descriptor ring enable bit */
+#define EDMA_RXDESC_RX_EN		0x1
+
+/* Tx interrupt status bit */
+#define EDMA_TX_INT_MASK_PKT_INT	0x1
+
+/* Rx interrupt mask */
+#define EDMA_RXDESC_INT_MASK_PKT_INT	0x1
+
+#define EDMA_MASK_INT_DISABLE		0x0
+#define EDMA_MASK_INT_CLEAR		0x0
+
+/* EDMA_REG_RX_MOD_TIMER register field masks */
+#define EDMA_RX_MOD_TIMER_INIT_MASK	0xffff
+#define EDMA_RX_MOD_TIMER_INIT_SHIFT	0
+
+/* EDMA Ring mask */
+#define EDMA_RING_DMA_MASK		0xffffffff
+
+/* RXDESC threshold interrupt */
+#define EDMA_RXDESC_UGT_INT_STAT	0x2
+
+/* RXDESC timer interrupt */
+#define EDMA_RXDESC_PKT_INT_STAT	0x1
+
+/* RXDESC interrupt status mask */
+#define EDMA_RXDESC_RING_INT_STATUS_MASK \
+	(EDMA_RXDESC_UGT_INT_STAT | EDMA_RXDESC_PKT_INT_STAT)
+
+/* TXCMPL threshold interrupt */
+#define EDMA_TXCMPL_UGT_INT_STAT	0x2
+
+/* TXCMPL timer interrupt */
+#define EDMA_TXCMPL_PKT_INT_STAT	0x1
+
+/* TXCMPL interrupt status mask */
+#define EDMA_TXCMPL_RING_INT_STATUS_MASK \
+	(EDMA_TXCMPL_UGT_INT_STAT | EDMA_TXCMPL_PKT_INT_STAT)
+
+#define EDMA_TXCMPL_RETMODE_OPAQUE	0x0
+
+#define EDMA_RXDESC_LOW_THRE		0
+#define EDMA_RX_MOD_TIMER_INIT		1000
+#define EDMA_RX_NE_INT_EN		0x2
+
+#define EDMA_TX_MOD_TIMER		150
+
+#define EDMA_TX_INITIAL_PROD_IDX	0x0
+#define EDMA_TX_NE_INT_EN		0x2
+
+/* EDMA misc error mask */
+#define EDMA_MISC_AXI_RD_ERR_MASK		BIT(0)
+#define EDMA_MISC_AXI_WR_ERR_MASK		BIT(1)
+#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK	BIT(2)
+#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK		BIT(3)
+#define EDMA_MISC_TX_SRAM_FULL_MASK		BIT(4)
+#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK		BIT(5)
+
+#define EDMA_MISC_DATA_LEN_ERR_MASK		BIT(6)
+#define EDMA_MISC_TX_TIMEOUT_MASK		BIT(7)
+
+/* EDMA txdesc2cmpl map */
+#define EDMA_TXDESC2CMPL_MAP_TXDESC_MASK	0x1f
+
+/* EDMA rxdesc2fill map */
+#define EDMA_RXDESC2FILL_MAP_RXDESC_MASK	0x7
+
 #endif
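
As a usage illustration for the register map above, a hedged sketch of programming the Rx interrupt moderation timer for one ring, in the same regmap style the driver uses in edma_hw_configure(); the ring index 0 and the wrapper function are assumptions, not part of this patch:

	/* Illustrative only: program ring 0's Rx moderation timer to the
	 * EDMA_RX_MOD_TIMER_INIT default defined above.
	 */
	static int edma_set_rx_mod_timer_example(struct ppe_device *ppe_dev)
	{
		u32 reg = EDMA_BASE_OFFSET + EDMA_REG_RX_MOD_TIMER(0);
		u32 data = FIELD_PREP(EDMA_RX_MOD_TIMER_INIT_MASK,
				      EDMA_RX_MOD_TIMER_INIT);

		return regmap_write(ppe_dev->regmap, reg, data);
	}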