From f9246c9597e89510ae016c33ffa3b367ed83cf2d Mon Sep 17 00:00:00 2001
From: Pavithra R <quic_pavir@quicinc.com>
Date: Wed, 28 Feb 2024 11:25:15 +0530
Subject: [PATCH 38/50] net: ethernet: qualcomm: Add EDMA support for QCOM
 IPQ9574 chipset

Add the EDMA infrastructure: the Makefile update plus functions for
EDMA hardware configuration, clock setup and IRQ initialization.

Change-Id: I64f65e554e70e9095b0cf3636fec421569ae6895
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Co-developed-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
---
 drivers/net/ethernet/qualcomm/ppe/Makefile    |   3 +
 drivers/net/ethernet/qualcomm/ppe/edma.c      | 456 +++++++++++++++++++
 drivers/net/ethernet/qualcomm/ppe/edma.h      |  99 ++++
 drivers/net/ethernet/qualcomm/ppe/ppe.c      |  10 +-
 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 253 ++++++++++
 5 files changed, 820 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.c
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.h

--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
 obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
 qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+qcom-ppe-objs += edma.o
\ No newline at end of file
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* Qualcomm Ethernet DMA driver setup, HW configuration, clocks and
+ * interrupt initializations.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "edma.h"
+#include "ppe_regs.h"
+
+#define EDMA_IRQ_NAME_SIZE		32
+
+/* Global EDMA context. */
+struct edma_context *edma_ctx;
+
+/* Priority to multi-queue mapping. */
+static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
+	0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
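The map above folds the 16 PPE internal priorities onto the eight queue
offsets available per core: priorities 0-7 map one-to-one and everything
higher shares the top offset. A minimal sketch of how such a map is meant
to be consumed (the helper and queue_base are illustrative, not part of
this patch):

    /* Illustration only: pick the Rx queue for a given internal priority,
     * assuming each core owns EDMA_PRI_MAX_PER_CORE (8) consecutive queues
     * starting at queue_base.
     */
    static inline u32 edma_example_queue_id(u32 queue_base, u8 int_pri)
    {
    	return queue_base + edma_pri_map[int_pri];	/* offset is 0..7 */
    }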
+static const char * const clock_name[EDMA_CLK_MAX] = {
+	[EDMA_CLK] = "edma",
+	[EDMA_CFG_CLK] = "edma-cfg",
+};
+
+/* Rx Fill ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_rxfill_ring_info = {
+	.max_rings = 8,
+	.ring_start = 4,
+	.num_rings = 4,
+};
+
+/* Rx ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_rx_ring_info = {
+	.max_rings = 24,
+	.ring_start = 20,
+	.num_rings = 4,
+};
+
+/* Tx ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_tx_ring_info = {
+	.max_rings = 32,
+	.ring_start = 8,
+	.num_rings = 24,
+};
+
+/* Tx complete ring info for IPQ9574. */
+static struct edma_ring_info ipq9574_txcmpl_ring_info = {
+	.max_rings = 32,
+	.ring_start = 8,
+	.num_rings = 24,
+};
+
+/* HW info for IPQ9574. */
+static struct edma_hw_info ipq9574_hw_info = {
+	.rxfill = &ipq9574_rxfill_ring_info,
+	.rx = &ipq9574_rx_ring_info,
+	.tx = &ipq9574_tx_ring_info,
+	.txcmpl = &ipq9574_txcmpl_ring_info,
+	.max_ports = 6,
+	.napi_budget_rx = 128,
+	.napi_budget_tx = 512,
+};
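The two NAPI budgets bound how many packets a single Rx or Tx-completion
poll cycle may process. This patch only records them; a sketch of how they
would plausibly be consumed once later patches add the NAPI contexts
(edma_rx_napi_poll and rx_ring are assumptions, not names from this patch):

    /* Sketch: register an Rx NAPI context with the budget kept in hw_info. */
    netif_napi_add_weight(netdev, &rx_ring->napi, edma_rx_napi_poll,
    			  edma_ctx->hw_info->napi_budget_rx);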
+static int edma_clock_set_and_enable(struct device *dev,
+				     const char *id, unsigned long rate)
+{
+	struct device_node *edma_np;
+	struct clk *clk = NULL;
+	int ret;
+
+	edma_np = of_get_child_by_name(dev->of_node, "edma");
+
+	clk = devm_get_clk_from_child(dev, edma_np, id);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "clk %s get failed\n", id);
+		of_node_put(edma_np);
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_set_rate(clk, rate);
+	if (ret) {
+		dev_err(dev, "set %lu rate for %s failed\n", rate, id);
+		of_node_put(edma_np);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "clk %s enable failed\n", id);
+		of_node_put(edma_np);
+		return ret;
+	}
+
+	of_node_put(edma_np);
+
+	dev_dbg(dev, "set %lu rate for %s\n", rate, id);
+
+	return 0;
+}
+
+static int edma_clock_init(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct device *dev = ppe_dev->dev;
+	unsigned long ppe_rate;
+	int ret;
+
+	ppe_rate = ppe_dev->clk_rate;
+
+	ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CLK],
+					ppe_rate);
+	if (ret)
+		return ret;
+
+	ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CFG_CLK],
+					ppe_rate);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
+ *
+ * Map int_priority values to priority class and initialize
+ * unicast priority map table for default profile_id.
+ */
+static int edma_configure_ucast_prio_map_tbl(void)
+{
+	u8 pri_class, int_pri;
+	int ret;
+
+	/* Set the priority class value for every possible priority. */
+	for (int_pri = 0; int_pri < PPE_QUEUE_INTER_PRI_NUM; int_pri++) {
+		pri_class = edma_pri_map[int_pri];
+
+		/* Priority offset should be less than maximum supported
+		 * queue priority.
+		 */
+		if (pri_class > EDMA_PRI_MAX_PER_CORE - 1) {
+			pr_err("Configured incorrect priority offset: %d\n",
+			       pri_class);
+			return -EINVAL;
+		}
+
+		ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
+						   PPE_QUEUE_CLASS_PRIORITY, int_pri, pri_class);
+		if (ret) {
+			pr_err("Failed with error: %d to set queue priority class for int_pri: %d for profile_id: %d\n",
+			       ret, int_pri, 0);
+			return ret;
+		}
+
+		pr_debug("profile_id: %d, int_priority: %d, pri_class: %d\n",
+			 0, int_pri, pri_class);
+	}
+
+	return 0;
+}
+
+static int edma_irq_init(void)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct edma_ring_info *txcmpl = hw_info->txcmpl;
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct edma_ring_info *rx = hw_info->rx;
+	char edma_irq_name[EDMA_IRQ_NAME_SIZE];
+	struct device *dev = ppe_dev->dev;
+	struct platform_device *pdev;
+	struct device_node *edma_np;
+	int i;
+
+	pdev = to_platform_device(dev);
+	edma_np = of_get_child_by_name(dev->of_node, "edma");
+	edma_ctx->intr_info.intr_txcmpl = kzalloc((sizeof(*edma_ctx->intr_info.intr_txcmpl) *
+						  txcmpl->num_rings), GFP_KERNEL);
+	if (!edma_ctx->intr_info.intr_txcmpl) {
+		of_node_put(edma_np);
+		return -ENOMEM;
+	}
+
+	/* Get TXCMPL rings IRQ numbers. */
+	for (i = 0; i < txcmpl->num_rings; i++) {
+		snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_txcmpl_%d",
+			 txcmpl->ring_start + i);
+		edma_ctx->intr_info.intr_txcmpl[i] = of_irq_get_byname(edma_np, edma_irq_name);
+		if (edma_ctx->intr_info.intr_txcmpl[i] < 0) {
+			dev_err(dev, "%s: txcmpl_info.intr[%u] irq get failed\n",
+				edma_np->name, i);
+			of_node_put(edma_np);
+			kfree(edma_ctx->intr_info.intr_txcmpl);
+			return edma_ctx->intr_info.intr_txcmpl[i];
+		}
+
+		dev_dbg(dev, "%s: intr_info.intr_txcmpl[%u] = %u\n",
+			edma_np->name, i, edma_ctx->intr_info.intr_txcmpl[i]);
+	}
+
+	edma_ctx->intr_info.intr_rx = kzalloc((sizeof(*edma_ctx->intr_info.intr_rx) *
+					      rx->num_rings), GFP_KERNEL);
+	if (!edma_ctx->intr_info.intr_rx) {
+		of_node_put(edma_np);
+		kfree(edma_ctx->intr_info.intr_txcmpl);
+		return -ENOMEM;
+	}
+
+	/* Get RXDESC rings IRQ numbers. */
+	for (i = 0; i < rx->num_rings; i++) {
+		snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_rxdesc_%d",
+			 rx->ring_start + i);
+		edma_ctx->intr_info.intr_rx[i] = of_irq_get_byname(edma_np, edma_irq_name);
+		if (edma_ctx->intr_info.intr_rx[i] < 0) {
+			dev_err(dev, "%s: rx_queue_map_info.intr[%u] irq get failed\n",
+				edma_np->name, i);
+			of_node_put(edma_np);
+			kfree(edma_ctx->intr_info.intr_rx);
+			kfree(edma_ctx->intr_info.intr_txcmpl);
+			return edma_ctx->intr_info.intr_rx[i];
+		}
+
+		dev_dbg(dev, "%s: intr_info.intr_rx[%u] = %u\n",
+			edma_np->name, i, edma_ctx->intr_info.intr_rx[i]);
+	}
+
+	/* Get misc IRQ number. */
+	edma_ctx->intr_info.intr_misc = of_irq_get_byname(edma_np, "edma_misc");
+	if (edma_ctx->intr_info.intr_misc < 0) {
+		dev_err(dev, "%s: misc_intr irq get failed\n", edma_np->name);
+		of_node_put(edma_np);
+		kfree(edma_ctx->intr_info.intr_rx);
+		kfree(edma_ctx->intr_info.intr_txcmpl);
+		return edma_ctx->intr_info.intr_misc;
+	}
+
+	dev_dbg(dev, "%s: misc IRQ:%u\n", edma_np->name,
+		edma_ctx->intr_info.intr_misc);
+
+	of_node_put(edma_np);
+
+	return 0;
+}
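Note that edma_irq_init() only resolves and caches IRQ numbers from the
"edma" child node; no handlers are requested here. Presumably a later patch
in the series attaches them, along these lines (the handler name and dev_id
argument are illustrative):

    /* Sketch: attach a handler to one cached Tx-completion IRQ. */
    ret = request_irq(edma_ctx->intr_info.intr_txcmpl[i],
    		      edma_txcmpl_irq_handler, IRQF_SHARED,
    		      "edma_txcmpl", txcmpl_ring);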
+
+static int edma_hw_reset(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct device *dev = ppe_dev->dev;
+	struct reset_control *edma_hw_rst;
+	struct device_node *edma_np;
+	const char *reset_string;
+	int ret, i, count;
+
+	/* Count and parse reset names from DTSI. */
+	edma_np = of_get_child_by_name(dev->of_node, "edma");
+	count = of_property_count_strings(edma_np, "reset-names");
+	if (count < 0) {
+		dev_err(dev, "EDMA reset entry not found\n");
+		of_node_put(edma_np);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = of_property_read_string_index(edma_np, "reset-names",
+						    i, &reset_string);
+		if (ret) {
+			dev_err(dev, "Error reading reset-names\n");
+			of_node_put(edma_np);
+			return ret;
+		}
+
+		edma_hw_rst = of_reset_control_get_exclusive(edma_np, reset_string);
+		if (IS_ERR(edma_hw_rst)) {
+			of_node_put(edma_np);
+			return PTR_ERR(edma_hw_rst);
+		}
+
+		/* 100ms delay is required by hardware to reset EDMA. */
+		reset_control_assert(edma_hw_rst);
+		msleep(100);
+
+		reset_control_deassert(edma_hw_rst);
+		msleep(100);
+
+		reset_control_put(edma_hw_rst);
+		dev_dbg(dev, "EDMA HW reset, i:%d reset_string:%s\n", i, reset_string);
+	}
+
+	of_node_put(edma_np);
+
+	return 0;
+}
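Taken together, the clock, IRQ and reset helpers all parse the same "edma"
child of the PPE node, so the devicetree they imply looks roughly like this
(a sketch assembled from the names used above; the actual reset names and
ring numbers are not spelled out by this patch):

    /*
     * edma {
     *	clock-names = "edma", "edma-cfg";
     *	reset-names = <one or more entries, asserted in order>;
     *	interrupt-names = "edma_txcmpl_<ring>", ...,
     *			  "edma_rxdesc_<ring>", ..., "edma_misc";
     * };
     */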
+
+static int edma_hw_configure(void)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 data, reg;
+	int ret;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
+	ret = regmap_read(regmap, reg, &data);
+	if (ret)
+		return ret;
+
+	pr_debug("EDMA ver %d hw init\n", data);
+
+	/* Setup private data structure. */
+	edma_ctx->intr_info.intr_mask_rx = EDMA_RXDESC_INT_MASK_PKT_INT;
+	edma_ctx->intr_info.intr_mask_txcmpl = EDMA_TX_INT_MASK_PKT_INT;
+
+	/* Reset EDMA. */
+	ret = edma_hw_reset();
+	if (ret) {
+		pr_err("Error in resetting the hardware. ret: %d\n", ret);
+		return ret;
+	}
+
+	/* Allocate memory for netdevices. */
+	edma_ctx->netdev_arr = kzalloc((sizeof(*edma_ctx->netdev_arr) *
+					hw_info->max_ports),
+				       GFP_KERNEL);
+	if (!edma_ctx->netdev_arr)
+		return -ENOMEM;
+
+	/* Configure DMA request priority, DMA read burst length,
+	 * and AXI write size.
+	 */
+	data = FIELD_PREP(EDMA_DMAR_BURST_LEN_MASK, EDMA_BURST_LEN_ENABLE);
+	data |= FIELD_PREP(EDMA_DMAR_REQ_PRI_MASK, 0);
+	data |= FIELD_PREP(EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK, 31);
+	data |= FIELD_PREP(EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK, 7);
+	data |= FIELD_PREP(EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK, 7);
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_DMAR_CTRL_ADDR;
+	ret = regmap_write(regmap, reg, data);
+	if (ret)
+		return ret;
+
+	/* Configure Tx Timeout Threshold. */
+	data = EDMA_TX_TIMEOUT_THRESH_VAL;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_TX_TIMEOUT_THRESH_ADDR;
+	ret = regmap_write(regmap, reg, data);
+	if (ret)
+		return ret;
+
+	/* Set Miscellaneous error mask. */
+	data = EDMA_MISC_AXI_RD_ERR_MASK |
+	       EDMA_MISC_AXI_WR_ERR_MASK |
+	       EDMA_MISC_RX_DESC_FIFO_FULL_MASK |
+	       EDMA_MISC_RX_ERR_BUF_SIZE_MASK |
+	       EDMA_MISC_TX_SRAM_FULL_MASK |
+	       EDMA_MISC_TX_CMPL_BUF_FULL_MASK |
+	       EDMA_MISC_DATA_LEN_ERR_MASK;
+	data |= EDMA_MISC_TX_TIMEOUT_MASK;
+	edma_ctx->intr_info.intr_mask_misc = data;
+
+	/* Global EDMA enable and padding enable. */
+	data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_PORT_CTRL_ADDR;
+	ret = regmap_write(regmap, reg, data);
+	if (ret)
+		return ret;
+
+	/* Initialize unicast priority map table. */
+	ret = edma_configure_ucast_prio_map_tbl();
+	if (ret) {
+		pr_err("Failed to initialize unicast priority map table: %d\n",
+		       ret);
+		kfree(edma_ctx->netdev_arr);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * edma_destroy - EDMA Destroy.
+ * @ppe_dev: PPE device
+ *
+ * Free the memory allocated during setup.
+ */
+void edma_destroy(struct ppe_device *ppe_dev)
+{
+	kfree(edma_ctx->intr_info.intr_rx);
+	kfree(edma_ctx->intr_info.intr_txcmpl);
+	kfree(edma_ctx->netdev_arr);
+}
+
+/**
+ * edma_setup - EDMA Setup.
+ * @ppe_dev: PPE device
+ *
+ * Configure Ethernet global ctx, clocks, hardware and interrupts.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int edma_setup(struct ppe_device *ppe_dev)
+{
+	struct device *dev = ppe_dev->dev;
+	int ret;
+
+	edma_ctx = devm_kzalloc(dev, sizeof(*edma_ctx), GFP_KERNEL);
+	if (!edma_ctx)
+		return -ENOMEM;
+
+	edma_ctx->hw_info = &ipq9574_hw_info;
+	edma_ctx->ppe_dev = ppe_dev;
+
+	/* Configure the EDMA common clocks. */
+	ret = edma_clock_init();
+	if (ret) {
+		dev_err(dev, "Error in configuring the EDMA clocks\n");
+		return ret;
+	}
+
+	dev_dbg(dev, "QCOM EDMA common clocks are configured\n");
+
+	ret = edma_hw_configure();
+	if (ret) {
+		dev_err(dev, "Error in edma configuration\n");
+		return ret;
+	}
+
+	ret = edma_irq_init();
+	if (ret) {
+		dev_err(dev, "Error in irq initialization\n");
+		return ret;
+	}
+
+	dev_info(dev, "EDMA configuration successful\n");
+
+	return 0;
+}
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __EDMA_MAIN__
+#define __EDMA_MAIN__
+
+#include "ppe_api.h"
+
+/* One clock cycle = 1/(EDMA clock frequency in MHz) microseconds.
+ *
+ * One timer unit is 128 clock cycles.
+ *
+ * Therefore the microsecond to timer unit conversion is:
+ * Timer unit = time in microseconds / (one clock cycle in microseconds * cycles in 1 timer unit)
+ *	      = ('x' microseconds * EDMA clock frequency in MHz ('y') / 128).
+ */
+#define EDMA_CYCLE_PER_TIMER_UNIT	128
+#define EDMA_MICROSEC_TO_TIMER_UNIT(x, y)	((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
+#define MHZ	1000000UL
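A worked example of the conversion, assuming a hypothetical 350 MHz EDMA
clock (the real rate is taken from ppe_dev->clk_rate at runtime):

    /* 100 us at 350 MHz: 100 * 350 / 128 = 273 timer units (integer math). */
    u32 units = EDMA_MICROSEC_TO_TIMER_UNIT(100, 350000000UL / MHZ);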
+
+/* EDMA profile ID. */
+#define EDMA_CPU_PORT_PROFILE_ID	0
+
+/* Number of PPE queue priorities supported per ARM core. */
+#define EDMA_PRI_MAX_PER_CORE	8
+
+/**
+ * struct edma_ring_info - EDMA ring data structure.
+ * @max_rings: Maximum number of rings
+ * @ring_start: Ring start ID
+ * @num_rings: Number of rings
+ */
+struct edma_ring_info {
+	u32 max_rings;
+	u32 ring_start;
+	u32 num_rings;
+};
+
+/**
+ * struct edma_hw_info - EDMA hardware data structure.
+ * @rxfill: Rx Fill ring information
+ * @rx: Rx Desc ring information
+ * @tx: Tx Desc ring information
+ * @txcmpl: Tx complete ring information
+ * @max_ports: Maximum number of ports
+ * @napi_budget_rx: Rx NAPI budget
+ * @napi_budget_tx: Tx NAPI budget
+ */
+struct edma_hw_info {
+	struct edma_ring_info *rxfill;
+	struct edma_ring_info *rx;
+	struct edma_ring_info *tx;
+	struct edma_ring_info *txcmpl;
+	u32 max_ports;
+	u32 napi_budget_rx;
+	u32 napi_budget_tx;
+};
+
+/**
+ * struct edma_intr_info - EDMA interrupt data structure.
+ * @intr_mask_rx: RX interrupt mask
+ * @intr_rx: Rx interrupts
+ * @intr_mask_txcmpl: Tx completion interrupt mask
+ * @intr_txcmpl: Tx completion interrupts
+ * @intr_mask_misc: Miscellaneous interrupt mask
+ * @intr_misc: Miscellaneous interrupts
+ */
+struct edma_intr_info {
+	u32 intr_mask_rx;
+	int *intr_rx;
+	u32 intr_mask_txcmpl;
+	int *intr_txcmpl;
+	u32 intr_mask_misc;
+	int intr_misc;
+};
+
+/**
+ * struct edma_context - EDMA context.
+ * @netdev_arr: Net device for each EDMA port
+ * @ppe_dev: PPE device
+ * @hw_info: EDMA Hardware info
+ * @intr_info: EDMA Interrupt info
+ */
+struct edma_context {
+	struct net_device **netdev_arr;
+	struct ppe_device *ppe_dev;
+	struct edma_hw_info *hw_info;
+	struct edma_intr_info intr_info;
+};
+
+/* Global EDMA context. */
+extern struct edma_context *edma_ctx;
+
+void edma_destroy(struct ppe_device *ppe_dev);
+int edma_setup(struct ppe_device *ppe_dev);
+
+#endif
--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
+#include "edma.h"
 #include "ppe.h"
 #include "ppe_config.h"
 #include "ppe_debugfs.h"
@@ -208,10 +209,16 @@ static int qcom_ppe_probe(struct platform_device *pdev)
 		return dev_err_probe(dev, ret, "PPE HW config failed\n");
 
-	ret = ppe_port_mac_init(ppe_dev);
+	ret = edma_setup(ppe_dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "EDMA setup failed\n");
+
+	ret = ppe_port_mac_init(ppe_dev);
+	if (ret) {
+		edma_destroy(ppe_dev);
 		return dev_err_probe(dev, ret,
 				     "PPE Port MAC initialization failed\n");
+	}
 
 	ppe_debugfs_setup(ppe_dev);
 	platform_set_drvdata(pdev, ppe_dev);
@@ -226,6 +233,7 @@ static void qcom_ppe_remove(struct platform_device *pdev)
 	ppe_dev = platform_get_drvdata(pdev);
 	ppe_debugfs_teardown(ppe_dev);
 	ppe_port_mac_deinit(ppe_dev);
+	edma_destroy(ppe_dev);
 
 	platform_set_drvdata(pdev, NULL);
 }

--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -788,4 +788,257 @@
 #define XGMAC_RXDISCARD_GB_ADDR		0x9AC
 #define XGMAC_RXDISCARDBYTE_GB_ADDR	0x9B4
 
+#define EDMA_BASE_OFFSET		0xb00000
+
+/* EDMA register offsets */
+#define EDMA_REG_MAS_CTRL_ADDR			0x0
+#define EDMA_REG_PORT_CTRL_ADDR			0x4
+#define EDMA_REG_VLAN_CTRL_ADDR			0x8
+#define EDMA_REG_RXDESC2FILL_MAP_0_ADDR		0x14
+#define EDMA_REG_RXDESC2FILL_MAP_1_ADDR		0x18
+#define EDMA_REG_RXDESC2FILL_MAP_2_ADDR		0x1c
+#define EDMA_REG_TXQ_CTRL_ADDR			0x20
+#define EDMA_REG_TXQ_CTRL_2_ADDR		0x24
+#define EDMA_REG_TXQ_FC_0_ADDR			0x28
+#define EDMA_REG_TXQ_FC_1_ADDR			0x30
+#define EDMA_REG_TXQ_FC_2_ADDR			0x34
+#define EDMA_REG_TXQ_FC_3_ADDR			0x38
+#define EDMA_REG_RXQ_CTRL_ADDR			0x3c
+#define EDMA_REG_MISC_ERR_QID_ADDR		0x40
+#define EDMA_REG_RXQ_FC_THRE_ADDR		0x44
+#define EDMA_REG_DMAR_CTRL_ADDR			0x48
+#define EDMA_REG_AXIR_CTRL_ADDR			0x4c
+#define EDMA_REG_AXIW_CTRL_ADDR			0x50
+#define EDMA_REG_MIN_MSS_ADDR			0x54
+#define EDMA_REG_LOOPBACK_CTRL_ADDR		0x58
+#define EDMA_REG_MISC_INT_STAT_ADDR		0x5c
+#define EDMA_REG_MISC_INT_MASK_ADDR		0x60
+#define EDMA_REG_DBG_CTRL_ADDR			0x64
+#define EDMA_REG_DBG_DATA_ADDR			0x68
+#define EDMA_REG_TX_TIMEOUT_THRESH_ADDR		0x6c
+#define EDMA_REG_REQ0_FIFO_THRESH_ADDR		0x80
+#define EDMA_REG_WB_OS_THRESH_ADDR		0x84
+#define EDMA_REG_MISC_ERR_QID_REG2_ADDR		0x88
+#define EDMA_REG_TXDESC2CMPL_MAP_0_ADDR		0x8c
+#define EDMA_REG_TXDESC2CMPL_MAP_1_ADDR		0x90
+#define EDMA_REG_TXDESC2CMPL_MAP_2_ADDR		0x94
+#define EDMA_REG_TXDESC2CMPL_MAP_3_ADDR		0x98
+#define EDMA_REG_TXDESC2CMPL_MAP_4_ADDR		0x9c
+#define EDMA_REG_TXDESC2CMPL_MAP_5_ADDR		0xa0
+
+/* Tx descriptor ring configuration register addresses */
+#define EDMA_REG_TXDESC_BA(n)		(0x1000 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_PROD_IDX(n)	(0x1004 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_CONS_IDX(n)	(0x1008 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_RING_SIZE(n)	(0x100c + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_CTRL(n)		(0x1010 + (0x1000 * (n)))
+#define EDMA_REG_TXDESC_BA2(n)		(0x1014 + (0x1000 * (n)))
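Each ring gets a 4 KiB register window, so every per-ring macro expands to
base + 0x1000 * ring: Tx descriptor ring 3's base-address register, for
instance, lands at 0x1000 + 0x1000 * 3 = 0x4000 inside the EDMA block. A
sketch of programming it through the PPE regmap (the ring index and
dma_addr are illustrative):

    /* Sketch: point Tx descriptor ring 3 at its descriptor array. */
    ret = regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC_BA(3),
    		       lower_32_bits(dma_addr) & EDMA_RING_DMA_MASK);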
+
+/* RxFill ring configuration register addresses */
+#define EDMA_REG_RXFILL_BA(n)		(0x29000 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_PROD_IDX(n)	(0x29004 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_CONS_IDX(n)	(0x29008 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_RING_SIZE(n)	(0x2900c + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_BUFFER1_SIZE(n)	(0x29010 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_FC_THRE(n)	(0x29014 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_UGT_THRE(n)	(0x29018 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_RING_EN(n)	(0x2901c + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_DISABLE(n)	(0x29020 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_DISABLE_DONE(n)	(0x29024 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_INT_STAT(n)	(0x31000 + (0x1000 * (n)))
+#define EDMA_REG_RXFILL_INT_MASK(n)	(0x31004 + (0x1000 * (n)))
+
+/* Rx descriptor ring configuration register addresses */
+#define EDMA_REG_RXDESC_BA(n)		(0x39000 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_PROD_IDX(n)	(0x39004 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_CONS_IDX(n)	(0x39008 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_RING_SIZE(n)	(0x3900c + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_FC_THRE(n)	(0x39010 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_UGT_THRE(n)	(0x39014 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_CTRL(n)		(0x39018 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_BPC(n)		(0x3901c + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_DISABLE(n)	(0x39020 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_DISABLE_DONE(n)	(0x39024 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_PREHEADER_BA(n)	(0x39028 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_INT_STAT(n)	(0x59000 + (0x1000 * (n)))
+#define EDMA_REG_RXDESC_INT_MASK(n)	(0x59004 + (0x1000 * (n)))
+
+#define EDMA_REG_RX_MOD_TIMER(n)	(0x59008 + (0x1000 * (n)))
+#define EDMA_REG_RX_INT_CTRL(n)		(0x5900c + (0x1000 * (n)))
+
+/* Tx completion ring configuration register addresses */
+#define EDMA_REG_TXCMPL_BA(n)		(0x79000 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_PROD_IDX(n)	(0x79004 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_CONS_IDX(n)	(0x79008 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_RING_SIZE(n)	(0x7900c + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_UGT_THRE(n)	(0x79010 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_CTRL(n)		(0x79014 + (0x1000 * (n)))
+#define EDMA_REG_TXCMPL_BPC(n)		(0x79018 + (0x1000 * (n)))
+
+#define EDMA_REG_TX_INT_STAT(n)		(0x99000 + (0x1000 * (n)))
+#define EDMA_REG_TX_INT_MASK(n)		(0x99004 + (0x1000 * (n)))
+#define EDMA_REG_TX_MOD_TIMER(n)	(0x99008 + (0x1000 * (n)))
+#define EDMA_REG_TX_INT_CTRL(n)		(0x9900c + (0x1000 * (n)))
+
+/* EDMA_QID2RID_TABLE_MEM register field masks */
+#define EDMA_RX_RING_ID_QUEUE0_MASK	GENMASK(7, 0)
+#define EDMA_RX_RING_ID_QUEUE1_MASK	GENMASK(15, 8)
+#define EDMA_RX_RING_ID_QUEUE2_MASK	GENMASK(23, 16)
+#define EDMA_RX_RING_ID_QUEUE3_MASK	GENMASK(31, 24)
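Each 32-bit word of the QID2RID table carries one byte-wide Rx ring ID per
PPE queue. A sketch of packing a word with FIELD_PREP() from
<linux/bitfield.h> (the ring IDs are illustrative):

    /* Sketch: map four consecutive queues to Rx rings 20..23. */
    u32 val = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, 20) |
    	      FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, 21) |
    	      FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, 22) |
    	      FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, 23);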
+
+/* EDMA_REG_PORT_CTRL register bit definitions */
+#define EDMA_PORT_PAD_EN		0x1
+#define EDMA_PORT_EDMA_EN		0x2
+
+/* EDMA_REG_DMAR_CTRL register field masks */
+#define EDMA_DMAR_REQ_PRI_MASK			GENMASK(2, 0)
+#define EDMA_DMAR_BURST_LEN_MASK		BIT(3)
+#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK	GENMASK(8, 4)
+#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK	GENMASK(11, 9)
+#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK	GENMASK(14, 12)
+
+#define EDMA_BURST_LEN_ENABLE		0
+
+/* Tx timeout threshold */
+#define EDMA_TX_TIMEOUT_THRESH_VAL	0xFFFF
+
+/* Rx descriptor ring base address mask */
+#define EDMA_RXDESC_BA_MASK		0xffffffff
+
+/* Rx descriptor ring pre-header base address mask */
+#define EDMA_RXDESC_PREHEADER_BA_MASK	0xffffffff
+
+/* Tx descriptor producer ring index mask */
+#define EDMA_TXDESC_PROD_IDX_MASK	0xffff
+
+/* Tx descriptor consumer ring index mask */
+#define EDMA_TXDESC_CONS_IDX_MASK	0xffff
+
+/* Tx descriptor ring size mask */
+#define EDMA_TXDESC_RING_SIZE_MASK	0xffff
+
+/* Tx descriptor ring enable */
+#define EDMA_TXDESC_TX_ENABLE		0x1
+
+#define EDMA_TXDESC_CTRL_TXEN_MASK	BIT(0)
+#define EDMA_TXDESC_CTRL_FC_GRP_ID_MASK	GENMASK(3, 1)
+
+/* Tx completion ring producer index mask */
+#define EDMA_TXCMPL_PROD_IDX_MASK	0xffff
+
+/* Tx completion ring urgent threshold mask */
+#define EDMA_TXCMPL_LOW_THRE_MASK	0xffff
+#define EDMA_TXCMPL_LOW_THRE_SHIFT	0
+
+/* EDMA_REG_TX_MOD_TIMER mask */
+#define EDMA_TX_MOD_TIMER_INIT_MASK	0xffff
+#define EDMA_TX_MOD_TIMER_INIT_SHIFT	0
+
+/* Rx fill ring producer index mask */
+#define EDMA_RXFILL_PROD_IDX_MASK	0xffff
+
+/* Rx fill ring consumer index mask */
+#define EDMA_RXFILL_CONS_IDX_MASK	0xffff
+
+/* Rx fill ring size mask */
+#define EDMA_RXFILL_RING_SIZE_MASK	0xffff
+
+/* Rx fill ring flow control threshold masks */
+#define EDMA_RXFILL_FC_XON_THRE_MASK	0x7ff
+#define EDMA_RXFILL_FC_XON_THRE_SHIFT	12
+#define EDMA_RXFILL_FC_XOFF_THRE_MASK	0x7ff
+#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT	0
+
+/* Rx fill ring enable bit */
+#define EDMA_RXFILL_RING_EN		0x1
+
+/* Rx descriptor ring producer index mask */
+#define EDMA_RXDESC_PROD_IDX_MASK	0xffff
+
+/* Rx descriptor ring consumer index mask */
+#define EDMA_RXDESC_CONS_IDX_MASK	0xffff
+
+/* Rx descriptor ring size masks */
+#define EDMA_RXDESC_RING_SIZE_MASK	0xffff
+#define EDMA_RXDESC_PL_OFFSET_MASK	0x1ff
+#define EDMA_RXDESC_PL_OFFSET_SHIFT	16
+#define EDMA_RXDESC_PL_DEFAULT_VALUE	0
+
+/* Rx descriptor ring flow control threshold masks */
+#define EDMA_RXDESC_FC_XON_THRE_MASK	0x7ff
+#define EDMA_RXDESC_FC_XON_THRE_SHIFT	12
+#define EDMA_RXDESC_FC_XOFF_THRE_MASK	0x7ff
+#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT	0
+
+/* Rx descriptor ring urgent threshold mask */
+#define EDMA_RXDESC_LOW_THRE_MASK	0xffff
+#define EDMA_RXDESC_LOW_THRE_SHIFT	0
+
+/* Rx descriptor ring enable bit */
+#define EDMA_RXDESC_RX_EN		0x1
+
+/* Tx interrupt status bit */
+#define EDMA_TX_INT_MASK_PKT_INT	0x1
+
+/* Rx interrupt mask */
+#define EDMA_RXDESC_INT_MASK_PKT_INT	0x1
+
+#define EDMA_MASK_INT_DISABLE		0x0
+#define EDMA_MASK_INT_CLEAR		0x0
+
+/* EDMA_REG_RX_MOD_TIMER register field masks */
+#define EDMA_RX_MOD_TIMER_INIT_MASK	0xffff
+#define EDMA_RX_MOD_TIMER_INIT_SHIFT	0
+
+/* EDMA ring mask */
+#define EDMA_RING_DMA_MASK		0xffffffff
+
+/* RXDESC threshold interrupt */
+#define EDMA_RXDESC_UGT_INT_STAT	0x2
+
+/* RXDESC timer interrupt */
+#define EDMA_RXDESC_PKT_INT_STAT	0x1
+
+/* RXDESC interrupt status mask */
+#define EDMA_RXDESC_RING_INT_STATUS_MASK \
+	(EDMA_RXDESC_UGT_INT_STAT | EDMA_RXDESC_PKT_INT_STAT)
+
+/* TXCMPL threshold interrupt */
+#define EDMA_TXCMPL_UGT_INT_STAT	0x2
+
+/* TXCMPL timer interrupt */
+#define EDMA_TXCMPL_PKT_INT_STAT	0x1
+
+/* TXCMPL interrupt status mask */
+#define EDMA_TXCMPL_RING_INT_STATUS_MASK \
+	(EDMA_TXCMPL_UGT_INT_STAT | EDMA_TXCMPL_PKT_INT_STAT)
+
+#define EDMA_TXCMPL_RETMODE_OPAQUE	0x0
+
+#define EDMA_RXDESC_LOW_THRE		0
+#define EDMA_RX_MOD_TIMER_INIT		1000
+#define EDMA_RX_NE_INT_EN		0x2
+
+#define EDMA_TX_MOD_TIMER		150
+
+#define EDMA_TX_INITIAL_PROD_IDX	0x0
+#define EDMA_TX_NE_INT_EN		0x2
+
+/* EDMA misc error mask */
+#define EDMA_MISC_AXI_RD_ERR_MASK	BIT(0)
+#define EDMA_MISC_AXI_WR_ERR_MASK	BIT(1)
+#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK	BIT(2)
+#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK	BIT(3)
+#define EDMA_MISC_TX_SRAM_FULL_MASK	BIT(4)
+#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK	BIT(5)
+#define EDMA_MISC_DATA_LEN_ERR_MASK	BIT(6)
+#define EDMA_MISC_TX_TIMEOUT_MASK	BIT(7)
+
+/* EDMA txdesc2cmpl map */
+#define EDMA_TXDESC2CMPL_MAP_TXDESC_MASK	0x1F
+
+/* EDMA rxdesc2fill map */
+#define EDMA_RXDESC2FILL_MAP_RXDESC_MASK	0x7
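As a closing illustration of how the interrupt definitions above combine
(the ring index, regmap handle and overall flow are assumptions here; the
actual handlers arrive with later patches in the series):

    /* Sketch: read one Tx completion ring's interrupt status, then mask the
     * ring off with EDMA_MASK_INT_DISABLE while it is being polled.
     */
    u32 status;

    regmap_read(regmap, EDMA_BASE_OFFSET + EDMA_REG_TX_INT_STAT(ring_id),
    	    &status);
    status &= EDMA_TXCMPL_RING_INT_STATUS_MASK;
    if (status)
    	regmap_write(regmap,
    		     EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(ring_id),
    		     EDMA_MASK_INT_DISABLE);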