2 * Xilinx xps_ll_temac ethernet driver for u-boot
6 * Copyright (C) 2011 - 2012 Stephan Linz <linz@li-pro.net>
7 * Copyright (C) 2008 - 2011 Michal Simek <monstr@monstr.eu>
8 * Copyright (C) 2008 - 2011 PetaLogix
10 * Based on Yoshio Kashiwagi kashiwagi@co-nss.co.jp driver
11 * Copyright (C) 2008 Nissin Systems Co.,Ltd.
14 * CREDITS: tsec driver
16 * SPDX-License-Identifier: GPL-2.0+
18 * [0]: http://www.xilinx.com/support/documentation
20 * [M]: [0]/ip_documentation/mpmc.pdf
21 * [S]: [0]/ip_documentation/xps_ll_temac.pdf
22 * [A]: [0]/application_notes/xapp1041.pdf
29 #include <asm/types.h>
32 #include "xilinx_ll_temac.h"
33 #include "xilinx_ll_temac_sdma.h"
37 static unsigned int rx_idx
; /* index of the current RX buffer */
38 static unsigned int tx_idx
; /* index of the current TX buffer */
41 struct cdmac_bd rx
[PKTBUFSRX
];
42 struct cdmac_bd tx
[TX_BUF_CNT
];
46 * DMA Buffer Descriptor alignment
48 * If the address contained in the Next Descriptor Pointer register is not
49 * 8-word aligned or reaches beyond the range of available memory, the SDMA
50 * halts processing and sets the CDMAC_BD_STCTRL_ERROR bit in the respective
51 * status register (tx_chnl_sts or rx_chnl_sts).
53 * [1]: [0]/ip_documentation/mpmc.pdf
54 * page 161, Next Descriptor Pointer
56 static struct rtx_cdmac_bd cdmac_bd
__aligned(32);
58 #if defined(CONFIG_XILINX_440) || defined(CONFIG_XILINX_405)
 * Indirect DCR access operations mi{ft}dcr_xilinx(), especially
62 * for Xilinx PowerPC implementations on FPGA.
64 * FIXME: This part should go up to arch/powerpc -- but where?
66 #include <asm/processor.h>
67 #define XILINX_INDIRECT_DCR_ADDRESS_REG 0
68 #define XILINX_INDIRECT_DCR_ACCESS_REG 1
69 inline unsigned mifdcr_xilinx(const unsigned dcrn
)
71 mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG
, dcrn
);
72 return mfdcr(XILINX_INDIRECT_DCR_ACCESS_REG
);
74 inline void mitdcr_xilinx(const unsigned dcrn
, int val
)
76 mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG
, dcrn
);
77 mtdcr(XILINX_INDIRECT_DCR_ACCESS_REG
, val
);
80 /* Xilinx Device Control Register (DCR) in/out accessors */
81 inline unsigned ll_temac_xldcr_in32(phys_addr_t addr
)
83 return mifdcr_xilinx((const unsigned)addr
);
85 inline void ll_temac_xldcr_out32(phys_addr_t addr
, unsigned value
)
87 mitdcr_xilinx((const unsigned)addr
, value
);
90 void ll_temac_collect_xldcr_sdma_reg_addr(struct eth_device
*dev
)
92 struct ll_temac
*ll_temac
= dev
->priv
;
93 phys_addr_t dmac_ctrl
= ll_temac
->ctrladdr
;
94 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
96 ra
[TX_NXTDESC_PTR
] = dmac_ctrl
+ TX_NXTDESC_PTR
;
97 ra
[TX_CURBUF_ADDR
] = dmac_ctrl
+ TX_CURBUF_ADDR
;
98 ra
[TX_CURBUF_LENGTH
] = dmac_ctrl
+ TX_CURBUF_LENGTH
;
99 ra
[TX_CURDESC_PTR
] = dmac_ctrl
+ TX_CURDESC_PTR
;
100 ra
[TX_TAILDESC_PTR
] = dmac_ctrl
+ TX_TAILDESC_PTR
;
101 ra
[TX_CHNL_CTRL
] = dmac_ctrl
+ TX_CHNL_CTRL
;
102 ra
[TX_IRQ_REG
] = dmac_ctrl
+ TX_IRQ_REG
;
103 ra
[TX_CHNL_STS
] = dmac_ctrl
+ TX_CHNL_STS
;
104 ra
[RX_NXTDESC_PTR
] = dmac_ctrl
+ RX_NXTDESC_PTR
;
105 ra
[RX_CURBUF_ADDR
] = dmac_ctrl
+ RX_CURBUF_ADDR
;
106 ra
[RX_CURBUF_LENGTH
] = dmac_ctrl
+ RX_CURBUF_LENGTH
;
107 ra
[RX_CURDESC_PTR
] = dmac_ctrl
+ RX_CURDESC_PTR
;
108 ra
[RX_TAILDESC_PTR
] = dmac_ctrl
+ RX_TAILDESC_PTR
;
109 ra
[RX_CHNL_CTRL
] = dmac_ctrl
+ RX_CHNL_CTRL
;
110 ra
[RX_IRQ_REG
] = dmac_ctrl
+ RX_IRQ_REG
;
111 ra
[RX_CHNL_STS
] = dmac_ctrl
+ RX_CHNL_STS
;
112 ra
[DMA_CONTROL_REG
] = dmac_ctrl
+ DMA_CONTROL_REG
;
#endif /* CONFIG_XILINX_440 || CONFIG_XILINX_405 */
117 /* Xilinx Processor Local Bus (PLB) in/out accessors */
118 inline unsigned ll_temac_xlplb_in32(phys_addr_t addr
)
120 return in_be32((void *)addr
);
122 inline void ll_temac_xlplb_out32(phys_addr_t addr
, unsigned value
)
124 out_be32((void *)addr
, value
);
127 /* collect all register addresses for Xilinx PLB in/out accessors */
128 void ll_temac_collect_xlplb_sdma_reg_addr(struct eth_device
*dev
)
130 struct ll_temac
*ll_temac
= dev
->priv
;
131 struct sdma_ctrl
*sdma_ctrl
= (void *)ll_temac
->ctrladdr
;
132 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
134 ra
[TX_NXTDESC_PTR
] = (phys_addr_t
)&sdma_ctrl
->tx_nxtdesc_ptr
;
135 ra
[TX_CURBUF_ADDR
] = (phys_addr_t
)&sdma_ctrl
->tx_curbuf_addr
;
136 ra
[TX_CURBUF_LENGTH
] = (phys_addr_t
)&sdma_ctrl
->tx_curbuf_length
;
137 ra
[TX_CURDESC_PTR
] = (phys_addr_t
)&sdma_ctrl
->tx_curdesc_ptr
;
138 ra
[TX_TAILDESC_PTR
] = (phys_addr_t
)&sdma_ctrl
->tx_taildesc_ptr
;
139 ra
[TX_CHNL_CTRL
] = (phys_addr_t
)&sdma_ctrl
->tx_chnl_ctrl
;
140 ra
[TX_IRQ_REG
] = (phys_addr_t
)&sdma_ctrl
->tx_irq_reg
;
141 ra
[TX_CHNL_STS
] = (phys_addr_t
)&sdma_ctrl
->tx_chnl_sts
;
142 ra
[RX_NXTDESC_PTR
] = (phys_addr_t
)&sdma_ctrl
->rx_nxtdesc_ptr
;
143 ra
[RX_CURBUF_ADDR
] = (phys_addr_t
)&sdma_ctrl
->rx_curbuf_addr
;
144 ra
[RX_CURBUF_LENGTH
] = (phys_addr_t
)&sdma_ctrl
->rx_curbuf_length
;
145 ra
[RX_CURDESC_PTR
] = (phys_addr_t
)&sdma_ctrl
->rx_curdesc_ptr
;
146 ra
[RX_TAILDESC_PTR
] = (phys_addr_t
)&sdma_ctrl
->rx_taildesc_ptr
;
147 ra
[RX_CHNL_CTRL
] = (phys_addr_t
)&sdma_ctrl
->rx_chnl_ctrl
;
148 ra
[RX_IRQ_REG
] = (phys_addr_t
)&sdma_ctrl
->rx_irq_reg
;
149 ra
[RX_CHNL_STS
] = (phys_addr_t
)&sdma_ctrl
->rx_chnl_sts
;
150 ra
[DMA_CONTROL_REG
] = (phys_addr_t
)&sdma_ctrl
->dma_control_reg
;
153 /* Check for TX and RX channel errors. */
154 static inline int ll_temac_sdma_error(struct eth_device
*dev
)
157 struct ll_temac
*ll_temac
= dev
->priv
;
158 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
160 err
= ll_temac
->in32(ra
[TX_CHNL_STS
]) & CHNL_STS_ERROR
;
161 err
|= ll_temac
->in32(ra
[RX_CHNL_STS
]) & CHNL_STS_ERROR
;
166 int ll_temac_init_sdma(struct eth_device
*dev
)
168 struct ll_temac
*ll_temac
= dev
->priv
;
169 struct cdmac_bd
*rx_dp
;
170 struct cdmac_bd
*tx_dp
;
171 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
174 printf("%s: SDMA: %d Rx buffers, %d Tx buffers\n",
175 dev
->name
, PKTBUFSRX
, TX_BUF_CNT
);
177 /* Initialize the Rx Buffer descriptors */
178 for (i
= 0; i
< PKTBUFSRX
; i
++) {
179 rx_dp
= &cdmac_bd
.rx
[i
];
180 memset(rx_dp
, 0, sizeof(*rx_dp
));
181 rx_dp
->next_p
= rx_dp
;
182 rx_dp
->buf_len
= PKTSIZE_ALIGN
;
183 rx_dp
->phys_buf_p
= (u8
*)net_rx_packets
[i
];
184 flush_cache((u32
)rx_dp
->phys_buf_p
, PKTSIZE_ALIGN
);
186 flush_cache((u32
)cdmac_bd
.rx
, sizeof(cdmac_bd
.rx
));
188 /* Initialize the TX Buffer Descriptors */
189 for (i
= 0; i
< TX_BUF_CNT
; i
++) {
190 tx_dp
= &cdmac_bd
.tx
[i
];
191 memset(tx_dp
, 0, sizeof(*tx_dp
));
192 tx_dp
->next_p
= tx_dp
;
194 flush_cache((u32
)cdmac_bd
.tx
, sizeof(cdmac_bd
.tx
));
196 /* Reset index counter to the Rx and Tx Buffer descriptors */
199 /* initial Rx DMA start by writing to respective TAILDESC_PTR */
200 ll_temac
->out32(ra
[RX_CURDESC_PTR
], (int)&cdmac_bd
.rx
[rx_idx
]);
201 ll_temac
->out32(ra
[RX_TAILDESC_PTR
], (int)&cdmac_bd
.rx
[rx_idx
]);
206 int ll_temac_halt_sdma(struct eth_device
*dev
)
208 unsigned timeout
= 50; /* 1usec * 50 = 50usec */
209 struct ll_temac
*ll_temac
= dev
->priv
;
210 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
215 * Quote from MPMC documentation: Writing a 1 to this field
216 * forces the DMA engine to shutdown and reset itself. After
217 * setting this bit, software must poll it until the bit is
218 * cleared by the DMA. This indicates that the reset process
219 * is done and the pipeline has been flushed.
221 ll_temac
->out32(ra
[DMA_CONTROL_REG
], DMA_CONTROL_RESET
);
222 while (timeout
&& (ll_temac
->in32(ra
[DMA_CONTROL_REG
])
223 & DMA_CONTROL_RESET
)) {
229 printf("%s: Timeout\n", __func__
);
236 int ll_temac_reset_sdma(struct eth_device
*dev
)
239 struct ll_temac
*ll_temac
= dev
->priv
;
240 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
242 /* Soft reset the DMA. */
243 if (ll_temac_halt_sdma(dev
))
246 /* Now clear the interrupts. */
247 r
= ll_temac
->in32(ra
[TX_CHNL_CTRL
]);
248 r
&= ~CHNL_CTRL_IRQ_MASK
;
249 ll_temac
->out32(ra
[TX_CHNL_CTRL
], r
);
251 r
= ll_temac
->in32(ra
[RX_CHNL_CTRL
]);
252 r
&= ~CHNL_CTRL_IRQ_MASK
;
253 ll_temac
->out32(ra
[RX_CHNL_CTRL
], r
);
255 /* Now ACK pending IRQs. */
256 ll_temac
->out32(ra
[TX_IRQ_REG
], IRQ_REG_IRQ_MASK
);
257 ll_temac
->out32(ra
[RX_IRQ_REG
], IRQ_REG_IRQ_MASK
);
259 /* Set tail-ptr mode, disable errors for both channels. */
260 ll_temac
->out32(ra
[DMA_CONTROL_REG
],
261 /* Enable use of tail pointer register */
263 /* Disable error when 2 or 4 bit coalesce cnt overfl */
264 DMA_CONTROL_RXOCEID
|
265 /* Disable error when 2 or 4 bit coalesce cnt overfl */
266 DMA_CONTROL_TXOCEID
);
271 int ll_temac_recv_sdma(struct eth_device
*dev
)
274 struct cdmac_bd
*rx_dp
= &cdmac_bd
.rx
[rx_idx
];
275 struct ll_temac
*ll_temac
= dev
->priv
;
276 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
278 if (ll_temac_sdma_error(dev
)) {
280 if (ll_temac_reset_sdma(dev
))
283 ll_temac_init_sdma(dev
);
286 flush_cache((u32
)rx_dp
, sizeof(*rx_dp
));
288 if (!(rx_dp
->sca
.stctrl
& CDMAC_BD_STCTRL_COMPLETED
))
291 if (rx_dp
->sca
.stctrl
& (CDMAC_BD_STCTRL_SOP
| CDMAC_BD_STCTRL_EOP
)) {
293 length
= rx_dp
->sca
.app
[4] & CDMAC_BD_APP4_RXBYTECNT_MASK
;
297 printf("%s: Got part of package, unsupported (%x)\n",
298 __func__
, rx_dp
->sca
.stctrl
);
301 /* flip the buffer */
302 flush_cache((u32
)rx_dp
->phys_buf_p
, length
);
304 /* reset the current descriptor */
305 rx_dp
->sca
.stctrl
= 0;
306 rx_dp
->sca
.app
[4] = 0;
307 flush_cache((u32
)rx_dp
, sizeof(*rx_dp
));
309 /* Find next empty buffer descriptor, preparation for next iteration */
310 rx_idx
= (rx_idx
+ 1) % PKTBUFSRX
;
311 rx_dp
= &cdmac_bd
.rx
[rx_idx
];
312 flush_cache((u32
)rx_dp
, sizeof(*rx_dp
));
314 /* DMA start by writing to respective TAILDESC_PTR */
315 ll_temac
->out32(ra
[RX_CURDESC_PTR
], (int)&cdmac_bd
.rx
[rx_idx
]);
316 ll_temac
->out32(ra
[RX_TAILDESC_PTR
], (int)&cdmac_bd
.rx
[rx_idx
]);
318 if (length
> 0 && pb_idx
!= -1)
319 net_process_received_packet(net_rx_packets
[pb_idx
], length
);
324 int ll_temac_send_sdma(struct eth_device
*dev
, void *packet
, int length
)
326 unsigned timeout
= 50; /* 1usec * 50 = 50usec */
327 struct cdmac_bd
*tx_dp
= &cdmac_bd
.tx
[tx_idx
];
328 struct ll_temac
*ll_temac
= dev
->priv
;
329 phys_addr_t
*ra
= ll_temac
->sdma_reg_addr
;
331 if (ll_temac_sdma_error(dev
)) {
333 if (ll_temac_reset_sdma(dev
))
336 ll_temac_init_sdma(dev
);
339 tx_dp
->phys_buf_p
= (u8
*)packet
;
340 tx_dp
->buf_len
= length
;
341 tx_dp
->sca
.stctrl
= CDMAC_BD_STCTRL_SOP
| CDMAC_BD_STCTRL_EOP
|
342 CDMAC_BD_STCTRL_STOP_ON_END
;
344 flush_cache((u32
)packet
, length
);
345 flush_cache((u32
)tx_dp
, sizeof(*tx_dp
));
347 /* DMA start by writing to respective TAILDESC_PTR */
348 ll_temac
->out32(ra
[TX_CURDESC_PTR
], (int)tx_dp
);
349 ll_temac
->out32(ra
[TX_TAILDESC_PTR
], (int)tx_dp
);
351 /* Find next empty buffer descriptor, preparation for next iteration */
352 tx_idx
= (tx_idx
+ 1) % TX_BUF_CNT
;
353 tx_dp
= &cdmac_bd
.tx
[tx_idx
];
356 flush_cache((u32
)tx_dp
, sizeof(*tx_dp
));
358 } while (timeout
-- && !(tx_dp
->sca
.stctrl
& CDMAC_BD_STCTRL_COMPLETED
));
361 printf("%s: Timeout\n", __func__
);