2 * Multicore Navigator driver for TI Keystone 2 devices.
4 * (C) Copyright 2012-2014
5 * Texas Instruments Incorporated, <www.ti.com>
7 * SPDX-License-Identifier: GPL-2.0+
#include <common.h>
#include <asm/ti-common/keystone_nav.h>
13 struct qm_config qm_memmap
= {
14 .stat_cfg
= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE
,
15 .queue
= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE
,
16 .mngr_vbusm
= CONFIG_KSNAV_QM_BASE_ADDRESS
,
17 .i_lram
= CONFIG_KSNAV_QM_LINK_RAM_BASE
,
18 .proxy
= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE
,
19 .status_ram
= CONFIG_KSNAV_QM_STATUS_RAM_BASE
,
20 .mngr_cfg
= (void *)CONFIG_KSNAV_QM_CONF_BASE
,
21 .intd_cfg
= CONFIG_KSNAV_QM_INTD_CONF_BASE
,
22 .desc_mem
= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE
,
23 .region_num
= CONFIG_KSNAV_QM_REGION_NUM
,
24 .pdsp_cmd
= CONFIG_KSNAV_QM_PDSP1_CMD_BASE
,
25 .pdsp_ctl
= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE
,
26 .pdsp_iram
= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE
,
27 .qpool_num
= CONFIG_KSNAV_QM_QPOOL_NUM
,
/*
 * We are going to use only one type of descriptors - host packet
 * descriptors. We statically allocate memory for them here.
 *
 * The pool is aligned to the descriptor size because a descriptor's
 * address is pushed with its size encoded in the low bits (see
 * qm_push()), so the address itself must be size-aligned.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));
/* Active queue manager configuration; set by _qm_init(), NULL before that */
static struct qm_config *qm_cfg;
/*
 * Convert a descriptor count into the 4-bit region-size code programmed
 * into the descriptor memory setup register (see _qm_init()).
 *
 * Code j covers 2^(5 + j) descriptors (0 -> 32, 1 -> 64, ...); the
 * smallest code whose capacity is >= num_descr is returned, saturating
 * at 15.
 *
 * Note: this was previously declared plain 'inline' with external
 * linkage; under C99/C11 inline semantics such a definition emits no
 * out-of-line copy, which can break linking when a call site is not
 * inlined. A regular external definition is interface-compatible and
 * always safe.
 */
int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}
50 int _qm_init(struct qm_config
*cfg
)
56 qm_cfg
->mngr_cfg
->link_ram_base0
= qm_cfg
->i_lram
;
57 qm_cfg
->mngr_cfg
->link_ram_size0
= HDESC_NUM
* 8 - 1;
58 qm_cfg
->mngr_cfg
->link_ram_base1
= 0;
59 qm_cfg
->mngr_cfg
->link_ram_size1
= 0;
60 qm_cfg
->mngr_cfg
->link_ram_base2
= 0;
62 qm_cfg
->desc_mem
[0].base_addr
= (u32
)desc_pool
;
63 qm_cfg
->desc_mem
[0].start_idx
= 0;
64 qm_cfg
->desc_mem
[0].desc_reg_size
=
65 (((sizeof(struct qm_host_desc
) >> 4) - 1) << 16) |
66 num_of_desc_to_reg(HDESC_NUM
);
68 memset(desc_pool
, 0, sizeof(desc_pool
));
69 for (j
= 0; j
< HDESC_NUM
; j
++)
70 qm_push(&desc_pool
[j
], qm_cfg
->qpool_num
);
77 return _qm_init(&qm_memmap
);
84 queue_close(qm_cfg
->qpool_num
);
86 qm_cfg
->mngr_cfg
->link_ram_base0
= 0;
87 qm_cfg
->mngr_cfg
->link_ram_size0
= 0;
88 qm_cfg
->mngr_cfg
->link_ram_base1
= 0;
89 qm_cfg
->mngr_cfg
->link_ram_size1
= 0;
90 qm_cfg
->mngr_cfg
->link_ram_base2
= 0;
92 for (j
= 0; j
< qm_cfg
->region_num
; j
++) {
93 qm_cfg
->desc_mem
[j
].base_addr
= 0;
94 qm_cfg
->desc_mem
[j
].start_idx
= 0;
95 qm_cfg
->desc_mem
[j
].desc_reg_size
= 0;
101 void qm_push(struct qm_host_desc
*hd
, u32 qnum
)
105 cpu_to_bus((u32
*)hd
, sizeof(struct qm_host_desc
)/4);
106 regd
= (u32
)hd
| ((sizeof(struct qm_host_desc
) >> 4) - 1);
107 writel(regd
, &qm_cfg
->queue
[qnum
].ptr_size_thresh
);
110 void qm_buff_push(struct qm_host_desc
*hd
, u32 qnum
,
111 void *buff_ptr
, u32 buff_len
)
113 hd
->orig_buff_len
= buff_len
;
114 hd
->buff_len
= buff_len
;
115 hd
->orig_buff_ptr
= (u32
)buff_ptr
;
116 hd
->buff_ptr
= (u32
)buff_ptr
;
120 struct qm_host_desc
*qm_pop(u32 qnum
)
124 uhd
= readl(&qm_cfg
->queue
[qnum
].ptr_size_thresh
) & ~0xf;
126 cpu_to_bus((u32
*)uhd
, sizeof(struct qm_host_desc
)/4);
128 return (struct qm_host_desc
*)uhd
;
131 struct qm_host_desc
*qm_pop_from_free_pool(void)
133 return qm_pop(qm_cfg
->qpool_num
);
136 void queue_close(u32 qnum
)
138 struct qm_host_desc
*hd
;
140 while ((hd
= qm_pop(qnum
)))
148 static int ksnav_rx_disable(struct pktdma_cfg
*pktdma
)
152 for (j
= 0; j
< pktdma
->rx_ch_num
; j
++) {
153 v
= readl(&pktdma
->rx_ch
[j
].cfg_a
);
154 if (!(v
& CPDMA_CHAN_A_ENABLE
))
157 writel(v
| CPDMA_CHAN_A_TDOWN
, &pktdma
->rx_ch
[j
].cfg_a
);
158 for (k
= 0; k
< TDOWN_TIMEOUT_COUNT
; k
++) {
160 v
= readl(&pktdma
->rx_ch
[j
].cfg_a
);
161 if (!(v
& CPDMA_CHAN_A_ENABLE
))
164 /* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
167 /* Clear all of the flow registers */
168 for (j
= 0; j
< pktdma
->rx_flow_num
; j
++) {
169 writel(0, &pktdma
->rx_flows
[j
].control
);
170 writel(0, &pktdma
->rx_flows
[j
].tags
);
171 writel(0, &pktdma
->rx_flows
[j
].tag_sel
);
172 writel(0, &pktdma
->rx_flows
[j
].fdq_sel
[0]);
173 writel(0, &pktdma
->rx_flows
[j
].fdq_sel
[1]);
174 writel(0, &pktdma
->rx_flows
[j
].thresh
[0]);
175 writel(0, &pktdma
->rx_flows
[j
].thresh
[1]);
176 writel(0, &pktdma
->rx_flows
[j
].thresh
[2]);
182 static int ksnav_tx_disable(struct pktdma_cfg
*pktdma
)
186 for (j
= 0; j
< pktdma
->tx_ch_num
; j
++) {
187 v
= readl(&pktdma
->tx_ch
[j
].cfg_a
);
188 if (!(v
& CPDMA_CHAN_A_ENABLE
))
191 writel(v
| CPDMA_CHAN_A_TDOWN
, &pktdma
->tx_ch
[j
].cfg_a
);
192 for (k
= 0; k
< TDOWN_TIMEOUT_COUNT
; k
++) {
194 v
= readl(&pktdma
->tx_ch
[j
].cfg_a
);
195 if (!(v
& CPDMA_CHAN_A_ENABLE
))
198 /* TODO: teardown error on if TDOWN_TIMEOUT_COUNT is reached */
204 int ksnav_init(struct pktdma_cfg
*pktdma
, struct rx_buff_desc
*rx_buffers
)
207 struct qm_host_desc
*hd
;
210 if (pktdma
== NULL
|| rx_buffers
== NULL
||
211 rx_buffers
->buff_ptr
== NULL
|| qm_cfg
== NULL
)
214 pktdma
->rx_flow
= rx_buffers
->rx_flow
;
217 rx_ptr
= rx_buffers
->buff_ptr
;
219 for (j
= 0; j
< rx_buffers
->num_buffs
; j
++) {
220 hd
= qm_pop(qm_cfg
->qpool_num
);
224 qm_buff_push(hd
, pktdma
->rx_free_q
,
225 rx_ptr
, rx_buffers
->buff_len
);
227 rx_ptr
+= rx_buffers
->buff_len
;
230 ksnav_rx_disable(pktdma
);
232 /* configure rx channels */
233 v
= CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma
->rx_rcv_q
);
234 writel(v
, &pktdma
->rx_flows
[pktdma
->rx_flow
].control
);
235 writel(0, &pktdma
->rx_flows
[pktdma
->rx_flow
].tags
);
236 writel(0, &pktdma
->rx_flows
[pktdma
->rx_flow
].tag_sel
);
238 v
= CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma
->rx_free_q
, 0,
241 writel(v
, &pktdma
->rx_flows
[pktdma
->rx_flow
].fdq_sel
[0]);
242 writel(v
, &pktdma
->rx_flows
[pktdma
->rx_flow
].fdq_sel
[1]);
243 writel(0, &pktdma
->rx_flows
[pktdma
->rx_flow
].thresh
[0]);
244 writel(0, &pktdma
->rx_flows
[pktdma
->rx_flow
].thresh
[1]);
245 writel(0, &pktdma
->rx_flows
[pktdma
->rx_flow
].thresh
[2]);
247 for (j
= 0; j
< pktdma
->rx_ch_num
; j
++)
248 writel(CPDMA_CHAN_A_ENABLE
, &pktdma
->rx_ch
[j
].cfg_a
);
250 /* configure tx channels */
251 /* Disable loopback in the tx direction */
252 writel(0, &pktdma
->global
->emulation_control
);
254 /* Set QM base address, only for K2x devices */
255 writel(CONFIG_KSNAV_QM_BASE_ADDRESS
, &pktdma
->global
->qm_base_addr
[0]);
257 /* Enable all channels. The current state isn't important */
258 for (j
= 0; j
< pktdma
->tx_ch_num
; j
++) {
259 writel(0, &pktdma
->tx_ch
[j
].cfg_b
);
260 writel(CPDMA_CHAN_A_ENABLE
, &pktdma
->tx_ch
[j
].cfg_a
);
266 int ksnav_close(struct pktdma_cfg
*pktdma
)
271 ksnav_tx_disable(pktdma
);
272 ksnav_rx_disable(pktdma
);
274 queue_close(pktdma
->rx_free_q
);
275 queue_close(pktdma
->rx_rcv_q
);
276 queue_close(pktdma
->tx_snd_q
);
281 int ksnav_send(struct pktdma_cfg
*pktdma
, u32
*pkt
, int num_bytes
, u32 swinfo2
)
283 struct qm_host_desc
*hd
;
285 hd
= qm_pop(qm_cfg
->qpool_num
);
289 hd
->desc_info
= num_bytes
;
290 hd
->swinfo
[2] = swinfo2
;
291 hd
->packet_info
= qm_cfg
->qpool_num
;
293 qm_buff_push(hd
, pktdma
->tx_snd_q
, pkt
, num_bytes
);
298 void *ksnav_recv(struct pktdma_cfg
*pktdma
, u32
**pkt
, int *num_bytes
)
300 struct qm_host_desc
*hd
;
302 hd
= qm_pop(pktdma
->rx_rcv_q
);
306 *pkt
= (u32
*)hd
->buff_ptr
;
307 *num_bytes
= hd
->desc_info
& 0x3fffff;
312 void ksnav_release_rxhd(struct pktdma_cfg
*pktdma
, void *hd
)
314 struct qm_host_desc
*_hd
= (struct qm_host_desc
*)hd
;
316 _hd
->buff_len
= _hd
->orig_buff_len
;
317 _hd
->buff_ptr
= _hd
->orig_buff_ptr
;
319 qm_push(_hd
, pktdma
->rx_free_q
);