/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>

struct qm_config qm_memmap = {
	.stat_cfg	= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue		= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm	= CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram		= CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy		= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram	= CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg	= (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg	= CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem	= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num	= CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd	= CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl	= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram	= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num	= CONFIG_KSNAV_QM_QPOOL_NUM,
};

/*
 * We are going to use only one type of descriptor - the host packet
 * descriptor. We statically allocate memory for them here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;
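
/*
 * Convert a descriptor count to the value the queue manager memory
 * region registers expect: the encoding j stands for 32 * 2^j
 * descriptors, so return the smallest j with num_descr <= 32 * 2^j,
 * capped at 15.
 */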
inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}
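
/*
 * Set up the queue manager: program linking RAM 0 for HDESC_NUM
 * descriptors, describe the static descriptor pool as memory region 0
 * and push every descriptor onto the free-pool queue (qpool_num).
 */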
int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}
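
/*
 * Undo _qm_init(): drain the free-pool queue and clear the linking RAM
 * and memory region setup.
 */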
void qm_close(void)
{
	u32 j;

	if (!qm_cfg)
		return;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0 = 0;
	qm_cfg->mngr_cfg->link_ram_size0 = 0;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}
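
/*
 * Push a descriptor onto a queue. The low four bits of the value
 * written to the queue register encode the descriptor size
 * ((size >> 4) - 1); the descriptor itself is converted to bus
 * endianness with cpu_to_bus() before it is handed to hardware.
 */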
void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	if (!qm_cfg)
		return;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}
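
/*
 * Attach a buffer to a host descriptor, remembering the original
 * pointer and length so they can be restored later, then push the
 * descriptor onto the given queue.
 */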
void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		  void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}
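
/*
 * Pop a descriptor from a queue. The low four bits of the register
 * value are the size encoding rather than address bits, so mask them
 * off. Returns NULL when the queue is empty.
 */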
struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	if (!qm_cfg)
		return NULL;

	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);

	return (struct qm_host_desc *)uhd;
}

struct qm_host_desc *qm_pop_from_free_pool(void)
{
	if (!qm_cfg)
		return NULL;

	return qm_pop(qm_cfg->qpool_num);
}
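
/* Empty a queue by popping all descriptors off it. */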
void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}
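
/*
 * Tear down all rx channels of a packet DMA: request teardown per
 * channel, poll (bounded by TDOWN_TIMEOUT_COUNT) until the channel
 * stops, then clear every rx flow configuration register.
 */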
static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->rx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}
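
/*
 * Tear down all tx channels, using the same bounded-wait scheme as the
 * rx side.
 */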
static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->tx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}
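
/*
 * Initialize a packet DMA instance: carve the caller-provided buffer
 * area into rx buffers pushed onto the rx free queue, configure the rx
 * flow to refill from that free queue, and enable all rx and tx
 * channels. qm_init() must have been called first, otherwise QM_ERR is
 * returned.
 */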
int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (pktdma == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, pktdma->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	ksnav_rx_disable(pktdma);

	/* configure rx channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);

	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &pktdma->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}

	return QM_OK;
}
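
/* Disable the DMA channels and drain the queues used by this instance. */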
int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);

	return QM_OK;
}
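
/*
 * Send one packet: take a descriptor from the free pool, record the
 * packet length and the swinfo word, set packet_info so the descriptor
 * is recycled to the free pool after transmission, and push it onto
 * the tx queue.
 */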
int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info = num_bytes;
	hd->swinfo[2] = swinfo2;
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}
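
/*
 * Receive one packet if available: returns the descriptor (to be
 * handed back via ksnav_release_rxhd()) and fills in the buffer
 * pointer and payload length, or NULL when the receive queue is empty.
 */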
void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(pktdma->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}
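
/*
 * Return an rx descriptor to the free queue, restoring the original
 * buffer pointer and length first.
 */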
void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, pktdma->rx_free_q);
}