/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>

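/*
 * Queue manager register map, filled in from the board's CONFIG_KSNAV_*
 * base addresses at build time.
 */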
struct qm_config qm_memmap = {
	.stat_cfg	= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue		= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm	= CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram		= CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy		= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram	= CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg	= (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg	= CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem	= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num	= CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd	= CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl	= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram	= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num	= CONFIG_KSNAV_QM_QPOOL_NUM,
};

/*
 * We are going to use only one type of descriptor - the host packet
 * descriptor. Memory for the descriptors is statically allocated here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;

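/*
 * Convert a descriptor count into the power-of-two code used by the
 * queue manager region registers: code n stands for 32 << n descriptors.
 * Returns the smallest code that covers num_descr, capped at 15.
 */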
inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}

int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8 - 1;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

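	/*
	 * Set up descriptor memory region 0: the upper 16 bits of
	 * desc_reg_size hold the descriptor size in 16-byte units minus
	 * one, the low bits hold the descriptor count code produced by
	 * num_of_desc_to_reg().
	 */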
	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}

void qm_close(void)
{
	u32 j;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0 = 0;
	qm_cfg->mngr_cfg->link_ram_size0 = 0;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}

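/*
 * Push a descriptor onto a queue: the descriptor is converted to bus
 * byte order first, then its address, ORed with its size in 16-byte
 * units minus one, is written to the queue's ptr_size_thresh register.
 */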
void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc) / 4);
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}

void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		  void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}

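/*
 * Pop a descriptor from a queue. The low four bits of the register hold
 * the descriptor size code and are masked off to recover the address;
 * a zero read means the queue is empty.
 */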
struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc) / 4);

	return (struct qm_host_desc *)uhd;
}

struct qm_host_desc *qm_pop_from_free_pool(void)
{
	return qm_pop(qm_cfg->qpool_num);
}

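/* Drain a queue by popping descriptors until it is empty */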
void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}

/**
 * DMA API
 */

static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->rx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}

static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->tx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}

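/*
 * Initialise the packet DMA: populate the rx free queue with descriptors
 * pointing into the caller's buffer area, set up the rx flow so that
 * completed packets land on rx_rcv_q and free buffers are pulled from
 * rx_free_q, then enable all rx and tx channels.
 */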
int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (pktdma == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, pktdma->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	ksnav_rx_disable(pktdma);

	/* configure rx flow and channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);

	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &pktdma->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}

	return QM_OK;
}

int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);

	return QM_OK;
}

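/*
 * Queue a packet for transmission on tx_snd_q. The descriptor's
 * packet_info is set to the free descriptor pool, which should make the
 * hardware return the descriptor there once the packet has been sent.
 */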
int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info = num_bytes;
	hd->swinfo[2] = swinfo2;
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}

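/*
 * Pop the next received packet from the rx queue. Returns the host
 * descriptor (release it with ksnav_release_rxhd()) or NULL if the queue
 * is empty; the buffer pointer and payload length are passed back through
 * pkt and num_bytes.
 */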
void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(pktdma->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}

void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, pktdma->rx_free_q);
}