1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6 #define pr_fmt(fmt) "udma: " fmt
7
8 #include <common.h>
9 #include <cpu_func.h>
10 #include <log.h>
11 #include <asm/cache.h>
12 #include <asm/io.h>
13 #include <asm/bitops.h>
14 #include <malloc.h>
15 #include <linux/bitops.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/sizes.h>
18 #include <dm.h>
19 #include <dm/device_compat.h>
20 #include <dm/devres.h>
21 #include <dm/read.h>
22 #include <dm/of_access.h>
23 #include <dma.h>
24 #include <dma-uclass.h>
25 #include <linux/delay.h>
26 #include <linux/bitmap.h>
27 #include <linux/err.h>
28 #include <linux/printk.h>
29 #include <linux/soc/ti/k3-navss-ringacc.h>
30 #include <linux/soc/ti/cppi5.h>
31 #include <linux/soc/ti/ti-udma.h>
32 #include <linux/soc/ti/ti_sci_protocol.h>
34
35 #include "k3-udma-hwdef.h"
36 #include "k3-psil-priv.h"
37
38 #define K3_UDMA_MAX_RFLOWS 1024
39
40 struct udma_chan;
41
42 enum k3_dma_type {
43 DMA_TYPE_UDMA = 0,
44 DMA_TYPE_BCDMA,
45 DMA_TYPE_PKTDMA,
46 };
47
48 enum udma_mmr {
49 MMR_GCFG = 0,
50 MMR_BCHANRT,
51 MMR_RCHANRT,
52 MMR_TCHANRT,
53 MMR_RCHAN,
54 MMR_TCHAN,
55 MMR_RFLOW,
56 MMR_LAST,
57 };
58
59 static const char * const mmr_names[] = {
60 [MMR_GCFG] = "gcfg",
61 [MMR_BCHANRT] = "bchanrt",
62 [MMR_RCHANRT] = "rchanrt",
63 [MMR_TCHANRT] = "tchanrt",
64 [MMR_RCHAN] = "rchan",
65 [MMR_TCHAN] = "tchan",
66 [MMR_RFLOW] = "rflow",
67 };
68
69 struct udma_tchan {
70 void __iomem *reg_chan;
71 void __iomem *reg_rt;
72
73 int id;
74 struct k3_nav_ring *t_ring; /* Transmit ring */
75 struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
76 int tflow_id; /* applicable only for PKTDMA */
77
78 };
79
80 #define udma_bchan udma_tchan
81
82 struct udma_rflow {
83 void __iomem *reg_rflow;
84 int id;
85 struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
86 struct k3_nav_ring *r_ring; /* Receive ring */
87 };
88
89 struct udma_rchan {
90 void __iomem *reg_chan;
91 void __iomem *reg_rt;
92
93 int id;
94 };
95
96 struct udma_oes_offsets {
97 /* K3 UDMA Output Event Offset */
98 u32 udma_rchan;
99
100 /* BCDMA Output Event Offsets */
101 u32 bcdma_bchan_data;
102 u32 bcdma_bchan_ring;
103 u32 bcdma_tchan_data;
104 u32 bcdma_tchan_ring;
105 u32 bcdma_rchan_data;
106 u32 bcdma_rchan_ring;
107
108 /* PKTDMA Output Event Offsets */
109 u32 pktdma_tchan_flow;
110 u32 pktdma_rchan_flow;
111 };
112
113 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
114 #define UDMA_FLAG_PDMA_BURST BIT(1)
115 #define UDMA_FLAG_TDTYPE BIT(2)
116
117 struct udma_match_data {
118 enum k3_dma_type type;
119 u32 psil_base;
120 bool enable_memcpy_support;
121 u32 flags;
122 u32 statictr_z_mask;
123 struct udma_oes_offsets oes;
124
125 u8 tpl_levels;
126 u32 level_start_idx[];
127 };
128
129 enum udma_rm_range {
130 RM_RANGE_BCHAN = 0,
131 RM_RANGE_TCHAN,
132 RM_RANGE_RCHAN,
133 RM_RANGE_RFLOW,
134 RM_RANGE_TFLOW,
135 RM_RANGE_LAST,
136 };
137
138 struct udma_tisci_rm {
139 const struct ti_sci_handle *tisci;
140 const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
141 u32 tisci_dev_id;
142
143 /* tisci information for PSI-L thread pairing/unpairing */
144 const struct ti_sci_rm_psil_ops *tisci_psil_ops;
145 u32 tisci_navss_dev_id;
146
147 struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
148 };
149
150 struct udma_dev {
151 struct udevice *dev;
152 void __iomem *mmrs[MMR_LAST];
153
154 struct udma_tisci_rm tisci_rm;
155 struct k3_nav_ringacc *ringacc;
156
157 u32 features;
158
159 int bchan_cnt;
160 int tchan_cnt;
161 int echan_cnt;
162 int rchan_cnt;
163 int rflow_cnt;
164 int tflow_cnt;
165 unsigned long *bchan_map;
166 unsigned long *tchan_map;
167 unsigned long *rchan_map;
168 unsigned long *rflow_map;
169 unsigned long *rflow_map_reserved;
170 unsigned long *tflow_map;
171
172 struct udma_bchan *bchans;
173 struct udma_tchan *tchans;
174 struct udma_rchan *rchans;
175 struct udma_rflow *rflows;
176
177 struct udma_match_data *match_data;
178
179 struct udma_chan *channels;
180 u32 psil_base;
181
182 u32 ch_count;
183 };
184
185 struct udma_chan_config {
186 u32 psd_size; /* size of Protocol Specific Data */
187 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
188 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
189 int remote_thread_id;
190 u32 atype;
191 u32 src_thread;
192 u32 dst_thread;
193 enum psil_endpoint_type ep_type;
194 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
195
196 /* PKTDMA mapped channel */
197 int mapped_channel_id;
198 /* PKTDMA default tflow or rflow for mapped channel */
199 int default_flow_id;
200
201 enum dma_direction dir;
202
203 unsigned int pkt_mode:1; /* TR or packet */
204 unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
205 unsigned int enable_acc32:1;
206 unsigned int enable_burst:1;
207 unsigned int notdpkt:1; /* Suppress sending TDC packet */
208 };
209
210 struct udma_chan {
211 struct udma_dev *ud;
212 char name[20];
213
214 struct udma_bchan *bchan;
215 struct udma_tchan *tchan;
216 struct udma_rchan *rchan;
217 struct udma_rflow *rflow;
218
219 struct ti_udma_drv_chan_cfg_data cfg_data;
220
221 u32 bcnt; /* number of bytes completed since the start of the channel */
222
223 struct udma_chan_config config;
224
225 u32 id;
226
227 struct cppi5_host_desc_t *desc_tx;
228 bool in_use;
229 void *desc_rx;
230 u32 num_rx_bufs;
231 u32 desc_rx_cur;
232
233 };
234
235 #define UDMA_CH_1000(ch) ((ch) * 0x1000)
236 #define UDMA_CH_100(ch) ((ch) * 0x100)
237 #define UDMA_CH_40(ch) ((ch) * 0x40)
238
239 #ifdef PKTBUFSRX
240 #define UDMA_RX_DESC_NUM PKTBUFSRX
241 #else
242 #define UDMA_RX_DESC_NUM 4
243 #endif
244
245 /* Generic register access functions */
246 static inline u32 udma_read(void __iomem *base, int reg)
247 {
248 u32 v;
249
250 v = __raw_readl(base + reg);
251 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
252 return v;
253 }
254
255 static inline void udma_write(void __iomem *base, int reg, u32 val)
256 {
257 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
258 __raw_writel(val, base + reg);
259 }
260
261 static inline void udma_update_bits(void __iomem *base, int reg,
262 u32 mask, u32 val)
263 {
264 u32 tmp, orig;
265
266 orig = udma_read(base, reg);
267 tmp = orig & ~mask;
268 tmp |= (val & mask);
269
270 if (tmp != orig)
271 udma_write(base, reg, tmp);
272 }
273
274 /* TCHANRT */
275 static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
276 {
277 if (!tchan)
278 return 0;
279 return udma_read(tchan->reg_rt, reg);
280 }
281
282 static inline void udma_tchanrt_write(struct udma_tchan *tchan,
283 int reg, u32 val)
284 {
285 if (!tchan)
286 return;
287 udma_write(tchan->reg_rt, reg, val);
288 }
289
290 /* RCHANRT */
291 static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
292 {
293 if (!rchan)
294 return 0;
295 return udma_read(rchan->reg_rt, reg);
296 }
297
298 static inline void udma_rchanrt_write(struct udma_rchan *rchan,
299 int reg, u32 val)
300 {
301 if (!rchan)
302 return;
303 udma_write(rchan->reg_rt, reg, val);
304 }
305
306 static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
307 u32 dst_thread)
308 {
309 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
310
311 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
312
313 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
314 tisci_rm->tisci_navss_dev_id,
315 src_thread, dst_thread);
316 }
317
318 static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
319 u32 dst_thread)
320 {
321 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
322
323 dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
324
325 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
326 tisci_rm->tisci_navss_dev_id,
327 src_thread, dst_thread);
328 }
329
330 static inline char *udma_get_dir_text(enum dma_direction dir)
331 {
332 switch (dir) {
333 case DMA_DEV_TO_MEM:
334 return "DEV_TO_MEM";
335 case DMA_MEM_TO_DEV:
336 return "MEM_TO_DEV";
337 case DMA_MEM_TO_MEM:
338 return "MEM_TO_MEM";
339 case DMA_DEV_TO_DEV:
340 return "DEV_TO_DEV";
341 default:
342 break;
343 }
344
345 return "invalid";
346 }
347
348 #include "k3-udma-u-boot.c"
349
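/*
 * Reset the software channel configuration to its unassigned default:
 * no remote PSI-L thread, no mapped channel and no default flow.
 */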
350 static void udma_reset_uchan(struct udma_chan *uc)
351 {
352 memset(&uc->config, 0, sizeof(uc->config));
353 uc->config.remote_thread_id = -1;
354 uc->config.mapped_channel_id = -1;
355 uc->config.default_flow_id = -1;
356 }
357
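/*
 * A channel counts as running if the EN bit is set in the realtime CTL
 * register of the tchan and/or rchan used by the configured direction.
 */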
358 static inline bool udma_is_chan_running(struct udma_chan *uc)
359 {
360 u32 trt_ctl = 0;
361 u32 rrt_ctl = 0;
362
363 switch (uc->config.dir) {
364 case DMA_DEV_TO_MEM:
365 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
366 pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
367 __func__, rrt_ctl,
368 udma_rchanrt_read(uc->rchan,
369 UDMA_RCHAN_RT_PEER_RT_EN_REG));
370 break;
371 case DMA_MEM_TO_DEV:
372 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
373 pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
374 __func__, trt_ctl,
375 udma_tchanrt_read(uc->tchan,
376 UDMA_TCHAN_RT_PEER_RT_EN_REG));
377 break;
378 case DMA_MEM_TO_MEM:
379 trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
380 rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
381 break;
382 default:
383 break;
384 }
385
386 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
387 return true;
388
389 return false;
390 }
391
392 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
393 {
394 struct k3_nav_ring *ring = NULL;
395 int ret = -ENOENT;
396
397 switch (uc->config.dir) {
398 case DMA_DEV_TO_MEM:
399 ring = uc->rflow->r_ring;
400 break;
401 case DMA_MEM_TO_DEV:
402 ring = uc->tchan->tc_ring;
403 break;
404 case DMA_MEM_TO_MEM:
405 ring = uc->tchan->tc_ring;
406 break;
407 default:
408 break;
409 }
410
411 if (ring && k3_nav_ringacc_ring_get_occ(ring))
412 ret = k3_nav_ringacc_ring_pop(ring, addr);
413
414 return ret;
415 }
416
417 static void udma_reset_rings(struct udma_chan *uc)
418 {
419 struct k3_nav_ring *ring1 = NULL;
420 struct k3_nav_ring *ring2 = NULL;
421
422 switch (uc->config.dir) {
423 case DMA_DEV_TO_MEM:
424 ring1 = uc->rflow->fd_ring;
425 ring2 = uc->rflow->r_ring;
426 break;
427 case DMA_MEM_TO_DEV:
428 ring1 = uc->tchan->t_ring;
429 ring2 = uc->tchan->tc_ring;
430 break;
431 case DMA_MEM_TO_MEM:
432 ring1 = uc->tchan->t_ring;
433 ring2 = uc->tchan->tc_ring;
434 break;
435 default:
436 break;
437 }
438
439 if (ring1)
440 k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
441 if (ring2)
442 k3_nav_ringacc_ring_reset(ring2);
443 }
444
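/*
 * Clear the realtime byte/packet counters by writing back the values
 * just read; the hardware treats the write as a decrement, so this
 * zeroes them.
 */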
445 static void udma_reset_counters(struct udma_chan *uc)
446 {
447 u32 val;
448
449 if (uc->tchan) {
450 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
451 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
452
453 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
454 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
455
456 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
457 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
458
459 if (!uc->bchan) {
460 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
461 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
462 }
463 }
464
465 if (uc->rchan) {
466 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
467 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
468
469 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
470 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
471
472 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
473 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
474
475 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
476 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
477 }
478
479 uc->bcnt = 0;
480 }
481
482 static inline int udma_stop_hard(struct udma_chan *uc)
483 {
484 pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
485
486 switch (uc->config.dir) {
487 case DMA_DEV_TO_MEM:
488 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
489 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
490 break;
491 case DMA_MEM_TO_DEV:
492 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
493 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
494 break;
495 case DMA_MEM_TO_MEM:
496 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
497 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
498 break;
499 default:
500 return -EINVAL;
501 }
502
503 return 0;
504 }
505
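/*
 * Enable the channel: clear any stale teardown state, reset the
 * counters, then set the EN bits (local end first for DEV_TO_MEM,
 * peer first for MEM_TO_DEV).
 */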
506 static int udma_start(struct udma_chan *uc)
507 {
508 /* Channel is already running, no need to proceed further */
509 if (udma_is_chan_running(uc))
510 goto out;
511
512 pr_debug("%s: chan:%d dir:%s\n",
513 __func__, uc->id, udma_get_dir_text(uc->config.dir));
514
515 /* Make sure that we clear the teardown bit, if it is set */
516 udma_stop_hard(uc);
517
518 /* Reset all counters */
519 udma_reset_counters(uc);
520
521 switch (uc->config.dir) {
522 case DMA_DEV_TO_MEM:
523 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
524 UDMA_CHAN_RT_CTL_EN);
525
526 /* Enable remote */
527 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
528 UDMA_PEER_RT_EN_ENABLE);
529
530 pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
531 __func__,
532 udma_rchanrt_read(uc->rchan,
533 UDMA_RCHAN_RT_CTL_REG),
534 udma_rchanrt_read(uc->rchan,
535 UDMA_RCHAN_RT_PEER_RT_EN_REG));
536 break;
537 case DMA_MEM_TO_DEV:
538 /* Enable remote */
539 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
540 UDMA_PEER_RT_EN_ENABLE);
541
542 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
543 UDMA_CHAN_RT_CTL_EN);
544
545 pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
546 __func__,
547 udma_tchanrt_read(uc->tchan,
548 UDMA_TCHAN_RT_CTL_REG),
549 udma_tchanrt_read(uc->tchan,
550 UDMA_TCHAN_RT_PEER_RT_EN_REG));
551 break;
552 case DMA_MEM_TO_MEM:
553 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
554 UDMA_CHAN_RT_CTL_EN);
555 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
556 UDMA_CHAN_RT_CTL_EN);
557
558 break;
559 default:
560 return -EINVAL;
561 }
562
563 pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
564 out:
565 return 0;
566 }
567
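/*
 * Graceful TX stop: request a teardown through RT_CTL and, if sync is
 * set, poll for up to ~1ms until the EN bit clears; warn if the peer
 * is still enabled afterwards.
 */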
568 static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
569 {
570 int i = 0;
571 u32 val;
572
573 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
574 UDMA_CHAN_RT_CTL_EN |
575 UDMA_CHAN_RT_CTL_TDOWN);
576
577 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
578
579 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
580 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
581 udelay(1);
582 if (i > 1000) {
583 printf(" %s TIMEOUT !\n", __func__);
584 break;
585 }
586 i++;
587 }
588
589 val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
590 if (val & UDMA_PEER_RT_EN_ENABLE)
591 printf("%s: peer not stopped TIMEOUT !\n", __func__);
592 }
593
594 static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
595 {
596 int i = 0;
597 u32 val;
598
599 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
600 UDMA_PEER_RT_EN_ENABLE |
601 UDMA_PEER_RT_EN_TEARDOWN);
602
603 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
604
605 while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
606 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
607 udelay(1);
608 if (i > 1000) {
609 printf("%s TIMEOUT !\n", __func__);
610 break;
611 }
612 i++;
613 }
614
615 val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
616 if (val & UDMA_PEER_RT_EN_ENABLE)
617 printf("%s: peer not stopped TIMEOUT !\n", __func__);
618 }
619
620 static inline int udma_stop(struct udma_chan *uc)
621 {
622 pr_debug("%s: chan:%d dir:%s\n",
623 __func__, uc->id, udma_get_dir_text(uc->config.dir));
624
625 udma_reset_counters(uc);
626 switch (uc->config.dir) {
627 case DMA_DEV_TO_MEM:
628 udma_stop_dev2mem(uc, true);
629 break;
630 case DMA_MEM_TO_DEV:
631 udma_stop_mem2dev(uc, true);
632 break;
633 case DMA_MEM_TO_MEM:
634 udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
635 udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
636 break;
637 default:
638 return -EINVAL;
639 }
640
641 return 0;
642 }
643
644 static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
645 {
646 int i = 1;
647
648 while (udma_pop_from_ring(uc, paddr)) {
649 udelay(1);
650 if (!(i % 1000000))
651 printf(".");
652 i++;
653 }
654 }
655
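/*
 * Reserve an rflow. A non-negative id requests that exact flow; id < 0
 * picks the first free general purpose flow above the rchan range
 * (flows 0..rchan_cnt-1 are reserved as per-channel default flows).
 */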
656 static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
657 {
658 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
659
660 if (id >= 0) {
661 if (test_bit(id, ud->rflow_map)) {
662 dev_err(ud->dev, "rflow%d is in use\n", id);
663 return ERR_PTR(-ENOENT);
664 }
665 } else {
666 bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
667 ud->rflow_cnt);
668
669 id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
670 if (id >= ud->rflow_cnt)
671 return ERR_PTR(-ENOENT);
672 }
673
674 __set_bit(id, ud->rflow_map);
675 return &ud->rflows[id];
676 }
677
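/*
 * Generate __udma_reserve_tchan()/__udma_reserve_rchan(): reserve a
 * specific channel when id >= 0, otherwise the first free one in the
 * corresponding bitmap.
 */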
678 #define UDMA_RESERVE_RESOURCE(res) \
679 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
680 int id) \
681 { \
682 if (id >= 0) { \
683 if (test_bit(id, ud->res##_map)) { \
684 dev_err(ud->dev, "res##%d is in use\n", id); \
685 return ERR_PTR(-ENOENT); \
686 } \
687 } else { \
688 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
689 if (id == ud->res##_cnt) { \
690 return ERR_PTR(-ENOENT); \
691 } \
692 } \
693 \
694 __set_bit(id, ud->res##_map); \
695 return &ud->res##s[id]; \
696 }
697
698 UDMA_RESERVE_RESOURCE(tchan);
699 UDMA_RESERVE_RESOURCE(rchan);
700
701 static int udma_get_tchan(struct udma_chan *uc)
702 {
703 struct udma_dev *ud = uc->ud;
704
705 if (uc->tchan) {
706 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
707 uc->id, uc->tchan->id);
708 return 0;
709 }
710
711 uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
712 if (IS_ERR(uc->tchan))
713 return PTR_ERR(uc->tchan);
714
715 if (ud->tflow_cnt) {
716 int tflow_id;
717
718 /* Only PKTDMA have support for tx flows */
719 if (uc->config.default_flow_id >= 0)
720 tflow_id = uc->config.default_flow_id;
721 else
722 tflow_id = uc->tchan->id;
723
724 if (test_bit(tflow_id, ud->tflow_map)) {
725 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
726 __clear_bit(uc->tchan->id, ud->tchan_map);
727 uc->tchan = NULL;
728 return -ENOENT;
729 }
730
731 uc->tchan->tflow_id = tflow_id;
732 __set_bit(tflow_id, ud->tflow_map);
733 } else {
734 uc->tchan->tflow_id = -1;
735 }
736
737 pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
738
739 return 0;
740 }
741
742 static int udma_get_rchan(struct udma_chan *uc)
743 {
744 struct udma_dev *ud = uc->ud;
745
746 if (uc->rchan) {
747 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
748 uc->id, uc->rchan->id);
749 return 0;
750 }
751
752 uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
753 if (IS_ERR(uc->rchan))
754 return PTR_ERR(uc->rchan);
755
756 pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
757
758 return 0;
759 }
760
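/*
 * MEM_TO_MEM needs a tchan/rchan pair with the same index so the two
 * ends can be PSI-L paired back to back; scan both bitmaps for a
 * common free id.
 */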
761 static int udma_get_chan_pair(struct udma_chan *uc)
762 {
763 struct udma_dev *ud = uc->ud;
764 int chan_id, end;
765
766 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
767 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
768 uc->id, uc->tchan->id);
769 return 0;
770 }
771
772 if (uc->tchan) {
773 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
774 uc->id, uc->tchan->id);
775 return -EBUSY;
776 } else if (uc->rchan) {
777 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
778 uc->id, uc->rchan->id);
779 return -EBUSY;
780 }
781
782 /* Can be optimized, but let's have it like this for now */
783 end = min(ud->tchan_cnt, ud->rchan_cnt);
784 for (chan_id = 0; chan_id < end; chan_id++) {
785 if (!test_bit(chan_id, ud->tchan_map) &&
786 !test_bit(chan_id, ud->rchan_map))
787 break;
788 }
789
790 if (chan_id == end)
791 return -ENOENT;
792
793 __set_bit(chan_id, ud->tchan_map);
794 __set_bit(chan_id, ud->rchan_map);
795 uc->tchan = &ud->tchans[chan_id];
796 uc->rchan = &ud->rchans[chan_id];
797
798 pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
799
800 return 0;
801 }
802
803 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
804 {
805 struct udma_dev *ud = uc->ud;
806
807 if (uc->rflow) {
808 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
809 uc->id, uc->rflow->id);
810 return 0;
811 }
812
813 if (!uc->rchan)
814 dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
815
816 uc->rflow = __udma_reserve_rflow(ud, flow_id);
817 if (IS_ERR(uc->rflow))
818 return PTR_ERR(uc->rflow);
819
820 pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
821 return 0;
822 }
823
824 static void udma_put_rchan(struct udma_chan *uc)
825 {
826 struct udma_dev *ud = uc->ud;
827
828 if (uc->rchan) {
829 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
830 uc->rchan->id);
831 __clear_bit(uc->rchan->id, ud->rchan_map);
832 uc->rchan = NULL;
833 }
834 }
835
836 static void udma_put_tchan(struct udma_chan *uc)
837 {
838 struct udma_dev *ud = uc->ud;
839
840 if (uc->tchan) {
841 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
842 uc->tchan->id);
843 __clear_bit(uc->tchan->id, ud->tchan_map);
844 if (uc->tchan->tflow_id >= 0)
845 __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
846 uc->tchan = NULL;
847 }
848 }
849
850 static void udma_put_rflow(struct udma_chan *uc)
851 {
852 struct udma_dev *ud = uc->ud;
853
854 if (uc->rflow) {
855 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
856 uc->rflow->id);
857 __clear_bit(uc->rflow->id, ud->rflow_map);
858 uc->rflow = NULL;
859 }
860 }
861
862 static void udma_free_tx_resources(struct udma_chan *uc)
863 {
864 if (!uc->tchan)
865 return;
866
867 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
868 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
869 uc->tchan->t_ring = NULL;
870 uc->tchan->tc_ring = NULL;
871
872 udma_put_tchan(uc);
873 }
874
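/*
 * Reserve a tchan and request/configure its TX and TX completion ring
 * pair (16 elements, 8-byte ring elements, RING mode).
 */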
875 static int udma_alloc_tx_resources(struct udma_chan *uc)
876 {
877 struct k3_nav_ring_cfg ring_cfg;
878 struct udma_dev *ud = uc->ud;
879 struct udma_tchan *tchan;
880 int ring_idx, ret;
881
882 ret = udma_get_tchan(uc);
883 if (ret)
884 return ret;
885
886 tchan = uc->tchan;
887 if (tchan->tflow_id >= 0)
888 ring_idx = tchan->tflow_id;
889 else
890 ring_idx = ud->bchan_cnt + tchan->id;
891
892 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
893 &uc->tchan->t_ring,
894 &uc->tchan->tc_ring);
895 if (ret) {
896 ret = -EBUSY;
897 goto err_tx_ring;
898 }
899
900 memset(&ring_cfg, 0, sizeof(ring_cfg));
901 ring_cfg.size = 16;
902 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
903 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
904
905 ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
906 ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
907
908 if (ret)
909 goto err_ringcfg;
910
911 return 0;
912
913 err_ringcfg:
914 k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
915 uc->tchan->tc_ring = NULL;
916 k3_nav_ringacc_ring_free(uc->tchan->t_ring);
917 uc->tchan->t_ring = NULL;
918 err_tx_ring:
919 udma_put_tchan(uc);
920
921 return ret;
922 }
923
924 static void udma_free_rx_resources(struct udma_chan *uc)
925 {
926 if (!uc->rchan)
927 return;
928
929 if (uc->rflow) {
930 k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
931 k3_nav_ringacc_ring_free(uc->rflow->r_ring);
932 uc->rflow->fd_ring = NULL;
933 uc->rflow->r_ring = NULL;
934
935 udma_put_rflow(uc);
936 }
937
938 udma_put_rchan(uc);
939 }
940
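/*
 * Reserve an rchan plus an rflow and configure the flow's free
 * descriptor/receive ring pair. MEM_TO_MEM returns early as memcpy
 * only uses the TX side rings.
 */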
941 static int udma_alloc_rx_resources(struct udma_chan *uc)
942 {
943 struct k3_nav_ring_cfg ring_cfg;
944 struct udma_dev *ud = uc->ud;
945 struct udma_rflow *rflow;
946 int fd_ring_id;
947 int ret;
948
949 ret = udma_get_rchan(uc);
950 if (ret)
951 return ret;
952
953 /* For MEM_TO_MEM we don't need rflow or rings */
954 if (uc->config.dir == DMA_MEM_TO_MEM)
955 return 0;
956
957 if (uc->config.default_flow_id >= 0)
958 ret = udma_get_rflow(uc, uc->config.default_flow_id);
959 else
960 ret = udma_get_rflow(uc, uc->rchan->id);
961
962 if (ret) {
963 ret = -EBUSY;
964 goto err_rflow;
965 }
966
967 rflow = uc->rflow;
968 if (ud->tflow_cnt) {
969 fd_ring_id = ud->tflow_cnt + rflow->id;
970 } else {
971 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
972 uc->rchan->id;
973 }
974
975 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
976 &rflow->fd_ring, &rflow->r_ring);
977 if (ret) {
978 ret = -EBUSY;
979 goto err_rx_ring;
980 }
981
982 memset(&ring_cfg, 0, sizeof(ring_cfg));
983 ring_cfg.size = 16;
984 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
985 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
986
987 ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
988 ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
989 if (ret)
990 goto err_ringcfg;
991
992 return 0;
993
994 err_ringcfg:
995 k3_nav_ringacc_ring_free(rflow->r_ring);
996 rflow->r_ring = NULL;
997 k3_nav_ringacc_ring_free(rflow->fd_ring);
998 rflow->fd_ring = NULL;
999 err_rx_ring:
1000 udma_put_rflow(uc);
1001 err_rflow:
1002 udma_put_rchan(uc);
1003
1004 return ret;
1005 }
1006
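/*
 * Configure the TX channel through TI-SCI: channel type, descriptor
 * fetch size and the completion queue (tc_ring). With CONFIG_K3_DM_FW
 * the channel cfg registers are additionally programmed locally.
 */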
1007 static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
1008 {
1009 struct udma_dev *ud = uc->ud;
1010 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1011 struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
1012 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1013 u32 mode;
1014 int ret;
1015
1016 if (uc->config.pkt_mode)
1017 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1018 else
1019 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1020
1021 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
1022 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1023 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
1024 req.nav_id = tisci_rm->tisci_dev_id;
1025 req.index = uc->tchan->id;
1026 req.tx_chan_type = mode;
1027 if (uc->config.dir == DMA_MEM_TO_MEM)
1028 req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1029 else
1030 req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1031 uc->config.psd_size,
1032 0) >> 2;
1033 req.txcq_qnum = tc_ring;
1034
1035 ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
1036 if (ret) {
1037 dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
1038 return ret;
1039 }
1040
1041 /*
1042 * Above TI SCI call handles firewall configuration, cfg
1043 * register configuration still has to be done locally in
1044 * absence of RM services.
1045 */
1046 if (IS_ENABLED(CONFIG_K3_DM_FW))
1047 udma_alloc_tchan_raw(uc);
1048
1049 return 0;
1050 }
1051
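/*
 * Configure the RX channel and, for slave transfers, its flow through
 * TI-SCI: descriptor fetch size, completion queue, and all free
 * descriptor queues pointing at this channel's fd_ring.
 */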
1052 static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
1053 {
1054 struct udma_dev *ud = uc->ud;
1055 int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
1056 int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
1057 int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1058 struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
1059 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1060 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1061 u32 mode;
1062 int ret;
1063
1064 if (uc->config.pkt_mode)
1065 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1066 else
1067 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1068
1069 req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1070 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
1071 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
1072 req.nav_id = tisci_rm->tisci_dev_id;
1073 req.index = uc->rchan->id;
1074 req.rx_chan_type = mode;
1075 if (uc->config.dir == DMA_MEM_TO_MEM) {
1076 req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1077 req.rxcq_qnum = tc_ring;
1078 } else {
1079 req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1080 uc->config.psd_size,
1081 0) >> 2;
1082 req.rxcq_qnum = rx_ring;
1083 }
1084 if (ud->match_data->type == DMA_TYPE_UDMA &&
1085 uc->rflow->id != uc->rchan->id &&
1086 uc->config.dir != DMA_MEM_TO_MEM) {
1087 req.flowid_start = uc->rflow->id;
1088 req.flowid_cnt = 1;
1089 req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
1090 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
1091 }
1092
1093 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
1094 if (ret) {
1095 dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
1096 uc->rchan->id, ret);
1097 return ret;
1098 }
1099 if (uc->config.dir == DMA_MEM_TO_MEM)
1100 return ret;
1101
1102 flow_req.valid_params =
1103 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1104 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1105 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1106 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1107 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1108 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1109 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1110 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1111 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1112 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1113 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1114 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1115 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
1116 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
1117
1118 flow_req.nav_id = tisci_rm->tisci_dev_id;
1119 flow_req.flow_index = uc->rflow->id;
1120
1121 if (uc->config.needs_epib)
1122 flow_req.rx_einfo_present = 1;
1123 else
1124 flow_req.rx_einfo_present = 0;
1125
1126 if (uc->config.psd_size)
1127 flow_req.rx_psinfo_present = 1;
1128 else
1129 flow_req.rx_psinfo_present = 0;
1130
1131 flow_req.rx_error_handling = 0;
1132 flow_req.rx_desc_type = 0;
1133 flow_req.rx_dest_qnum = rx_ring;
1134 flow_req.rx_src_tag_hi_sel = 2;
1135 flow_req.rx_src_tag_lo_sel = 4;
1136 flow_req.rx_dest_tag_hi_sel = 5;
1137 flow_req.rx_dest_tag_lo_sel = 4;
1138 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1139 flow_req.rx_fdq1_qnum = fd_ring;
1140 flow_req.rx_fdq2_qnum = fd_ring;
1141 flow_req.rx_fdq3_qnum = fd_ring;
1142 flow_req.rx_ps_location = 0;
1143
1144 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1145 &flow_req);
1146 if (ret) {
1147 dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1148 uc->rchan->id, uc->rflow->id, ret);
1149 return ret;
1150 }
1151
1152 /*
1153 * Above TI SCI call handles firewall configuration, cfg
1154 * register configuration still has to be done locally in
1155 * absence of RM services.
1156 */
1157 if (IS_ENABLED(CONFIG_K3_DM_FW))
1158 udma_alloc_rchan_raw(uc);
1159
1160 return 0;
1161 }
1162
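/*
 * Top-level channel setup: reserve channels and rings for the
 * requested direction, derive the PSI-L source/destination threads,
 * push the TI-SCI configuration and finally pair the PSI-L threads.
 */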
1163 static int udma_alloc_chan_resources(struct udma_chan *uc)
1164 {
1165 struct udma_dev *ud = uc->ud;
1166 int ret;
1167
1168 pr_debug("%s: chan:%d as %s\n",
1169 __func__, uc->id, udma_get_dir_text(uc->config.dir));
1170
1171 switch (uc->config.dir) {
1172 case DMA_MEM_TO_MEM:
1173 /* Non synchronized - mem to mem type of transfer */
1174 uc->config.pkt_mode = false;
1175 ret = udma_get_chan_pair(uc);
1176 if (ret)
1177 return ret;
1178
1179 ret = udma_alloc_tx_resources(uc);
1180 if (ret)
1181 goto err_free_res;
1182
1183 ret = udma_alloc_rx_resources(uc);
1184 if (ret)
1185 goto err_free_res;
1186
1187 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1188 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1189 break;
1190 case DMA_MEM_TO_DEV:
1191 /* Slave transfer synchronized - mem to dev (TX) transfer */
1192 ret = udma_alloc_tx_resources(uc);
1193 if (ret)
1194 goto err_free_res;
1195
1196 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1197 uc->config.dst_thread = uc->config.remote_thread_id;
1198 uc->config.dst_thread |= 0x8000;
1199
1200 break;
1201 case DMA_DEV_TO_MEM:
1202 /* Slave transfer synchronized - dev to mem (RX) transfer */
1203 ret = udma_alloc_rx_resources(uc);
1204 if (ret)
1205 goto err_free_res;
1206
1207 uc->config.src_thread = uc->config.remote_thread_id;
1208 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1209
1210 break;
1211 default:
1212 /* Can not happen */
1213 pr_debug("%s: chan:%d invalid direction (%u)\n",
1214 __func__, uc->id, uc->config.dir);
1215 return -EINVAL;
1216 }
1217
1218 /* We have channel indexes and rings */
1219 if (uc->config.dir == DMA_MEM_TO_MEM) {
1220 ret = udma_alloc_tchan_sci_req(uc);
1221 if (ret)
1222 goto err_free_res;
1223
1224 ret = udma_alloc_rchan_sci_req(uc);
1225 if (ret)
1226 goto err_free_res;
1227 } else {
1228 /* Slave transfer */
1229 if (uc->config.dir == DMA_MEM_TO_DEV) {
1230 ret = udma_alloc_tchan_sci_req(uc);
1231 if (ret)
1232 goto err_free_res;
1233 } else {
1234 ret = udma_alloc_rchan_sci_req(uc);
1235 if (ret)
1236 goto err_free_res;
1237 }
1238 }
1239
1240 if (udma_is_chan_running(uc)) {
1241 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1242 udma_stop(uc);
1243 if (udma_is_chan_running(uc)) {
1244 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1245 goto err_free_res;
1246 }
1247 }
1248
1249 /* PSI-L pairing */
1250 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1251 if (ret) {
1252 dev_err(ud->dev, "PSI-L pairing failed (%d)\n", ret);
1253 goto err_free_res;
1254 }
1255
1256 return 0;
1257
1258 err_free_res:
1259 udma_free_tx_resources(uc);
1260 udma_free_rx_resources(uc);
1261 uc->config.remote_thread_id = -1;
1262 return ret;
1263 }
1264
1265 static void udma_free_chan_resources(struct udma_chan *uc)
1266 {
1267 /* Hard reset UDMA channel */
1268 udma_stop_hard(uc);
1269 udma_reset_counters(uc);
1270
1271 /* Release PSI-L pairing */
1272 udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
1273
1274 /* Reset the rings for a new start */
1275 udma_reset_rings(uc);
1276 udma_free_tx_resources(uc);
1277 udma_free_rx_resources(uc);
1278
1279 uc->config.remote_thread_id = -1;
1280 uc->config.dir = DMA_MEM_TO_MEM;
1281 }
1282
1283 static const char * const range_names[] = {
1284 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
1285 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
1286 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
1287 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
1288 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
1289 };
1290
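/*
 * Map the MMIO regions named in the DT and read CAP2/CAP3 (and CAP4
 * for PKTDMA) to discover how many channels and flows this instance
 * provides.
 */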
1291 static int udma_get_mmrs(struct udevice *dev)
1292 {
1293 struct udma_dev *ud = dev_get_priv(dev);
1294 u32 cap2, cap3, cap4;
1295 int i;
1296
1297 ud->mmrs[MMR_GCFG] = dev_read_addr_name_ptr(dev, mmr_names[MMR_GCFG]);
1298 if (!ud->mmrs[MMR_GCFG])
1299 return -EINVAL;
1300
1301 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1302 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1303
1304 switch (ud->match_data->type) {
1305 case DMA_TYPE_UDMA:
1306 ud->rflow_cnt = cap3 & 0x3fff;
1307 ud->tchan_cnt = cap2 & 0x1ff;
1308 ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1309 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1310 break;
1311 case DMA_TYPE_BCDMA:
1312 ud->bchan_cnt = cap2 & 0x1ff;
1313 ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
1314 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1315 break;
1316 case DMA_TYPE_PKTDMA:
1317 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
1318 ud->tchan_cnt = cap2 & 0x1ff;
1319 ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1320 ud->rflow_cnt = cap3 & 0x3fff;
1321 ud->tflow_cnt = cap4 & 0x3fff;
1322 break;
1323 default:
1324 return -EINVAL;
1325 }
1326
1327 for (i = 1; i < MMR_LAST; i++) {
1328 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
1329 continue;
1330 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
1331 continue;
1332 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
1333 continue;
1334
1335 ud->mmrs[i] = dev_read_addr_name_ptr(dev, mmr_names[i]);
1336 if (!ud->mmrs[i])
1337 return -EINVAL;
1338 }
1339
1340 return 0;
1341 }
1342
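/*
 * UDMA resource setup: allocate the channel/flow bitmaps and mark
 * everything outside the TI-SCI resource ranges as in use, so only
 * resources assigned to this host can be reserved.
 */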
1343 static int udma_setup_resources(struct udma_dev *ud)
1344 {
1345 struct udevice *dev = ud->dev;
1346 int i;
1347 struct ti_sci_resource_desc *rm_desc;
1348 struct ti_sci_resource *rm_res;
1349 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1350
1351 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1352 sizeof(unsigned long), GFP_KERNEL);
1353 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1354 GFP_KERNEL);
1355 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1356 sizeof(unsigned long), GFP_KERNEL);
1357 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1358 GFP_KERNEL);
1359 ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1360 sizeof(unsigned long), GFP_KERNEL);
1361 ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1362 sizeof(unsigned long),
1363 GFP_KERNEL);
1364 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1365 GFP_KERNEL);
1366
1367 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1368 !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1369 !ud->rflows)
1370 return -ENOMEM;
1371
1372 /*
1373 * RX flows with the same Ids as RX channels are reserved to be used
1374 * as default flows if remote HW can't generate flow_ids. Those
1375 * RX flows can be requested only explicitly by id.
1376 */
1377 bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1378
1379 /* Get resource ranges from tisci */
1380 for (i = 0; i < RM_RANGE_LAST; i++) {
1381 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
1382 continue;
1383
1384 tisci_rm->rm_ranges[i] =
1385 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1386 tisci_rm->tisci_dev_id,
1387 (char *)range_names[i]);
1388 }
1389
1390 /* tchan ranges */
1391 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1392 if (IS_ERR(rm_res)) {
1393 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1394 } else {
1395 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1396 for (i = 0; i < rm_res->sets; i++) {
1397 rm_desc = &rm_res->desc[i];
1398 bitmap_clear(ud->tchan_map, rm_desc->start,
1399 rm_desc->num);
1400 }
1401 }
1402
1403 /* rchan and matching default flow ranges */
1404 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1405 if (IS_ERR(rm_res)) {
1406 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1407 bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1408 } else {
1409 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1410 bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1411 for (i = 0; i < rm_res->sets; i++) {
1412 rm_desc = &rm_res->desc[i];
1413 bitmap_clear(ud->rchan_map, rm_desc->start,
1414 rm_desc->num);
1415 bitmap_clear(ud->rflow_map, rm_desc->start,
1416 rm_desc->num);
1417 }
1418 }
1419
1420 /* GP rflow ranges */
1421 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1422 if (IS_ERR(rm_res)) {
1423 bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1424 ud->rflow_cnt - ud->rchan_cnt);
1425 } else {
1426 bitmap_set(ud->rflow_map, ud->rchan_cnt,
1427 ud->rflow_cnt - ud->rchan_cnt);
1428 for (i = 0; i < rm_res->sets; i++) {
1429 rm_desc = &rm_res->desc[i];
1430 bitmap_clear(ud->rflow_map, rm_desc->start,
1431 rm_desc->num);
1432 }
1433 }
1434
1435 return 0;
1436 }
1437
1438 static int bcdma_setup_resources(struct udma_dev *ud)
1439 {
1440 int i;
1441 struct udevice *dev = ud->dev;
1442 struct ti_sci_resource_desc *rm_desc;
1443 struct ti_sci_resource *rm_res;
1444 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1445
1446 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1447 sizeof(unsigned long), GFP_KERNEL);
1448 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
1449 GFP_KERNEL);
1450 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1451 sizeof(unsigned long), GFP_KERNEL);
1452 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1453 GFP_KERNEL);
1454 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1455 sizeof(unsigned long), GFP_KERNEL);
1456 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1457 GFP_KERNEL);
1458 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
1459 GFP_KERNEL);
1460
1461 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
1462 !ud->bchans || !ud->tchans || !ud->rchans ||
1463 !ud->rflows)
1464 return -ENOMEM;
1465
1466 /* Get resource ranges from tisci */
1467 for (i = 0; i < RM_RANGE_LAST; i++) {
1468 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
1469 continue;
1470
1471 tisci_rm->rm_ranges[i] =
1472 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1473 tisci_rm->tisci_dev_id,
1474 (char *)range_names[i]);
1475 }
1476
1477 /* bchan ranges */
1478 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
1479 if (IS_ERR(rm_res)) {
1480 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
1481 } else {
1482 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
1483 for (i = 0; i < rm_res->sets; i++) {
1484 rm_desc = &rm_res->desc[i];
1485 bitmap_clear(ud->bchan_map, rm_desc->start,
1486 rm_desc->num);
1487 dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
1488 rm_desc->start, rm_desc->num);
1489 }
1490 }
1491
1492 /* tchan ranges */
1493 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1494 if (IS_ERR(rm_res)) {
1495 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1496 } else {
1497 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1498 for (i = 0; i < rm_res->sets; i++) {
1499 rm_desc = &rm_res->desc[i];
1500 bitmap_clear(ud->tchan_map, rm_desc->start,
1501 rm_desc->num);
1502 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1503 rm_desc->start, rm_desc->num);
1504 }
1505 }
1506
1507 /* rchan ranges */
1508 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1509 if (IS_ERR(rm_res)) {
1510 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1511 } else {
1512 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1513 for (i = 0; i < rm_res->sets; i++) {
1514 rm_desc = &rm_res->desc[i];
1515 bitmap_clear(ud->rchan_map, rm_desc->start,
1516 rm_desc->num);
1517 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1518 rm_desc->start, rm_desc->num);
1519 }
1520 }
1521
1522 return 0;
1523 }
1524
1525 static int pktdma_setup_resources(struct udma_dev *ud)
1526 {
1527 int i;
1528 struct udevice *dev = ud->dev;
1529 struct ti_sci_resource *rm_res;
1530 struct ti_sci_resource_desc *rm_desc;
1531 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1532
1533 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1534 sizeof(unsigned long), GFP_KERNEL);
1535 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1536 GFP_KERNEL);
1537 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1538 sizeof(unsigned long), GFP_KERNEL);
1539 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1540 GFP_KERNEL);
1541 ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1542 sizeof(unsigned long),
1543 GFP_KERNEL);
1544 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1545 GFP_KERNEL);
1546 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
1547 sizeof(unsigned long), GFP_KERNEL);
1548
1549 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
1550 !ud->rchans || !ud->rflows || !ud->rflow_map)
1551 return -ENOMEM;
1552
1553 /* Get resource ranges from tisci */
1554 for (i = 0; i < RM_RANGE_LAST; i++) {
1555 if (i == RM_RANGE_BCHAN)
1556 continue;
1557
1558 tisci_rm->rm_ranges[i] =
1559 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1560 tisci_rm->tisci_dev_id,
1561 (char *)range_names[i]);
1562 }
1563
1564 /* tchan ranges */
1565 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1566 if (IS_ERR(rm_res)) {
1567 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1568 } else {
1569 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1570 for (i = 0; i < rm_res->sets; i++) {
1571 rm_desc = &rm_res->desc[i];
1572 bitmap_clear(ud->tchan_map, rm_desc->start,
1573 rm_desc->num);
1574 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1575 rm_desc->start, rm_desc->num);
1576 }
1577 }
1578
1579 /* rchan ranges */
1580 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1581 if (IS_ERR(rm_res)) {
1582 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1583 } else {
1584 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1585 for (i = 0; i < rm_res->sets; i++) {
1586 rm_desc = &rm_res->desc[i];
1587 bitmap_clear(ud->rchan_map, rm_desc->start,
1588 rm_desc->num);
1589 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1590 rm_desc->start, rm_desc->num);
1591 }
1592 }
1593
1594 /* rflow ranges */
1595 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1596 if (IS_ERR(rm_res)) {
1597 /* all rflows are assigned exclusively to Linux */
1598 bitmap_zero(ud->rflow_map, ud->rflow_cnt);
1599 } else {
1600 bitmap_fill(ud->rflow_map, ud->rflow_cnt);
1601 for (i = 0; i < rm_res->sets; i++) {
1602 rm_desc = &rm_res->desc[i];
1603 bitmap_clear(ud->rflow_map, rm_desc->start,
1604 rm_desc->num);
1605 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
1606 rm_desc->start, rm_desc->num);
1607 }
1608 }
1609
1610 /* tflow ranges */
1611 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
1612 if (IS_ERR(rm_res)) {
1613 /* all tflows are assigned exclusively to Linux */
1614 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
1615 } else {
1616 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
1617 for (i = 0; i < rm_res->sets; i++) {
1618 rm_desc = &rm_res->desc[i];
1619 bitmap_clear(ud->tflow_map, rm_desc->start,
1620 rm_desc->num);
1621 dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
1622 rm_desc->start, rm_desc->num);
1623 }
1624 }
1625
1626 return 0;
1627 }
1628
1629 static int setup_resources(struct udma_dev *ud)
1630 {
1631 struct udevice *dev = ud->dev;
1632 int ch_count, ret;
1633
1634 switch (ud->match_data->type) {
1635 case DMA_TYPE_UDMA:
1636 ret = udma_setup_resources(ud);
1637 break;
1638 case DMA_TYPE_BCDMA:
1639 ret = bcdma_setup_resources(ud);
1640 break;
1641 case DMA_TYPE_PKTDMA:
1642 ret = pktdma_setup_resources(ud);
1643 break;
1644 default:
1645 return -EINVAL;
1646 }
1647
1648 if (ret)
1649 return ret;
1650
1651 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1652 if (ud->bchan_cnt)
1653 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
1654 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1655 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1656 if (!ch_count)
1657 return -ENODEV;
1658
1659 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1660 GFP_KERNEL);
1661 if (!ud->channels)
1662 return -ENOMEM;
1663
1664 switch (ud->match_data->type) {
1665 case DMA_TYPE_UDMA:
1666 dev_dbg(dev,
1667 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1668 ch_count,
1669 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1670 ud->tchan_cnt),
1671 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1672 ud->rchan_cnt),
1673 ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1674 ud->rflow_cnt));
1675 break;
1676 case DMA_TYPE_BCDMA:
1677 dev_dbg(dev,
1678 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1679 ch_count,
1680 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1681 ud->bchan_cnt),
1682 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1683 ud->tchan_cnt),
1684 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1685 ud->rchan_cnt));
1686 break;
1687 case DMA_TYPE_PKTDMA:
1688 dev_dbg(dev,
1689 "Channels: %d (tchan: %u, rchan: %u)\n",
1690 ch_count,
1691 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1692 ud->tchan_cnt),
1693 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1694 ud->rchan_cnt));
1695 break;
1696 default:
1697 break;
1698 }
1699
1700 return ch_count;
1701 }
1702
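/*
 * Probe: look up TI-SCI and the ring accelerator, discover the
 * available resources, initialize the per-channel/flow register
 * pointers and mark channel 0 as in use.
 */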
1703 static int udma_probe(struct udevice *dev)
1704 {
1705 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1706 struct udma_dev *ud = dev_get_priv(dev);
1707 int i, ret;
1708 struct udevice *tmp;
1709 struct udevice *tisci_dev = NULL;
1710 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1711 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1712
1714 ud->match_data = (void *)dev_get_driver_data(dev);
1715 ret = udma_get_mmrs(dev);
1716 if (ret)
1717 return ret;
1718
1719 ud->psil_base = ud->match_data->psil_base;
1720
1721 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1722 "ti,sci", &tisci_dev);
1723 if (ret) {
1724 debug("Failed to get TISCI phandle (%d)\n", ret);
1725 tisci_rm->tisci = NULL;
1726 return -EINVAL;
1727 }
1728 tisci_rm->tisci = (struct ti_sci_handle *)
1729 (ti_sci_get_handle_from_sysfw(tisci_dev));
1730
1731 tisci_rm->tisci_dev_id = -1;
1732 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1733 if (ret) {
1734 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1735 return ret;
1736 }
1737
1738 tisci_rm->tisci_navss_dev_id = -1;
1739 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1740 &tisci_rm->tisci_navss_dev_id);
1741 if (ret) {
1742 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1743 return ret;
1744 }
1745
1746 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1747 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
1748
1749 if (ud->match_data->type == DMA_TYPE_UDMA) {
1750 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1751 "ti,ringacc", &tmp);
1752 ud->ringacc = dev_get_priv(tmp);
1753 } else {
1754 struct k3_ringacc_init_data ring_init_data;
1755
1756 ring_init_data.tisci = ud->tisci_rm.tisci;
1757 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
1758 if (ud->match_data->type == DMA_TYPE_BCDMA) {
1759 ring_init_data.num_rings = ud->bchan_cnt +
1760 ud->tchan_cnt +
1761 ud->rchan_cnt;
1762 } else {
1763 ring_init_data.num_rings = ud->rflow_cnt +
1764 ud->tflow_cnt;
1765 }
1766
1767 ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
1768 }
1769 if (IS_ERR(ud->ringacc))
1770 return PTR_ERR(ud->ringacc);
1771
1772 ud->dev = dev;
1773 ud->ch_count = setup_resources(ud);
1774 if (ud->ch_count <= 0)
1775 return ud->ch_count;
1776
1777 for (i = 0; i < ud->bchan_cnt; i++) {
1778 struct udma_bchan *bchan = &ud->bchans[i];
1779
1780 bchan->id = i;
1781 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
1782 }
1783
1784 for (i = 0; i < ud->tchan_cnt; i++) {
1785 struct udma_tchan *tchan = &ud->tchans[i];
1786
1787 tchan->id = i;
1788 tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
1789 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1790 }
1791
1792 for (i = 0; i < ud->rchan_cnt; i++) {
1793 struct udma_rchan *rchan = &ud->rchans[i];
1794
1795 rchan->id = i;
1796 rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
1797 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1798 }
1799
1800 for (i = 0; i < ud->rflow_cnt; i++) {
1801 struct udma_rflow *rflow = &ud->rflows[i];
1802
1803 rflow->id = i;
1804 rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
1805 }
1806
1807 for (i = 0; i < ud->ch_count; i++) {
1808 struct udma_chan *uc = &ud->channels[i];
1809
1810 uc->ud = ud;
1811 uc->id = i;
1812 uc->config.remote_thread_id = -1;
1813 uc->bchan = NULL;
1814 uc->tchan = NULL;
1815 uc->rchan = NULL;
1816 uc->config.mapped_channel_id = -1;
1817 uc->config.default_flow_id = -1;
1818 uc->config.dir = DMA_MEM_TO_MEM;
1819 sprintf(uc->name, "UDMA chan%d", i);
1820 if (!i)
1821 uc->in_use = true;
1822 }
1823
1824 pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1825 dev->name,
1826 udma_read(ud->mmrs[MMR_GCFG], 0),
1827 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1828 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1829 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1830 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1831
1832 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1833
1834 return ret;
1835 }
1836
1837 static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1838 {
1839 u64 addr = 0;
1840
1841 memcpy(&addr, &elem, sizeof(elem));
1842 return k3_nav_ringacc_ring_push(ring, &addr);
1843 }
1844
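/*
 * Build a TR15 transfer request descriptor for a memcpy. Transfers
 * below 64K use a single TR; larger ones are split into tr0 (whole
 * blocks of SZ_64K minus the alignment) and tr1 (the remainder), e.g.
 * with 8-byte alignment and len = 200000: tr0_cnt0 = 65528,
 * tr0_cnt1 = 3, tr1_cnt0 = 3416.
 */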
1845 static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1846 dma_addr_t src, size_t len)
1847 {
1848 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1849 struct cppi5_tr_type15_t *tr_req;
1850 int num_tr;
1851 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1852 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1853 unsigned long dummy;
1854 void *tr_desc;
1855 size_t desc_size;
1856
1857 if (len < SZ_64K) {
1858 num_tr = 1;
1859 tr0_cnt0 = len;
1860 tr0_cnt1 = 1;
1861 } else {
1862 unsigned long align_to = __ffs(src | dest);
1863
1864 if (align_to > 3)
1865 align_to = 3;
1866 /*
1867 * Keep simple: tr0: SZ_64K-alignment blocks,
1868 * tr1: the remaining
1869 */
1870 num_tr = 2;
1871 tr0_cnt0 = (SZ_64K - BIT(align_to));
1872 if (len / tr0_cnt0 >= SZ_64K) {
1873 dev_err(uc->ud->dev, "size %zu is not supported\n",
1874 len);
1875 return NULL;
1876 }
1877
1878 tr0_cnt1 = len / tr0_cnt0;
1879 tr1_cnt0 = len % tr0_cnt0;
1880 }
1881
1882 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1883 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1884 if (!tr_desc)
1885 return NULL;
1886 memset(tr_desc, 0, desc_size);
1887
1888 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1889 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1890 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1891
1892 tr_req = tr_desc + tr_size;
1893
1894 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1895 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1896 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1897
1898 tr_req[0].addr = src;
1899 tr_req[0].icnt0 = tr0_cnt0;
1900 tr_req[0].icnt1 = tr0_cnt1;
1901 tr_req[0].icnt2 = 1;
1902 tr_req[0].icnt3 = 1;
1903 tr_req[0].dim1 = tr0_cnt0;
1904
1905 tr_req[0].daddr = dest;
1906 tr_req[0].dicnt0 = tr0_cnt0;
1907 tr_req[0].dicnt1 = tr0_cnt1;
1908 tr_req[0].dicnt2 = 1;
1909 tr_req[0].dicnt3 = 1;
1910 tr_req[0].ddim1 = tr0_cnt0;
1911
1912 if (num_tr == 2) {
1913 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1914 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1915 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1916
1917 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1918 tr_req[1].icnt0 = tr1_cnt0;
1919 tr_req[1].icnt1 = 1;
1920 tr_req[1].icnt2 = 1;
1921 tr_req[1].icnt3 = 1;
1922
1923 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1924 tr_req[1].dicnt0 = tr1_cnt0;
1925 tr_req[1].dicnt1 = 1;
1926 tr_req[1].dicnt2 = 1;
1927 tr_req[1].dicnt3 = 1;
1928 }
1929
1930 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1931
1932 flush_dcache_range((unsigned long)tr_desc,
1933 ALIGN((unsigned long)tr_desc + desc_size,
1934 ARCH_DMA_MINALIGN));
1935
1936 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
1937
1938 return 0;
1939 }
1940
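/* Parameter validity masks for the TI-SCI channel configuration requests below. */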
1941 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1942 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1943 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1944
1945 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1946 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1947 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1948
1949 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1950 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1951
1952 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1953 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1954 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1955 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1956 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1957 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1958 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1959 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1960 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1961
1962 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1963 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1964 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1965 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1966 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1967 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1968 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1969 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1970 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1971 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1972
1973 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1974 {
1975 struct udma_dev *ud = uc->ud;
1976 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1977 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1978 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1979 struct udma_bchan *bchan = uc->bchan;
1980 int ret = 0;
1981
1982 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1983 req_tx.nav_id = tisci_rm->tisci_dev_id;
1984 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1985 req_tx.index = bchan->id;
1986
1987 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1988 if (ret)
1989 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1990
1991 return ret;
1992 }
1993
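/* Reserve a BCDMA block-copy channel, either by id or the first free one. */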
1994 static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
1995 {
1996 if (id >= 0) {
1997 if (test_bit(id, ud->bchan_map)) {
1998 dev_err(ud->dev, "bchan%d is in use\n", id);
1999 return ERR_PTR(-ENOENT);
2000 }
2001 } else {
2002 id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
2003 if (id == ud->bchan_cnt)
2004 return ERR_PTR(-ENOENT);
2005 }
2006 __set_bit(id, ud->bchan_map);
2007 return &ud->bchans[id];
2008 }
2009
2010 static int bcdma_get_bchan(struct udma_chan *uc)
2011 {
2012 struct udma_dev *ud = uc->ud;
2013
2014 if (uc->bchan) {
2015 dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
2016 uc->id, uc->bchan->id);
2017 return 0;
2018 }
2019
2020 uc->bchan = __bcdma_reserve_bchan(ud, -1);
2021 if (IS_ERR(uc->bchan))
2022 return PTR_ERR(uc->bchan);
2023
2024 uc->tchan = uc->bchan;
2025
2026 return 0;
2027 }
2028
2029 static void bcdma_put_bchan(struct udma_chan *uc)
2030 {
2031 struct udma_dev *ud = uc->ud;
2032
2033 if (uc->bchan) {
2034 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
2035 uc->bchan->id);
2036 __clear_bit(uc->bchan->id, ud->bchan_map);
2037 uc->bchan = NULL;
2038 uc->tchan = NULL;
2039 }
2040 }
2041
2042 static void bcdma_free_bchan_resources(struct udma_chan *uc)
2043 {
2044 if (!uc->bchan)
2045 return;
2046
2047 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2048 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2049 uc->bchan->tc_ring = NULL;
2050 uc->bchan->t_ring = NULL;
2051
2052 bcdma_put_bchan(uc);
2053 }
2054
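/*
 * Reserve a bchan and request its ring pair from the ring accelerator,
 * then configure the transmit ring as a 16-element, 8-byte, RING-mode
 * ring. Everything is released again if any step fails.
 */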
2055 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
2056 {
2057 struct k3_nav_ring_cfg ring_cfg;
2058 struct udma_dev *ud = uc->ud;
2059 int ret;
2060
2061 ret = bcdma_get_bchan(uc);
2062 if (ret)
2063 return ret;
2064
2065 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
2066 &uc->bchan->t_ring,
2067 &uc->bchan->tc_ring);
2068 if (ret) {
2069 ret = -EBUSY;
2070 goto err_ring;
2071 }
2072
2073 memset(&ring_cfg, 0, sizeof(ring_cfg));
2074 ring_cfg.size = 16;
2075 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
2076 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
2077
2078 ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
2079 if (ret)
2080 goto err_ringcfg;
2081
2082 return 0;
2083
2084 err_ringcfg:
2085 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2086 uc->bchan->tc_ring = NULL;
2087 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2088 uc->bchan->t_ring = NULL;
2089 err_ring:
2090 bcdma_put_bchan(uc);
2091
2092 return ret;
2093 }
2094
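/*
 * Configure a BCDMA TX channel through TISCI. For PDMA endpoints on
 * devices with UDMA_FLAG_TDTYPE the channel is additionally told to
 * wait for the peer to complete the teardown.
 */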
2095 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2096 {
2097 struct udma_dev *ud = uc->ud;
2098 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2099 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2100 struct udma_tchan *tchan = uc->tchan;
2101 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2102 int ret = 0;
2103
2104 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2105 req_tx.nav_id = tisci_rm->tisci_dev_id;
2106 req_tx.index = tchan->id;
2107 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2108 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
2109 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2110 /* wait for peer to complete the teardown for PDMAs */
2111 req_tx.valid_params |=
2112 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2113 req_tx.tx_tdtype = 1;
2114 }
2115
2116 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2117 if (ret)
2118 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2119
2120 return ret;
2121 }
2122
2123 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2124
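/*
 * Configure a PKTDMA RX channel and its default flow through TISCI.
 * EPIB and PS-data presence in the flow follow the channel
 * configuration; RX error handling (retry) is left disabled.
 */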
2125 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2126 {
2127 struct udma_dev *ud = uc->ud;
2128 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2129 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2130 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2131 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2132 int ret = 0;
2133
2134 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2135 req_rx.nav_id = tisci_rm->tisci_dev_id;
2136 req_rx.index = uc->rchan->id;
2137
2138 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2139 if (ret) {
2140 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2141 return ret;
2142 }
2143
2144 flow_req.valid_params =
2145 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2146 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2147 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2148
2149 flow_req.nav_id = tisci_rm->tisci_dev_id;
2150 flow_req.flow_index = uc->rflow->id;
2151
2152 if (uc->config.needs_epib)
2153 flow_req.rx_einfo_present = 1;
2154 else
2155 flow_req.rx_einfo_present = 0;
2156 if (uc->config.psd_size)
2157 flow_req.rx_psinfo_present = 1;
2158 else
2159 flow_req.rx_psinfo_present = 0;
2160 flow_req.rx_error_handling = 0;
2161
2162 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2163
2164 if (ret)
2165 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2166 ret);
2167
2168 return ret;
2169 }
2170
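/*
 * Allocate resources for a BCDMA channel. Only MEM_TO_MEM is handled
 * here (TR mode via a bchan); the channel is configured through TISCI
 * and must be idle before the rings are reset.
 */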
2171 static int bcdma_alloc_chan_resources(struct udma_chan *uc)
2172 {
2173 int ret;
2174
2175 uc->config.pkt_mode = false;
2176
2177 switch (uc->config.dir) {
2178 case DMA_MEM_TO_MEM:
2179 /* Non synchronized - mem to mem type of transfer */
2180 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2181 uc->id);
2182
2183 ret = bcdma_alloc_bchan_resources(uc);
2184 if (ret)
2185 return ret;
2186
2187 ret = bcdma_tisci_m2m_channel_config(uc);
2188 break;
2189 default:
2190 /* Cannot happen */
2191 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2192 __func__, uc->id, uc->config.dir);
2193 return -EINVAL;
2194 }
2195
2196 /* check if the channel configuration was successful */
2197 if (ret)
2198 goto err_res_free;
2199
2200 if (udma_is_chan_running(uc)) {
2201 dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
2202 udma_stop(uc);
2203 if (udma_is_chan_running(uc)) {
2204 dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
2205 goto err_res_free;
2206 }
2207 }
2208
2209 udma_reset_rings(uc);
2210
2211 return 0;
2212
2213 err_res_free:
2214 bcdma_free_bchan_resources(uc);
2215 udma_free_tx_resources(uc);
2216 udma_free_rx_resources(uc);
2217
2218 udma_reset_uchan(uc);
2219
2220 return ret;
2221 }
2222
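/*
 * Allocate resources for a PKTDMA channel: set up the TX or RX side,
 * derive the PSI-L source/destination thread IDs, configure the channel
 * through TISCI and pair the PSI-L threads.
 */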
2223 static int pktdma_alloc_chan_resources(struct udma_chan *uc)
2224 {
2225 struct udma_dev *ud = uc->ud;
2226 int ret;
2227
2228 switch (uc->config.dir) {
2229 case DMA_MEM_TO_DEV:
2230 /* Slave transfer synchronized - mem to dev (TX) transfer */
2231 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2232 uc->id);
2233
2234 ret = udma_alloc_tx_resources(uc);
2235 if (ret) {
2236 uc->config.remote_thread_id = -1;
2237 return ret;
2238 }
2239
2240 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2241 uc->config.dst_thread = uc->config.remote_thread_id;
2242 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2243
2244 ret = pktdma_tisci_tx_channel_config(uc);
2245 break;
2246 case DMA_DEV_TO_MEM:
2247 /* Slave transfer synchronized - dev to mem (RX) transfer */
2248 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2249 uc->id);
2250
2251 ret = udma_alloc_rx_resources(uc);
2252 if (ret) {
2253 uc->config.remote_thread_id = -1;
2254 return ret;
2255 }
2256
2257 uc->config.src_thread = uc->config.remote_thread_id;
2258 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2259 K3_PSIL_DST_THREAD_ID_OFFSET;
2260
2261 ret = pktdma_tisci_rx_channel_config(uc);
2262 break;
2263 default:
2264 /* Cannot happen */
2265 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2266 __func__, uc->id, uc->config.dir);
2267 return -EINVAL;
2268 }
2269
2270 /* check if the channel configuration was successful */
2271 if (ret)
2272 goto err_res_free;
2273
2274 /* PSI-L pairing */
2275 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2276 if (ret) {
2277 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2278 uc->config.src_thread, uc->config.dst_thread);
2279 goto err_res_free;
2280 }
2281
2282 if (udma_is_chan_running(uc)) {
2283 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2284 udma_stop(uc);
2285 if (udma_is_chan_running(uc)) {
2286 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2287 goto err_res_free;
2288 }
2289 }
2290
2291 udma_reset_rings(uc);
2292
2293 if (uc->tchan)
2294 dev_dbg(ud->dev,
2295 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2296 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2297 uc->config.remote_thread_id);
2298 else if (uc->rchan)
2299 dev_dbg(ud->dev,
2300 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2301 uc->id, uc->rchan->id, uc->rflow->id,
2302 uc->config.remote_thread_id);
2303 return 0;
2304
2305 err_res_free:
2306 udma_free_tx_resources(uc);
2307 udma_free_rx_resources(uc);
2308
2309 udma_reset_uchan(uc);
2310
2311 return ret;
2312 }
2313
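/*
 * DMA uclass .transfer callback: a blocking memcpy on channel 0, which
 * is reserved for MEM_TO_MEM transfers (plain UDMA or a BCDMA bchan).
 */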
2314 static int udma_transfer(struct udevice *dev, int direction,
2315 dma_addr_t dst, dma_addr_t src, size_t len)
2316 {
2317 struct udma_dev *ud = dev_get_priv(dev);
2318 /* Channel 0 is reserved for memcpy */
2319 struct udma_chan *uc = &ud->channels[0];
2320 dma_addr_t paddr = 0;
2321 int ret;
2322
2323 switch (ud->match_data->type) {
2324 case DMA_TYPE_UDMA:
2325 ret = udma_alloc_chan_resources(uc);
2326 break;
2327 case DMA_TYPE_BCDMA:
2328 ret = bcdma_alloc_chan_resources(uc);
2329 break;
2330 default:
2331 return -EINVAL;
2332 }
2333 if (ret)
2334 return ret;
2335
2336 udma_prep_dma_memcpy(uc, dst, src, len);
2337 udma_start(uc);
2338 udma_poll_completion(uc, &paddr);
2339 udma_stop(uc);
2340
2341 switch (ud->match_data->type) {
2342 case DMA_TYPE_UDMA:
2343 udma_free_chan_resources(uc);
2344 break;
2345 case DMA_TYPE_BCDMA:
2346 bcdma_free_bchan_resources(uc);
2347 break;
2348 default:
2349 return -EINVAL;
2350 }
2351
2352 return 0;
2353 }
2354
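/*
 * DMA uclass .request callback: allocate the hardware resources for the
 * channel picked by .of_xlate and the coherent descriptor memory for its
 * direction (one TX descriptor, or UDMA_RX_DESC_NUM RX descriptors).
 */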
2355 static int udma_request(struct dma *dma)
2356 {
2357 struct udma_dev *ud = dev_get_priv(dma->dev);
2358 struct udma_chan_config *ucc;
2359 struct udma_chan *uc;
2360 unsigned long dummy;
2361 int ret;
2362
2363 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2364 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2365 return -EINVAL;
2366 }
2367
2368 uc = &ud->channels[dma->id];
2369 ucc = &uc->config;
2370 switch (ud->match_data->type) {
2371 case DMA_TYPE_UDMA:
2372 ret = udma_alloc_chan_resources(uc);
2373 break;
2374 case DMA_TYPE_BCDMA:
2375 ret = bcdma_alloc_chan_resources(uc);
2376 break;
2377 case DMA_TYPE_PKTDMA:
2378 ret = pktdma_alloc_chan_resources(uc);
2379 break;
2380 default:
2381 return -EINVAL;
2382 }
2383 if (ret) {
2384 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
2385 return -EINVAL;
2386 }
2387
2388 if (uc->config.dir == DMA_MEM_TO_DEV) {
2389 uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
2390 memset(uc->desc_tx, 0, ucc->hdesc_size);
2391 } else {
2392 uc->desc_rx = dma_alloc_coherent(
2393 ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
2394 memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
2395 }
2396
2397 uc->in_use = true;
2398 uc->desc_rx_cur = 0;
2399 uc->num_rx_bufs = 0;
2400
2401 if (uc->config.dir == DMA_DEV_TO_MEM) {
2402 uc->cfg_data.flow_id_base = uc->rflow->id;
2403 uc->cfg_data.flow_id_cnt = 1;
2404 }
2405
2406 return 0;
2407 }
2408
2409 static int udma_rfree(struct dma *dma)
2410 {
2411 struct udma_dev *ud = dev_get_priv(dma->dev);
2412 struct udma_chan *uc;
2413
2414 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2415 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2416 return -EINVAL;
2417 }
2418 uc = &ud->channels[dma->id];
2419
2420 if (udma_is_chan_running(uc))
2421 udma_stop(uc);
2422
2423 udma_navss_psil_unpair(ud, uc->config.src_thread,
2424 uc->config.dst_thread);
2425
2426 bcdma_free_bchan_resources(uc);
2427 udma_free_tx_resources(uc);
2428 udma_free_rx_resources(uc);
2429 udma_reset_uchan(uc);
2430
2431 uc->in_use = false;
2432
2433 return 0;
2434 }
2435
2436 static int udma_enable(struct dma *dma)
2437 {
2438 struct udma_dev *ud = dev_get_priv(dma->dev);
2439 struct udma_chan *uc;
2440 int ret;
2441
2442 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2443 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2444 return -EINVAL;
2445 }
2446 uc = &ud->channels[dma->id];
2447
2448 ret = udma_start(uc);
2449
2450 return ret;
2451 }
2452
2453 static int udma_disable(struct dma *dma)
2454 {
2455 struct udma_dev *ud = dev_get_priv(dma->dev);
2456 struct udma_chan *uc;
2457 int ret = 0;
2458
2459 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2460 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2461 return -EINVAL;
2462 }
2463 uc = &ud->channels[dma->id];
2464
2465 if (udma_is_chan_running(uc))
2466 ret = udma_stop(uc);
2467 else
2468 dev_err(dma->dev, "%s not running\n", __func__);
2469
2470 return ret;
2471 }
2472
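/*
 * DMA uclass .send callback (MEM_TO_DEV only): build a CPPI5 host
 * descriptor for the buffer, flush both the buffer and the descriptor to
 * memory, push the descriptor to the TX ring and busy-wait for the
 * completion to come back.
 */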
2473 static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
2474 {
2475 struct udma_dev *ud = dev_get_priv(dma->dev);
2476 struct cppi5_host_desc_t *desc_tx;
2477 dma_addr_t dma_src = (dma_addr_t)src;
2478 struct ti_udma_drv_packet_data packet_data = { 0 };
2479 dma_addr_t paddr;
2480 struct udma_chan *uc;
2481 u32 tc_ring_id;
2482 int ret;
2483
2484 if (metadata)
2485 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
2486
2487 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2488 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2489 return -EINVAL;
2490 }
2491 uc = &ud->channels[dma->id];
2492
2493 if (uc->config.dir != DMA_MEM_TO_DEV)
2494 return -EINVAL;
2495
2496 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
2497
2498 desc_tx = uc->desc_tx;
2499
2500 cppi5_hdesc_reset_hbdesc(desc_tx);
2501
2502 cppi5_hdesc_init(desc_tx,
2503 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2504 uc->config.psd_size);
2505 cppi5_hdesc_set_pktlen(desc_tx, len);
2506 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
2507 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
2508 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
2509 /* packet information below is passed in by the caller */
2510 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
2511 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
2512
2513 flush_dcache_range((unsigned long)dma_src,
2514 ALIGN((unsigned long)dma_src + len,
2515 ARCH_DMA_MINALIGN));
2516 flush_dcache_range((unsigned long)desc_tx,
2517 ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
2518 ARCH_DMA_MINALIGN));
2519
2520 ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
2521 if (ret) {
2522 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
2523 dma->id, ret);
2524 return ret;
2525 }
2526
2527 udma_poll_completion(uc, &paddr);
2528
2529 return 0;
2530 }
2531
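/*
 * DMA uclass .receive callback (DEV_TO_MEM only): pop a completed CPPI5
 * host descriptor from the receive ring, invalidate the descriptor and
 * its buffer, and return the packet length (0 when nothing is pending).
 */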
2532 static int udma_receive(struct dma *dma, void **dst, void *metadata)
2533 {
2534 struct udma_dev *ud = dev_get_priv(dma->dev);
2535 struct udma_chan_config *ucc;
2536 struct cppi5_host_desc_t *desc_rx;
2537 dma_addr_t buf_dma;
2538 struct udma_chan *uc;
2539 u32 buf_dma_len, pkt_len;
2540 u32 port_id = 0;
2541 int ret;
2542
2543 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2544 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2545 return -EINVAL;
2546 }
2547 uc = &ud->channels[dma->id];
2548 ucc = &uc->config;
2549
2550 if (uc->config.dir != DMA_DEV_TO_MEM)
2551 return -EINVAL;
2552 if (!uc->num_rx_bufs)
2553 return -EINVAL;
2554
2555 ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
2556 if (ret && ret != -ENODATA) {
2557 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
2558 return ret;
2559 } else if (ret == -ENODATA) {
2560 return 0;
2561 }
2562
2563 /* invalidate cache data */
2564 invalidate_dcache_range((ulong)desc_rx,
2565 (ulong)desc_rx + ucc->hdesc_size);
2566
2567 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
2568 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
2569
2570 /* invalidate cache data */
2571 invalidate_dcache_range((ulong)buf_dma,
2572 (ulong)(buf_dma + buf_dma_len));
2573
2574 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
2575
2576 *dst = (void *)buf_dma;
2577 uc->num_rx_bufs--;
2578
2579 return pkt_len;
2580 }
2581
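/*
 * DMA uclass .of_xlate callback: grab the first unused channel, derive
 * the transfer direction from the remote PSI-L thread ID and pull the
 * endpoint configuration (packet mode, EPIB/PS-data sizes, ...) from the
 * PSI-L database.
 */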
2582 static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
2583 {
2584 struct udma_chan_config *ucc;
2585 struct udma_dev *ud = dev_get_priv(dma->dev);
2586 struct udma_chan *uc = &ud->channels[0];
2587 struct psil_endpoint_config *ep_config;
2588 u32 val;
2589
2590 for (val = 0; val < ud->ch_count; val++) {
2591 uc = &ud->channels[val];
2592 if (!uc->in_use)
2593 break;
2594 }
2595
2596 if (val == ud->ch_count)
2597 return -EBUSY;
2598
2599 ucc = &uc->config;
2600 ucc->remote_thread_id = args->args[0];
2601 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2602 ucc->dir = DMA_MEM_TO_DEV;
2603 else
2604 ucc->dir = DMA_DEV_TO_MEM;
2605
2606 ep_config = psil_get_ep_config(ucc->remote_thread_id);
2607 if (IS_ERR(ep_config)) {
2608 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
2609 uc->config.remote_thread_id);
2610 ucc->dir = DMA_MEM_TO_MEM;
2611 ucc->remote_thread_id = -1;
2612 return -EINVAL;
2613 }
2614
2615 ucc->pkt_mode = ep_config->pkt_mode;
2616 ucc->channel_tpl = ep_config->channel_tpl;
2617 ucc->notdpkt = ep_config->notdpkt;
2618 ucc->ep_type = ep_config->ep_type;
2619
2620 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
2621 ep_config->mapped_channel_id >= 0) {
2622 ucc->mapped_channel_id = ep_config->mapped_channel_id;
2623 ucc->default_flow_id = ep_config->default_flow_id;
2624 } else {
2625 ucc->mapped_channel_id = -1;
2626 ucc->default_flow_id = -1;
2627 }
2628
2629 ucc->needs_epib = ep_config->needs_epib;
2630 ucc->psd_size = ep_config->psd_size;
2631 ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
2632
2633 ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
2634 ucc->psd_size, 0);
2635 ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
2636
2637 dma->id = uc->id;
2638 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
2639 dma->id, ucc->needs_epib,
2640 ucc->psd_size, ucc->metadata_size,
2641 ucc->remote_thread_id);
2642
2643 return 0;
2644 }
2645
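/*
 * DMA uclass .prepare_rcv_buf callback: initialize the next RX host
 * descriptor for @dst and push it to the free-descriptor ring so the
 * hardware can land an incoming packet in it.
 */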
2646 int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
2647 {
2648 struct udma_dev *ud = dev_get_priv(dma->dev);
2649 struct cppi5_host_desc_t *desc_rx;
2650 dma_addr_t dma_dst;
2651 struct udma_chan *uc;
2652 u32 desc_num;
2653
2654 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2655 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2656 return -EINVAL;
2657 }
2658 uc = &ud->channels[dma->id];
2659
2660 if (uc->config.dir != DMA_DEV_TO_MEM)
2661 return -EINVAL;
2662
2663 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
2664 return -EINVAL;
2665
2666 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
2667 desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
2668 dma_dst = (dma_addr_t)dst;
2669
2670 cppi5_hdesc_reset_hbdesc(desc_rx);
2671
2672 cppi5_hdesc_init(desc_rx,
2673 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2674 uc->config.psd_size);
2675 cppi5_hdesc_set_pktlen(desc_rx, size);
2676 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
2677
2678 flush_dcache_range((unsigned long)desc_rx,
2679 ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
2680 ARCH_DMA_MINALIGN));
2681
2682 udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
2683
2684 uc->num_rx_bufs++;
2685 uc->desc_rx_cur++;
2686
2687 return 0;
2688 }
2689
2690 static int udma_get_cfg(struct dma *dma, u32 id, void **data)
2691 {
2692 struct udma_dev *ud = dev_get_priv(dma->dev);
2693 struct udma_chan *uc;
2694
2695 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2696 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2697 return -EINVAL;
2698 }
2699
2700 switch (id) {
2701 case TI_UDMA_CHAN_PRIV_INFO:
2702 uc = &ud->channels[dma->id];
2703 *data = &uc->cfg_data;
2704 return 0;
2705 }
2706
2707 return -EINVAL;
2708 }
2709
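/*
 * Typical use from a client driver, as a rough sketch only (the exact
 * sequence depends on the client; the helpers are the generic dma-uclass
 * ones declared in include/dma.h):
 *
 *	struct dma dma_tx;
 *
 *	ret = dma_get_by_name(dev, "tx", &dma_tx);
 *	if (!ret)
 *		ret = dma_enable(&dma_tx);
 *	if (!ret)
 *		ret = dma_send(&dma_tx, buf, len, NULL);
 */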
2710 static const struct dma_ops udma_ops = {
2711 .transfer = udma_transfer,
2712 .of_xlate = udma_of_xlate,
2713 .request = udma_request,
2714 .rfree = udma_rfree,
2715 .enable = udma_enable,
2716 .disable = udma_disable,
2717 .send = udma_send,
2718 .receive = udma_receive,
2719 .prepare_rcv_buf = udma_prepare_rcv_buf,
2720 .get_cfg = udma_get_cfg,
2721 };
2722
2723 static struct udma_match_data am654_main_data = {
2724 .type = DMA_TYPE_UDMA,
2725 .psil_base = 0x1000,
2726 .enable_memcpy_support = true,
2727 .statictr_z_mask = GENMASK(11, 0),
2728 .oes = {
2729 .udma_rchan = 0x200,
2730 },
2731 .tpl_levels = 2,
2732 .level_start_idx = {
2733 [0] = 8, /* Normal channels */
2734 [1] = 0, /* High Throughput channels */
2735 },
2736 };
2737
2738 static struct udma_match_data am654_mcu_data = {
2739 .type = DMA_TYPE_UDMA,
2740 .psil_base = 0x6000,
2741 .enable_memcpy_support = true,
2742 .statictr_z_mask = GENMASK(11, 0),
2743 .oes = {
2744 .udma_rchan = 0x200,
2745 },
2746 .tpl_levels = 2,
2747 .level_start_idx = {
2748 [0] = 2, /* Normal channels */
2749 [1] = 0, /* High Throughput channels */
2750 },
2751 };
2752
2753 static struct udma_match_data j721e_main_data = {
2754 .type = DMA_TYPE_UDMA,
2755 .psil_base = 0x1000,
2756 .enable_memcpy_support = true,
2757 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2758 .statictr_z_mask = GENMASK(23, 0),
2759 .oes = {
2760 .udma_rchan = 0x400,
2761 },
2762 .tpl_levels = 3,
2763 .level_start_idx = {
2764 [0] = 16, /* Normal channels */
2765 [1] = 4, /* High Throughput channels */
2766 [2] = 0, /* Ultra High Throughput channels */
2767 },
2768 };
2769
2770 static struct udma_match_data j721e_mcu_data = {
2771 .type = DMA_TYPE_UDMA,
2772 .psil_base = 0x6000,
2773 .enable_memcpy_support = true,
2774 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2775 .statictr_z_mask = GENMASK(23, 0),
2776 .oes = {
2777 .udma_rchan = 0x400,
2778 },
2779 .tpl_levels = 2,
2780 .level_start_idx = {
2781 [0] = 2, /* Normal channels */
2782 [1] = 0, /* High Throughput channels */
2783 },
2784 };
2785
2786 static struct udma_match_data am64_bcdma_data = {
2787 .type = DMA_TYPE_BCDMA,
2788 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
2789 .enable_memcpy_support = true, /* Supported via bchan */
2790 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2791 .statictr_z_mask = GENMASK(23, 0),
2792 .oes = {
2793 .bcdma_bchan_data = 0x2200,
2794 .bcdma_bchan_ring = 0x2400,
2795 .bcdma_tchan_data = 0x2800,
2796 .bcdma_tchan_ring = 0x2a00,
2797 .bcdma_rchan_data = 0x2e00,
2798 .bcdma_rchan_ring = 0x3000,
2799 },
2800 /* No throughput levels */
2801 };
2802
2803 static struct udma_match_data am64_pktdma_data = {
2804 .type = DMA_TYPE_PKTDMA,
2805 .psil_base = 0x1000,
2806 .enable_memcpy_support = false,
2807 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2808 .statictr_z_mask = GENMASK(23, 0),
2809 .oes = {
2810 .pktdma_tchan_flow = 0x1200,
2811 .pktdma_rchan_flow = 0x1600,
2812 },
2813 /* No throughput levels */
2814 };
2815
2816 static const struct udevice_id udma_ids[] = {
2817 {
2818 .compatible = "ti,am654-navss-main-udmap",
2819 .data = (ulong)&am654_main_data,
2820 },
2821 {
2822 .compatible = "ti,am654-navss-mcu-udmap",
2823 .data = (ulong)&am654_mcu_data,
2824 }, {
2825 .compatible = "ti,j721e-navss-main-udmap",
2826 .data = (ulong)&j721e_main_data,
2827 }, {
2828 .compatible = "ti,j721e-navss-mcu-udmap",
2829 .data = (ulong)&j721e_mcu_data,
2830 },
2831 {
2832 .compatible = "ti,am64-dmss-bcdma",
2833 .data = (ulong)&am64_bcdma_data,
2834 },
2835 {
2836 .compatible = "ti,am64-dmss-pktdma",
2837 .data = (ulong)&am64_pktdma_data,
2838 },
2839 { /* Sentinel */ },
2840 };
2841
2842 U_BOOT_DRIVER(ti_edma3) = {
2843 .name = "ti-udma",
2844 .id = UCLASS_DMA,
2845 .of_match = udma_ids,
2846 .ops = &udma_ops,
2847 .probe = udma_probe,
2848 .priv_auto = sizeof(struct udma_dev),
2849 };