// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <dt-bindings/dma/k3-udma.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"

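/*
 * Ring push/pop is a 64-bit access. The assumption here is that 32-bit
 * hosts cannot issue it as a single access and therefore go through the
 * ringacc proxy; the selection below only depends on BITS_PER_LONG.
 */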
#if BITS_PER_LONG == 64
#define RINGACC_RING_USE_PROXY	(0)
#else
#define RINGACC_RING_USE_PROXY	(1)
#endif

#define K3_UDMA_MAX_RFLOWS	1024

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	"gcfg", "rchanrt", "tchanrt"
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rflow {
	int id;
};

enum udma_rm_range {
	RM_RANGE_TCHAN = 0,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	int slave_thread_id;
	u32 src_thread;
	u32 dst_thread;
	u32 static_tr_type;

	u32 id;
	enum dma_direction dir;

	struct cppi5_host_desc_t *desc_tx;
	u32 hdesc_size;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;
};

/* Parenthesize the macro argument so expressions expand safely */
#define UDMA_CH_1000(ch)	((ch) * 0x1000)
#define UDMA_CH_100(ch)		((ch) * 0x100)
#define UDMA_CH_40(ch)		((ch) * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM	PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM	4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

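/*
 * PSI-L destination threads are distinguished from source threads by a set
 * high bit (UDMA_PSIL_DST_THREAD_ID_OFFSET, mirroring the 0x8000 ORed into
 * dst_thread elsewhere in this driver), so make sure it is set before asking
 * System Firmware to pair or unpair the threads.
 */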
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rchan->fd_ring;
		ring2 = uc->rchan->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, 0);
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

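/*
 * Clear the channel statistics by writing back the value just read: the
 * read-back pattern below relies on the RT counter registers decrementing
 * by the written amount (assumed UDMAP behavior), which zeroes each counter.
 */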
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s (static_tr_type: %d)\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir),
		 uc->static_tr_type);

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

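/*
 * Graceful stop: request a channel teardown and, when sync is set, poll
 * roughly a millisecond for the enable bit to clear before checking that
 * the PSI-L peer has stopped as well.
 */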
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	udma_reset_counters(uc);
	switch (uc->dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

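/*
 * rflows 0..rchan_cnt-1 are default flows matched to the rchan with the same
 * index and may only be taken by explicit id; dynamic allocation therefore
 * searches from rchan_cnt upwards, skipping ids marked in rflow_map_reserved.
 */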
static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

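/*
 * Generate __udma_reserve_tchan() and __udma_reserve_rchan(): reserve the
 * channel with the given id, or the first free one when id is negative.
 */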
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, #res "%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		id = find_first_zero_bit(ud->res##_map, ud->res##_cnt);	\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	__set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, -1);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, -1);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

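/*
 * MEM_TO_MEM transfers use a tchan/rchan pair with the same index, so look
 * for an index that is still free in both channel maps.
 */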
static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	uc->tchan->t_ring = k3_nav_ringacc_request_ring(
			ud->ringacc, uc->tchan->id,
			RINGACC_RING_USE_PROXY);
	if (!uc->tchan->t_ring) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	uc->tchan->tc_ring = k3_nav_ringacc_request_ring(
			ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->tchan->tc_ring) {
		ret = -EBUSY;
		goto err_txc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
err_txc_ring:
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->fd_ring = NULL;
	uc->rchan->r_ring = NULL;

	udma_put_rflow(uc);
	udma_put_rchan(uc);
}

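/*
 * Hardware rings are assumed to be laid out in order: one per tchan, then
 * the external channels, then one free-descriptor ring per rchan; hence the
 * fd_ring index computed below.
 */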
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->dir == DMA_MEM_TO_MEM)
		return 0;

	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;

	uc->rchan->fd_ring = k3_nav_ringacc_request_ring(
			ud->ringacc, fd_ring_id,
			RINGACC_RING_USE_PROXY);
	if (!uc->rchan->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	uc->rchan->r_ring = k3_nav_ringacc_request_ring(
			ud->ringacc, -1, RINGACC_RING_USE_PROXY);
	if (!uc->rchan->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->rchan->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->rchan->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->rchan->r_ring);
	uc->rchan->r_ring = NULL;
err_rxc_ring:
	k3_nav_ringacc_ring_free(uc->rchan->fd_ring);
	uc->rchan->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

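/*
 * Channel configuration goes through System Firmware (TISCI) rather than
 * direct MMIO: the request selects packet vs. third-party block-copy mode,
 * the descriptor fetch size and the completion queue for the channel.
 */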
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);

	return ret;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rchan->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rchan->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
							  uc->psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (uc->rflow->id != uc->rchan->id && uc->dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |=
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret)
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);

	return ret;
}

static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->dir));

	switch (uc->dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = ud->psil_base + uc->tchan->id;
		uc->dst_thread = uc->slave_thread_id;
		if (!(uc->dst_thread & 0x8000))
			uc->dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->src_thread = uc->slave_thread_id;
		uc->dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->src_thread, uc->dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->slave_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Some configuration to UDMA-P channel: disable, reset, whatever */

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->src_thread, uc->dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->slave_thread_id = -1;
	uc->dir = DMA_MEM_TO_MEM;
}

static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

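/*
 * The channel and flow counts are read from the UDMAP capability registers
 * (offsets 0x28 and 0x2c of the gcfg region, printed as CAP2/CAP3 in the
 * probe debug output), so the driver adapts to what the silicon implements.
 */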
static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int ch_count, i;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev,
		 "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
		 ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 ud->rflow_cnt);

	return ch_count;
}

static int udma_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
	struct udma_dev *ud = dev_get_priv(dev);
	int i, ret;
	struct udevice *tmp;
	struct udevice *tisci_dev = NULL;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

	ret = udma_get_mmrs(dev);
	if (ret)
		return ret;

	ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
					   "ti,ringacc", &tmp);
	if (ret)
		return ret;
	ud->ringacc = dev_get_priv(tmp);
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	ud->psil_base = dev_read_u32_default(dev, "ti,psil-base", 0);
	if (!ud->psil_base) {
		dev_err(dev, "Missing ti,psil-base property\n");
		return -EINVAL;
	}

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		debug("Failed to get TISCI phandle (%d)\n", ret);
		tisci_rm->tisci = NULL;
		return -EINVAL;
	}
	tisci_rm->tisci = (struct ti_sci_handle *)
			  (ti_sci_get_handle_from_sysfw(tisci_dev));

	tisci_rm->tisci_dev_id = -1;
	ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_navss_dev_id = -1;
	ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
			      &tisci_rm->tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
		return ret;
	}

	tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
	tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

	ud->dev = dev;
	ud->ch_count = udma_setup_resources(ud);
	if (ud->ch_count <= 0)
		return ud->ch_count;

	dev_info(dev,
		 "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
		 ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
		 tisci_rm->tisci_dev_id);
	dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ud->ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->id = i;
		uc->slave_thread_id = -1;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->dir = DMA_MEM_TO_MEM;
		sprintf(uc->name, "UDMA chan%d", i);
		if (!i)
			uc->in_use = true;
	}

	pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
		 udma_read(ud->mmrs[MMR_GCFG], 0),
		 udma_read(ud->mmrs[MMR_GCFG], 0x20),
		 udma_read(ud->mmrs[MMR_GCFG], 0x24),
		 udma_read(ud->mmrs[MMR_GCFG], 0x28),
		 udma_read(ud->mmrs[MMR_GCFG], 0x2c));

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;

	return ret;
}

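/*
 * A ring element is the 64-bit DMA address of a descriptor; go through a u64
 * copy so the push works the same on 32-bit and 64-bit hosts.
 */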
static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
{
	u64 addr = 0;

	memcpy(&addr, &elem, sizeof(elem));
	return k3_nav_ringacc_ring_push(ring, &addr);
}

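/*
 * A type 15 TR carries a 16-bit icnt0, so a copy of 64 KiB or more is split
 * in two: tr0 moves tr0_cnt1 blocks of tr0_cnt0 bytes (tr0_cnt0 chosen just
 * under 64 KiB while preserving the source/destination alignment) and tr1
 * moves the remaining tr1_cnt0 bytes.
 */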
static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
				 dma_addr_t src, size_t len)
{
	u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned long dummy;
	void *tr_desc;
	size_t desc_size;

	if (len < SZ_64K) {
		num_tr = 1;
		tr0_cnt0 = len;
		tr0_cnt1 = 1;
	} else {
		unsigned long align_to = __ffs(src | dest);

		if (align_to > 3)
			align_to = 3;
		/*
		 * Keep simple: tr0: SZ_64K-alignment blocks,
		 * tr1: the remaining
		 */
		num_tr = 2;
		tr0_cnt0 = (SZ_64K - BIT(align_to));
		if (len / tr0_cnt0 >= SZ_64K) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				len);
			return NULL;
		}

		tr0_cnt1 = len / tr0_cnt0;
		tr1_cnt0 = len % tr0_cnt0;
	}

	desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
	tr_desc = dma_alloc_coherent(desc_size, &dummy);
	if (!tr_desc)
		return NULL;
	memset(tr_desc, 0, desc_size);

	cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);

	tr_req = tr_desc + tr_size;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	flush_dcache_range((unsigned long)tr_desc,
			   ALIGN((unsigned long)tr_desc + desc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->tchan->t_ring, tr_desc);

	return 0;
}

static int udma_transfer(struct udevice *dev, int direction,
			 void *dst, void *src, size_t len)
{
	struct udma_dev *ud = dev_get_priv(dev);
	/* Channel0 is reserved for memcpy */
	struct udma_chan *uc = &ud->channels[0];
	dma_addr_t paddr = 0;
	int ret;

	ret = udma_alloc_chan_resources(uc);
	if (ret)
		return ret;

	udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
	udma_start(uc);
	udma_poll_completion(uc, &paddr);
	udma_stop(uc);

	udma_free_chan_resources(uc);
	return 0;
}

static int udma_request(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	unsigned long dummy;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	uc = &ud->channels[dma->id];
	ret = udma_alloc_chan_resources(uc);
	if (ret) {
		dev_err(dma->dev, "alloc dma res failed %d\n", ret);
		return -EINVAL;
	}

	uc->hdesc_size = cppi5_hdesc_calc_size(uc->needs_epib,
					       uc->psd_size, 0);
	uc->hdesc_size = ALIGN(uc->hdesc_size, ARCH_DMA_MINALIGN);

	if (uc->dir == DMA_MEM_TO_DEV) {
		uc->desc_tx = dma_alloc_coherent(uc->hdesc_size, &dummy);
		memset(uc->desc_tx, 0, uc->hdesc_size);
	} else {
		uc->desc_rx = dma_alloc_coherent(
				uc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
		memset(uc->desc_rx, 0, uc->hdesc_size * UDMA_RX_DESC_NUM);
	}

	uc->in_use = true;
	uc->desc_rx_cur = 0;
	uc->num_rx_bufs = 0;

	if (uc->dir == DMA_DEV_TO_MEM) {
		uc->cfg_data.flow_id_base = uc->rflow->id;
		uc->cfg_data.flow_id_cnt = 1;
	}

	return 0;
}

static int udma_rfree(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		udma_stop(uc);
	udma_free_chan_resources(uc);

	uc->in_use = false;

	return 0;
}

static int udma_enable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	ret = udma_start(uc);

	return ret;
}

static int udma_disable(struct dma *dma)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;
	int ret = 0;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (udma_is_chan_running(uc))
		ret = udma_stop(uc);
	else
		dev_err(dma->dev, "%s not running\n", __func__);

	return ret;
}

static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t dma_src = (dma_addr_t)src;
	struct ti_udma_drv_packet_data packet_data = { 0 };
	dma_addr_t paddr;
	struct udma_chan *uc;
	u32 tc_ring_id;
	int ret;

	if (metadata)
		packet_data = *((struct ti_udma_drv_packet_data *)metadata);

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_MEM_TO_DEV)
		return -EINVAL;

	tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);

	desc_tx = uc->desc_tx;

	cppi5_hdesc_reset_hbdesc(desc_tx);

	cppi5_hdesc_init(desc_tx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
	cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
	/* pass below information from caller */
	cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
	cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);

	flush_dcache_range((unsigned long)dma_src,
			   ALIGN((unsigned long)dma_src + len,
				 ARCH_DMA_MINALIGN));
	flush_dcache_range((unsigned long)desc_tx,
			   ALIGN((unsigned long)desc_tx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
	if (ret) {
		dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
			dma->id, ret);
		return ret;
	}

	udma_poll_completion(uc, &paddr);

	return 0;
}

static int udma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t buf_dma;
	struct udma_chan *uc;
	u32 buf_dma_len, pkt_len;
	u32 port_id = 0;
	int ret;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;
	if (!uc->num_rx_bufs)
		return -EINVAL;

	ret = k3_nav_ringacc_ring_pop(uc->rchan->r_ring, &desc_rx);
	if (ret && ret != -ENODATA) {
		dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
		return ret;
	} else if (ret == -ENODATA) {
		return 0;
	}

	/* invalidate cache data (hdesc_size is in bytes, so cast first) */
	invalidate_dcache_range((ulong)desc_rx,
				(ulong)desc_rx + uc->hdesc_size);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);

	/* invalidate cache data */
	invalidate_dcache_range((ulong)buf_dma,
				(ulong)(buf_dma + buf_dma_len));

	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	*dst = (void *)buf_dma;
	uc->num_rx_bufs--;

	return pkt_len;
}

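/*
 * The DT cells consumed here are: args[0] the phandle of the PSI-L slave
 * node, args[1] the thread offset within the slave (also selecting the
 * ti,psil-configN subnode), args[2] the direction (UDMA_DIR_TX or RX).
 */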
static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc = &ud->channels[0];
	ofnode chconf_node, slave_node;
	char prop[50];
	u32 val;

	for (val = 0; val < ud->ch_count; val++) {
		uc = &ud->channels[val];
		if (!uc->in_use)
			break;
	}

	if (val == ud->ch_count)
		return -EBUSY;

	uc->dir = DMA_DEV_TO_MEM;
	if (args->args[2] == UDMA_DIR_TX)
		uc->dir = DMA_MEM_TO_DEV;

	slave_node = ofnode_get_by_phandle(args->args[0]);
	if (!ofnode_valid(slave_node)) {
		dev_err(ud->dev, "slave node is missing\n");
		return -EINVAL;
	}

	snprintf(prop, sizeof(prop), "ti,psil-config%u", args->args[1]);
	chconf_node = ofnode_find_subnode(slave_node, prop);
	if (!ofnode_valid(chconf_node)) {
		dev_err(ud->dev, "Channel configuration node is missing\n");
		return -EINVAL;
	}

	if (!ofnode_read_u32(chconf_node, "linux,udma-mode", &val)) {
		if (val == UDMA_PKT_MODE)
			uc->pkt_mode = true;
	}

	if (!ofnode_read_u32(chconf_node, "statictr-type", &val))
		uc->static_tr_type = val;

	uc->needs_epib = ofnode_read_bool(chconf_node, "ti,needs-epib");
	if (!ofnode_read_u32(chconf_node, "ti,psd-size", &val))
		uc->psd_size = val;
	uc->metadata_size = (uc->needs_epib ? 16 : 0) + uc->psd_size;

	if (ofnode_read_u32(slave_node, "ti,psil-base", &val)) {
		dev_err(ud->dev, "ti,psil-base is missing\n");
		return -EINVAL;
	}

	uc->slave_thread_id = val + args->args[1];

	dma->id = uc->id;
	pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
		 dma->id, uc->needs_epib,
		 uc->psd_size, uc->metadata_size,
		 uc->slave_thread_id);

	return 0;
}

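/*
 * RX descriptors come from a fixed pool of UDMA_RX_DESC_NUM host descriptors
 * cycled through with desc_rx_cur; each call arms one descriptor with the
 * caller's buffer and pushes it to the free-descriptor ring.
 */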
int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t dma_dst;
	struct udma_chan *uc;
	u32 desc_num;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}
	uc = &ud->channels[dma->id];

	if (uc->dir != DMA_DEV_TO_MEM)
		return -EINVAL;

	if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
		return -EINVAL;

	desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
	desc_rx = uc->desc_rx + (desc_num * uc->hdesc_size);
	dma_dst = (dma_addr_t)dst;

	cppi5_hdesc_reset_hbdesc(desc_rx);

	cppi5_hdesc_init(desc_rx,
			 uc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
			 uc->psd_size);
	cppi5_hdesc_set_pktlen(desc_rx, size);
	cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);

	flush_dcache_range((unsigned long)desc_rx,
			   ALIGN((unsigned long)desc_rx + uc->hdesc_size,
				 ARCH_DMA_MINALIGN));

	udma_push_to_ring(uc->rchan->fd_ring, desc_rx);

	uc->num_rx_bufs++;
	uc->desc_rx_cur++;

	return 0;
}

static int udma_get_cfg(struct dma *dma, u32 id, void **data)
{
	struct udma_dev *ud = dev_get_priv(dma->dev);
	struct udma_chan *uc;

	if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
		dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
		return -EINVAL;
	}

	switch (id) {
	case TI_UDMA_CHAN_PRIV_INFO:
		uc = &ud->channels[dma->id];
		*data = &uc->cfg_data;
		return 0;
	}

	return -EINVAL;
}

static const struct dma_ops udma_ops = {
	.transfer = udma_transfer,
	.of_xlate = udma_of_xlate,
	.request = udma_request,
	.rfree = udma_rfree,
	.enable = udma_enable,
	.disable = udma_disable,
	.send = udma_send,
	.receive = udma_receive,
	.prepare_rcv_buf = udma_prepare_rcv_buf,
	.get_cfg = udma_get_cfg,
};

static const struct udevice_id udma_ids[] = {
	{ .compatible = "ti,k3-navss-udmap" },
	{ .compatible = "ti,j721e-navss-mcu-udmap" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti-udma",
	.id	= UCLASS_DMA,
	.of_match = udma_ids,
	.ops	= &udma_ops,
	.probe	= udma_probe,
	.priv_auto_alloc_size = sizeof(struct udma_dev),
};