// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
20 #include <linux/of_dma.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/workqueue.h>
24 #include <linux/completion.h>
25 #include <linux/soc/ti/k3-ringacc.h>
26 #include <linux/soc/ti/ti_sci_protocol.h>
27 #include <linux/soc/ti/ti_sci_inta_msi.h>
28 #include <linux/dma/ti-cppi5.h>
30 #include "../virt-dma.h"
32 #include "k3-psil-priv.h"
34 struct udma_static_tr
{
35 u8 elsize
; /* RPSTR0 */
36 u16 elcnt
; /* RPSTR0 */
37 u16 bstcnt
; /* RPSTR1 */
40 #define K3_UDMA_MAX_RFLOWS 1024
41 #define K3_UDMA_DEFAULT_RING_SIZE 16
43 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44 #define UDMA_RFLOW_SRCTAG_NONE 0
45 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
49 #define UDMA_RFLOW_DSTTAG_NONE 0
50 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
64 static const char * const mmr_names
[] = { "gcfg", "rchanrt", "tchanrt" };
70 struct k3_ring
*t_ring
; /* Transmit ring */
71 struct k3_ring
*tc_ring
; /* Transmit Completion ring */
76 struct k3_ring
*fd_ring
; /* Free Descriptor ring */
77 struct k3_ring
*r_ring
; /* Receive ring */
86 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
87 #define UDMA_FLAG_PDMA_BURST BIT(1)
89 struct udma_match_data
{
91 bool enable_memcpy_support
;
97 u32 level_start_idx
[];
101 size_t cppi5_desc_size
;
102 void *cppi5_desc_vaddr
;
103 dma_addr_t cppi5_desc_paddr
;
105 /* TR descriptor internal pointers */
107 struct cppi5_tr_resp_t
*tr_resp_base
;
110 struct udma_rx_flush
{
111 struct udma_hwdesc hwdescs
[2];
115 dma_addr_t buffer_paddr
;
119 struct dma_device ddev
;
121 void __iomem
*mmrs
[MMR_LAST
];
122 const struct udma_match_data
*match_data
;
124 size_t desc_align
; /* alignment to use for descriptors */
126 struct udma_tisci_rm tisci_rm
;
128 struct k3_ringacc
*ringacc
;
130 struct work_struct purge_work
;
131 struct list_head desc_to_purge
;
134 struct udma_rx_flush rx_flush
;
140 unsigned long *tchan_map
;
141 unsigned long *rchan_map
;
142 unsigned long *rflow_gp_map
;
143 unsigned long *rflow_gp_map_allocated
;
144 unsigned long *rflow_in_use
;
146 struct udma_tchan
*tchans
;
147 struct udma_rchan
*rchans
;
148 struct udma_rflow
*rflows
;
150 struct udma_chan
*channels
;
156 struct virt_dma_desc vd
;
160 enum dma_transfer_direction dir
;
162 struct udma_static_tr static_tr
;
166 unsigned int desc_idx
; /* Only used for cyclic in packet mode */
170 void *metadata
; /* pointer to provided metadata buffer (EPIP, PSdata) */
172 unsigned int hwdesc_count
;
173 struct udma_hwdesc hwdesc
[0];
176 enum udma_chan_state
{
177 UDMA_CHAN_IS_IDLE
= 0, /* not active, no teardown is in progress */
178 UDMA_CHAN_IS_ACTIVE
, /* Normal operation */
179 UDMA_CHAN_IS_TERMINATING
, /* channel is being terminated */
182 struct udma_tx_drain
{
183 struct delayed_work work
;
188 struct udma_chan_config
{
189 bool pkt_mode
; /* TR or packet */
190 bool needs_epib
; /* EPIB is needed for the communication or not */
191 u32 psd_size
; /* size of Protocol Specific Data */
192 u32 metadata_size
; /* (needs_epib ? 16:0) + psd_size */
193 u32 hdesc_size
; /* Size of a packet descriptor in packet mode */
194 bool notdpkt
; /* Suppress sending TDC packet */
195 int remote_thread_id
;
199 enum psil_endpoint_type ep_type
;
202 enum udma_tp_level channel_tpl
; /* Channel Throughput Level */
204 enum dma_transfer_direction dir
;
208 struct virt_dma_chan vc
;
209 struct dma_slave_config cfg
;
211 struct udma_desc
*desc
;
212 struct udma_desc
*terminated_desc
;
213 struct udma_static_tr static_tr
;
216 struct udma_tchan
*tchan
;
217 struct udma_rchan
*rchan
;
218 struct udma_rflow
*rflow
;
228 enum udma_chan_state state
;
229 struct completion teardown_completed
;
231 struct udma_tx_drain tx_drain
;
233 u32 bcnt
; /* number of bytes completed since the start of the channel */
234 u32 in_ring_cnt
; /* number of descriptors in flight */
236 /* Channel configuration parameters */
237 struct udma_chan_config config
;
239 /* dmapool for packet mode descriptors */
241 struct dma_pool
*hdesc_pool
;
246 static inline struct udma_dev
*to_udma_dev(struct dma_device
*d
)
248 return container_of(d
, struct udma_dev
, ddev
);
251 static inline struct udma_chan
*to_udma_chan(struct dma_chan
*c
)
253 return container_of(c
, struct udma_chan
, vc
.chan
);
256 static inline struct udma_desc
*to_udma_desc(struct dma_async_tx_descriptor
*t
)
258 return container_of(t
, struct udma_desc
, vd
.tx
);
261 /* Generic register access functions */
262 static inline u32
udma_read(void __iomem
*base
, int reg
)
264 return readl(base
+ reg
);
267 static inline void udma_write(void __iomem
*base
, int reg
, u32 val
)
269 writel(val
, base
+ reg
);
272 static inline void udma_update_bits(void __iomem
*base
, int reg
,
277 orig
= readl(base
+ reg
);
282 writel(tmp
, base
+ reg
);
286 static inline u32
udma_tchanrt_read(struct udma_tchan
*tchan
, int reg
)
290 return udma_read(tchan
->reg_rt
, reg
);
293 static inline void udma_tchanrt_write(struct udma_tchan
*tchan
, int reg
,
298 udma_write(tchan
->reg_rt
, reg
, val
);
301 static inline void udma_tchanrt_update_bits(struct udma_tchan
*tchan
, int reg
,
306 udma_update_bits(tchan
->reg_rt
, reg
, mask
, val
);
310 static inline u32
udma_rchanrt_read(struct udma_rchan
*rchan
, int reg
)
314 return udma_read(rchan
->reg_rt
, reg
);
317 static inline void udma_rchanrt_write(struct udma_rchan
*rchan
, int reg
,
322 udma_write(rchan
->reg_rt
, reg
, val
);
325 static inline void udma_rchanrt_update_bits(struct udma_rchan
*rchan
, int reg
,
330 udma_update_bits(rchan
->reg_rt
, reg
, mask
, val
);
333 static int navss_psil_pair(struct udma_dev
*ud
, u32 src_thread
, u32 dst_thread
)
335 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
337 dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
338 return tisci_rm
->tisci_psil_ops
->pair(tisci_rm
->tisci
,
339 tisci_rm
->tisci_navss_dev_id
,
340 src_thread
, dst_thread
);
343 static int navss_psil_unpair(struct udma_dev
*ud
, u32 src_thread
,
346 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
348 dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
349 return tisci_rm
->tisci_psil_ops
->unpair(tisci_rm
->tisci
,
350 tisci_rm
->tisci_navss_dev_id
,
351 src_thread
, dst_thread
);
354 static void udma_reset_uchan(struct udma_chan
*uc
)
356 memset(&uc
->config
, 0, sizeof(uc
->config
));
357 uc
->config
.remote_thread_id
= -1;
358 uc
->state
= UDMA_CHAN_IS_IDLE
;
361 static void udma_dump_chan_stdata(struct udma_chan
*uc
)
363 struct device
*dev
= uc
->ud
->dev
;
367 if (uc
->config
.dir
== DMA_MEM_TO_DEV
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
368 dev_dbg(dev
, "TCHAN State data:\n");
369 for (i
= 0; i
< 32; i
++) {
370 offset
= UDMA_TCHAN_RT_STDATA_REG
+ i
* 4;
371 dev_dbg(dev
, "TRT_STDATA[%02d]: 0x%08x\n", i
,
372 udma_tchanrt_read(uc
->tchan
, offset
));
376 if (uc
->config
.dir
== DMA_DEV_TO_MEM
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
377 dev_dbg(dev
, "RCHAN State data:\n");
378 for (i
= 0; i
< 32; i
++) {
379 offset
= UDMA_RCHAN_RT_STDATA_REG
+ i
* 4;
380 dev_dbg(dev
, "RRT_STDATA[%02d]: 0x%08x\n", i
,
381 udma_rchanrt_read(uc
->rchan
, offset
));
386 static inline dma_addr_t
udma_curr_cppi5_desc_paddr(struct udma_desc
*d
,
389 return d
->hwdesc
[idx
].cppi5_desc_paddr
;
392 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc
*d
, int idx
)
394 return d
->hwdesc
[idx
].cppi5_desc_vaddr
;
397 static struct udma_desc
*udma_udma_desc_from_paddr(struct udma_chan
*uc
,
400 struct udma_desc
*d
= uc
->terminated_desc
;
403 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
406 if (desc_paddr
!= paddr
)
413 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
416 if (desc_paddr
!= paddr
)
424 static void udma_free_hwdesc(struct udma_chan
*uc
, struct udma_desc
*d
)
426 if (uc
->use_dma_pool
) {
429 for (i
= 0; i
< d
->hwdesc_count
; i
++) {
430 if (!d
->hwdesc
[i
].cppi5_desc_vaddr
)
433 dma_pool_free(uc
->hdesc_pool
,
434 d
->hwdesc
[i
].cppi5_desc_vaddr
,
435 d
->hwdesc
[i
].cppi5_desc_paddr
);
437 d
->hwdesc
[i
].cppi5_desc_vaddr
= NULL
;
439 } else if (d
->hwdesc
[0].cppi5_desc_vaddr
) {
440 struct udma_dev
*ud
= uc
->ud
;
442 dma_free_coherent(ud
->dev
, d
->hwdesc
[0].cppi5_desc_size
,
443 d
->hwdesc
[0].cppi5_desc_vaddr
,
444 d
->hwdesc
[0].cppi5_desc_paddr
);
446 d
->hwdesc
[0].cppi5_desc_vaddr
= NULL
;
450 static void udma_purge_desc_work(struct work_struct
*work
)
452 struct udma_dev
*ud
= container_of(work
, typeof(*ud
), purge_work
);
453 struct virt_dma_desc
*vd
, *_vd
;
457 spin_lock_irqsave(&ud
->lock
, flags
);
458 list_splice_tail_init(&ud
->desc_to_purge
, &head
);
459 spin_unlock_irqrestore(&ud
->lock
, flags
);
461 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
462 struct udma_chan
*uc
= to_udma_chan(vd
->tx
.chan
);
463 struct udma_desc
*d
= to_udma_desc(&vd
->tx
);
465 udma_free_hwdesc(uc
, d
);
470 /* If more to purge, schedule the work again */
471 if (!list_empty(&ud
->desc_to_purge
))
472 schedule_work(&ud
->purge_work
);
475 static void udma_desc_free(struct virt_dma_desc
*vd
)
477 struct udma_dev
*ud
= to_udma_dev(vd
->tx
.chan
->device
);
478 struct udma_chan
*uc
= to_udma_chan(vd
->tx
.chan
);
479 struct udma_desc
*d
= to_udma_desc(&vd
->tx
);
482 if (uc
->terminated_desc
== d
)
483 uc
->terminated_desc
= NULL
;
485 if (uc
->use_dma_pool
) {
486 udma_free_hwdesc(uc
, d
);
491 spin_lock_irqsave(&ud
->lock
, flags
);
492 list_add_tail(&vd
->node
, &ud
->desc_to_purge
);
493 spin_unlock_irqrestore(&ud
->lock
, flags
);
495 schedule_work(&ud
->purge_work
);
498 static bool udma_is_chan_running(struct udma_chan
*uc
)
504 trt_ctl
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
);
506 rrt_ctl
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
);
508 if (trt_ctl
& UDMA_CHAN_RT_CTL_EN
|| rrt_ctl
& UDMA_CHAN_RT_CTL_EN
)
514 static bool udma_is_chan_paused(struct udma_chan
*uc
)
518 switch (uc
->config
.dir
) {
520 val
= udma_rchanrt_read(uc
->rchan
,
521 UDMA_RCHAN_RT_PEER_RT_EN_REG
);
522 pause_mask
= UDMA_PEER_RT_EN_PAUSE
;
525 val
= udma_tchanrt_read(uc
->tchan
,
526 UDMA_TCHAN_RT_PEER_RT_EN_REG
);
527 pause_mask
= UDMA_PEER_RT_EN_PAUSE
;
530 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
);
531 pause_mask
= UDMA_CHAN_RT_CTL_PAUSE
;
537 if (val
& pause_mask
)
543 static void udma_sync_for_device(struct udma_chan
*uc
, int idx
)
545 struct udma_desc
*d
= uc
->desc
;
547 if (uc
->cyclic
&& uc
->config
.pkt_mode
) {
548 dma_sync_single_for_device(uc
->ud
->dev
,
549 d
->hwdesc
[idx
].cppi5_desc_paddr
,
550 d
->hwdesc
[idx
].cppi5_desc_size
,
555 for (i
= 0; i
< d
->hwdesc_count
; i
++) {
556 if (!d
->hwdesc
[i
].cppi5_desc_vaddr
)
559 dma_sync_single_for_device(uc
->ud
->dev
,
560 d
->hwdesc
[i
].cppi5_desc_paddr
,
561 d
->hwdesc
[i
].cppi5_desc_size
,
567 static inline dma_addr_t
udma_get_rx_flush_hwdesc_paddr(struct udma_chan
*uc
)
569 return uc
->ud
->rx_flush
.hwdescs
[uc
->config
.pkt_mode
].cppi5_desc_paddr
;
572 static int udma_push_to_ring(struct udma_chan
*uc
, int idx
)
574 struct udma_desc
*d
= uc
->desc
;
575 struct k3_ring
*ring
= NULL
;
579 switch (uc
->config
.dir
) {
581 ring
= uc
->rflow
->fd_ring
;
585 ring
= uc
->tchan
->t_ring
;
591 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
593 paddr
= udma_get_rx_flush_hwdesc_paddr(uc
);
595 paddr
= udma_curr_cppi5_desc_paddr(d
, idx
);
597 wmb(); /* Ensure that writes are not moved over this point */
598 udma_sync_for_device(uc
, idx
);
601 ret
= k3_ringacc_ring_push(ring
, &paddr
);
608 static bool udma_desc_is_rx_flush(struct udma_chan
*uc
, dma_addr_t addr
)
610 if (uc
->config
.dir
!= DMA_DEV_TO_MEM
)
613 if (addr
== udma_get_rx_flush_hwdesc_paddr(uc
))
619 static int udma_pop_from_ring(struct udma_chan
*uc
, dma_addr_t
*addr
)
621 struct k3_ring
*ring
= NULL
;
624 switch (uc
->config
.dir
) {
626 ring
= uc
->rflow
->r_ring
;
630 ring
= uc
->tchan
->tc_ring
;
636 if (ring
&& k3_ringacc_ring_get_occ(ring
)) {
637 struct udma_desc
*d
= NULL
;
639 ret
= k3_ringacc_ring_pop(ring
, addr
);
643 /* Teardown completion */
644 if (cppi5_desc_is_tdcm(*addr
))
647 /* Check for flush descriptor */
648 if (udma_desc_is_rx_flush(uc
, *addr
))
651 d
= udma_udma_desc_from_paddr(uc
, *addr
);
654 dma_sync_single_for_cpu(uc
->ud
->dev
, *addr
,
655 d
->hwdesc
[0].cppi5_desc_size
,
657 rmb(); /* Ensure that reads are not moved before this point */
666 static void udma_reset_rings(struct udma_chan
*uc
)
668 struct k3_ring
*ring1
= NULL
;
669 struct k3_ring
*ring2
= NULL
;
671 switch (uc
->config
.dir
) {
674 ring1
= uc
->rflow
->fd_ring
;
675 ring2
= uc
->rflow
->r_ring
;
681 ring1
= uc
->tchan
->t_ring
;
682 ring2
= uc
->tchan
->tc_ring
;
690 k3_ringacc_ring_reset_dma(ring1
,
691 k3_ringacc_ring_get_occ(ring1
));
693 k3_ringacc_ring_reset(ring2
);
695 /* make sure we are not leaking memory by stalled descriptor */
696 if (uc
->terminated_desc
) {
697 udma_desc_free(&uc
->terminated_desc
->vd
);
698 uc
->terminated_desc
= NULL
;
704 static void udma_reset_counters(struct udma_chan
*uc
)
709 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_BCNT_REG
);
710 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_BCNT_REG
, val
);
712 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_SBCNT_REG
);
713 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_SBCNT_REG
, val
);
715 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_PCNT_REG
);
716 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PCNT_REG
, val
);
718 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_PEER_BCNT_REG
);
719 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_BCNT_REG
, val
);
723 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_BCNT_REG
);
724 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_BCNT_REG
, val
);
726 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_SBCNT_REG
);
727 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_SBCNT_REG
, val
);
729 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_PCNT_REG
);
730 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PCNT_REG
, val
);
732 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_PEER_BCNT_REG
);
733 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_BCNT_REG
, val
);
739 static int udma_reset_chan(struct udma_chan
*uc
, bool hard
)
741 switch (uc
->config
.dir
) {
743 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_RT_EN_REG
, 0);
744 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
, 0);
747 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
, 0);
748 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_RT_EN_REG
, 0);
751 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
, 0);
752 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
, 0);
758 /* Reset all counters */
759 udma_reset_counters(uc
);
761 /* Hard reset: re-initialize the channel to reset */
763 struct udma_chan_config ucc_backup
;
766 memcpy(&ucc_backup
, &uc
->config
, sizeof(uc
->config
));
767 uc
->ud
->ddev
.device_free_chan_resources(&uc
->vc
.chan
);
769 /* restore the channel configuration */
770 memcpy(&uc
->config
, &ucc_backup
, sizeof(uc
->config
));
771 ret
= uc
->ud
->ddev
.device_alloc_chan_resources(&uc
->vc
.chan
);
776 * Setting forced teardown after forced reset helps recovering
779 if (uc
->config
.dir
== DMA_DEV_TO_MEM
)
780 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
,
781 UDMA_CHAN_RT_CTL_EN
|
782 UDMA_CHAN_RT_CTL_TDOWN
|
783 UDMA_CHAN_RT_CTL_FTDOWN
);
785 uc
->state
= UDMA_CHAN_IS_IDLE
;
790 static void udma_start_desc(struct udma_chan
*uc
)
792 struct udma_chan_config
*ucc
= &uc
->config
;
794 if (ucc
->pkt_mode
&& (uc
->cyclic
|| ucc
->dir
== DMA_DEV_TO_MEM
)) {
797 /* Push all descriptors to ring for packet mode cyclic or RX */
798 for (i
= 0; i
< uc
->desc
->sglen
; i
++)
799 udma_push_to_ring(uc
, i
);
801 udma_push_to_ring(uc
, 0);
805 static bool udma_chan_needs_reconfiguration(struct udma_chan
*uc
)
807 /* Only PDMAs have staticTR */
808 if (uc
->config
.ep_type
== PSIL_EP_NATIVE
)
811 /* Check if the staticTR configuration has changed for TX */
812 if (memcmp(&uc
->static_tr
, &uc
->desc
->static_tr
, sizeof(uc
->static_tr
)))
818 static int udma_start(struct udma_chan
*uc
)
820 struct virt_dma_desc
*vd
= vchan_next_desc(&uc
->vc
);
829 uc
->desc
= to_udma_desc(&vd
->tx
);
831 /* Channel is already running and does not need reconfiguration */
832 if (udma_is_chan_running(uc
) && !udma_chan_needs_reconfiguration(uc
)) {
837 /* Make sure that we clear the teardown bit, if it is set */
838 udma_reset_chan(uc
, false);
840 /* Push descriptors before we start the channel */
843 switch (uc
->desc
->dir
) {
845 /* Config remote TR */
846 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
) {
847 u32 val
= PDMA_STATIC_TR_Y(uc
->desc
->static_tr
.elcnt
) |
848 PDMA_STATIC_TR_X(uc
->desc
->static_tr
.elsize
);
849 const struct udma_match_data
*match_data
=
852 if (uc
->config
.enable_acc32
)
853 val
|= PDMA_STATIC_TR_XY_ACC32
;
854 if (uc
->config
.enable_burst
)
855 val
|= PDMA_STATIC_TR_XY_BURST
;
857 udma_rchanrt_write(uc
->rchan
,
858 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
, val
);
860 udma_rchanrt_write(uc
->rchan
,
861 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
,
862 PDMA_STATIC_TR_Z(uc
->desc
->static_tr
.bstcnt
,
863 match_data
->statictr_z_mask
));
865 /* save the current staticTR configuration */
866 memcpy(&uc
->static_tr
, &uc
->desc
->static_tr
,
867 sizeof(uc
->static_tr
));
870 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
,
871 UDMA_CHAN_RT_CTL_EN
);
874 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_RT_EN_REG
,
875 UDMA_PEER_RT_EN_ENABLE
);
879 /* Config remote TR */
880 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
) {
881 u32 val
= PDMA_STATIC_TR_Y(uc
->desc
->static_tr
.elcnt
) |
882 PDMA_STATIC_TR_X(uc
->desc
->static_tr
.elsize
);
884 if (uc
->config
.enable_acc32
)
885 val
|= PDMA_STATIC_TR_XY_ACC32
;
886 if (uc
->config
.enable_burst
)
887 val
|= PDMA_STATIC_TR_XY_BURST
;
889 udma_tchanrt_write(uc
->tchan
,
890 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG
, val
);
892 /* save the current staticTR configuration */
893 memcpy(&uc
->static_tr
, &uc
->desc
->static_tr
,
894 sizeof(uc
->static_tr
));
898 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_RT_EN_REG
,
899 UDMA_PEER_RT_EN_ENABLE
);
901 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
902 UDMA_CHAN_RT_CTL_EN
);
906 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
,
907 UDMA_CHAN_RT_CTL_EN
);
908 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
909 UDMA_CHAN_RT_CTL_EN
);
916 uc
->state
= UDMA_CHAN_IS_ACTIVE
;
922 static int udma_stop(struct udma_chan
*uc
)
924 enum udma_chan_state old_state
= uc
->state
;
926 uc
->state
= UDMA_CHAN_IS_TERMINATING
;
927 reinit_completion(&uc
->teardown_completed
);
929 switch (uc
->config
.dir
) {
931 if (!uc
->cyclic
&& !uc
->desc
)
932 udma_push_to_ring(uc
, -1);
934 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_RT_EN_REG
,
935 UDMA_PEER_RT_EN_ENABLE
|
936 UDMA_PEER_RT_EN_TEARDOWN
);
939 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_RT_EN_REG
,
940 UDMA_PEER_RT_EN_ENABLE
|
941 UDMA_PEER_RT_EN_FLUSH
);
942 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
943 UDMA_CHAN_RT_CTL_EN
|
944 UDMA_CHAN_RT_CTL_TDOWN
);
947 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
948 UDMA_CHAN_RT_CTL_EN
|
949 UDMA_CHAN_RT_CTL_TDOWN
);
952 uc
->state
= old_state
;
953 complete_all(&uc
->teardown_completed
);
960 static void udma_cyclic_packet_elapsed(struct udma_chan
*uc
)
962 struct udma_desc
*d
= uc
->desc
;
963 struct cppi5_host_desc_t
*h_desc
;
965 h_desc
= d
->hwdesc
[d
->desc_idx
].cppi5_desc_vaddr
;
966 cppi5_hdesc_reset_to_original(h_desc
);
967 udma_push_to_ring(uc
, d
->desc_idx
);
968 d
->desc_idx
= (d
->desc_idx
+ 1) % d
->sglen
;
971 static inline void udma_fetch_epib(struct udma_chan
*uc
, struct udma_desc
*d
)
973 struct cppi5_host_desc_t
*h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
975 memcpy(d
->metadata
, h_desc
->epib
, d
->metadata_size
);
978 static bool udma_is_desc_really_done(struct udma_chan
*uc
, struct udma_desc
*d
)
982 /* Only TX towards PDMA is affected */
983 if (uc
->config
.ep_type
== PSIL_EP_NATIVE
||
984 uc
->config
.dir
!= DMA_MEM_TO_DEV
)
987 peer_bcnt
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_PEER_BCNT_REG
);
988 bcnt
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_BCNT_REG
);
990 /* Transfer is incomplete, store current residue and time stamp */
991 if (peer_bcnt
< bcnt
) {
992 uc
->tx_drain
.residue
= bcnt
- peer_bcnt
;
993 uc
->tx_drain
.tstamp
= ktime_get();
1000 static void udma_check_tx_completion(struct work_struct
*work
)
1002 struct udma_chan
*uc
= container_of(work
, typeof(*uc
),
1003 tx_drain
.work
.work
);
1004 bool desc_done
= true;
1007 unsigned long delay
;
1011 /* Get previous residue and time stamp */
1012 residue_diff
= uc
->tx_drain
.residue
;
1013 time_diff
= uc
->tx_drain
.tstamp
;
1015 * Get current residue and time stamp or see if
1016 * transfer is complete
1018 desc_done
= udma_is_desc_really_done(uc
, uc
->desc
);
1023 * Find the time delta and residue delta w.r.t
1026 time_diff
= ktime_sub(uc
->tx_drain
.tstamp
,
1028 residue_diff
-= uc
->tx_drain
.residue
;
1031 * Try to guess when we should check
1032 * next time by calculating rate at
1033 * which data is being drained at the
1036 delay
= (time_diff
/ residue_diff
) *
1037 uc
->tx_drain
.residue
;
1039 /* No progress, check again in 1 second */
1040 schedule_delayed_work(&uc
->tx_drain
.work
, HZ
);
1044 usleep_range(ktime_to_us(delay
),
1045 ktime_to_us(delay
) + 10);
1050 struct udma_desc
*d
= uc
->desc
;
1052 uc
->bcnt
+= d
->residue
;
1054 vchan_cookie_complete(&d
->vd
);
1062 static irqreturn_t
udma_ring_irq_handler(int irq
, void *data
)
1064 struct udma_chan
*uc
= data
;
1065 struct udma_desc
*d
;
1066 unsigned long flags
;
1067 dma_addr_t paddr
= 0;
1069 if (udma_pop_from_ring(uc
, &paddr
) || !paddr
)
1072 spin_lock_irqsave(&uc
->vc
.lock
, flags
);
1074 /* Teardown completion message */
1075 if (cppi5_desc_is_tdcm(paddr
)) {
1076 /* Compensate our internal pop/push counter */
1079 complete_all(&uc
->teardown_completed
);
1081 if (uc
->terminated_desc
) {
1082 udma_desc_free(&uc
->terminated_desc
->vd
);
1083 uc
->terminated_desc
= NULL
;
1092 d
= udma_udma_desc_from_paddr(uc
, paddr
);
1095 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
1097 if (desc_paddr
!= paddr
) {
1098 dev_err(uc
->ud
->dev
, "not matching descriptors!\n");
1102 if (d
== uc
->desc
) {
1103 /* active descriptor */
1105 udma_cyclic_packet_elapsed(uc
);
1106 vchan_cyclic_callback(&d
->vd
);
1108 if (udma_is_desc_really_done(uc
, d
)) {
1109 uc
->bcnt
+= d
->residue
;
1111 vchan_cookie_complete(&d
->vd
);
1113 schedule_delayed_work(&uc
->tx_drain
.work
,
1119 * terminated descriptor, mark the descriptor as
1120 * completed to update the channel's cookie marker
1122 dma_cookie_complete(&d
->vd
.tx
);
1126 spin_unlock_irqrestore(&uc
->vc
.lock
, flags
);
1131 static irqreturn_t
udma_udma_irq_handler(int irq
, void *data
)
1133 struct udma_chan
*uc
= data
;
1134 struct udma_desc
*d
;
1135 unsigned long flags
;
1137 spin_lock_irqsave(&uc
->vc
.lock
, flags
);
1140 d
->tr_idx
= (d
->tr_idx
+ 1) % d
->sglen
;
1143 vchan_cyclic_callback(&d
->vd
);
1145 /* TODO: figure out the real amount of data */
1146 uc
->bcnt
+= d
->residue
;
1148 vchan_cookie_complete(&d
->vd
);
1152 spin_unlock_irqrestore(&uc
->vc
.lock
, flags
);
1158 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1160 * @from: Start the search from this flow id number
1161 * @cnt: Number of consecutive flow ids to allocate
1163 * Allocate range of RX flow ids for future use, those flows can be requested
1164 * only using explicit flow id number. if @from is set to -1 it will try to find
1165 * first free range. if @from is positive value it will force allocation only
1166 * of the specified range of flows.
1168 * Returns -ENOMEM if can't find free range.
1169 * -EEXIST if requested range is busy.
1170 * -EINVAL if wrong input values passed.
1171 * Returns flow id on success.
1173 static int __udma_alloc_gp_rflow_range(struct udma_dev
*ud
, int from
, int cnt
)
1175 int start
, tmp_from
;
1176 DECLARE_BITMAP(tmp
, K3_UDMA_MAX_RFLOWS
);
1180 tmp_from
= ud
->rchan_cnt
;
1181 /* default flows can't be allocated and accessible only by id */
1182 if (tmp_from
< ud
->rchan_cnt
)
1185 if (tmp_from
+ cnt
> ud
->rflow_cnt
)
1188 bitmap_or(tmp
, ud
->rflow_gp_map
, ud
->rflow_gp_map_allocated
,
1191 start
= bitmap_find_next_zero_area(tmp
,
1194 if (start
>= ud
->rflow_cnt
)
1197 if (from
>= 0 && start
!= from
)
1200 bitmap_set(ud
->rflow_gp_map_allocated
, start
, cnt
);
1204 static int __udma_free_gp_rflow_range(struct udma_dev
*ud
, int from
, int cnt
)
1206 if (from
< ud
->rchan_cnt
)
1208 if (from
+ cnt
> ud
->rflow_cnt
)
1211 bitmap_clear(ud
->rflow_gp_map_allocated
, from
, cnt
);
1215 static struct udma_rflow
*__udma_get_rflow(struct udma_dev
*ud
, int id
)
1218 * Attempt to request rflow by ID can be made for any rflow
1219 * if not in use with assumption that caller knows what's doing.
1220 * TI-SCI FW will perform additional permission check ant way, it's
1224 if (id
< 0 || id
>= ud
->rflow_cnt
)
1225 return ERR_PTR(-ENOENT
);
1227 if (test_bit(id
, ud
->rflow_in_use
))
1228 return ERR_PTR(-ENOENT
);
1230 /* GP rflow has to be allocated first */
1231 if (!test_bit(id
, ud
->rflow_gp_map
) &&
1232 !test_bit(id
, ud
->rflow_gp_map_allocated
))
1233 return ERR_PTR(-EINVAL
);
1235 dev_dbg(ud
->dev
, "get rflow%d\n", id
);
1236 set_bit(id
, ud
->rflow_in_use
);
1237 return &ud
->rflows
[id
];
1240 static void __udma_put_rflow(struct udma_dev
*ud
, struct udma_rflow
*rflow
)
1242 if (!test_bit(rflow
->id
, ud
->rflow_in_use
)) {
1243 dev_err(ud
->dev
, "attempt to put unused rflow%d\n", rflow
->id
);
1247 dev_dbg(ud
->dev
, "put rflow%d\n", rflow
->id
);
1248 clear_bit(rflow
->id
, ud
->rflow_in_use
);
1251 #define UDMA_RESERVE_RESOURCE(res) \
1252 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1253 enum udma_tp_level tpl, \
1257 if (test_bit(id, ud->res##_map)) { \
1258 dev_err(ud->dev, "res##%d is in use\n", id); \
1259 return ERR_PTR(-ENOENT); \
1264 if (tpl >= ud->match_data->tpl_levels) \
1265 tpl = ud->match_data->tpl_levels - 1; \
1267 start = ud->match_data->level_start_idx[tpl]; \
1269 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1271 if (id == ud->res##_cnt) { \
1272 return ERR_PTR(-ENOENT); \
1276 set_bit(id, ud->res##_map); \
1277 return &ud->res##s[id]; \
1280 UDMA_RESERVE_RESOURCE(tchan
);
1281 UDMA_RESERVE_RESOURCE(rchan
);
1283 static int udma_get_tchan(struct udma_chan
*uc
)
1285 struct udma_dev
*ud
= uc
->ud
;
1288 dev_dbg(ud
->dev
, "chan%d: already have tchan%d allocated\n",
1289 uc
->id
, uc
->tchan
->id
);
1293 uc
->tchan
= __udma_reserve_tchan(ud
, uc
->config
.channel_tpl
, -1);
1294 if (IS_ERR(uc
->tchan
))
1295 return PTR_ERR(uc
->tchan
);
1300 static int udma_get_rchan(struct udma_chan
*uc
)
1302 struct udma_dev
*ud
= uc
->ud
;
1305 dev_dbg(ud
->dev
, "chan%d: already have rchan%d allocated\n",
1306 uc
->id
, uc
->rchan
->id
);
1310 uc
->rchan
= __udma_reserve_rchan(ud
, uc
->config
.channel_tpl
, -1);
1311 if (IS_ERR(uc
->rchan
))
1312 return PTR_ERR(uc
->rchan
);
1317 static int udma_get_chan_pair(struct udma_chan
*uc
)
1319 struct udma_dev
*ud
= uc
->ud
;
1320 const struct udma_match_data
*match_data
= ud
->match_data
;
1323 if ((uc
->tchan
&& uc
->rchan
) && uc
->tchan
->id
== uc
->rchan
->id
) {
1324 dev_info(ud
->dev
, "chan%d: already have %d pair allocated\n",
1325 uc
->id
, uc
->tchan
->id
);
1330 dev_err(ud
->dev
, "chan%d: already have tchan%d allocated\n",
1331 uc
->id
, uc
->tchan
->id
);
1333 } else if (uc
->rchan
) {
1334 dev_err(ud
->dev
, "chan%d: already have rchan%d allocated\n",
1335 uc
->id
, uc
->rchan
->id
);
1339 /* Can be optimized, but let's have it like this for now */
1340 end
= min(ud
->tchan_cnt
, ud
->rchan_cnt
);
1341 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1342 chan_id
= match_data
->level_start_idx
[match_data
->tpl_levels
- 1];
1343 for (; chan_id
< end
; chan_id
++) {
1344 if (!test_bit(chan_id
, ud
->tchan_map
) &&
1345 !test_bit(chan_id
, ud
->rchan_map
))
1352 set_bit(chan_id
, ud
->tchan_map
);
1353 set_bit(chan_id
, ud
->rchan_map
);
1354 uc
->tchan
= &ud
->tchans
[chan_id
];
1355 uc
->rchan
= &ud
->rchans
[chan_id
];
1360 static int udma_get_rflow(struct udma_chan
*uc
, int flow_id
)
1362 struct udma_dev
*ud
= uc
->ud
;
1365 dev_err(ud
->dev
, "chan%d: does not have rchan??\n", uc
->id
);
1370 dev_dbg(ud
->dev
, "chan%d: already have rflow%d allocated\n",
1371 uc
->id
, uc
->rflow
->id
);
1375 uc
->rflow
= __udma_get_rflow(ud
, flow_id
);
1376 if (IS_ERR(uc
->rflow
))
1377 return PTR_ERR(uc
->rflow
);
1382 static void udma_put_rchan(struct udma_chan
*uc
)
1384 struct udma_dev
*ud
= uc
->ud
;
1387 dev_dbg(ud
->dev
, "chan%d: put rchan%d\n", uc
->id
,
1389 clear_bit(uc
->rchan
->id
, ud
->rchan_map
);
1394 static void udma_put_tchan(struct udma_chan
*uc
)
1396 struct udma_dev
*ud
= uc
->ud
;
1399 dev_dbg(ud
->dev
, "chan%d: put tchan%d\n", uc
->id
,
1401 clear_bit(uc
->tchan
->id
, ud
->tchan_map
);
1406 static void udma_put_rflow(struct udma_chan
*uc
)
1408 struct udma_dev
*ud
= uc
->ud
;
1411 dev_dbg(ud
->dev
, "chan%d: put rflow%d\n", uc
->id
,
1413 __udma_put_rflow(ud
, uc
->rflow
);
1418 static void udma_free_tx_resources(struct udma_chan
*uc
)
1423 k3_ringacc_ring_free(uc
->tchan
->t_ring
);
1424 k3_ringacc_ring_free(uc
->tchan
->tc_ring
);
1425 uc
->tchan
->t_ring
= NULL
;
1426 uc
->tchan
->tc_ring
= NULL
;
1431 static int udma_alloc_tx_resources(struct udma_chan
*uc
)
1433 struct k3_ring_cfg ring_cfg
;
1434 struct udma_dev
*ud
= uc
->ud
;
1437 ret
= udma_get_tchan(uc
);
1441 uc
->tchan
->t_ring
= k3_ringacc_request_ring(ud
->ringacc
,
1443 if (!uc
->tchan
->t_ring
) {
1448 uc
->tchan
->tc_ring
= k3_ringacc_request_ring(ud
->ringacc
, -1, 0);
1449 if (!uc
->tchan
->tc_ring
) {
1454 memset(&ring_cfg
, 0, sizeof(ring_cfg
));
1455 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1456 ring_cfg
.elm_size
= K3_RINGACC_RING_ELSIZE_8
;
1457 ring_cfg
.mode
= K3_RINGACC_RING_MODE_MESSAGE
;
1459 ret
= k3_ringacc_ring_cfg(uc
->tchan
->t_ring
, &ring_cfg
);
1460 ret
|= k3_ringacc_ring_cfg(uc
->tchan
->tc_ring
, &ring_cfg
);
1468 k3_ringacc_ring_free(uc
->tchan
->tc_ring
);
1469 uc
->tchan
->tc_ring
= NULL
;
1471 k3_ringacc_ring_free(uc
->tchan
->t_ring
);
1472 uc
->tchan
->t_ring
= NULL
;
1479 static void udma_free_rx_resources(struct udma_chan
*uc
)
1485 struct udma_rflow
*rflow
= uc
->rflow
;
1487 k3_ringacc_ring_free(rflow
->fd_ring
);
1488 k3_ringacc_ring_free(rflow
->r_ring
);
1489 rflow
->fd_ring
= NULL
;
1490 rflow
->r_ring
= NULL
;
/*
 * Allocate RX resources for a channel: reserve an rchan and (for slave
 * directions) a matching rflow, then request and configure the
 * free-descriptor ring (fd_ring) and receive/completion ring (r_ring).
 *
 * For packet mode the fd_ring is sized for SG_MAX_SEGMENTS so a full
 * scatterlist worth of free descriptors can be queued; the r_ring always
 * uses the default size. Errors unwind in reverse acquisition order.
 */
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	/* default rflow id matches the rchan id */
	ret = udma_get_rflow(uc, uc->rchan->id);
	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	/* fd rings are placed after all tchan and echan rings */
	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
	rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
	if (!rflow->fd_ring) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	/* receive ring can be any free ring (-1 = allocate any) */
	rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
	if (!rflow->r_ring) {
		ret = -EBUSY;
		goto err_rxc_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	if (uc->config.pkt_mode)
		ring_cfg.size = SG_MAX_SEGMENTS;
	else
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	/* r_ring keeps the default size even in packet mode */
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
err_rxc_ring:
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}
/*
 * Bitmasks telling the TISCI firmware which fields of the tx/rx channel
 * configuration requests are populated and should be applied.
 */
#define TISCI_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1588 static int udma_tisci_m2m_channel_config(struct udma_chan
*uc
)
1590 struct udma_dev
*ud
= uc
->ud
;
1591 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1592 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1593 struct udma_tchan
*tchan
= uc
->tchan
;
1594 struct udma_rchan
*rchan
= uc
->rchan
;
1597 /* Non synchronized - mem to mem type of transfer */
1598 int tc_ring
= k3_ringacc_get_ring_id(tchan
->tc_ring
);
1599 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
1600 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
1602 req_tx
.valid_params
= TISCI_TCHAN_VALID_PARAMS
;
1603 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
1604 req_tx
.index
= tchan
->id
;
1605 req_tx
.tx_chan_type
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR
;
1606 req_tx
.tx_fetch_size
= sizeof(struct cppi5_desc_hdr_t
) >> 2;
1607 req_tx
.txcq_qnum
= tc_ring
;
1608 req_tx
.tx_atype
= ud
->atype
;
1610 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
1612 dev_err(ud
->dev
, "tchan%d cfg failed %d\n", tchan
->id
, ret
);
1616 req_rx
.valid_params
= TISCI_RCHAN_VALID_PARAMS
;
1617 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
1618 req_rx
.index
= rchan
->id
;
1619 req_rx
.rx_fetch_size
= sizeof(struct cppi5_desc_hdr_t
) >> 2;
1620 req_rx
.rxcq_qnum
= tc_ring
;
1621 req_rx
.rx_chan_type
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR
;
1622 req_rx
.rx_atype
= ud
->atype
;
1624 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
1626 dev_err(ud
->dev
, "rchan%d alloc failed %d\n", rchan
->id
, ret
);
/*
 * Configure a slave TX (MEM_TO_DEV) tchan via TISCI firmware.
 *
 * Packet-mode channels use host-packet descriptors, so the fetch size must
 * cover the header plus optional EPIB and protocol-specific words; TR-mode
 * channels only need the bare descriptor header. The fetch size is given
 * to firmware in 32-bit words (hence the >> 2).
 *
 * Returns 0 on success, negative TISCI error (logged) on failure.
 */
static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret = 0;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	/* notdpkt suppresses the teardown completion packet if requested */
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}
/*
 * Configure a slave RX (DEV_TO_MEM) rchan and its default rflow via TISCI.
 *
 * First the rchan itself is configured (fetch size as in the TX case,
 * completing into the r_ring), then the receive flow is set up: tagging
 * selections, error handling, destination queue (r_ring) and all four
 * free-descriptor queue selectors pointed at the single fd_ring.
 *
 * Returns 0 on success; rchan config failure is returned (logged), flow
 * config failure is only logged.
 */
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret = 0;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2; /* in 32-bit words */
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;
	req_rx.rx_atype = uc->config.atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	/* default rflow id matches the rchan id */
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	/* all size-based free queue selectors use the same fd_ring */
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);

	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return 0;
}
/*
 * dmaengine .device_alloc_chan_resources callback.
 *
 * Allocates everything a channel needs before the first transfer:
 *  - a dma_pool for CPPI5 host/TR descriptors (packet mode and memcpy),
 *  - tchan/rchan/rflow and their rings, per transfer direction,
 *  - TISCI channel configuration,
 *  - PSI-L thread pairing,
 *  - ring IRQ and (for slave TR-mode channels) the UDMA TR event IRQ.
 *
 * Errors unwind everything acquired so far via the goto labels.
 * Returns 0 on success, negative errno otherwise.
 */
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_match_data *match_data = ud->match_data;
	struct k3_ring *irq_ring;
	u32 irq_udma_idx;
	int ret;

	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
		uc->use_dma_pool = true;
		/* in case of MEM_TO_MEM we have maximum of two TRs */
		if (uc->config.dir == DMA_MEM_TO_MEM) {
			uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);
			/* memcpy always uses TR descriptors */
			uc->config.pkt_mode = false;
		}
	}

	if (uc->use_dma_pool) {
		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align,
						 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			return -ENOMEM;
		}
	}

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			return ret;

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			udma_free_tx_resources(uc);
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) trasnfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) trasnfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->rflow->r_ring;
		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;

		ret = udma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_res_free;
		}
	}

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			k3_ringacc_get_ring_id(irq_ring));
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from UDMA (TR events) only needed for slave TR mode channels */
	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
							    irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}
1952 static int udma_slave_config(struct dma_chan
*chan
,
1953 struct dma_slave_config
*cfg
)
1955 struct udma_chan
*uc
= to_udma_chan(chan
);
1957 memcpy(&uc
->cfg
, cfg
, sizeof(uc
->cfg
));
/*
 * Allocate a udma_desc holding a single CPPI5 TR descriptor with room for
 * @tr_count TR records of @tr_size bytes plus the TR response area.
 *
 * Memory comes from the channel's dma_pool when one exists (packet-mode /
 * memcpy channels), otherwise from dma_alloc_coherent. The descriptor
 * header is initialized with the channel's packet id and the completion
 * ring matching @dir; cyclic channels get infinite reload.
 *
 * Returns the descriptor or NULL on allocation failure / bad tr_size.
 */
static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	/* only the hardware-defined TR record sizes are valid */
	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
						hwdesc->cppi5_desc_size,
						&hwdesc->cppi5_desc_paddr,
						GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}
/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the trasnfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
 * For len >= SZ_64K two TRs are used in a simple way:
 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
 * Second TR: the remaining length (tr1_cnt0)
 *
 * Returns the number of TRs the length needs (1 or 2)
 * -EINVAL if the length can not be supported
 */
static int udma_get_tr_counters(size_t len, unsigned long align_to,
				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;

		return 1;
	}

	/* icnt fields are u16, so block size must stay below 64K */
	if (align_to > 3)
		align_to = 3;

realign:
	*tr0_cnt0 = SZ_64K - BIT(align_to);
	if (len / *tr0_cnt0 >= SZ_64K) {
		/* try a smaller alignment before giving up */
		if (align_to) {
			align_to--;
			goto realign;
		}
		return -EINVAL;
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;

	return 2;
}
2083 static struct udma_desc
*
2084 udma_prep_slave_sg_tr(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2085 unsigned int sglen
, enum dma_transfer_direction dir
,
2086 unsigned long tx_flags
, void *context
)
2088 struct scatterlist
*sgent
;
2089 struct udma_desc
*d
;
2090 struct cppi5_tr_type1_t
*tr_req
= NULL
;
2091 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
2097 if (!is_slave_direction(dir
)) {
2098 dev_err(uc
->ud
->dev
, "Only slave cyclic is supported\n");
2102 /* estimate the number of TRs we will need */
2103 for_each_sg(sgl
, sgent
, sglen
, i
) {
2104 if (sg_dma_len(sgent
) < SZ_64K
)
2110 /* Now allocate and setup the descriptor. */
2111 tr_size
= sizeof(struct cppi5_tr_type1_t
);
2112 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, dir
);
2118 tr_req
= d
->hwdesc
[0].tr_req_base
;
2119 for_each_sg(sgl
, sgent
, sglen
, i
) {
2120 dma_addr_t sg_addr
= sg_dma_address(sgent
);
2122 num_tr
= udma_get_tr_counters(sg_dma_len(sgent
), __ffs(sg_addr
),
2123 &tr0_cnt0
, &tr0_cnt1
, &tr1_cnt0
);
2125 dev_err(uc
->ud
->dev
, "size %u is not supported\n",
2127 udma_free_hwdesc(uc
, d
);
2132 cppi5_tr_init(&tr_req
[i
].flags
, CPPI5_TR_TYPE1
, false, false,
2133 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2134 cppi5_tr_csf_set(&tr_req
[i
].flags
, CPPI5_TR_CSF_SUPR_EVT
);
2136 tr_req
[tr_idx
].addr
= sg_addr
;
2137 tr_req
[tr_idx
].icnt0
= tr0_cnt0
;
2138 tr_req
[tr_idx
].icnt1
= tr0_cnt1
;
2139 tr_req
[tr_idx
].dim1
= tr0_cnt0
;
2143 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
,
2145 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2146 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
2147 CPPI5_TR_CSF_SUPR_EVT
);
2149 tr_req
[tr_idx
].addr
= sg_addr
+ tr0_cnt1
* tr0_cnt0
;
2150 tr_req
[tr_idx
].icnt0
= tr1_cnt0
;
2151 tr_req
[tr_idx
].icnt1
= 1;
2152 tr_req
[tr_idx
].dim1
= tr1_cnt0
;
2156 d
->residue
+= sg_dma_len(sgent
);
2159 cppi5_tr_csf_set(&tr_req
[tr_idx
- 1].flags
, CPPI5_TR_CSF_EOP
);
/*
 * Compute the PDMA static TR parameters (element size, element count,
 * burst count) for a descriptor going to/from a PDMA XY endpoint.
 * Channels on other endpoint types need no static TR and return 0.
 *
 * Returns 0 on success, -EINVAL for an unsupported bus width or when the
 * computed Z (burst count) exceeds the hardware field for DEV_TO_MEM.
 */
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		d->static_tr.elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		d->static_tr.elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		d->static_tr.elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		d->static_tr.elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		d->static_tr.elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must to close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to close
	 * the packet otherwise the transfer will stall because PDMA holds on
	 * the data it has received from the peripheral.
	 */
	if (uc->config.pkt_mode || !uc->cyclic) {
		unsigned int div = dev_width * elcnt;

		if (uc->cyclic)
			/* per-period burst count for cyclic transfers */
			d->static_tr.bstcnt = d->residue / d->sglen / div;
		else
			d->static_tr.bstcnt = d->residue / div;

		if (uc->config.dir == DMA_DEV_TO_MEM &&
		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
			return -EINVAL;
	} else {
		d->static_tr.bstcnt = 0;
	}

	return 0;
}
/*
 * Build a packet-mode descriptor chain for a slave scatter-gather
 * transfer: one CPPI5 host descriptor per sg entry, linked into a packet.
 * The first descriptor is the packet header (gets packet ids, return
 * policy and total packet length); the rest are host-buffer descriptors.
 *
 * Returns the descriptor, or NULL on allocation failure or when the
 * total length reaches the 4M packet-length limit.
 */
static struct udma_desc *
udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_host_desc_t *h_desc = NULL;
	struct udma_desc *d;
	u32 ring_id;
	unsigned int i;

	d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = sglen;
	d->hwdesc_count = sglen;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	for_each_sg(sgl, sgent, sglen, i) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t sg_addr = sg_dma_address(sgent);
		struct cppi5_host_desc_t *desc;
		size_t sg_len = sg_dma_len(sgent);

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		d->residue += sg_len;
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		desc = hwdesc->cppi5_desc_vaddr;

		if (i == 0) {
			cppi5_hdesc_init(desc, 0, 0);
			/* Flow and Packed ID */
			cppi5_desc_set_pktids(&desc->hdr, uc->id,
					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
		} else {
			cppi5_hdesc_reset_hbdesc(desc);
			/* linked buffer descriptors return to ring 0xffff */
			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
		}

		/* attach the sg buffer to the descriptor */
		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);

		/* Attach link as host buffer descriptor */
		if (h_desc)
			cppi5_hdesc_link_hbdesc(h_desc,
						hwdesc->cppi5_desc_paddr);

		if (dir == DMA_MEM_TO_DEV)
			h_desc = desc;
	}

	if (d->residue >= SZ_4M) {
		dev_err(uc->ud->dev,
			"%s: Transfer size %u is over the supported 4M range\n",
			__func__, d->residue);
		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	cppi5_hdesc_set_pktlen(h_desc, d->residue);

	return d;
}
/*
 * dma_descriptor_metadata_ops .attach: copy client metadata (EPIB +
 * protocol specific data) into the first host descriptor of a packet-mode
 * transfer and update its flags/psdata size accordingly.
 *
 * Only valid for packet-mode channels with a configured metadata size.
 * For MEM_TO_DEV the data is copied into the descriptor's epib area;
 * for RX only the bookkeeping is updated.
 */
static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	/* when EPIB is used the metadata must at least cover it */
	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	if (d->dir == DMA_MEM_TO_DEV)
		memcpy(h_desc->epib, data, len);

	if (uc->config.needs_epib)
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;

	d->metadata = data;
	d->metadata_size = len;
	if (uc->config.needs_epib)
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}
/*
 * dma_descriptor_metadata_ops .get_ptr: expose the metadata region (epib)
 * of the first host descriptor for in-place access by the client.
 * Reports the currently valid payload length (EPIB size if present plus
 * psdata size) and the maximum the client may use.
 *
 * Returns the pointer, or ERR_PTR(-ENOTSUPP) for channels without
 * packet-mode metadata support.
 */
static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				   size_t *payload_len, size_t *max_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return ERR_PTR(-ENOTSUPP);

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	*max_len = uc->config.metadata_size;

	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);

	return h_desc->epib;
}
/*
 * dma_descriptor_metadata_ops .set_len: after the client wrote metadata
 * in place (via .get_ptr), record how much is valid by updating the
 * descriptor's EPIB-present flag and psdata size.
 *
 * Same validity constraints as udma_attach_metadata().
 */
static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
				 size_t payload_len)
{
	struct udma_desc *d = to_udma_desc(desc);
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = payload_len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (payload_len > uc->config.metadata_size)
		return -EINVAL;

	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	if (uc->config.needs_epib) {
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}
/* Client-metadata operations attached to packet-mode descriptors. */
static struct dma_descriptor_metadata_ops metadata_ops = {
	.attach = udma_attach_metadata,
	.get_ptr = udma_get_metadata_ptr,
	.set_len = udma_set_metadata_len,
};
/*
 * dmaengine .device_prep_slave_sg callback.
 *
 * Validates the direction against the channel's configured one, picks the
 * bus width/burst from the cached slave config, builds either a packet-
 * mode or TR-mode descriptor, applies the PDMA static TR parameters and
 * hands the descriptor to the virt-dma layer.
 *
 * Returns the tx descriptor, or NULL on error.
 */
static struct dma_async_tx_descriptor *
udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long tx_flags, void *context)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
					   context);
	else
		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
					  context);

	if (!d)
		return NULL;

	d->dir = dir;
	d->desc_idx = 0;
	d->tr_idx = 0;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}
/*
 * Build a TR-mode cyclic descriptor: one (or two, for periods >= 64K)
 * type1 TRs per period, with infinite reload set up by
 * udma_alloc_tr_desc() (uc->cyclic). Per-period completion events are
 * suppressed when DMA_PREP_INTERRUPT is not requested.
 *
 * Returns the descriptor, or NULL on error.
 */
static struct udma_desc *
udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
			size_t buf_len, size_t period_len,
			enum dma_transfer_direction dir, unsigned long flags)
{
	struct udma_desc *d;
	size_t tr_size, period_addr;
	struct cppi5_tr_type1_t *tr_req;
	unsigned int periods = buf_len / period_len;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned int i;
	int num_tr;

	if (!is_slave_direction(dir)) {
		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
		return NULL;
	}

	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			period_len);
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
	if (!d)
		return NULL;

	tr_req = d->hwdesc[0].tr_req_base;
	period_addr = buf_addr;
	for (i = 0; i < periods; i++) {
		int tr_idx = i * num_tr;

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

		tr_req[tr_idx].addr = period_addr;
		tr_req[tr_idx].icnt0 = tr0_cnt0;
		tr_req[tr_idx].icnt1 = tr0_cnt1;
		tr_req[tr_idx].dim1 = tr0_cnt0;

		if (num_tr == 2) {
			/* first TR of the pair never signals completion */
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
			tr_idx++;

			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
			tr_req[tr_idx].icnt0 = tr1_cnt0;
			tr_req[tr_idx].icnt1 = 1;
			tr_req[tr_idx].dim1 = tr1_cnt0;
		}

		/* without DMA_PREP_INTERRUPT suppress per-period events */
		if (!(flags & DMA_PREP_INTERRUPT))
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);

		period_addr += period_len;
	}

	return d;
}
/*
 * Build a packet-mode cyclic descriptor: one self-contained host
 * descriptor per period, each attached to its slice of the buffer.
 * The period count is bounded by the default ring size and each period
 * by the 4M packet-length limit.
 *
 * Returns the descriptor, or NULL on error.
 */
static struct udma_desc *
udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction dir, unsigned long flags)
{
	struct udma_desc *d;
	u32 ring_id;
	int i;
	int periods = buf_len / period_len;

	/* one ring slot must stay free, hence size - 1 */
	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
		return NULL;

	if (period_len >= SZ_4M)
		return NULL;

	d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->hwdesc_count = periods;

	/* TODO: re-check this... */
	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	for (i = 0; i < periods; i++) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t period_addr = buf_addr + (period_len * i);
		struct cppi5_host_desc_t *h_desc;

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		h_desc = hwdesc->cppi5_desc_vaddr;

		cppi5_hdesc_init(h_desc, 0, 0);
		cppi5_hdesc_set_pktlen(h_desc, period_len);

		/* Flow and Packed ID */
		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);

		/* attach each period to a new descriptor */
		cppi5_hdesc_attach_buf(h_desc,
				       period_addr, period_len,
				       period_addr, period_len);
	}

	return d;
}
/*
 * dmaengine .device_prep_dma_cyclic callback.
 *
 * Validates the direction, fetches width/burst from the cached slave
 * config, marks the channel cyclic, dispatches to the packet-mode or
 * TR-mode builder, applies PDMA static TR parameters and submits to
 * virt-dma.
 *
 * Returns the tx descriptor, or NULL on error.
 */
static struct dma_async_tx_descriptor *
udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		     size_t period_len, enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	uc->cyclic = true;

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
					     dir, flags);
	else
		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
					    dir, flags);

	if (!d)
		return NULL;

	d->sglen = buf_len / period_len;

	d->dir = dir;
	d->residue = buf_len;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, flags);
}
/*
 * dmaengine .device_prep_dma_memcpy callback.
 *
 * Builds one or two type15 TRs (two when len >= 64K, split by
 * udma_get_tr_counters() on an alignment derived from src|dest).
 * Only the last TR gets EOP; all completion events are otherwise
 * suppressed.
 *
 * Returns the tx descriptor, or NULL on error.
 */
static struct dma_async_tx_descriptor *
udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		     size_t len, unsigned long tx_flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_desc *d;
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;

	if (uc->config.dir != DMA_MEM_TO_MEM) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
		return NULL;
	}

	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			len);
		return NULL;
	}

	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
	if (!d)
		return NULL;

	d->dir = DMA_MEM_TO_MEM;
	d->desc_idx = 0;
	d->tr_idx = 0;
	d->residue = len;

	tr_req = d->hwdesc[0].tr_req_base;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		/* second TR copies the remainder past the 64K-blocks */
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	/* only the final TR signals completion */
	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}
/*
 * dmaengine .device_issue_pending callback: under the virt-dma lock,
 * move pending descriptors to the issued list and kick the channel if
 * nothing is currently active and the channel is not mid-teardown.
 */
static void udma_issue_pending(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	/* If we have something pending and no active descriptor, then */
	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
		/*
		 * start a descriptor if the channel is NOT [marked as
		 * terminating _and_ it is still running (teardown has not
		 * completed yet)].
		 */
		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
		      udma_is_chan_running(uc)))
			udma_start(uc);
	}

	spin_unlock_irqrestore(&uc->vc.lock, flags);
}
/*
 * dmaengine .device_tx_status callback.
 *
 * Refines the generic cookie status: a non-running channel is reported
 * complete, a paused channel as DMA_PAUSED. For the in-flight descriptor
 * the residue is estimated from the channel's realtime byte counters;
 * for non-native (PDMA) endpoints the difference between local and peer
 * byte counters is reported as in-flight bytes.
 */
static enum dma_status udma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!udma_is_chan_running(uc))
		ret = DMA_COMPLETE;

	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
		ret = DMA_PAUSED;

	if (ret == DMA_COMPLETE || !txstate)
		goto out;

	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
		u32 peer_bcnt = 0;
		u32 bcnt = 0;
		u32 residue = uc->desc->residue;
		u32 delay = 0;

		if (uc->desc->dir == DMA_MEM_TO_DEV) {
			bcnt = udma_tchanrt_read(uc->tchan,
						 UDMA_TCHAN_RT_SBCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_tchanrt_read(uc->tchan,
						UDMA_TCHAN_RT_PEER_BCNT_REG);

				/* bytes UDMA sent but PDMA not yet consumed */
				if (bcnt > peer_bcnt)
					delay = bcnt - peer_bcnt;
			}
		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
			bcnt = udma_rchanrt_read(uc->rchan,
						 UDMA_RCHAN_RT_BCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_rchanrt_read(uc->rchan,
						UDMA_RCHAN_RT_PEER_BCNT_REG);

				/* bytes PDMA received but not yet in memory */
				if (peer_bcnt > bcnt)
					delay = peer_bcnt - bcnt;
			}
		} else {
			bcnt = udma_tchanrt_read(uc->tchan,
						 UDMA_TCHAN_RT_BCNT_REG);
		}

		/* counters are cumulative; subtract what completed before */
		bcnt -= uc->bcnt;
		if (bcnt && !(bcnt % uc->desc->residue))
			residue = 0;
		else
			residue -= bcnt % uc->desc->residue;

		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
			ret = DMA_COMPLETE;
			delay = 0;
		}

		dma_set_residue(txstate, residue);
		dma_set_in_flight_bytes(txstate, delay);

	} else {
		ret = DMA_COMPLETE;
	}

out:
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	return ret;
}
2843 static int udma_pause(struct dma_chan
*chan
)
2845 struct udma_chan
*uc
= to_udma_chan(chan
);
2847 /* pause the channel */
2848 switch (uc
->config
.dir
) {
2849 case DMA_DEV_TO_MEM
:
2850 udma_rchanrt_update_bits(uc
->rchan
,
2851 UDMA_RCHAN_RT_PEER_RT_EN_REG
,
2852 UDMA_PEER_RT_EN_PAUSE
,
2853 UDMA_PEER_RT_EN_PAUSE
);
2855 case DMA_MEM_TO_DEV
:
2856 udma_tchanrt_update_bits(uc
->tchan
,
2857 UDMA_TCHAN_RT_PEER_RT_EN_REG
,
2858 UDMA_PEER_RT_EN_PAUSE
,
2859 UDMA_PEER_RT_EN_PAUSE
);
2861 case DMA_MEM_TO_MEM
:
2862 udma_tchanrt_update_bits(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
2863 UDMA_CHAN_RT_CTL_PAUSE
,
2864 UDMA_CHAN_RT_CTL_PAUSE
);
2873 static int udma_resume(struct dma_chan
*chan
)
2875 struct udma_chan
*uc
= to_udma_chan(chan
);
2877 /* resume the channel */
2878 switch (uc
->config
.dir
) {
2879 case DMA_DEV_TO_MEM
:
2880 udma_rchanrt_update_bits(uc
->rchan
,
2881 UDMA_RCHAN_RT_PEER_RT_EN_REG
,
2882 UDMA_PEER_RT_EN_PAUSE
, 0);
2885 case DMA_MEM_TO_DEV
:
2886 udma_tchanrt_update_bits(uc
->tchan
,
2887 UDMA_TCHAN_RT_PEER_RT_EN_REG
,
2888 UDMA_PEER_RT_EN_PAUSE
, 0);
2890 case DMA_MEM_TO_MEM
:
2891 udma_tchanrt_update_bits(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
2892 UDMA_CHAN_RT_CTL_PAUSE
, 0);
2901 static int udma_terminate_all(struct dma_chan
*chan
)
2903 struct udma_chan
*uc
= to_udma_chan(chan
);
2904 unsigned long flags
;
2907 spin_lock_irqsave(&uc
->vc
.lock
, flags
);
2909 if (udma_is_chan_running(uc
))
2913 uc
->terminated_desc
= uc
->desc
;
2915 uc
->terminated_desc
->terminated
= true;
2916 cancel_delayed_work(&uc
->tx_drain
.work
);
2921 vchan_get_all_descriptors(&uc
->vc
, &head
);
2922 spin_unlock_irqrestore(&uc
->vc
.lock
, flags
);
2923 vchan_dma_desc_free_list(&uc
->vc
, &head
);
2928 static void udma_synchronize(struct dma_chan
*chan
)
2930 struct udma_chan
*uc
= to_udma_chan(chan
);
2931 unsigned long timeout
= msecs_to_jiffies(1000);
2933 vchan_synchronize(&uc
->vc
);
2935 if (uc
->state
== UDMA_CHAN_IS_TERMINATING
) {
2936 timeout
= wait_for_completion_timeout(&uc
->teardown_completed
,
2939 dev_warn(uc
->ud
->dev
, "chan%d teardown timeout!\n",
2941 udma_dump_chan_stdata(uc
);
2942 udma_reset_chan(uc
, true);
2946 udma_reset_chan(uc
, false);
2947 if (udma_is_chan_running(uc
))
2948 dev_warn(uc
->ud
->dev
, "chan%d refused to stop!\n", uc
->id
);
2950 cancel_delayed_work_sync(&uc
->tx_drain
.work
);
2951 udma_reset_rings(uc
);
2954 static void udma_desc_pre_callback(struct virt_dma_chan
*vc
,
2955 struct virt_dma_desc
*vd
,
2956 struct dmaengine_result
*result
)
2958 struct udma_chan
*uc
= to_udma_chan(&vc
->chan
);
2959 struct udma_desc
*d
;
2964 d
= to_udma_desc(&vd
->tx
);
2966 if (d
->metadata_size
)
2967 udma_fetch_epib(uc
, d
);
2969 /* Provide residue information for the client */
2971 void *desc_vaddr
= udma_curr_cppi5_desc_vaddr(d
, d
->desc_idx
);
2973 if (cppi5_desc_get_type(desc_vaddr
) ==
2974 CPPI5_INFO0_DESC_TYPE_VAL_HOST
) {
2975 result
->residue
= d
->residue
-
2976 cppi5_hdesc_get_pktlen(desc_vaddr
);
2977 if (result
->residue
)
2978 result
->result
= DMA_TRANS_ABORTED
;
2980 result
->result
= DMA_TRANS_NOERROR
;
2982 result
->residue
= 0;
2983 result
->result
= DMA_TRANS_NOERROR
;
2989 * This tasklet handles the completion of a DMA descriptor by
2990 * calling its callback and freeing it.
2992 static void udma_vchan_complete(unsigned long arg
)
2994 struct virt_dma_chan
*vc
= (struct virt_dma_chan
*)arg
;
2995 struct virt_dma_desc
*vd
, *_vd
;
2996 struct dmaengine_desc_callback cb
;
2999 spin_lock_irq(&vc
->lock
);
3000 list_splice_tail_init(&vc
->desc_completed
, &head
);
3004 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
3006 memset(&cb
, 0, sizeof(cb
));
3008 spin_unlock_irq(&vc
->lock
);
3010 udma_desc_pre_callback(vc
, vd
, NULL
);
3011 dmaengine_desc_callback_invoke(&cb
, NULL
);
3013 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
3014 struct dmaengine_result result
;
3016 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
3018 list_del(&vd
->node
);
3020 udma_desc_pre_callback(vc
, vd
, &result
);
3021 dmaengine_desc_callback_invoke(&cb
, &result
);
3023 vchan_vdesc_fini(vd
);
3027 static void udma_free_chan_resources(struct dma_chan
*chan
)
3029 struct udma_chan
*uc
= to_udma_chan(chan
);
3030 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
3032 udma_terminate_all(chan
);
3033 if (uc
->terminated_desc
) {
3034 udma_reset_chan(uc
, false);
3035 udma_reset_rings(uc
);
3038 cancel_delayed_work_sync(&uc
->tx_drain
.work
);
3039 destroy_delayed_work_on_stack(&uc
->tx_drain
.work
);
3041 if (uc
->irq_num_ring
> 0) {
3042 free_irq(uc
->irq_num_ring
, uc
);
3044 uc
->irq_num_ring
= 0;
3046 if (uc
->irq_num_udma
> 0) {
3047 free_irq(uc
->irq_num_udma
, uc
);
3049 uc
->irq_num_udma
= 0;
3052 /* Release PSI-L pairing */
3053 if (uc
->psil_paired
) {
3054 navss_psil_unpair(ud
, uc
->config
.src_thread
,
3055 uc
->config
.dst_thread
);
3056 uc
->psil_paired
= false;
3059 vchan_free_chan_resources(&uc
->vc
);
3060 tasklet_kill(&uc
->vc
.task
);
3062 udma_free_tx_resources(uc
);
3063 udma_free_rx_resources(uc
);
3064 udma_reset_uchan(uc
);
3066 if (uc
->use_dma_pool
) {
3067 dma_pool_destroy(uc
->hdesc_pool
);
3068 uc
->use_dma_pool
= false;
3072 static struct platform_driver udma_driver
;
3074 struct udma_filter_param
{
3075 int remote_thread_id
;
3079 static bool udma_dma_filter_fn(struct dma_chan
*chan
, void *param
)
3081 struct udma_chan_config
*ucc
;
3082 struct psil_endpoint_config
*ep_config
;
3083 struct udma_filter_param
*filter_param
;
3084 struct udma_chan
*uc
;
3085 struct udma_dev
*ud
;
3087 if (chan
->device
->dev
->driver
!= &udma_driver
.driver
)
3090 uc
= to_udma_chan(chan
);
3093 filter_param
= param
;
3095 if (filter_param
->atype
> 2) {
3096 dev_err(ud
->dev
, "Invalid channel atype: %u\n",
3097 filter_param
->atype
);
3101 ucc
->remote_thread_id
= filter_param
->remote_thread_id
;
3102 ucc
->atype
= filter_param
->atype
;
3104 if (ucc
->remote_thread_id
& K3_PSIL_DST_THREAD_ID_OFFSET
)
3105 ucc
->dir
= DMA_MEM_TO_DEV
;
3107 ucc
->dir
= DMA_DEV_TO_MEM
;
3109 ep_config
= psil_get_ep_config(ucc
->remote_thread_id
);
3110 if (IS_ERR(ep_config
)) {
3111 dev_err(ud
->dev
, "No configuration for psi-l thread 0x%04x\n",
3112 ucc
->remote_thread_id
);
3113 ucc
->dir
= DMA_MEM_TO_MEM
;
3114 ucc
->remote_thread_id
= -1;
3119 ucc
->pkt_mode
= ep_config
->pkt_mode
;
3120 ucc
->channel_tpl
= ep_config
->channel_tpl
;
3121 ucc
->notdpkt
= ep_config
->notdpkt
;
3122 ucc
->ep_type
= ep_config
->ep_type
;
3124 if (ucc
->ep_type
!= PSIL_EP_NATIVE
) {
3125 const struct udma_match_data
*match_data
= ud
->match_data
;
3127 if (match_data
->flags
& UDMA_FLAG_PDMA_ACC32
)
3128 ucc
->enable_acc32
= ep_config
->pdma_acc32
;
3129 if (match_data
->flags
& UDMA_FLAG_PDMA_BURST
)
3130 ucc
->enable_burst
= ep_config
->pdma_burst
;
3133 ucc
->needs_epib
= ep_config
->needs_epib
;
3134 ucc
->psd_size
= ep_config
->psd_size
;
3135 ucc
->metadata_size
=
3136 (ucc
->needs_epib
? CPPI5_INFO0_HDESC_EPIB_SIZE
: 0) +
3140 ucc
->hdesc_size
= ALIGN(sizeof(struct cppi5_host_desc_t
) +
3141 ucc
->metadata_size
, ud
->desc_align
);
3143 dev_dbg(ud
->dev
, "chan%d: Remote thread: 0x%04x (%s)\n", uc
->id
,
3144 ucc
->remote_thread_id
, dmaengine_get_direction_text(ucc
->dir
));
3149 static struct dma_chan
*udma_of_xlate(struct of_phandle_args
*dma_spec
,
3150 struct of_dma
*ofdma
)
3152 struct udma_dev
*ud
= ofdma
->of_dma_data
;
3153 dma_cap_mask_t mask
= ud
->ddev
.cap_mask
;
3154 struct udma_filter_param filter_param
;
3155 struct dma_chan
*chan
;
3157 if (dma_spec
->args_count
!= 1 && dma_spec
->args_count
!= 2)
3160 filter_param
.remote_thread_id
= dma_spec
->args
[0];
3161 if (dma_spec
->args_count
== 2)
3162 filter_param
.atype
= dma_spec
->args
[1];
3164 filter_param
.atype
= 0;
3166 chan
= __dma_request_channel(&mask
, udma_dma_filter_fn
, &filter_param
,
3169 dev_err(ud
->dev
, "get channel fail in %s.\n", __func__
);
3170 return ERR_PTR(-EINVAL
);
3176 static struct udma_match_data am654_main_data
= {
3177 .psil_base
= 0x1000,
3178 .enable_memcpy_support
= true,
3179 .statictr_z_mask
= GENMASK(11, 0),
3180 .rchan_oes_offset
= 0x2000,
3182 .level_start_idx
= {
3183 [0] = 8, /* Normal channels */
3184 [1] = 0, /* High Throughput channels */
3188 static struct udma_match_data am654_mcu_data
= {
3189 .psil_base
= 0x6000,
3190 .enable_memcpy_support
= true, /* TEST: DMA domains */
3191 .statictr_z_mask
= GENMASK(11, 0),
3192 .rchan_oes_offset
= 0x2000,
3194 .level_start_idx
= {
3195 [0] = 2, /* Normal channels */
3196 [1] = 0, /* High Throughput channels */
3200 static struct udma_match_data j721e_main_data
= {
3201 .psil_base
= 0x1000,
3202 .enable_memcpy_support
= true,
3203 .flags
= UDMA_FLAG_PDMA_ACC32
| UDMA_FLAG_PDMA_BURST
,
3204 .statictr_z_mask
= GENMASK(23, 0),
3205 .rchan_oes_offset
= 0x400,
3207 .level_start_idx
= {
3208 [0] = 16, /* Normal channels */
3209 [1] = 4, /* High Throughput channels */
3210 [2] = 0, /* Ultra High Throughput channels */
3214 static struct udma_match_data j721e_mcu_data
= {
3215 .psil_base
= 0x6000,
3216 .enable_memcpy_support
= false, /* MEM_TO_MEM is slow via MCU UDMA */
3217 .flags
= UDMA_FLAG_PDMA_ACC32
| UDMA_FLAG_PDMA_BURST
,
3218 .statictr_z_mask
= GENMASK(23, 0),
3219 .rchan_oes_offset
= 0x400,
3221 .level_start_idx
= {
3222 [0] = 2, /* Normal channels */
3223 [1] = 0, /* High Throughput channels */
3227 static const struct of_device_id udma_of_match
[] = {
3229 .compatible
= "ti,am654-navss-main-udmap",
3230 .data
= &am654_main_data
,
3233 .compatible
= "ti,am654-navss-mcu-udmap",
3234 .data
= &am654_mcu_data
,
3236 .compatible
= "ti,j721e-navss-main-udmap",
3237 .data
= &j721e_main_data
,
3239 .compatible
= "ti,j721e-navss-mcu-udmap",
3240 .data
= &j721e_mcu_data
,
3245 static int udma_get_mmrs(struct platform_device
*pdev
, struct udma_dev
*ud
)
3247 struct resource
*res
;
3250 for (i
= 0; i
< MMR_LAST
; i
++) {
3251 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
3253 ud
->mmrs
[i
] = devm_ioremap_resource(&pdev
->dev
, res
);
3254 if (IS_ERR(ud
->mmrs
[i
]))
3255 return PTR_ERR(ud
->mmrs
[i
]);
3261 static int udma_setup_resources(struct udma_dev
*ud
)
3263 struct device
*dev
= ud
->dev
;
3264 int ch_count
, ret
, i
, j
;
3266 struct ti_sci_resource_desc
*rm_desc
;
3267 struct ti_sci_resource
*rm_res
, irq_res
;
3268 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
3269 static const char * const range_names
[] = { "ti,sci-rm-range-tchan",
3270 "ti,sci-rm-range-rchan",
3271 "ti,sci-rm-range-rflow" };
3273 cap2
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x28);
3274 cap3
= udma_read(ud
->mmrs
[MMR_GCFG
], 0x2c);
3276 ud
->rflow_cnt
= cap3
& 0x3fff;
3277 ud
->tchan_cnt
= cap2
& 0x1ff;
3278 ud
->echan_cnt
= (cap2
>> 9) & 0x1ff;
3279 ud
->rchan_cnt
= (cap2
>> 18) & 0x1ff;
3280 ch_count
= ud
->tchan_cnt
+ ud
->rchan_cnt
;
3282 ud
->tchan_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->tchan_cnt
),
3283 sizeof(unsigned long), GFP_KERNEL
);
3284 ud
->tchans
= devm_kcalloc(dev
, ud
->tchan_cnt
, sizeof(*ud
->tchans
),
3286 ud
->rchan_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->rchan_cnt
),
3287 sizeof(unsigned long), GFP_KERNEL
);
3288 ud
->rchans
= devm_kcalloc(dev
, ud
->rchan_cnt
, sizeof(*ud
->rchans
),
3290 ud
->rflow_gp_map
= devm_kmalloc_array(dev
, BITS_TO_LONGS(ud
->rflow_cnt
),
3291 sizeof(unsigned long),
3293 ud
->rflow_gp_map_allocated
= devm_kcalloc(dev
,
3294 BITS_TO_LONGS(ud
->rflow_cnt
),
3295 sizeof(unsigned long),
3297 ud
->rflow_in_use
= devm_kcalloc(dev
, BITS_TO_LONGS(ud
->rflow_cnt
),
3298 sizeof(unsigned long),
3300 ud
->rflows
= devm_kcalloc(dev
, ud
->rflow_cnt
, sizeof(*ud
->rflows
),
3303 if (!ud
->tchan_map
|| !ud
->rchan_map
|| !ud
->rflow_gp_map
||
3304 !ud
->rflow_gp_map_allocated
|| !ud
->tchans
|| !ud
->rchans
||
3305 !ud
->rflows
|| !ud
->rflow_in_use
)
3309 * RX flows with the same Ids as RX channels are reserved to be used
3310 * as default flows if remote HW can't generate flow_ids. Those
3311 * RX flows can be requested only explicitly by id.
3313 bitmap_set(ud
->rflow_gp_map_allocated
, 0, ud
->rchan_cnt
);
3315 /* by default no GP rflows are assigned to Linux */
3316 bitmap_set(ud
->rflow_gp_map
, 0, ud
->rflow_cnt
);
3318 /* Get resource ranges from tisci */
3319 for (i
= 0; i
< RM_RANGE_LAST
; i
++)
3320 tisci_rm
->rm_ranges
[i
] =
3321 devm_ti_sci_get_of_resource(tisci_rm
->tisci
, dev
,
3322 tisci_rm
->tisci_dev_id
,
3323 (char *)range_names
[i
]);
3326 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_TCHAN
];
3327 if (IS_ERR(rm_res
)) {
3328 bitmap_zero(ud
->tchan_map
, ud
->tchan_cnt
);
3330 bitmap_fill(ud
->tchan_map
, ud
->tchan_cnt
);
3331 for (i
= 0; i
< rm_res
->sets
; i
++) {
3332 rm_desc
= &rm_res
->desc
[i
];
3333 bitmap_clear(ud
->tchan_map
, rm_desc
->start
,
3335 dev_dbg(dev
, "ti-sci-res: tchan: %d:%d\n",
3336 rm_desc
->start
, rm_desc
->num
);
3339 irq_res
.sets
= rm_res
->sets
;
3341 /* rchan and matching default flow ranges */
3342 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_RCHAN
];
3343 if (IS_ERR(rm_res
)) {
3344 bitmap_zero(ud
->rchan_map
, ud
->rchan_cnt
);
3346 bitmap_fill(ud
->rchan_map
, ud
->rchan_cnt
);
3347 for (i
= 0; i
< rm_res
->sets
; i
++) {
3348 rm_desc
= &rm_res
->desc
[i
];
3349 bitmap_clear(ud
->rchan_map
, rm_desc
->start
,
3351 dev_dbg(dev
, "ti-sci-res: rchan: %d:%d\n",
3352 rm_desc
->start
, rm_desc
->num
);
3356 irq_res
.sets
+= rm_res
->sets
;
3357 irq_res
.desc
= kcalloc(irq_res
.sets
, sizeof(*irq_res
.desc
), GFP_KERNEL
);
3358 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_TCHAN
];
3359 for (i
= 0; i
< rm_res
->sets
; i
++) {
3360 irq_res
.desc
[i
].start
= rm_res
->desc
[i
].start
;
3361 irq_res
.desc
[i
].num
= rm_res
->desc
[i
].num
;
3363 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_RCHAN
];
3364 for (j
= 0; j
< rm_res
->sets
; j
++, i
++) {
3365 irq_res
.desc
[i
].start
= rm_res
->desc
[j
].start
+
3366 ud
->match_data
->rchan_oes_offset
;
3367 irq_res
.desc
[i
].num
= rm_res
->desc
[j
].num
;
3369 ret
= ti_sci_inta_msi_domain_alloc_irqs(ud
->dev
, &irq_res
);
3370 kfree(irq_res
.desc
);
3372 dev_err(ud
->dev
, "Failed to allocate MSI interrupts\n");
3376 /* GP rflow ranges */
3377 rm_res
= tisci_rm
->rm_ranges
[RM_RANGE_RFLOW
];
3378 if (IS_ERR(rm_res
)) {
3379 /* all gp flows are assigned exclusively to Linux */
3380 bitmap_clear(ud
->rflow_gp_map
, ud
->rchan_cnt
,
3381 ud
->rflow_cnt
- ud
->rchan_cnt
);
3383 for (i
= 0; i
< rm_res
->sets
; i
++) {
3384 rm_desc
= &rm_res
->desc
[i
];
3385 bitmap_clear(ud
->rflow_gp_map
, rm_desc
->start
,
3387 dev_dbg(dev
, "ti-sci-res: rflow: %d:%d\n",
3388 rm_desc
->start
, rm_desc
->num
);
3392 ch_count
-= bitmap_weight(ud
->tchan_map
, ud
->tchan_cnt
);
3393 ch_count
-= bitmap_weight(ud
->rchan_map
, ud
->rchan_cnt
);
3397 ud
->channels
= devm_kcalloc(dev
, ch_count
, sizeof(*ud
->channels
),
3402 dev_info(dev
, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3404 ud
->tchan_cnt
- bitmap_weight(ud
->tchan_map
, ud
->tchan_cnt
),
3405 ud
->rchan_cnt
- bitmap_weight(ud
->rchan_map
, ud
->rchan_cnt
),
3406 ud
->rflow_cnt
- bitmap_weight(ud
->rflow_gp_map
,
3412 static int udma_setup_rx_flush(struct udma_dev
*ud
)
3414 struct udma_rx_flush
*rx_flush
= &ud
->rx_flush
;
3415 struct cppi5_desc_hdr_t
*tr_desc
;
3416 struct cppi5_tr_type1_t
*tr_req
;
3417 struct cppi5_host_desc_t
*desc
;
3418 struct device
*dev
= ud
->dev
;
3419 struct udma_hwdesc
*hwdesc
;
3422 /* Allocate 1K buffer for discarded data on RX channel teardown */
3423 rx_flush
->buffer_size
= SZ_1K
;
3424 rx_flush
->buffer_vaddr
= devm_kzalloc(dev
, rx_flush
->buffer_size
,
3426 if (!rx_flush
->buffer_vaddr
)
3429 rx_flush
->buffer_paddr
= dma_map_single(dev
, rx_flush
->buffer_vaddr
,
3430 rx_flush
->buffer_size
,
3432 if (dma_mapping_error(dev
, rx_flush
->buffer_paddr
))
3435 /* Set up descriptor to be used for TR mode */
3436 hwdesc
= &rx_flush
->hwdescs
[0];
3437 tr_size
= sizeof(struct cppi5_tr_type1_t
);
3438 hwdesc
->cppi5_desc_size
= cppi5_trdesc_calc_size(tr_size
, 1);
3439 hwdesc
->cppi5_desc_size
= ALIGN(hwdesc
->cppi5_desc_size
,
3442 hwdesc
->cppi5_desc_vaddr
= devm_kzalloc(dev
, hwdesc
->cppi5_desc_size
,
3444 if (!hwdesc
->cppi5_desc_vaddr
)
3447 hwdesc
->cppi5_desc_paddr
= dma_map_single(dev
, hwdesc
->cppi5_desc_vaddr
,
3448 hwdesc
->cppi5_desc_size
,
3450 if (dma_mapping_error(dev
, hwdesc
->cppi5_desc_paddr
))
3453 /* Start of the TR req records */
3454 hwdesc
->tr_req_base
= hwdesc
->cppi5_desc_vaddr
+ tr_size
;
3455 /* Start address of the TR response array */
3456 hwdesc
->tr_resp_base
= hwdesc
->tr_req_base
+ tr_size
;
3458 tr_desc
= hwdesc
->cppi5_desc_vaddr
;
3459 cppi5_trdesc_init(tr_desc
, 1, tr_size
, 0, 0);
3460 cppi5_desc_set_pktids(tr_desc
, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
3461 cppi5_desc_set_retpolicy(tr_desc
, 0, 0);
3463 tr_req
= hwdesc
->tr_req_base
;
3464 cppi5_tr_init(&tr_req
->flags
, CPPI5_TR_TYPE1
, false, false,
3465 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
3466 cppi5_tr_csf_set(&tr_req
->flags
, CPPI5_TR_CSF_SUPR_EVT
);
3468 tr_req
->addr
= rx_flush
->buffer_paddr
;
3469 tr_req
->icnt0
= rx_flush
->buffer_size
;
3472 /* Set up descriptor to be used for packet mode */
3473 hwdesc
= &rx_flush
->hwdescs
[1];
3474 hwdesc
->cppi5_desc_size
= ALIGN(sizeof(struct cppi5_host_desc_t
) +
3475 CPPI5_INFO0_HDESC_EPIB_SIZE
+
3476 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
,
3479 hwdesc
->cppi5_desc_vaddr
= devm_kzalloc(dev
, hwdesc
->cppi5_desc_size
,
3481 if (!hwdesc
->cppi5_desc_vaddr
)
3484 hwdesc
->cppi5_desc_paddr
= dma_map_single(dev
, hwdesc
->cppi5_desc_vaddr
,
3485 hwdesc
->cppi5_desc_size
,
3487 if (dma_mapping_error(dev
, hwdesc
->cppi5_desc_paddr
))
3490 desc
= hwdesc
->cppi5_desc_vaddr
;
3491 cppi5_hdesc_init(desc
, 0, 0);
3492 cppi5_desc_set_pktids(&desc
->hdr
, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
3493 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, 0);
3495 cppi5_hdesc_attach_buf(desc
,
3496 rx_flush
->buffer_paddr
, rx_flush
->buffer_size
,
3497 rx_flush
->buffer_paddr
, rx_flush
->buffer_size
);
3499 dma_sync_single_for_device(dev
, hwdesc
->cppi5_desc_paddr
,
3500 hwdesc
->cppi5_desc_size
, DMA_TO_DEVICE
);
3504 #ifdef CONFIG_DEBUG_FS
3505 static void udma_dbg_summary_show_chan(struct seq_file
*s
,
3506 struct dma_chan
*chan
)
3508 struct udma_chan
*uc
= to_udma_chan(chan
);
3509 struct udma_chan_config
*ucc
= &uc
->config
;
3511 seq_printf(s
, " %-13s| %s", dma_chan_name(chan
),
3512 chan
->dbg_client_name
?: "in-use");
3513 seq_printf(s
, " (%s, ", dmaengine_get_direction_text(uc
->config
.dir
));
3515 switch (uc
->config
.dir
) {
3516 case DMA_MEM_TO_MEM
:
3517 seq_printf(s
, "chan%d pair [0x%04x -> 0x%04x], ", uc
->tchan
->id
,
3518 ucc
->src_thread
, ucc
->dst_thread
);
3520 case DMA_DEV_TO_MEM
:
3521 seq_printf(s
, "rchan%d [0x%04x -> 0x%04x], ", uc
->rchan
->id
,
3522 ucc
->src_thread
, ucc
->dst_thread
);
3524 case DMA_MEM_TO_DEV
:
3525 seq_printf(s
, "tchan%d [0x%04x -> 0x%04x], ", uc
->tchan
->id
,
3526 ucc
->src_thread
, ucc
->dst_thread
);
3529 seq_printf(s
, ")\n");
3533 if (ucc
->ep_type
== PSIL_EP_NATIVE
) {
3534 seq_printf(s
, "PSI-L Native");
3535 if (ucc
->metadata_size
) {
3536 seq_printf(s
, "[%s", ucc
->needs_epib
? " EPIB" : "");
3538 seq_printf(s
, " PSDsize:%u", ucc
->psd_size
);
3539 seq_printf(s
, " ]");
3542 seq_printf(s
, "PDMA");
3543 if (ucc
->enable_acc32
|| ucc
->enable_burst
)
3544 seq_printf(s
, "[%s%s ]",
3545 ucc
->enable_acc32
? " ACC32" : "",
3546 ucc
->enable_burst
? " BURST" : "");
3549 seq_printf(s
, ", %s)\n", ucc
->pkt_mode
? "Packet mode" : "TR mode");
3552 static void udma_dbg_summary_show(struct seq_file
*s
,
3553 struct dma_device
*dma_dev
)
3555 struct dma_chan
*chan
;
3557 list_for_each_entry(chan
, &dma_dev
->channels
, device_node
) {
3558 if (chan
->client_count
)
3559 udma_dbg_summary_show_chan(s
, chan
);
3562 #endif /* CONFIG_DEBUG_FS */
3564 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3565 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3566 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3567 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3568 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3570 static int udma_probe(struct platform_device
*pdev
)
3572 struct device_node
*navss_node
= pdev
->dev
.parent
->of_node
;
3573 struct device
*dev
= &pdev
->dev
;
3574 struct udma_dev
*ud
;
3575 const struct of_device_id
*match
;
3579 ret
= dma_coerce_mask_and_coherent(dev
, DMA_BIT_MASK(48));
3581 dev_err(dev
, "failed to set dma mask stuff\n");
3583 ud
= devm_kzalloc(dev
, sizeof(*ud
), GFP_KERNEL
);
3587 ret
= udma_get_mmrs(pdev
, ud
);
3591 ud
->tisci_rm
.tisci
= ti_sci_get_by_phandle(dev
->of_node
, "ti,sci");
3592 if (IS_ERR(ud
->tisci_rm
.tisci
))
3593 return PTR_ERR(ud
->tisci_rm
.tisci
);
3595 ret
= of_property_read_u32(dev
->of_node
, "ti,sci-dev-id",
3596 &ud
->tisci_rm
.tisci_dev_id
);
3598 dev_err(dev
, "ti,sci-dev-id read failure %d\n", ret
);
3601 pdev
->id
= ud
->tisci_rm
.tisci_dev_id
;
3603 ret
= of_property_read_u32(navss_node
, "ti,sci-dev-id",
3604 &ud
->tisci_rm
.tisci_navss_dev_id
);
3606 dev_err(dev
, "NAVSS ti,sci-dev-id read failure %d\n", ret
);
3610 ret
= of_property_read_u32(navss_node
, "ti,udma-atype", &ud
->atype
);
3611 if (!ret
&& ud
->atype
> 2) {
3612 dev_err(dev
, "Invalid atype: %u\n", ud
->atype
);
3616 ud
->tisci_rm
.tisci_udmap_ops
= &ud
->tisci_rm
.tisci
->ops
.rm_udmap_ops
;
3617 ud
->tisci_rm
.tisci_psil_ops
= &ud
->tisci_rm
.tisci
->ops
.rm_psil_ops
;
3619 ud
->ringacc
= of_k3_ringacc_get_by_phandle(dev
->of_node
, "ti,ringacc");
3620 if (IS_ERR(ud
->ringacc
))
3621 return PTR_ERR(ud
->ringacc
);
3623 dev
->msi_domain
= of_msi_get_domain(dev
, dev
->of_node
,
3624 DOMAIN_BUS_TI_SCI_INTA_MSI
);
3625 if (!dev
->msi_domain
) {
3626 dev_err(dev
, "Failed to get MSI domain\n");
3627 return -EPROBE_DEFER
;
3630 match
= of_match_node(udma_of_match
, dev
->of_node
);
3632 dev_err(dev
, "No compatible match found\n");
3635 ud
->match_data
= match
->data
;
3637 dma_cap_set(DMA_SLAVE
, ud
->ddev
.cap_mask
);
3638 dma_cap_set(DMA_CYCLIC
, ud
->ddev
.cap_mask
);
3640 ud
->ddev
.device_alloc_chan_resources
= udma_alloc_chan_resources
;
3641 ud
->ddev
.device_config
= udma_slave_config
;
3642 ud
->ddev
.device_prep_slave_sg
= udma_prep_slave_sg
;
3643 ud
->ddev
.device_prep_dma_cyclic
= udma_prep_dma_cyclic
;
3644 ud
->ddev
.device_issue_pending
= udma_issue_pending
;
3645 ud
->ddev
.device_tx_status
= udma_tx_status
;
3646 ud
->ddev
.device_pause
= udma_pause
;
3647 ud
->ddev
.device_resume
= udma_resume
;
3648 ud
->ddev
.device_terminate_all
= udma_terminate_all
;
3649 ud
->ddev
.device_synchronize
= udma_synchronize
;
3650 #ifdef CONFIG_DEBUG_FS
3651 ud
->ddev
.dbg_summary_show
= udma_dbg_summary_show
;
3654 ud
->ddev
.device_free_chan_resources
= udma_free_chan_resources
;
3655 ud
->ddev
.src_addr_widths
= TI_UDMAC_BUSWIDTHS
;
3656 ud
->ddev
.dst_addr_widths
= TI_UDMAC_BUSWIDTHS
;
3657 ud
->ddev
.directions
= BIT(DMA_DEV_TO_MEM
) | BIT(DMA_MEM_TO_DEV
);
3658 ud
->ddev
.residue_granularity
= DMA_RESIDUE_GRANULARITY_BURST
;
3659 ud
->ddev
.copy_align
= DMAENGINE_ALIGN_8_BYTES
;
3660 ud
->ddev
.desc_metadata_modes
= DESC_METADATA_CLIENT
|
3661 DESC_METADATA_ENGINE
;
3662 if (ud
->match_data
->enable_memcpy_support
) {
3663 dma_cap_set(DMA_MEMCPY
, ud
->ddev
.cap_mask
);
3664 ud
->ddev
.device_prep_dma_memcpy
= udma_prep_dma_memcpy
;
3665 ud
->ddev
.directions
|= BIT(DMA_MEM_TO_MEM
);
3670 ud
->psil_base
= ud
->match_data
->psil_base
;
3672 INIT_LIST_HEAD(&ud
->ddev
.channels
);
3673 INIT_LIST_HEAD(&ud
->desc_to_purge
);
3675 ch_count
= udma_setup_resources(ud
);
3679 spin_lock_init(&ud
->lock
);
3680 INIT_WORK(&ud
->purge_work
, udma_purge_desc_work
);
3682 ud
->desc_align
= 64;
3683 if (ud
->desc_align
< dma_get_cache_alignment())
3684 ud
->desc_align
= dma_get_cache_alignment();
3686 ret
= udma_setup_rx_flush(ud
);
3690 for (i
= 0; i
< ud
->tchan_cnt
; i
++) {
3691 struct udma_tchan
*tchan
= &ud
->tchans
[i
];
3694 tchan
->reg_rt
= ud
->mmrs
[MMR_TCHANRT
] + i
* 0x1000;
3697 for (i
= 0; i
< ud
->rchan_cnt
; i
++) {
3698 struct udma_rchan
*rchan
= &ud
->rchans
[i
];
3701 rchan
->reg_rt
= ud
->mmrs
[MMR_RCHANRT
] + i
* 0x1000;
3704 for (i
= 0; i
< ud
->rflow_cnt
; i
++) {
3705 struct udma_rflow
*rflow
= &ud
->rflows
[i
];
3710 for (i
= 0; i
< ch_count
; i
++) {
3711 struct udma_chan
*uc
= &ud
->channels
[i
];
3714 uc
->vc
.desc_free
= udma_desc_free
;
3718 uc
->config
.remote_thread_id
= -1;
3719 uc
->config
.dir
= DMA_MEM_TO_MEM
;
3720 uc
->name
= devm_kasprintf(dev
, GFP_KERNEL
, "%s chan%d",
3723 vchan_init(&uc
->vc
, &ud
->ddev
);
3724 /* Use custom vchan completion handling */
3725 tasklet_init(&uc
->vc
.task
, udma_vchan_complete
,
3726 (unsigned long)&uc
->vc
);
3727 init_completion(&uc
->teardown_completed
);
3730 ret
= dma_async_device_register(&ud
->ddev
);
3732 dev_err(dev
, "failed to register slave DMA engine: %d\n", ret
);
3736 platform_set_drvdata(pdev
, ud
);
3738 ret
= of_dma_controller_register(dev
->of_node
, udma_of_xlate
, ud
);
3740 dev_err(dev
, "failed to register of_dma controller\n");
3741 dma_async_device_unregister(&ud
->ddev
);
3747 static struct platform_driver udma_driver
= {
3750 .of_match_table
= udma_of_match
,
3751 .suppress_bind_attrs
= true,
3753 .probe
= udma_probe
,
3755 builtin_platform_driver(udma_driver
);
3757 /* Private interfaces to UDMA */
3758 #include "k3-udma-private.c"