1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
20 #include <linux/of_dma.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/workqueue.h>
24 #include <linux/completion.h>
25 #include <linux/soc/ti/k3-ringacc.h>
26 #include <linux/soc/ti/ti_sci_protocol.h>
27 #include <linux/soc/ti/ti_sci_inta_msi.h>
28 #include <linux/dma/ti-cppi5.h>
30 #include "../virt-dma.h"
32 #include "k3-psil-priv.h"
34 struct udma_static_tr
{
35 u8 elsize
; /* RPSTR0 */
36 u16 elcnt
; /* RPSTR0 */
37 u16 bstcnt
; /* RPSTR1 */
40 #define K3_UDMA_MAX_RFLOWS 1024
41 #define K3_UDMA_DEFAULT_RING_SIZE 16
43 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44 #define UDMA_RFLOW_SRCTAG_NONE 0
45 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
46 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
47 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
49 #define UDMA_RFLOW_DSTTAG_NONE 0
50 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
51 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
52 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
53 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
64 static const char * const mmr_names
[] = { "gcfg", "rchanrt", "tchanrt" };
70 struct k3_ring
*t_ring
; /* Transmit ring */
71 struct k3_ring
*tc_ring
; /* Transmit Completion ring */
76 struct k3_ring
*fd_ring
; /* Free Descriptor ring */
77 struct k3_ring
*r_ring
; /* Receive ring */
86 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
87 #define UDMA_FLAG_PDMA_BURST BIT(1)
89 struct udma_match_data
{
91 bool enable_memcpy_support
;
97 u32 level_start_idx
[];
101 size_t cppi5_desc_size
;
102 void *cppi5_desc_vaddr
;
103 dma_addr_t cppi5_desc_paddr
;
105 /* TR descriptor internal pointers */
107 struct cppi5_tr_resp_t
*tr_resp_base
;
110 struct udma_rx_flush
{
111 struct udma_hwdesc hwdescs
[2];
115 dma_addr_t buffer_paddr
;
119 struct dma_device ddev
;
121 void __iomem
*mmrs
[MMR_LAST
];
122 const struct udma_match_data
*match_data
;
124 size_t desc_align
; /* alignment to use for descriptors */
126 struct udma_tisci_rm tisci_rm
;
128 struct k3_ringacc
*ringacc
;
130 struct work_struct purge_work
;
131 struct list_head desc_to_purge
;
134 struct udma_rx_flush rx_flush
;
140 unsigned long *tchan_map
;
141 unsigned long *rchan_map
;
142 unsigned long *rflow_gp_map
;
143 unsigned long *rflow_gp_map_allocated
;
144 unsigned long *rflow_in_use
;
146 struct udma_tchan
*tchans
;
147 struct udma_rchan
*rchans
;
148 struct udma_rflow
*rflows
;
150 struct udma_chan
*channels
;
156 struct virt_dma_desc vd
;
160 enum dma_transfer_direction dir
;
162 struct udma_static_tr static_tr
;
166 unsigned int desc_idx
; /* Only used for cyclic in packet mode */
170 void *metadata
; /* pointer to provided metadata buffer (EPIP, PSdata) */
172 unsigned int hwdesc_count
;
173 struct udma_hwdesc hwdesc
[0];
176 enum udma_chan_state
{
177 UDMA_CHAN_IS_IDLE
= 0, /* not active, no teardown is in progress */
178 UDMA_CHAN_IS_ACTIVE
, /* Normal operation */
179 UDMA_CHAN_IS_TERMINATING
, /* channel is being terminated */
182 struct udma_tx_drain
{
183 struct delayed_work work
;
188 struct udma_chan_config
{
189 bool pkt_mode
; /* TR or packet */
190 bool needs_epib
; /* EPIB is needed for the communication or not */
191 u32 psd_size
; /* size of Protocol Specific Data */
192 u32 metadata_size
; /* (needs_epib ? 16:0) + psd_size */
193 u32 hdesc_size
; /* Size of a packet descriptor in packet mode */
194 bool notdpkt
; /* Suppress sending TDC packet */
195 int remote_thread_id
;
199 enum psil_endpoint_type ep_type
;
202 enum udma_tp_level channel_tpl
; /* Channel Throughput Level */
204 enum dma_transfer_direction dir
;
208 struct virt_dma_chan vc
;
209 struct dma_slave_config cfg
;
211 struct udma_desc
*desc
;
212 struct udma_desc
*terminated_desc
;
213 struct udma_static_tr static_tr
;
216 struct udma_tchan
*tchan
;
217 struct udma_rchan
*rchan
;
218 struct udma_rflow
*rflow
;
228 enum udma_chan_state state
;
229 struct completion teardown_completed
;
231 struct udma_tx_drain tx_drain
;
233 u32 bcnt
; /* number of bytes completed since the start of the channel */
234 u32 in_ring_cnt
; /* number of descriptors in flight */
236 /* Channel configuration parameters */
237 struct udma_chan_config config
;
239 /* dmapool for packet mode descriptors */
241 struct dma_pool
*hdesc_pool
;
246 static inline struct udma_dev
*to_udma_dev(struct dma_device
*d
)
248 return container_of(d
, struct udma_dev
, ddev
);
251 static inline struct udma_chan
*to_udma_chan(struct dma_chan
*c
)
253 return container_of(c
, struct udma_chan
, vc
.chan
);
256 static inline struct udma_desc
*to_udma_desc(struct dma_async_tx_descriptor
*t
)
258 return container_of(t
, struct udma_desc
, vd
.tx
);
261 /* Generic register access functions */
262 static inline u32
udma_read(void __iomem
*base
, int reg
)
264 return readl(base
+ reg
);
267 static inline void udma_write(void __iomem
*base
, int reg
, u32 val
)
269 writel(val
, base
+ reg
);
272 static inline void udma_update_bits(void __iomem
*base
, int reg
,
277 orig
= readl(base
+ reg
);
282 writel(tmp
, base
+ reg
);
286 static inline u32
udma_tchanrt_read(struct udma_tchan
*tchan
, int reg
)
290 return udma_read(tchan
->reg_rt
, reg
);
293 static inline void udma_tchanrt_write(struct udma_tchan
*tchan
, int reg
,
298 udma_write(tchan
->reg_rt
, reg
, val
);
301 static inline void udma_tchanrt_update_bits(struct udma_tchan
*tchan
, int reg
,
306 udma_update_bits(tchan
->reg_rt
, reg
, mask
, val
);
310 static inline u32
udma_rchanrt_read(struct udma_rchan
*rchan
, int reg
)
314 return udma_read(rchan
->reg_rt
, reg
);
317 static inline void udma_rchanrt_write(struct udma_rchan
*rchan
, int reg
,
322 udma_write(rchan
->reg_rt
, reg
, val
);
325 static inline void udma_rchanrt_update_bits(struct udma_rchan
*rchan
, int reg
,
330 udma_update_bits(rchan
->reg_rt
, reg
, mask
, val
);
333 static int navss_psil_pair(struct udma_dev
*ud
, u32 src_thread
, u32 dst_thread
)
335 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
337 dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
338 return tisci_rm
->tisci_psil_ops
->pair(tisci_rm
->tisci
,
339 tisci_rm
->tisci_navss_dev_id
,
340 src_thread
, dst_thread
);
343 static int navss_psil_unpair(struct udma_dev
*ud
, u32 src_thread
,
346 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
348 dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
349 return tisci_rm
->tisci_psil_ops
->unpair(tisci_rm
->tisci
,
350 tisci_rm
->tisci_navss_dev_id
,
351 src_thread
, dst_thread
);
354 static void udma_reset_uchan(struct udma_chan
*uc
)
356 memset(&uc
->config
, 0, sizeof(uc
->config
));
357 uc
->config
.remote_thread_id
= -1;
358 uc
->state
= UDMA_CHAN_IS_IDLE
;
361 static void udma_dump_chan_stdata(struct udma_chan
*uc
)
363 struct device
*dev
= uc
->ud
->dev
;
367 if (uc
->config
.dir
== DMA_MEM_TO_DEV
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
368 dev_dbg(dev
, "TCHAN State data:\n");
369 for (i
= 0; i
< 32; i
++) {
370 offset
= UDMA_TCHAN_RT_STDATA_REG
+ i
* 4;
371 dev_dbg(dev
, "TRT_STDATA[%02d]: 0x%08x\n", i
,
372 udma_tchanrt_read(uc
->tchan
, offset
));
376 if (uc
->config
.dir
== DMA_DEV_TO_MEM
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
377 dev_dbg(dev
, "RCHAN State data:\n");
378 for (i
= 0; i
< 32; i
++) {
379 offset
= UDMA_RCHAN_RT_STDATA_REG
+ i
* 4;
380 dev_dbg(dev
, "RRT_STDATA[%02d]: 0x%08x\n", i
,
381 udma_rchanrt_read(uc
->rchan
, offset
));
386 static inline dma_addr_t
udma_curr_cppi5_desc_paddr(struct udma_desc
*d
,
389 return d
->hwdesc
[idx
].cppi5_desc_paddr
;
392 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc
*d
, int idx
)
394 return d
->hwdesc
[idx
].cppi5_desc_vaddr
;
397 static struct udma_desc
*udma_udma_desc_from_paddr(struct udma_chan
*uc
,
400 struct udma_desc
*d
= uc
->terminated_desc
;
403 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
406 if (desc_paddr
!= paddr
)
413 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
416 if (desc_paddr
!= paddr
)
424 static void udma_free_hwdesc(struct udma_chan
*uc
, struct udma_desc
*d
)
426 if (uc
->use_dma_pool
) {
429 for (i
= 0; i
< d
->hwdesc_count
; i
++) {
430 if (!d
->hwdesc
[i
].cppi5_desc_vaddr
)
433 dma_pool_free(uc
->hdesc_pool
,
434 d
->hwdesc
[i
].cppi5_desc_vaddr
,
435 d
->hwdesc
[i
].cppi5_desc_paddr
);
437 d
->hwdesc
[i
].cppi5_desc_vaddr
= NULL
;
439 } else if (d
->hwdesc
[0].cppi5_desc_vaddr
) {
440 struct udma_dev
*ud
= uc
->ud
;
442 dma_free_coherent(ud
->dev
, d
->hwdesc
[0].cppi5_desc_size
,
443 d
->hwdesc
[0].cppi5_desc_vaddr
,
444 d
->hwdesc
[0].cppi5_desc_paddr
);
446 d
->hwdesc
[0].cppi5_desc_vaddr
= NULL
;
450 static void udma_purge_desc_work(struct work_struct
*work
)
452 struct udma_dev
*ud
= container_of(work
, typeof(*ud
), purge_work
);
453 struct virt_dma_desc
*vd
, *_vd
;
457 spin_lock_irqsave(&ud
->lock
, flags
);
458 list_splice_tail_init(&ud
->desc_to_purge
, &head
);
459 spin_unlock_irqrestore(&ud
->lock
, flags
);
461 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
462 struct udma_chan
*uc
= to_udma_chan(vd
->tx
.chan
);
463 struct udma_desc
*d
= to_udma_desc(&vd
->tx
);
465 udma_free_hwdesc(uc
, d
);
470 /* If more to purge, schedule the work again */
471 if (!list_empty(&ud
->desc_to_purge
))
472 schedule_work(&ud
->purge_work
);
475 static void udma_desc_free(struct virt_dma_desc
*vd
)
477 struct udma_dev
*ud
= to_udma_dev(vd
->tx
.chan
->device
);
478 struct udma_chan
*uc
= to_udma_chan(vd
->tx
.chan
);
479 struct udma_desc
*d
= to_udma_desc(&vd
->tx
);
482 if (uc
->terminated_desc
== d
)
483 uc
->terminated_desc
= NULL
;
485 if (uc
->use_dma_pool
) {
486 udma_free_hwdesc(uc
, d
);
491 spin_lock_irqsave(&ud
->lock
, flags
);
492 list_add_tail(&vd
->node
, &ud
->desc_to_purge
);
493 spin_unlock_irqrestore(&ud
->lock
, flags
);
495 schedule_work(&ud
->purge_work
);
498 static bool udma_is_chan_running(struct udma_chan
*uc
)
504 trt_ctl
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
);
506 rrt_ctl
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
);
508 if (trt_ctl
& UDMA_CHAN_RT_CTL_EN
|| rrt_ctl
& UDMA_CHAN_RT_CTL_EN
)
514 static bool udma_is_chan_paused(struct udma_chan
*uc
)
518 switch (uc
->config
.dir
) {
520 val
= udma_rchanrt_read(uc
->rchan
,
521 UDMA_RCHAN_RT_PEER_RT_EN_REG
);
522 pause_mask
= UDMA_PEER_RT_EN_PAUSE
;
525 val
= udma_tchanrt_read(uc
->tchan
,
526 UDMA_TCHAN_RT_PEER_RT_EN_REG
);
527 pause_mask
= UDMA_PEER_RT_EN_PAUSE
;
530 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
);
531 pause_mask
= UDMA_CHAN_RT_CTL_PAUSE
;
537 if (val
& pause_mask
)
543 static void udma_sync_for_device(struct udma_chan
*uc
, int idx
)
545 struct udma_desc
*d
= uc
->desc
;
547 if (uc
->cyclic
&& uc
->config
.pkt_mode
) {
548 dma_sync_single_for_device(uc
->ud
->dev
,
549 d
->hwdesc
[idx
].cppi5_desc_paddr
,
550 d
->hwdesc
[idx
].cppi5_desc_size
,
555 for (i
= 0; i
< d
->hwdesc_count
; i
++) {
556 if (!d
->hwdesc
[i
].cppi5_desc_vaddr
)
559 dma_sync_single_for_device(uc
->ud
->dev
,
560 d
->hwdesc
[i
].cppi5_desc_paddr
,
561 d
->hwdesc
[i
].cppi5_desc_size
,
567 static inline dma_addr_t
udma_get_rx_flush_hwdesc_paddr(struct udma_chan
*uc
)
569 return uc
->ud
->rx_flush
.hwdescs
[uc
->config
.pkt_mode
].cppi5_desc_paddr
;
572 static int udma_push_to_ring(struct udma_chan
*uc
, int idx
)
574 struct udma_desc
*d
= uc
->desc
;
575 struct k3_ring
*ring
= NULL
;
579 switch (uc
->config
.dir
) {
581 ring
= uc
->rflow
->fd_ring
;
585 ring
= uc
->tchan
->t_ring
;
591 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
593 paddr
= udma_get_rx_flush_hwdesc_paddr(uc
);
595 paddr
= udma_curr_cppi5_desc_paddr(d
, idx
);
597 wmb(); /* Ensure that writes are not moved over this point */
598 udma_sync_for_device(uc
, idx
);
601 ret
= k3_ringacc_ring_push(ring
, &paddr
);
608 static bool udma_desc_is_rx_flush(struct udma_chan
*uc
, dma_addr_t addr
)
610 if (uc
->config
.dir
!= DMA_DEV_TO_MEM
)
613 if (addr
== udma_get_rx_flush_hwdesc_paddr(uc
))
619 static int udma_pop_from_ring(struct udma_chan
*uc
, dma_addr_t
*addr
)
621 struct k3_ring
*ring
= NULL
;
624 switch (uc
->config
.dir
) {
626 ring
= uc
->rflow
->r_ring
;
630 ring
= uc
->tchan
->tc_ring
;
636 if (ring
&& k3_ringacc_ring_get_occ(ring
)) {
637 struct udma_desc
*d
= NULL
;
639 ret
= k3_ringacc_ring_pop(ring
, addr
);
643 /* Teardown completion */
644 if (cppi5_desc_is_tdcm(*addr
))
647 /* Check for flush descriptor */
648 if (udma_desc_is_rx_flush(uc
, *addr
))
651 d
= udma_udma_desc_from_paddr(uc
, *addr
);
654 dma_sync_single_for_cpu(uc
->ud
->dev
, *addr
,
655 d
->hwdesc
[0].cppi5_desc_size
,
657 rmb(); /* Ensure that reads are not moved before this point */
666 static void udma_reset_rings(struct udma_chan
*uc
)
668 struct k3_ring
*ring1
= NULL
;
669 struct k3_ring
*ring2
= NULL
;
671 switch (uc
->config
.dir
) {
674 ring1
= uc
->rflow
->fd_ring
;
675 ring2
= uc
->rflow
->r_ring
;
681 ring1
= uc
->tchan
->t_ring
;
682 ring2
= uc
->tchan
->tc_ring
;
690 k3_ringacc_ring_reset_dma(ring1
,
691 k3_ringacc_ring_get_occ(ring1
));
693 k3_ringacc_ring_reset(ring2
);
695 /* make sure we are not leaking memory by stalled descriptor */
696 if (uc
->terminated_desc
) {
697 udma_desc_free(&uc
->terminated_desc
->vd
);
698 uc
->terminated_desc
= NULL
;
704 static void udma_reset_counters(struct udma_chan
*uc
)
709 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_BCNT_REG
);
710 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_BCNT_REG
, val
);
712 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_SBCNT_REG
);
713 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_SBCNT_REG
, val
);
715 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_PCNT_REG
);
716 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PCNT_REG
, val
);
718 val
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_PEER_BCNT_REG
);
719 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_BCNT_REG
, val
);
723 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_BCNT_REG
);
724 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_BCNT_REG
, val
);
726 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_SBCNT_REG
);
727 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_SBCNT_REG
, val
);
729 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_PCNT_REG
);
730 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PCNT_REG
, val
);
732 val
= udma_rchanrt_read(uc
->rchan
, UDMA_RCHAN_RT_PEER_BCNT_REG
);
733 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_BCNT_REG
, val
);
739 static int udma_reset_chan(struct udma_chan
*uc
, bool hard
)
741 switch (uc
->config
.dir
) {
743 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_RT_EN_REG
, 0);
744 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
, 0);
747 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
, 0);
748 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_RT_EN_REG
, 0);
751 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
, 0);
752 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
, 0);
758 /* Reset all counters */
759 udma_reset_counters(uc
);
761 /* Hard reset: re-initialize the channel to reset */
763 struct udma_chan_config ucc_backup
;
766 memcpy(&ucc_backup
, &uc
->config
, sizeof(uc
->config
));
767 uc
->ud
->ddev
.device_free_chan_resources(&uc
->vc
.chan
);
769 /* restore the channel configuration */
770 memcpy(&uc
->config
, &ucc_backup
, sizeof(uc
->config
));
771 ret
= uc
->ud
->ddev
.device_alloc_chan_resources(&uc
->vc
.chan
);
776 * Setting forced teardown after forced reset helps recovering
779 if (uc
->config
.dir
== DMA_DEV_TO_MEM
)
780 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
,
781 UDMA_CHAN_RT_CTL_EN
|
782 UDMA_CHAN_RT_CTL_TDOWN
|
783 UDMA_CHAN_RT_CTL_FTDOWN
);
785 uc
->state
= UDMA_CHAN_IS_IDLE
;
790 static void udma_start_desc(struct udma_chan
*uc
)
792 struct udma_chan_config
*ucc
= &uc
->config
;
794 if (ucc
->pkt_mode
&& (uc
->cyclic
|| ucc
->dir
== DMA_DEV_TO_MEM
)) {
797 /* Push all descriptors to ring for packet mode cyclic or RX */
798 for (i
= 0; i
< uc
->desc
->sglen
; i
++)
799 udma_push_to_ring(uc
, i
);
801 udma_push_to_ring(uc
, 0);
805 static bool udma_chan_needs_reconfiguration(struct udma_chan
*uc
)
807 /* Only PDMAs have staticTR */
808 if (uc
->config
.ep_type
== PSIL_EP_NATIVE
)
811 /* Check if the staticTR configuration has changed for TX */
812 if (memcmp(&uc
->static_tr
, &uc
->desc
->static_tr
, sizeof(uc
->static_tr
)))
818 static int udma_start(struct udma_chan
*uc
)
820 struct virt_dma_desc
*vd
= vchan_next_desc(&uc
->vc
);
829 uc
->desc
= to_udma_desc(&vd
->tx
);
831 /* Channel is already running and does not need reconfiguration */
832 if (udma_is_chan_running(uc
) && !udma_chan_needs_reconfiguration(uc
)) {
837 /* Make sure that we clear the teardown bit, if it is set */
838 udma_reset_chan(uc
, false);
840 /* Push descriptors before we start the channel */
843 switch (uc
->desc
->dir
) {
845 /* Config remote TR */
846 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
) {
847 u32 val
= PDMA_STATIC_TR_Y(uc
->desc
->static_tr
.elcnt
) |
848 PDMA_STATIC_TR_X(uc
->desc
->static_tr
.elsize
);
849 const struct udma_match_data
*match_data
=
852 if (uc
->config
.enable_acc32
)
853 val
|= PDMA_STATIC_TR_XY_ACC32
;
854 if (uc
->config
.enable_burst
)
855 val
|= PDMA_STATIC_TR_XY_BURST
;
857 udma_rchanrt_write(uc
->rchan
,
858 UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
, val
);
860 udma_rchanrt_write(uc
->rchan
,
861 UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
,
862 PDMA_STATIC_TR_Z(uc
->desc
->static_tr
.bstcnt
,
863 match_data
->statictr_z_mask
));
865 /* save the current staticTR configuration */
866 memcpy(&uc
->static_tr
, &uc
->desc
->static_tr
,
867 sizeof(uc
->static_tr
));
870 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
,
871 UDMA_CHAN_RT_CTL_EN
);
874 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_RT_EN_REG
,
875 UDMA_PEER_RT_EN_ENABLE
);
879 /* Config remote TR */
880 if (uc
->config
.ep_type
== PSIL_EP_PDMA_XY
) {
881 u32 val
= PDMA_STATIC_TR_Y(uc
->desc
->static_tr
.elcnt
) |
882 PDMA_STATIC_TR_X(uc
->desc
->static_tr
.elsize
);
884 if (uc
->config
.enable_acc32
)
885 val
|= PDMA_STATIC_TR_XY_ACC32
;
886 if (uc
->config
.enable_burst
)
887 val
|= PDMA_STATIC_TR_XY_BURST
;
889 udma_tchanrt_write(uc
->tchan
,
890 UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG
, val
);
892 /* save the current staticTR configuration */
893 memcpy(&uc
->static_tr
, &uc
->desc
->static_tr
,
894 sizeof(uc
->static_tr
));
898 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_RT_EN_REG
,
899 UDMA_PEER_RT_EN_ENABLE
);
901 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
902 UDMA_CHAN_RT_CTL_EN
);
906 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_CTL_REG
,
907 UDMA_CHAN_RT_CTL_EN
);
908 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
909 UDMA_CHAN_RT_CTL_EN
);
916 uc
->state
= UDMA_CHAN_IS_ACTIVE
;
922 static int udma_stop(struct udma_chan
*uc
)
924 enum udma_chan_state old_state
= uc
->state
;
926 uc
->state
= UDMA_CHAN_IS_TERMINATING
;
927 reinit_completion(&uc
->teardown_completed
);
929 switch (uc
->config
.dir
) {
931 if (!uc
->cyclic
&& !uc
->desc
)
932 udma_push_to_ring(uc
, -1);
934 udma_rchanrt_write(uc
->rchan
, UDMA_RCHAN_RT_PEER_RT_EN_REG
,
935 UDMA_PEER_RT_EN_ENABLE
|
936 UDMA_PEER_RT_EN_TEARDOWN
);
939 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_PEER_RT_EN_REG
,
940 UDMA_PEER_RT_EN_ENABLE
|
941 UDMA_PEER_RT_EN_FLUSH
);
942 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
943 UDMA_CHAN_RT_CTL_EN
|
944 UDMA_CHAN_RT_CTL_TDOWN
);
947 udma_tchanrt_write(uc
->tchan
, UDMA_TCHAN_RT_CTL_REG
,
948 UDMA_CHAN_RT_CTL_EN
|
949 UDMA_CHAN_RT_CTL_TDOWN
);
952 uc
->state
= old_state
;
953 complete_all(&uc
->teardown_completed
);
960 static void udma_cyclic_packet_elapsed(struct udma_chan
*uc
)
962 struct udma_desc
*d
= uc
->desc
;
963 struct cppi5_host_desc_t
*h_desc
;
965 h_desc
= d
->hwdesc
[d
->desc_idx
].cppi5_desc_vaddr
;
966 cppi5_hdesc_reset_to_original(h_desc
);
967 udma_push_to_ring(uc
, d
->desc_idx
);
968 d
->desc_idx
= (d
->desc_idx
+ 1) % d
->sglen
;
971 static inline void udma_fetch_epib(struct udma_chan
*uc
, struct udma_desc
*d
)
973 struct cppi5_host_desc_t
*h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
975 memcpy(d
->metadata
, h_desc
->epib
, d
->metadata_size
);
978 static bool udma_is_desc_really_done(struct udma_chan
*uc
, struct udma_desc
*d
)
982 /* Only TX towards PDMA is affected */
983 if (uc
->config
.ep_type
== PSIL_EP_NATIVE
||
984 uc
->config
.dir
!= DMA_MEM_TO_DEV
)
987 peer_bcnt
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_PEER_BCNT_REG
);
988 bcnt
= udma_tchanrt_read(uc
->tchan
, UDMA_TCHAN_RT_BCNT_REG
);
990 /* Transfer is incomplete, store current residue and time stamp */
991 if (peer_bcnt
< bcnt
) {
992 uc
->tx_drain
.residue
= bcnt
- peer_bcnt
;
993 uc
->tx_drain
.tstamp
= ktime_get();
1000 static void udma_check_tx_completion(struct work_struct
*work
)
1002 struct udma_chan
*uc
= container_of(work
, typeof(*uc
),
1003 tx_drain
.work
.work
);
1004 bool desc_done
= true;
1007 unsigned long delay
;
1011 /* Get previous residue and time stamp */
1012 residue_diff
= uc
->tx_drain
.residue
;
1013 time_diff
= uc
->tx_drain
.tstamp
;
1015 * Get current residue and time stamp or see if
1016 * transfer is complete
1018 desc_done
= udma_is_desc_really_done(uc
, uc
->desc
);
1023 * Find the time delta and residue delta w.r.t
1026 time_diff
= ktime_sub(uc
->tx_drain
.tstamp
,
1028 residue_diff
-= uc
->tx_drain
.residue
;
1031 * Try to guess when we should check
1032 * next time by calculating rate at
1033 * which data is being drained at the
1036 delay
= (time_diff
/ residue_diff
) *
1037 uc
->tx_drain
.residue
;
1039 /* No progress, check again in 1 second */
1040 schedule_delayed_work(&uc
->tx_drain
.work
, HZ
);
1044 usleep_range(ktime_to_us(delay
),
1045 ktime_to_us(delay
) + 10);
1050 struct udma_desc
*d
= uc
->desc
;
1052 uc
->bcnt
+= d
->residue
;
1054 vchan_cookie_complete(&d
->vd
);
1062 static irqreturn_t
udma_ring_irq_handler(int irq
, void *data
)
1064 struct udma_chan
*uc
= data
;
1065 struct udma_desc
*d
;
1066 unsigned long flags
;
1067 dma_addr_t paddr
= 0;
1069 if (udma_pop_from_ring(uc
, &paddr
) || !paddr
)
1072 spin_lock_irqsave(&uc
->vc
.lock
, flags
);
1074 /* Teardown completion message */
1075 if (cppi5_desc_is_tdcm(paddr
)) {
1076 /* Compensate our internal pop/push counter */
1079 complete_all(&uc
->teardown_completed
);
1081 if (uc
->terminated_desc
) {
1082 udma_desc_free(&uc
->terminated_desc
->vd
);
1083 uc
->terminated_desc
= NULL
;
1092 d
= udma_udma_desc_from_paddr(uc
, paddr
);
1095 dma_addr_t desc_paddr
= udma_curr_cppi5_desc_paddr(d
,
1097 if (desc_paddr
!= paddr
) {
1098 dev_err(uc
->ud
->dev
, "not matching descriptors!\n");
1102 if (d
== uc
->desc
) {
1103 /* active descriptor */
1105 udma_cyclic_packet_elapsed(uc
);
1106 vchan_cyclic_callback(&d
->vd
);
1108 if (udma_is_desc_really_done(uc
, d
)) {
1109 uc
->bcnt
+= d
->residue
;
1111 vchan_cookie_complete(&d
->vd
);
1113 schedule_delayed_work(&uc
->tx_drain
.work
,
1119 * terminated descriptor, mark the descriptor as
1120 * completed to update the channel's cookie marker
1122 dma_cookie_complete(&d
->vd
.tx
);
1126 spin_unlock_irqrestore(&uc
->vc
.lock
, flags
);
1131 static irqreturn_t
udma_udma_irq_handler(int irq
, void *data
)
1133 struct udma_chan
*uc
= data
;
1134 struct udma_desc
*d
;
1135 unsigned long flags
;
1137 spin_lock_irqsave(&uc
->vc
.lock
, flags
);
1140 d
->tr_idx
= (d
->tr_idx
+ 1) % d
->sglen
;
1143 vchan_cyclic_callback(&d
->vd
);
1145 /* TODO: figure out the real amount of data */
1146 uc
->bcnt
+= d
->residue
;
1148 vchan_cookie_complete(&d
->vd
);
1152 spin_unlock_irqrestore(&uc
->vc
.lock
, flags
);
1158 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1160 * @from: Start the search from this flow id number
1161 * @cnt: Number of consecutive flow ids to allocate
1163 * Allocate range of RX flow ids for future use, those flows can be requested
1164 * only using explicit flow id number. if @from is set to -1 it will try to find
1165 * first free range. if @from is positive value it will force allocation only
1166 * of the specified range of flows.
1168 * Returns -ENOMEM if can't find free range.
1169 * -EEXIST if requested range is busy.
1170 * -EINVAL if wrong input values passed.
1171 * Returns flow id on success.
1173 static int __udma_alloc_gp_rflow_range(struct udma_dev
*ud
, int from
, int cnt
)
1175 int start
, tmp_from
;
1176 DECLARE_BITMAP(tmp
, K3_UDMA_MAX_RFLOWS
);
1180 tmp_from
= ud
->rchan_cnt
;
1181 /* default flows can't be allocated and accessible only by id */
1182 if (tmp_from
< ud
->rchan_cnt
)
1185 if (tmp_from
+ cnt
> ud
->rflow_cnt
)
1188 bitmap_or(tmp
, ud
->rflow_gp_map
, ud
->rflow_gp_map_allocated
,
1191 start
= bitmap_find_next_zero_area(tmp
,
1194 if (start
>= ud
->rflow_cnt
)
1197 if (from
>= 0 && start
!= from
)
1200 bitmap_set(ud
->rflow_gp_map_allocated
, start
, cnt
);
1204 static int __udma_free_gp_rflow_range(struct udma_dev
*ud
, int from
, int cnt
)
1206 if (from
< ud
->rchan_cnt
)
1208 if (from
+ cnt
> ud
->rflow_cnt
)
1211 bitmap_clear(ud
->rflow_gp_map_allocated
, from
, cnt
);
1215 static struct udma_rflow
*__udma_get_rflow(struct udma_dev
*ud
, int id
)
1218 * Attempt to request rflow by ID can be made for any rflow
1219 * if not in use with assumption that caller knows what's doing.
1220 * TI-SCI FW will perform additional permission check ant way, it's
1224 if (id
< 0 || id
>= ud
->rflow_cnt
)
1225 return ERR_PTR(-ENOENT
);
1227 if (test_bit(id
, ud
->rflow_in_use
))
1228 return ERR_PTR(-ENOENT
);
1230 /* GP rflow has to be allocated first */
1231 if (!test_bit(id
, ud
->rflow_gp_map
) &&
1232 !test_bit(id
, ud
->rflow_gp_map_allocated
))
1233 return ERR_PTR(-EINVAL
);
1235 dev_dbg(ud
->dev
, "get rflow%d\n", id
);
1236 set_bit(id
, ud
->rflow_in_use
);
1237 return &ud
->rflows
[id
];
1240 static void __udma_put_rflow(struct udma_dev
*ud
, struct udma_rflow
*rflow
)
1242 if (!test_bit(rflow
->id
, ud
->rflow_in_use
)) {
1243 dev_err(ud
->dev
, "attempt to put unused rflow%d\n", rflow
->id
);
1247 dev_dbg(ud
->dev
, "put rflow%d\n", rflow
->id
);
1248 clear_bit(rflow
->id
, ud
->rflow_in_use
);
1251 #define UDMA_RESERVE_RESOURCE(res) \
1252 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1253 enum udma_tp_level tpl, \
1257 if (test_bit(id, ud->res##_map)) { \
1258 dev_err(ud->dev, "res##%d is in use\n", id); \
1259 return ERR_PTR(-ENOENT); \
1264 if (tpl >= ud->match_data->tpl_levels) \
1265 tpl = ud->match_data->tpl_levels - 1; \
1267 start = ud->match_data->level_start_idx[tpl]; \
1269 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1271 if (id == ud->res##_cnt) { \
1272 return ERR_PTR(-ENOENT); \
1276 set_bit(id, ud->res##_map); \
1277 return &ud->res##s[id]; \
1280 UDMA_RESERVE_RESOURCE(tchan
);
1281 UDMA_RESERVE_RESOURCE(rchan
);
1283 static int udma_get_tchan(struct udma_chan
*uc
)
1285 struct udma_dev
*ud
= uc
->ud
;
1288 dev_dbg(ud
->dev
, "chan%d: already have tchan%d allocated\n",
1289 uc
->id
, uc
->tchan
->id
);
1293 uc
->tchan
= __udma_reserve_tchan(ud
, uc
->config
.channel_tpl
, -1);
1294 if (IS_ERR(uc
->tchan
))
1295 return PTR_ERR(uc
->tchan
);
1300 static int udma_get_rchan(struct udma_chan
*uc
)
1302 struct udma_dev
*ud
= uc
->ud
;
1305 dev_dbg(ud
->dev
, "chan%d: already have rchan%d allocated\n",
1306 uc
->id
, uc
->rchan
->id
);
1310 uc
->rchan
= __udma_reserve_rchan(ud
, uc
->config
.channel_tpl
, -1);
1311 if (IS_ERR(uc
->rchan
))
1312 return PTR_ERR(uc
->rchan
);
1317 static int udma_get_chan_pair(struct udma_chan
*uc
)
1319 struct udma_dev
*ud
= uc
->ud
;
1320 const struct udma_match_data
*match_data
= ud
->match_data
;
1323 if ((uc
->tchan
&& uc
->rchan
) && uc
->tchan
->id
== uc
->rchan
->id
) {
1324 dev_info(ud
->dev
, "chan%d: already have %d pair allocated\n",
1325 uc
->id
, uc
->tchan
->id
);
1330 dev_err(ud
->dev
, "chan%d: already have tchan%d allocated\n",
1331 uc
->id
, uc
->tchan
->id
);
1333 } else if (uc
->rchan
) {
1334 dev_err(ud
->dev
, "chan%d: already have rchan%d allocated\n",
1335 uc
->id
, uc
->rchan
->id
);
1339 /* Can be optimized, but let's have it like this for now */
1340 end
= min(ud
->tchan_cnt
, ud
->rchan_cnt
);
1341 /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1342 chan_id
= match_data
->level_start_idx
[match_data
->tpl_levels
- 1];
1343 for (; chan_id
< end
; chan_id
++) {
1344 if (!test_bit(chan_id
, ud
->tchan_map
) &&
1345 !test_bit(chan_id
, ud
->rchan_map
))
1352 set_bit(chan_id
, ud
->tchan_map
);
1353 set_bit(chan_id
, ud
->rchan_map
);
1354 uc
->tchan
= &ud
->tchans
[chan_id
];
1355 uc
->rchan
= &ud
->rchans
[chan_id
];
1360 static int udma_get_rflow(struct udma_chan
*uc
, int flow_id
)
1362 struct udma_dev
*ud
= uc
->ud
;
1365 dev_err(ud
->dev
, "chan%d: does not have rchan??\n", uc
->id
);
1370 dev_dbg(ud
->dev
, "chan%d: already have rflow%d allocated\n",
1371 uc
->id
, uc
->rflow
->id
);
1375 uc
->rflow
= __udma_get_rflow(ud
, flow_id
);
1376 if (IS_ERR(uc
->rflow
))
1377 return PTR_ERR(uc
->rflow
);
1382 static void udma_put_rchan(struct udma_chan
*uc
)
1384 struct udma_dev
*ud
= uc
->ud
;
1387 dev_dbg(ud
->dev
, "chan%d: put rchan%d\n", uc
->id
,
1389 clear_bit(uc
->rchan
->id
, ud
->rchan_map
);
1394 static void udma_put_tchan(struct udma_chan
*uc
)
1396 struct udma_dev
*ud
= uc
->ud
;
1399 dev_dbg(ud
->dev
, "chan%d: put tchan%d\n", uc
->id
,
1401 clear_bit(uc
->tchan
->id
, ud
->tchan_map
);
1406 static void udma_put_rflow(struct udma_chan
*uc
)
1408 struct udma_dev
*ud
= uc
->ud
;
1411 dev_dbg(ud
->dev
, "chan%d: put rflow%d\n", uc
->id
,
1413 __udma_put_rflow(ud
, uc
->rflow
);
1418 static void udma_free_tx_resources(struct udma_chan
*uc
)
1423 k3_ringacc_ring_free(uc
->tchan
->t_ring
);
1424 k3_ringacc_ring_free(uc
->tchan
->tc_ring
);
1425 uc
->tchan
->t_ring
= NULL
;
1426 uc
->tchan
->tc_ring
= NULL
;
1431 static int udma_alloc_tx_resources(struct udma_chan
*uc
)
1433 struct k3_ring_cfg ring_cfg
;
1434 struct udma_dev
*ud
= uc
->ud
;
1437 ret
= udma_get_tchan(uc
);
1441 uc
->tchan
->t_ring
= k3_ringacc_request_ring(ud
->ringacc
,
1443 if (!uc
->tchan
->t_ring
) {
1448 uc
->tchan
->tc_ring
= k3_ringacc_request_ring(ud
->ringacc
, -1, 0);
1449 if (!uc
->tchan
->tc_ring
) {
1454 memset(&ring_cfg
, 0, sizeof(ring_cfg
));
1455 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1456 ring_cfg
.elm_size
= K3_RINGACC_RING_ELSIZE_8
;
1457 ring_cfg
.mode
= K3_RINGACC_RING_MODE_MESSAGE
;
1459 ret
= k3_ringacc_ring_cfg(uc
->tchan
->t_ring
, &ring_cfg
);
1460 ret
|= k3_ringacc_ring_cfg(uc
->tchan
->tc_ring
, &ring_cfg
);
1468 k3_ringacc_ring_free(uc
->tchan
->tc_ring
);
1469 uc
->tchan
->tc_ring
= NULL
;
1471 k3_ringacc_ring_free(uc
->tchan
->t_ring
);
1472 uc
->tchan
->t_ring
= NULL
;
1479 static void udma_free_rx_resources(struct udma_chan
*uc
)
1485 struct udma_rflow
*rflow
= uc
->rflow
;
1487 k3_ringacc_ring_free(rflow
->fd_ring
);
1488 k3_ringacc_ring_free(rflow
->r_ring
);
1489 rflow
->fd_ring
= NULL
;
1490 rflow
->r_ring
= NULL
;
1498 static int udma_alloc_rx_resources(struct udma_chan
*uc
)
1500 struct udma_dev
*ud
= uc
->ud
;
1501 struct k3_ring_cfg ring_cfg
;
1502 struct udma_rflow
*rflow
;
1506 ret
= udma_get_rchan(uc
);
1510 /* For MEM_TO_MEM we don't need rflow or rings */
1511 if (uc
->config
.dir
== DMA_MEM_TO_MEM
)
1514 ret
= udma_get_rflow(uc
, uc
->rchan
->id
);
1521 fd_ring_id
= ud
->tchan_cnt
+ ud
->echan_cnt
+ uc
->rchan
->id
;
1522 rflow
->fd_ring
= k3_ringacc_request_ring(ud
->ringacc
, fd_ring_id
, 0);
1523 if (!rflow
->fd_ring
) {
1528 rflow
->r_ring
= k3_ringacc_request_ring(ud
->ringacc
, -1, 0);
1529 if (!rflow
->r_ring
) {
1534 memset(&ring_cfg
, 0, sizeof(ring_cfg
));
1536 if (uc
->config
.pkt_mode
)
1537 ring_cfg
.size
= SG_MAX_SEGMENTS
;
1539 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1541 ring_cfg
.elm_size
= K3_RINGACC_RING_ELSIZE_8
;
1542 ring_cfg
.mode
= K3_RINGACC_RING_MODE_MESSAGE
;
1544 ret
= k3_ringacc_ring_cfg(rflow
->fd_ring
, &ring_cfg
);
1545 ring_cfg
.size
= K3_UDMA_DEFAULT_RING_SIZE
;
1546 ret
|= k3_ringacc_ring_cfg(rflow
->r_ring
, &ring_cfg
);
1554 k3_ringacc_ring_free(rflow
->r_ring
);
1555 rflow
->r_ring
= NULL
;
1557 k3_ringacc_ring_free(rflow
->fd_ring
);
1558 rflow
->fd_ring
= NULL
;
1567 #define TISCI_TCHAN_VALID_PARAMS ( \
1568 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1569 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1570 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1571 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1572 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1573 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1574 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1575 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1577 #define TISCI_RCHAN_VALID_PARAMS ( \
1578 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1579 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1580 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1581 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1582 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1583 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1584 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1585 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1586 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1588 static int udma_tisci_m2m_channel_config(struct udma_chan
*uc
)
1590 struct udma_dev
*ud
= uc
->ud
;
1591 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1592 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1593 struct udma_tchan
*tchan
= uc
->tchan
;
1594 struct udma_rchan
*rchan
= uc
->rchan
;
1597 /* Non synchronized - mem to mem type of transfer */
1598 int tc_ring
= k3_ringacc_get_ring_id(tchan
->tc_ring
);
1599 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
1600 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
1602 req_tx
.valid_params
= TISCI_TCHAN_VALID_PARAMS
;
1603 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
1604 req_tx
.index
= tchan
->id
;
1605 req_tx
.tx_chan_type
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR
;
1606 req_tx
.tx_fetch_size
= sizeof(struct cppi5_desc_hdr_t
) >> 2;
1607 req_tx
.txcq_qnum
= tc_ring
;
1608 req_tx
.tx_atype
= ud
->atype
;
1610 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
1612 dev_err(ud
->dev
, "tchan%d cfg failed %d\n", tchan
->id
, ret
);
1616 req_rx
.valid_params
= TISCI_RCHAN_VALID_PARAMS
;
1617 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
1618 req_rx
.index
= rchan
->id
;
1619 req_rx
.rx_fetch_size
= sizeof(struct cppi5_desc_hdr_t
) >> 2;
1620 req_rx
.rxcq_qnum
= tc_ring
;
1621 req_rx
.rx_chan_type
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR
;
1622 req_rx
.rx_atype
= ud
->atype
;
1624 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
1626 dev_err(ud
->dev
, "rchan%d alloc failed %d\n", rchan
->id
, ret
);
1631 static int udma_tisci_tx_channel_config(struct udma_chan
*uc
)
1633 struct udma_dev
*ud
= uc
->ud
;
1634 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1635 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1636 struct udma_tchan
*tchan
= uc
->tchan
;
1637 int tc_ring
= k3_ringacc_get_ring_id(tchan
->tc_ring
);
1638 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx
= { 0 };
1639 u32 mode
, fetch_size
;
1642 if (uc
->config
.pkt_mode
) {
1643 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR
;
1644 fetch_size
= cppi5_hdesc_calc_size(uc
->config
.needs_epib
,
1645 uc
->config
.psd_size
, 0);
1647 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR
;
1648 fetch_size
= sizeof(struct cppi5_desc_hdr_t
);
1651 req_tx
.valid_params
= TISCI_TCHAN_VALID_PARAMS
;
1652 req_tx
.nav_id
= tisci_rm
->tisci_dev_id
;
1653 req_tx
.index
= tchan
->id
;
1654 req_tx
.tx_chan_type
= mode
;
1655 req_tx
.tx_supr_tdpkt
= uc
->config
.notdpkt
;
1656 req_tx
.tx_fetch_size
= fetch_size
>> 2;
1657 req_tx
.txcq_qnum
= tc_ring
;
1658 req_tx
.tx_atype
= uc
->config
.atype
;
1660 ret
= tisci_ops
->tx_ch_cfg(tisci_rm
->tisci
, &req_tx
);
1662 dev_err(ud
->dev
, "tchan%d cfg failed %d\n", tchan
->id
, ret
);
1667 static int udma_tisci_rx_channel_config(struct udma_chan
*uc
)
1669 struct udma_dev
*ud
= uc
->ud
;
1670 struct udma_tisci_rm
*tisci_rm
= &ud
->tisci_rm
;
1671 const struct ti_sci_rm_udmap_ops
*tisci_ops
= tisci_rm
->tisci_udmap_ops
;
1672 struct udma_rchan
*rchan
= uc
->rchan
;
1673 int fd_ring
= k3_ringacc_get_ring_id(uc
->rflow
->fd_ring
);
1674 int rx_ring
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
1675 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx
= { 0 };
1676 struct ti_sci_msg_rm_udmap_flow_cfg flow_req
= { 0 };
1677 u32 mode
, fetch_size
;
1680 if (uc
->config
.pkt_mode
) {
1681 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR
;
1682 fetch_size
= cppi5_hdesc_calc_size(uc
->config
.needs_epib
,
1683 uc
->config
.psd_size
, 0);
1685 mode
= TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR
;
1686 fetch_size
= sizeof(struct cppi5_desc_hdr_t
);
1689 req_rx
.valid_params
= TISCI_RCHAN_VALID_PARAMS
;
1690 req_rx
.nav_id
= tisci_rm
->tisci_dev_id
;
1691 req_rx
.index
= rchan
->id
;
1692 req_rx
.rx_fetch_size
= fetch_size
>> 2;
1693 req_rx
.rxcq_qnum
= rx_ring
;
1694 req_rx
.rx_chan_type
= mode
;
1695 req_rx
.rx_atype
= uc
->config
.atype
;
1697 ret
= tisci_ops
->rx_ch_cfg(tisci_rm
->tisci
, &req_rx
);
1699 dev_err(ud
->dev
, "rchan%d cfg failed %d\n", rchan
->id
, ret
);
1703 flow_req
.valid_params
=
1704 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID
|
1705 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID
|
1706 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID
|
1707 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID
|
1708 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID
|
1709 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID
|
1710 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID
|
1711 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID
|
1712 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID
|
1713 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID
|
1714 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID
|
1715 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID
|
1716 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID
;
1718 flow_req
.nav_id
= tisci_rm
->tisci_dev_id
;
1719 flow_req
.flow_index
= rchan
->id
;
1721 if (uc
->config
.needs_epib
)
1722 flow_req
.rx_einfo_present
= 1;
1724 flow_req
.rx_einfo_present
= 0;
1725 if (uc
->config
.psd_size
)
1726 flow_req
.rx_psinfo_present
= 1;
1728 flow_req
.rx_psinfo_present
= 0;
1729 flow_req
.rx_error_handling
= 1;
1730 flow_req
.rx_dest_qnum
= rx_ring
;
1731 flow_req
.rx_src_tag_hi_sel
= UDMA_RFLOW_SRCTAG_NONE
;
1732 flow_req
.rx_src_tag_lo_sel
= UDMA_RFLOW_SRCTAG_SRC_TAG
;
1733 flow_req
.rx_dest_tag_hi_sel
= UDMA_RFLOW_DSTTAG_DST_TAG_HI
;
1734 flow_req
.rx_dest_tag_lo_sel
= UDMA_RFLOW_DSTTAG_DST_TAG_LO
;
1735 flow_req
.rx_fdq0_sz0_qnum
= fd_ring
;
1736 flow_req
.rx_fdq1_qnum
= fd_ring
;
1737 flow_req
.rx_fdq2_qnum
= fd_ring
;
1738 flow_req
.rx_fdq3_qnum
= fd_ring
;
1740 ret
= tisci_ops
->rx_flow_cfg(tisci_rm
->tisci
, &flow_req
);
1743 dev_err(ud
->dev
, "flow%d config failed: %d\n", rchan
->id
, ret
);
1748 static int udma_alloc_chan_resources(struct dma_chan
*chan
)
1750 struct udma_chan
*uc
= to_udma_chan(chan
);
1751 struct udma_dev
*ud
= to_udma_dev(chan
->device
);
1752 const struct udma_match_data
*match_data
= ud
->match_data
;
1753 struct k3_ring
*irq_ring
;
1757 if (uc
->config
.pkt_mode
|| uc
->config
.dir
== DMA_MEM_TO_MEM
) {
1758 uc
->use_dma_pool
= true;
1759 /* in case of MEM_TO_MEM we have maximum of two TRs */
1760 if (uc
->config
.dir
== DMA_MEM_TO_MEM
) {
1761 uc
->config
.hdesc_size
= cppi5_trdesc_calc_size(
1762 sizeof(struct cppi5_tr_type15_t
), 2);
1763 uc
->config
.pkt_mode
= false;
1767 if (uc
->use_dma_pool
) {
1768 uc
->hdesc_pool
= dma_pool_create(uc
->name
, ud
->ddev
.dev
,
1769 uc
->config
.hdesc_size
,
1772 if (!uc
->hdesc_pool
) {
1773 dev_err(ud
->ddev
.dev
,
1774 "Descriptor pool allocation failed\n");
1775 uc
->use_dma_pool
= false;
1781 * Make sure that the completion is in a known state:
1782 * No teardown, the channel is idle
1784 reinit_completion(&uc
->teardown_completed
);
1785 complete_all(&uc
->teardown_completed
);
1786 uc
->state
= UDMA_CHAN_IS_IDLE
;
1788 switch (uc
->config
.dir
) {
1789 case DMA_MEM_TO_MEM
:
1790 /* Non synchronized - mem to mem type of transfer */
1791 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-MEM\n", __func__
,
1794 ret
= udma_get_chan_pair(uc
);
1798 ret
= udma_alloc_tx_resources(uc
);
1802 ret
= udma_alloc_rx_resources(uc
);
1804 udma_free_tx_resources(uc
);
1808 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
1809 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
1810 K3_PSIL_DST_THREAD_ID_OFFSET
;
1812 irq_ring
= uc
->tchan
->tc_ring
;
1813 irq_udma_idx
= uc
->tchan
->id
;
1815 ret
= udma_tisci_m2m_channel_config(uc
);
1817 case DMA_MEM_TO_DEV
:
1818 /* Slave transfer synchronized - mem to dev (TX) trasnfer */
1819 dev_dbg(uc
->ud
->dev
, "%s: chan%d as MEM-to-DEV\n", __func__
,
1822 ret
= udma_alloc_tx_resources(uc
);
1824 uc
->config
.remote_thread_id
= -1;
1828 uc
->config
.src_thread
= ud
->psil_base
+ uc
->tchan
->id
;
1829 uc
->config
.dst_thread
= uc
->config
.remote_thread_id
;
1830 uc
->config
.dst_thread
|= K3_PSIL_DST_THREAD_ID_OFFSET
;
1832 irq_ring
= uc
->tchan
->tc_ring
;
1833 irq_udma_idx
= uc
->tchan
->id
;
1835 ret
= udma_tisci_tx_channel_config(uc
);
1837 case DMA_DEV_TO_MEM
:
1838 /* Slave transfer synchronized - dev to mem (RX) trasnfer */
1839 dev_dbg(uc
->ud
->dev
, "%s: chan%d as DEV-to-MEM\n", __func__
,
1842 ret
= udma_alloc_rx_resources(uc
);
1844 uc
->config
.remote_thread_id
= -1;
1848 uc
->config
.src_thread
= uc
->config
.remote_thread_id
;
1849 uc
->config
.dst_thread
= (ud
->psil_base
+ uc
->rchan
->id
) |
1850 K3_PSIL_DST_THREAD_ID_OFFSET
;
1852 irq_ring
= uc
->rflow
->r_ring
;
1853 irq_udma_idx
= match_data
->rchan_oes_offset
+ uc
->rchan
->id
;
1855 ret
= udma_tisci_rx_channel_config(uc
);
1858 /* Can not happen */
1859 dev_err(uc
->ud
->dev
, "%s: chan%d invalid direction (%u)\n",
1860 __func__
, uc
->id
, uc
->config
.dir
);
1864 /* check if the channel configuration was successful */
1868 if (udma_is_chan_running(uc
)) {
1869 dev_warn(ud
->dev
, "chan%d: is running!\n", uc
->id
);
1871 if (udma_is_chan_running(uc
)) {
1872 dev_err(ud
->dev
, "chan%d: won't stop!\n", uc
->id
);
1878 ret
= navss_psil_pair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
1880 dev_err(ud
->dev
, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1881 uc
->config
.src_thread
, uc
->config
.dst_thread
);
1885 uc
->psil_paired
= true;
1887 uc
->irq_num_ring
= k3_ringacc_get_ring_irq_num(irq_ring
);
1888 if (uc
->irq_num_ring
<= 0) {
1889 dev_err(ud
->dev
, "Failed to get ring irq (index: %u)\n",
1890 k3_ringacc_get_ring_id(irq_ring
));
1895 ret
= request_irq(uc
->irq_num_ring
, udma_ring_irq_handler
,
1896 IRQF_TRIGGER_HIGH
, uc
->name
, uc
);
1898 dev_err(ud
->dev
, "chan%d: ring irq request failed\n", uc
->id
);
1902 /* Event from UDMA (TR events) only needed for slave TR mode channels */
1903 if (is_slave_direction(uc
->config
.dir
) && !uc
->config
.pkt_mode
) {
1904 uc
->irq_num_udma
= ti_sci_inta_msi_get_virq(ud
->dev
,
1906 if (uc
->irq_num_udma
<= 0) {
1907 dev_err(ud
->dev
, "Failed to get udma irq (index: %u)\n",
1909 free_irq(uc
->irq_num_ring
, uc
);
1914 ret
= request_irq(uc
->irq_num_udma
, udma_udma_irq_handler
, 0,
1917 dev_err(ud
->dev
, "chan%d: UDMA irq request failed\n",
1919 free_irq(uc
->irq_num_ring
, uc
);
1923 uc
->irq_num_udma
= 0;
1926 udma_reset_rings(uc
);
1928 INIT_DELAYED_WORK_ONSTACK(&uc
->tx_drain
.work
,
1929 udma_check_tx_completion
);
1933 uc
->irq_num_ring
= 0;
1934 uc
->irq_num_udma
= 0;
1936 navss_psil_unpair(ud
, uc
->config
.src_thread
, uc
->config
.dst_thread
);
1937 uc
->psil_paired
= false;
1939 udma_free_tx_resources(uc
);
1940 udma_free_rx_resources(uc
);
1942 udma_reset_uchan(uc
);
1944 if (uc
->use_dma_pool
) {
1945 dma_pool_destroy(uc
->hdesc_pool
);
1946 uc
->use_dma_pool
= false;
1952 static int udma_slave_config(struct dma_chan
*chan
,
1953 struct dma_slave_config
*cfg
)
1955 struct udma_chan
*uc
= to_udma_chan(chan
);
1957 memcpy(&uc
->cfg
, cfg
, sizeof(uc
->cfg
));
1962 static struct udma_desc
*udma_alloc_tr_desc(struct udma_chan
*uc
,
1963 size_t tr_size
, int tr_count
,
1964 enum dma_transfer_direction dir
)
1966 struct udma_hwdesc
*hwdesc
;
1967 struct cppi5_desc_hdr_t
*tr_desc
;
1968 struct udma_desc
*d
;
1969 u32 reload_count
= 0;
1979 dev_err(uc
->ud
->dev
, "Unsupported TR size of %zu\n", tr_size
);
1983 /* We have only one descriptor containing multiple TRs */
1984 d
= kzalloc(sizeof(*d
) + sizeof(d
->hwdesc
[0]), GFP_NOWAIT
);
1988 d
->sglen
= tr_count
;
1990 d
->hwdesc_count
= 1;
1991 hwdesc
= &d
->hwdesc
[0];
1993 /* Allocate memory for DMA ring descriptor */
1994 if (uc
->use_dma_pool
) {
1995 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
1996 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
1998 &hwdesc
->cppi5_desc_paddr
);
2000 hwdesc
->cppi5_desc_size
= cppi5_trdesc_calc_size(tr_size
,
2002 hwdesc
->cppi5_desc_size
= ALIGN(hwdesc
->cppi5_desc_size
,
2003 uc
->ud
->desc_align
);
2004 hwdesc
->cppi5_desc_vaddr
= dma_alloc_coherent(uc
->ud
->dev
,
2005 hwdesc
->cppi5_desc_size
,
2006 &hwdesc
->cppi5_desc_paddr
,
2010 if (!hwdesc
->cppi5_desc_vaddr
) {
2015 /* Start of the TR req records */
2016 hwdesc
->tr_req_base
= hwdesc
->cppi5_desc_vaddr
+ tr_size
;
2017 /* Start address of the TR response array */
2018 hwdesc
->tr_resp_base
= hwdesc
->tr_req_base
+ tr_size
* tr_count
;
2020 tr_desc
= hwdesc
->cppi5_desc_vaddr
;
2023 reload_count
= CPPI5_INFO0_TRDESC_RLDCNT_INFINITE
;
2025 if (dir
== DMA_DEV_TO_MEM
)
2026 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
2028 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
2030 cppi5_trdesc_init(tr_desc
, tr_count
, tr_size
, 0, reload_count
);
2031 cppi5_desc_set_pktids(tr_desc
, uc
->id
,
2032 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
2033 cppi5_desc_set_retpolicy(tr_desc
, 0, ring_id
);
2039 * udma_get_tr_counters - calculate TR counters for a given length
2040 * @len: Length of the trasnfer
2041 * @align_to: Preferred alignment
2042 * @tr0_cnt0: First TR icnt0
2043 * @tr0_cnt1: First TR icnt1
2044 * @tr1_cnt0: Second (if used) TR icnt0
2046 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2047 * For len >= SZ_64K two TRs are used in a simple way:
2048 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2049 * Second TR: the remaining length (tr1_cnt0)
2051 * Returns the number of TRs the length needs (1 or 2)
2052 * -EINVAL if the length can not be supported
2054 static int udma_get_tr_counters(size_t len
, unsigned long align_to
,
2055 u16
*tr0_cnt0
, u16
*tr0_cnt1
, u16
*tr1_cnt0
)
2068 *tr0_cnt0
= SZ_64K
- BIT(align_to
);
2069 if (len
/ *tr0_cnt0
>= SZ_64K
) {
2077 *tr0_cnt1
= len
/ *tr0_cnt0
;
2078 *tr1_cnt0
= len
% *tr0_cnt0
;
2083 static struct udma_desc
*
2084 udma_prep_slave_sg_tr(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2085 unsigned int sglen
, enum dma_transfer_direction dir
,
2086 unsigned long tx_flags
, void *context
)
2088 struct scatterlist
*sgent
;
2089 struct udma_desc
*d
;
2090 struct cppi5_tr_type1_t
*tr_req
= NULL
;
2091 u16 tr0_cnt0
, tr0_cnt1
, tr1_cnt0
;
2097 if (!is_slave_direction(dir
)) {
2098 dev_err(uc
->ud
->dev
, "Only slave cyclic is supported\n");
2102 /* estimate the number of TRs we will need */
2103 for_each_sg(sgl
, sgent
, sglen
, i
) {
2104 if (sg_dma_len(sgent
) < SZ_64K
)
2110 /* Now allocate and setup the descriptor. */
2111 tr_size
= sizeof(struct cppi5_tr_type1_t
);
2112 d
= udma_alloc_tr_desc(uc
, tr_size
, num_tr
, dir
);
2118 tr_req
= d
->hwdesc
[0].tr_req_base
;
2119 for_each_sg(sgl
, sgent
, sglen
, i
) {
2120 dma_addr_t sg_addr
= sg_dma_address(sgent
);
2122 num_tr
= udma_get_tr_counters(sg_dma_len(sgent
), __ffs(sg_addr
),
2123 &tr0_cnt0
, &tr0_cnt1
, &tr1_cnt0
);
2125 dev_err(uc
->ud
->dev
, "size %u is not supported\n",
2127 udma_free_hwdesc(uc
, d
);
2132 cppi5_tr_init(&tr_req
[i
].flags
, CPPI5_TR_TYPE1
, false, false,
2133 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2134 cppi5_tr_csf_set(&tr_req
[i
].flags
, CPPI5_TR_CSF_SUPR_EVT
);
2136 tr_req
[tr_idx
].addr
= sg_addr
;
2137 tr_req
[tr_idx
].icnt0
= tr0_cnt0
;
2138 tr_req
[tr_idx
].icnt1
= tr0_cnt1
;
2139 tr_req
[tr_idx
].dim1
= tr0_cnt0
;
2143 cppi5_tr_init(&tr_req
[tr_idx
].flags
, CPPI5_TR_TYPE1
,
2145 CPPI5_TR_EVENT_SIZE_COMPLETION
, 0);
2146 cppi5_tr_csf_set(&tr_req
[tr_idx
].flags
,
2147 CPPI5_TR_CSF_SUPR_EVT
);
2149 tr_req
[tr_idx
].addr
= sg_addr
+ tr0_cnt1
* tr0_cnt0
;
2150 tr_req
[tr_idx
].icnt0
= tr1_cnt0
;
2151 tr_req
[tr_idx
].icnt1
= 1;
2152 tr_req
[tr_idx
].dim1
= tr1_cnt0
;
2156 d
->residue
+= sg_dma_len(sgent
);
2159 cppi5_tr_csf_set(&tr_req
[tr_idx
- 1].flags
,
2160 CPPI5_TR_CSF_SUPR_EVT
| CPPI5_TR_CSF_EOP
);
2165 static int udma_configure_statictr(struct udma_chan
*uc
, struct udma_desc
*d
,
2166 enum dma_slave_buswidth dev_width
,
2169 if (uc
->config
.ep_type
!= PSIL_EP_PDMA_XY
)
2172 /* Bus width translates to the element size (ES) */
2173 switch (dev_width
) {
2174 case DMA_SLAVE_BUSWIDTH_1_BYTE
:
2175 d
->static_tr
.elsize
= 0;
2177 case DMA_SLAVE_BUSWIDTH_2_BYTES
:
2178 d
->static_tr
.elsize
= 1;
2180 case DMA_SLAVE_BUSWIDTH_3_BYTES
:
2181 d
->static_tr
.elsize
= 2;
2183 case DMA_SLAVE_BUSWIDTH_4_BYTES
:
2184 d
->static_tr
.elsize
= 3;
2186 case DMA_SLAVE_BUSWIDTH_8_BYTES
:
2187 d
->static_tr
.elsize
= 4;
2189 default: /* not reached */
2193 d
->static_tr
.elcnt
= elcnt
;
2196 * PDMA must to close the packet when the channel is in packet mode.
2197 * For TR mode when the channel is not cyclic we also need PDMA to close
2198 * the packet otherwise the transfer will stall because PDMA holds on
2199 * the data it has received from the peripheral.
2201 if (uc
->config
.pkt_mode
|| !uc
->cyclic
) {
2202 unsigned int div
= dev_width
* elcnt
;
2205 d
->static_tr
.bstcnt
= d
->residue
/ d
->sglen
/ div
;
2207 d
->static_tr
.bstcnt
= d
->residue
/ div
;
2209 if (uc
->config
.dir
== DMA_DEV_TO_MEM
&&
2210 d
->static_tr
.bstcnt
> uc
->ud
->match_data
->statictr_z_mask
)
2213 d
->static_tr
.bstcnt
= 0;
2219 static struct udma_desc
*
2220 udma_prep_slave_sg_pkt(struct udma_chan
*uc
, struct scatterlist
*sgl
,
2221 unsigned int sglen
, enum dma_transfer_direction dir
,
2222 unsigned long tx_flags
, void *context
)
2224 struct scatterlist
*sgent
;
2225 struct cppi5_host_desc_t
*h_desc
= NULL
;
2226 struct udma_desc
*d
;
2230 d
= kzalloc(sizeof(*d
) + sglen
* sizeof(d
->hwdesc
[0]), GFP_NOWAIT
);
2235 d
->hwdesc_count
= sglen
;
2237 if (dir
== DMA_DEV_TO_MEM
)
2238 ring_id
= k3_ringacc_get_ring_id(uc
->rflow
->r_ring
);
2240 ring_id
= k3_ringacc_get_ring_id(uc
->tchan
->tc_ring
);
2242 for_each_sg(sgl
, sgent
, sglen
, i
) {
2243 struct udma_hwdesc
*hwdesc
= &d
->hwdesc
[i
];
2244 dma_addr_t sg_addr
= sg_dma_address(sgent
);
2245 struct cppi5_host_desc_t
*desc
;
2246 size_t sg_len
= sg_dma_len(sgent
);
2248 hwdesc
->cppi5_desc_vaddr
= dma_pool_zalloc(uc
->hdesc_pool
,
2250 &hwdesc
->cppi5_desc_paddr
);
2251 if (!hwdesc
->cppi5_desc_vaddr
) {
2252 dev_err(uc
->ud
->dev
,
2253 "descriptor%d allocation failed\n", i
);
2255 udma_free_hwdesc(uc
, d
);
2260 d
->residue
+= sg_len
;
2261 hwdesc
->cppi5_desc_size
= uc
->config
.hdesc_size
;
2262 desc
= hwdesc
->cppi5_desc_vaddr
;
2265 cppi5_hdesc_init(desc
, 0, 0);
2266 /* Flow and Packed ID */
2267 cppi5_desc_set_pktids(&desc
->hdr
, uc
->id
,
2268 CPPI5_INFO1_DESC_FLOWID_DEFAULT
);
2269 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, ring_id
);
2271 cppi5_hdesc_reset_hbdesc(desc
);
2272 cppi5_desc_set_retpolicy(&desc
->hdr
, 0, 0xffff);
2275 /* attach the sg buffer to the descriptor */
2276 cppi5_hdesc_attach_buf(desc
, sg_addr
, sg_len
, sg_addr
, sg_len
);
2278 /* Attach link as host buffer descriptor */
2280 cppi5_hdesc_link_hbdesc(h_desc
,
2281 hwdesc
->cppi5_desc_paddr
);
2283 if (dir
== DMA_MEM_TO_DEV
)
2287 if (d
->residue
>= SZ_4M
) {
2288 dev_err(uc
->ud
->dev
,
2289 "%s: Transfer size %u is over the supported 4M range\n",
2290 __func__
, d
->residue
);
2291 udma_free_hwdesc(uc
, d
);
2296 h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
2297 cppi5_hdesc_set_pktlen(h_desc
, d
->residue
);
2302 static int udma_attach_metadata(struct dma_async_tx_descriptor
*desc
,
2303 void *data
, size_t len
)
2305 struct udma_desc
*d
= to_udma_desc(desc
);
2306 struct udma_chan
*uc
= to_udma_chan(desc
->chan
);
2307 struct cppi5_host_desc_t
*h_desc
;
2311 if (!uc
->config
.pkt_mode
|| !uc
->config
.metadata_size
)
2314 if (!data
|| len
> uc
->config
.metadata_size
)
2317 if (uc
->config
.needs_epib
&& len
< CPPI5_INFO0_HDESC_EPIB_SIZE
)
2320 h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
2321 if (d
->dir
== DMA_MEM_TO_DEV
)
2322 memcpy(h_desc
->epib
, data
, len
);
2324 if (uc
->config
.needs_epib
)
2325 psd_size
-= CPPI5_INFO0_HDESC_EPIB_SIZE
;
2328 d
->metadata_size
= len
;
2329 if (uc
->config
.needs_epib
)
2330 flags
|= CPPI5_INFO0_HDESC_EPIB_PRESENT
;
2332 cppi5_hdesc_update_flags(h_desc
, flags
);
2333 cppi5_hdesc_update_psdata_size(h_desc
, psd_size
);
2338 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor
*desc
,
2339 size_t *payload_len
, size_t *max_len
)
2341 struct udma_desc
*d
= to_udma_desc(desc
);
2342 struct udma_chan
*uc
= to_udma_chan(desc
->chan
);
2343 struct cppi5_host_desc_t
*h_desc
;
2345 if (!uc
->config
.pkt_mode
|| !uc
->config
.metadata_size
)
2346 return ERR_PTR(-ENOTSUPP
);
2348 h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
2350 *max_len
= uc
->config
.metadata_size
;
2352 *payload_len
= cppi5_hdesc_epib_present(&h_desc
->hdr
) ?
2353 CPPI5_INFO0_HDESC_EPIB_SIZE
: 0;
2354 *payload_len
+= cppi5_hdesc_get_psdata_size(h_desc
);
2356 return h_desc
->epib
;
2359 static int udma_set_metadata_len(struct dma_async_tx_descriptor
*desc
,
2362 struct udma_desc
*d
= to_udma_desc(desc
);
2363 struct udma_chan
*uc
= to_udma_chan(desc
->chan
);
2364 struct cppi5_host_desc_t
*h_desc
;
2365 u32 psd_size
= payload_len
;
2368 if (!uc
->config
.pkt_mode
|| !uc
->config
.metadata_size
)
2371 if (payload_len
> uc
->config
.metadata_size
)
2374 if (uc
->config
.needs_epib
&& payload_len
< CPPI5_INFO0_HDESC_EPIB_SIZE
)
2377 h_desc
= d
->hwdesc
[0].cppi5_desc_vaddr
;
2379 if (uc
->config
.needs_epib
) {
2380 psd_size
-= CPPI5_INFO0_HDESC_EPIB_SIZE
;
2381 flags
|= CPPI5_INFO0_HDESC_EPIB_PRESENT
;
2384 cppi5_hdesc_update_flags(h_desc
, flags
);
2385 cppi5_hdesc_update_psdata_size(h_desc
, psd_size
);
2390 static struct dma_descriptor_metadata_ops metadata_ops
= {
2391 .attach
= udma_attach_metadata
,
2392 .get_ptr
= udma_get_metadata_ptr
,
2393 .set_len
= udma_set_metadata_len
,
2396 static struct dma_async_tx_descriptor
*
2397 udma_prep_slave_sg(struct dma_chan
*chan
, struct scatterlist
*sgl
,
2398 unsigned int sglen
, enum dma_transfer_direction dir
,
2399 unsigned long tx_flags
, void *context
)
2401 struct udma_chan
*uc
= to_udma_chan(chan
);
2402 enum dma_slave_buswidth dev_width
;
2403 struct udma_desc
*d
;
2406 if (dir
!= uc
->config
.dir
) {
2407 dev_err(chan
->device
->dev
,
2408 "%s: chan%d is for %s, not supporting %s\n",
2410 dmaengine_get_direction_text(uc
->config
.dir
),
2411 dmaengine_get_direction_text(dir
));
2415 if (dir
== DMA_DEV_TO_MEM
) {
2416 dev_width
= uc
->cfg
.src_addr_width
;
2417 burst
= uc
->cfg
.src_maxburst
;
2418 } else if (dir
== DMA_MEM_TO_DEV
) {
2419 dev_width
= uc
->cfg
.dst_addr_width
;
2420 burst
= uc
->cfg
.dst_maxburst
;
2422 dev_err(chan
->device
->dev
, "%s: bad direction?\n", __func__
);
2429 if (uc
->config
.pkt_mode
)
2430 d
= udma_prep_slave_sg_pkt(uc
, sgl
, sglen
, dir
, tx_flags
,
2433 d
= udma_prep_slave_sg_tr(uc
, sgl
, sglen
, dir
, tx_flags
,
2443 /* static TR for remote PDMA */
2444 if (udma_configure_statictr(uc
, d
, dev_width
, burst
)) {
2445 dev_err(uc
->ud
->dev
,
2446 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2447 __func__
, d
->static_tr
.bstcnt
);
2449 udma_free_hwdesc(uc
, d
);
2454 if (uc
->config
.metadata_size
)
2455 d
->vd
.tx
.metadata_ops
= &metadata_ops
;
2457 return vchan_tx_prep(&uc
->vc
, &d
->vd
, tx_flags
);
static struct udma_desc *
udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
			size_t buf_len, size_t period_len,
			enum dma_transfer_direction dir, unsigned long flags)
{
	struct udma_desc *d;
	size_t tr_size, period_addr;
	struct cppi5_tr_type1_t *tr_req;
	unsigned int periods = buf_len / period_len;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned int i;
	int num_tr;

	if (!is_slave_direction(dir)) {
		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
		return NULL;
	}

	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			period_len);
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
	if (!d)
		return NULL;

	tr_req = d->hwdesc[0].tr_req_base;
	period_addr = buf_addr;
	for (i = 0; i < periods; i++) {
		int tr_idx = i * num_tr;

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

		tr_req[tr_idx].addr = period_addr;
		tr_req[tr_idx].icnt0 = tr0_cnt0;
		tr_req[tr_idx].icnt1 = tr0_cnt1;
		tr_req[tr_idx].dim1 = tr0_cnt0;

		if (num_tr == 2) {
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
			tr_idx++;

			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
			tr_req[tr_idx].icnt0 = tr1_cnt0;
			tr_req[tr_idx].icnt1 = 1;
			tr_req[tr_idx].dim1 = tr1_cnt0;
		}

		if (!(flags & DMA_PREP_INTERRUPT))
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);

		period_addr += period_len;
	}

	return d;
}

static struct udma_desc *
udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction dir, unsigned long flags)
{
	struct udma_desc *d;
	u32 ring_id;
	int i;
	int periods = buf_len / period_len;

	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
		return NULL;

	if (period_len >= SZ_4M)
		return NULL;

	d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->hwdesc_count = periods;

	/* TODO: re-check this... */
	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	for (i = 0; i < periods; i++) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t period_addr = buf_addr + (period_len * i);
		struct cppi5_host_desc_t *h_desc;

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		h_desc = hwdesc->cppi5_desc_vaddr;

		cppi5_hdesc_init(h_desc, 0, 0);
		cppi5_hdesc_set_pktlen(h_desc, period_len);

		/* Flow and Packet ID */
		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);

		/* attach each period to a new descriptor */
		cppi5_hdesc_attach_buf(h_desc,
				       period_addr, period_len,
				       period_addr, period_len);
	}

	return d;
}

static struct dma_async_tx_descriptor *
udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		     size_t period_len, enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	if (dir != uc->config.dir) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	uc->cyclic = true;

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	if (uc->config.pkt_mode)
		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
					     dir, flags);
	else
		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
					    dir, flags);

	if (!d)
		return NULL;

	d->sglen = buf_len / period_len;

	d->dir = dir;
	d->residue = buf_len;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *
udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		     size_t len, unsigned long tx_flags)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_desc *d;
	struct cppi5_tr_type15_t *tr_req;
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;

	if (uc->config.dir != DMA_MEM_TO_MEM) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
		return NULL;
	}

	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			len);
		return NULL;
	}

	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
	if (!d)
		return NULL;

	d->dir = DMA_MEM_TO_MEM;
	d->desc_idx = 0;
	d->tr_idx = 0;
	d->residue = len;

	tr_req = d->hwdesc[0].tr_req_base;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;
	tr_req[0].icnt1 = tr0_cnt1;
	tr_req[0].icnt2 = 1;
	tr_req[0].icnt3 = 1;
	tr_req[0].dim1 = tr0_cnt0;

	tr_req[0].daddr = dest;
	tr_req[0].dicnt0 = tr0_cnt0;
	tr_req[0].dicnt1 = tr0_cnt1;
	tr_req[0].dicnt2 = 1;
	tr_req[0].dicnt3 = 1;
	tr_req[0].ddim1 = tr0_cnt0;

	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;
		tr_req[1].icnt1 = 1;
		tr_req[1].icnt2 = 1;
		tr_req[1].icnt3 = 1;

		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
		tr_req[1].dicnt0 = tr1_cnt0;
		tr_req[1].dicnt1 = 1;
		tr_req[1].dicnt2 = 1;
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}

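/*
 * Note on the TR split above, restating what the code does: the copy is
 * described with one or two type15 TRs. TR0 moves tr0_cnt1 rows of
 * tr0_cnt0 bytes (tr0_cnt0 * tr0_cnt1 bytes in total); when the length
 * does not fit that rectangle, TR1 moves the remaining tr1_cnt0 bytes
 * starting at offset tr0_cnt1 * tr0_cnt0. Only the last TR carries the
 * EOP flag, marking the end of the transfer. As an illustrative example
 * (made-up counter values, not computed by udma_get_tr_counters() here):
 * for a 100000-byte copy split as tr0_cnt0 = 32768, tr0_cnt1 = 3,
 * tr1_cnt0 = 1696, TR0 would move 3 * 32768 = 98304 bytes and TR1 the
 * remaining 1696 bytes.
 */
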
static void udma_issue_pending(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	/* If we have something pending and no active descriptor, then */
	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
		/*
		 * start a descriptor if the channel is NOT [marked as
		 * terminating _and_ it is still running (teardown has not
		 * completed yet)].
		 */
		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
		      udma_is_chan_running(uc)))
			udma_start(uc);
	}

	spin_unlock_irqrestore(&uc->vc.lock, flags);
}

static enum dma_status udma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!udma_is_chan_running(uc))
		ret = DMA_COMPLETE;

	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
		ret = DMA_PAUSED;

	if (ret == DMA_COMPLETE || !txstate)
		goto out;

	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
		u32 peer_bcnt = 0;
		u32 bcnt = 0;
		u32 residue = uc->desc->residue;
		u32 delay = 0;

		if (uc->desc->dir == DMA_MEM_TO_DEV) {
			bcnt = udma_tchanrt_read(uc->tchan,
						 UDMA_TCHAN_RT_SBCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_tchanrt_read(uc->tchan,
						UDMA_TCHAN_RT_PEER_BCNT_REG);

				if (bcnt > peer_bcnt)
					delay = bcnt - peer_bcnt;
			}
		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
			bcnt = udma_rchanrt_read(uc->rchan,
						 UDMA_RCHAN_RT_BCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_rchanrt_read(uc->rchan,
						UDMA_RCHAN_RT_PEER_BCNT_REG);

				if (peer_bcnt > bcnt)
					delay = peer_bcnt - bcnt;
			}
		} else {
			bcnt = udma_tchanrt_read(uc->tchan,
						 UDMA_TCHAN_RT_BCNT_REG);
		}

		bcnt -= uc->bcnt;
		if (bcnt && !(bcnt % uc->desc->residue))
			residue = 0;
		else
			residue -= bcnt % uc->desc->residue;

		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
			ret = DMA_COMPLETE;
			delay = 0;
		}

		dma_set_residue(txstate, residue);
		dma_set_in_flight_bytes(txstate, delay);

	} else {
		ret = DMA_COMPLETE;
	}

out:
	spin_unlock_irqrestore(&uc->vc.lock, flags);

	return ret;
}

static int udma_pause(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* pause the channel */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc->rchan,
					 UDMA_RCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc->tchan,
					 UDMA_TCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE,
					 UDMA_PEER_RT_EN_PAUSE);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE,
					 UDMA_CHAN_RT_CTL_PAUSE);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_resume(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);

	/* resume the channel */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_update_bits(uc->rchan,
					 UDMA_RCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_update_bits(uc->tchan,
					 UDMA_TCHAN_RT_PEER_RT_EN_REG,
					 UDMA_PEER_RT_EN_PAUSE, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
					 UDMA_CHAN_RT_CTL_PAUSE, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_terminate_all(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&uc->vc.lock, flags);

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	if (uc->desc) {
		uc->terminated_desc = uc->desc;
		uc->desc = NULL;
		uc->terminated_desc->terminated = true;
		cancel_delayed_work(&uc->tx_drain.work);
	}

	uc->paused = false;

	vchan_get_all_descriptors(&uc->vc, &head);
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	vchan_dma_desc_free_list(&uc->vc, &head);

	return 0;
}

static void udma_synchronize(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long timeout = msecs_to_jiffies(1000);

	vchan_synchronize(&uc->vc);

	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
		timeout = wait_for_completion_timeout(&uc->teardown_completed,
						      timeout);
		if (!timeout) {
			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
				 uc->id);
			udma_dump_chan_stdata(uc);
			udma_reset_chan(uc, true);
		}
	}

	udma_reset_chan(uc, false);
	if (udma_is_chan_running(uc))
		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

	cancel_delayed_work_sync(&uc->tx_drain.work);
	udma_reset_rings(uc);
}

static void udma_desc_pre_callback(struct virt_dma_chan *vc,
				   struct virt_dma_desc *vd,
				   struct dmaengine_result *result)
{
	struct udma_chan *uc = to_udma_chan(&vc->chan);
	struct udma_desc *d;

	if (!vd)
		return;

	d = to_udma_desc(&vd->tx);

	if (d->metadata_size)
		udma_fetch_epib(uc, d);

	/* Provide residue information for the client */
	if (result) {
		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);

		if (cppi5_desc_get_type(desc_vaddr) ==
		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
			result->residue = d->residue -
					  cppi5_hdesc_get_pktlen(desc_vaddr);
			if (result->residue)
				result->result = DMA_TRANS_ABORTED;
			else
				result->result = DMA_TRANS_NOERROR;
		} else {
			result->residue = 0;
			result->result = DMA_TRANS_NOERROR;
		}
	}
}

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void udma_vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	udma_desc_pre_callback(vc, vd, NULL);
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct dmaengine_result result;

		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);

		udma_desc_pre_callback(vc, vd, &result);
		dmaengine_desc_callback_invoke(&cb, &result);

		vchan_vdesc_fini(vd);
	}
}

static void udma_free_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);

	udma_terminate_all(chan);
	if (uc->terminated_desc) {
		udma_reset_chan(uc, false);
		udma_reset_rings(uc);
	}

	cancel_delayed_work_sync(&uc->tx_drain.work);
	destroy_delayed_work_on_stack(&uc->tx_drain.work);

	if (uc->irq_num_ring > 0) {
		free_irq(uc->irq_num_ring, uc);

		uc->irq_num_ring = 0;
	}
	if (uc->irq_num_udma > 0) {
		free_irq(uc->irq_num_udma, uc);

		uc->irq_num_udma = 0;
	}

	/* Release PSI-L pairing */
	if (uc->psil_paired) {
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
		uc->psil_paired = false;
	}

	vchan_free_chan_resources(&uc->vc);
	tasklet_kill(&uc->vc.task);

	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}
}

static struct platform_driver udma_driver;

struct udma_filter_param {
	int remote_thread_id;
	u32 atype;
};

static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct udma_chan_config *ucc;
	struct psil_endpoint_config *ep_config;
	struct udma_filter_param *filter_param;
	struct udma_chan *uc;
	struct udma_dev *ud;

	if (chan->device->dev->driver != &udma_driver.driver)
		return false;

	uc = to_udma_chan(chan);
	ucc = &uc->config;
	ud = uc->ud;
	filter_param = param;

	if (filter_param->atype > 2) {
		dev_err(ud->dev, "Invalid channel atype: %u\n",
			filter_param->atype);
		return false;
	}

	ucc->remote_thread_id = filter_param->remote_thread_id;
	ucc->atype = filter_param->atype;

	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
		ucc->dir = DMA_MEM_TO_DEV;
	else
		ucc->dir = DMA_DEV_TO_MEM;

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		return false;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ucc->ep_type != PSIL_EP_NATIVE) {
		const struct udma_match_data *match_data = ud->match_data;

		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
			ucc->enable_acc32 = ep_config->pdma_acc32;
		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
			ucc->enable_burst = ep_config->pdma_burst;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size =
		(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
		ucc->psd_size;

	if (ucc->pkt_mode)
		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					ucc->metadata_size, ud->desc_align);

	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));

	return true;
}

static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct udma_filter_param filter_param;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
		return NULL;

	filter_param.remote_thread_id = dma_spec->args[0];
	if (dma_spec->args_count == 2)
		filter_param.atype = dma_spec->args[1];
	else
		filter_param.atype = 0;

	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}

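/*
 * Device tree consumer sketch (illustrative only, not taken from a real
 * board file): the one- or two-cell specifier decoded above carries the
 * PSI-L thread ID and, optionally, the atype. The thread IDs below are
 * made up.
 *
 *	uart0: serial@2800000 {
 *		...
 *		dmas = <&main_udmap 0xc300>, <&main_udmap 0x4300>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * A MEM_TO_DEV (TX) thread has the K3_PSIL_DST_THREAD_ID_OFFSET bit set,
 * which is how udma_dma_filter_fn() above picks the channel direction.
 */
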
static struct udma_match_data am654_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x2000,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 8, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = true, /* TEST: DMA domains */
	.statictr_z_mask = GENMASK(11, 0),
	.rchan_oes_offset = 0x2000,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.tpl_levels = 3,
	.level_start_idx = {
		[0] = 16, /* Normal channels */
		[1] = 4, /* High Throughput channels */
		[2] = 0, /* Ultra High Throughput channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
	.statictr_z_mask = GENMASK(23, 0),
	.rchan_oes_offset = 0x400,
	.tpl_levels = 2,
	.level_start_idx = {
		[0] = 2, /* Normal channels */
		[1] = 0, /* High Throughput channels */
	},
};

static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	},
	{
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	},
	{
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{ /* Sentinel */ },
};

static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
	struct resource *res;
	int i;

	for (i = 0; i < MMR_LAST; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   mmr_names[i]);
		ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(ud->mmrs[i]))
			return PTR_ERR(ud->mmrs[i]);
	}

	return 0;
}

static int udma_setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret, i, j;
	u32 cap2, cap3;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
						    "ti,sci-rm-range-rchan",
						    "ti,sci-rm-range-rflow" };

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	ud->rflow_cnt = cap3 & 0x3fff;
	ud->tchan_cnt = cap2 & 0x1ff;
	ud->echan_cnt = (cap2 >> 9) & 0x1ff;
	ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
	ch_count = ud->tchan_cnt + ud->rchan_cnt;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
	    !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++)
		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}
	irq_res.sets = rm_res->sets;

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	irq_res.sets += rm_res->sets;
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	for (i = 0; i < rm_res->sets; i++) {
		irq_res.desc[i].start = rm_res->desc[i].start;
		irq_res.desc[i].num = rm_res->desc[i].num;
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	for (j = 0; j < rm_res->sets; j++, i++) {
		irq_res.desc[i].start = rm_res->desc[j].start +
					ud->match_data->rchan_oes_offset;
		irq_res.desc[i].num = rm_res->desc[j].num;
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_gp_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
		 ch_count,
		 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
		 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
		 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
					       ud->rflow_cnt));

	return ch_count;
}

static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask stuff\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
	if (!ret && ud->atype > 2) {
		dev_err(dev, "Invalid atype: %u\n", ud->atype);
		return -EINVAL;
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);

	ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = udma_setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_init(&uc->vc.task, udma_vchan_complete,
			     (unsigned long)&uc->vc);
		init_completion(&uc->teardown_completed);
	}

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};

builtin_platform_driver(udma_driver);

/* Private interfaces to UDMA */
#include "k3-udma-private.c"