1 // SPDX-License-Identifier: GPL-2.0+
3 * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
10 #include <asm/cache.h>
13 #include <asm/bitops.h>
15 #include <dm/device_compat.h>
16 #include <dm/devres.h>
18 #include <dm/uclass.h>
19 #include <linux/compat.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/soc/ti/k3-navss-ringacc.h>
23 #include <linux/soc/ti/ti_sci_protocol.h>
25 #define set_bit(bit, bitmap) __set_bit(bit, bitmap)
26 #define clear_bit(bit, bitmap) __clear_bit(bit, bitmap)
27 #define dma_free_coherent(dev, size, cpu_addr, dma_handle) \
28 dma_free_coherent(cpu_addr)
29 #define dma_zalloc_coherent(dev, size, dma_handle, flag) \
31 void *ring_mem_virt; \
32 ring_mem_virt = dma_alloc_coherent((size), \
33 (unsigned long *)(dma_handle)); \
35 memset(ring_mem_virt, 0, (size)); \
39 static LIST_HEAD(k3_nav_ringacc_list
);
41 static void ringacc_writel(u32 v
, void __iomem
*reg
)
43 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v
, reg
);
47 static u32
ringacc_readl(void __iomem
*reg
)
52 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v
, reg
);
56 #define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
59 * struct k3_nav_ring_rt_regs - The RA Control/Status Registers region
61 struct k3_nav_ring_rt_regs
{
63 u32 db
; /* RT Ring N Doorbell Register */
65 u32 occ
; /* RT Ring N Occupancy Register */
66 u32 indx
; /* RT Ring N Current Index Register */
67 u32 hwocc
; /* RT Ring N Hardware Occupancy Register */
68 u32 hwindx
; /* RT Ring N Current Index Register */
71 #define KNAV_RINGACC_RT_REGS_STEP 0x1000
74 * struct k3_nav_ring_fifo_regs - The Ring Accelerator Queues Registers region
76 struct k3_nav_ring_fifo_regs
{
77 u32 head_data
[128]; /* Ring Head Entry Data Registers */
78 u32 tail_data
[128]; /* Ring Tail Entry Data Registers */
79 u32 peek_head_data
[128]; /* Ring Peek Head Entry Data Regs */
80 u32 peek_tail_data
[128]; /* Ring Peek Tail Entry Data Regs */
84 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
86 struct k3_ringacc_proxy_gcfg_regs
{
87 u32 revision
; /* Revision Register */
88 u32 config
; /* Config Register */
91 #define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
94 * struct k3_ringacc_proxy_target_regs - RA Proxy Datapath MMIO Region
96 struct k3_ringacc_proxy_target_regs
{
97 u32 control
; /* Proxy Control Register */
98 u32 status
; /* Proxy Status Register */
100 u32 data
[128]; /* Proxy Data Register */
103 #define K3_RINGACC_PROXY_TARGET_STEP 0x1000
104 #define K3_RINGACC_PROXY_NOT_USED (-1)
/* Access modes programmed into the proxy control register. */
enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};
113 #define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
114 #define KNAV_RINGACC_FIFO_REGS_STEP 0x1000
115 #define KNAV_RINGACC_MAX_DB_RING_CNT (127U)
/**
 * struct k3_nav_ring_ops - Ring operations
 *
 * Per-mode vtable; unsupported operations are left NULL.
 */
struct k3_nav_ring_ops {
	int (*push_tail)(struct k3_nav_ring *ring, void *elm);
	int (*push_head)(struct k3_nav_ring *ring, void *elm);
	int (*pop_tail)(struct k3_nav_ring *ring, void *elm);
	int (*pop_head)(struct k3_nav_ring *ring, void *elm);
};
128 * struct k3_nav_ring - RA Ring descriptor
130 * @rt - Ring control/status registers
131 * @fifos - Ring queues registers
132 * @proxy - Ring Proxy Datapath registers
133 * @ring_mem_dma - Ring buffer dma address
134 * @ring_mem_virt - Ring buffer virt address
135 * @ops - Ring operations
136 * @size - Ring size in elements
137 * @elm_size - Size of the ring element
140 * @free - Number of free elements
141 * @occ - Ring occupancy
142 * @windex - Write index (only for @K3_NAV_RINGACC_RING_MODE_RING)
143 * @rindex - Read index (only for @K3_NAV_RINGACC_RING_MODE_RING)
145 * @parent - Pointer on struct @k3_nav_ringacc
146 * @use_count - Use count for shared rings
147 * @proxy_id - RA Ring Proxy Id (only if @K3_NAV_RINGACC_RING_USE_PROXY)
150 struct k3_nav_ring_rt_regs __iomem
*rt
;
151 struct k3_nav_ring_fifo_regs __iomem
*fifos
;
152 struct k3_ringacc_proxy_target_regs __iomem
*proxy
;
153 dma_addr_t ring_mem_dma
;
155 struct k3_nav_ring_ops
*ops
;
157 enum k3_nav_ring_size elm_size
;
158 enum k3_nav_ring_mode mode
;
160 #define KNAV_RING_FLAG_BUSY BIT(1)
161 #define K3_NAV_RING_FLAG_SHARED BIT(2)
167 struct k3_nav_ringacc
*parent
;
173 * struct k3_nav_ringacc - Rings accelerator descriptor
175 * @dev - pointer on RA device
176 * @proxy_gcfg - RA proxy global config registers
177 * @proxy_target_base - RA proxy datapath region
178 * @num_rings - number of ring in RA
179 * @rm_gp_range - general purpose rings range from tisci
180 * @dma_ring_reset_quirk - DMA reset w/a enable
181 * @num_proxies - number of RA proxies
182 * @rings - array of rings descriptors (struct @k3_nav_ring)
183 * @list - list of RAs in the system
184 * @tisci - pointer ti-sci handle
185 * @tisci_ring_ops - ti-sci rings ops
186 * @tisci_dev_id - ti-sci device id
188 struct k3_nav_ringacc
{
190 struct k3_ringacc_proxy_gcfg_regs __iomem
*proxy_gcfg
;
191 void __iomem
*proxy_target_base
;
192 u32 num_rings
; /* number of rings in Ringacc module */
193 unsigned long *rings_inuse
;
194 struct ti_sci_resource
*rm_gp_range
;
195 bool dma_ring_reset_quirk
;
197 unsigned long *proxy_inuse
;
199 struct k3_nav_ring
*rings
;
200 struct list_head list
;
202 const struct ti_sci_handle
*tisci
;
203 const struct ti_sci_rm_ringacc_ops
*tisci_ring_ops
;
207 static long k3_nav_ringacc_ring_get_fifo_pos(struct k3_nav_ring
*ring
)
209 return KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES
-
210 (4 << ring
->elm_size
);
213 static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring
*ring
, u32 idx
)
215 return (idx
* (4 << ring
->elm_size
) + ring
->ring_mem_virt
);
218 static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring
*ring
, void *elem
);
219 static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring
*ring
, void *elem
);
221 static struct k3_nav_ring_ops k3_nav_mode_ring_ops
= {
222 .push_tail
= k3_nav_ringacc_ring_push_mem
,
223 .pop_head
= k3_nav_ringacc_ring_pop_mem
,
226 static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring
*ring
, void *elem
);
227 static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring
*ring
, void *elem
);
228 static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring
*ring
,
230 static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring
*ring
,
233 static struct k3_nav_ring_ops k3_nav_mode_msg_ops
= {
234 .push_tail
= k3_nav_ringacc_ring_push_io
,
235 .push_head
= k3_nav_ringacc_ring_push_head_io
,
236 .pop_tail
= k3_nav_ringacc_ring_pop_tail_io
,
237 .pop_head
= k3_nav_ringacc_ring_pop_io
,
240 static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring
*ring
,
242 static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring
*ring
,
244 static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring
*ring
, void *elem
);
245 static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring
*ring
, void *elem
);
247 static struct k3_nav_ring_ops k3_nav_mode_proxy_ops
= {
248 .push_tail
= k3_ringacc_ring_push_tail_proxy
,
249 .push_head
= k3_ringacc_ring_push_head_proxy
,
250 .pop_tail
= k3_ringacc_ring_pop_tail_proxy
,
251 .pop_head
= k3_ringacc_ring_pop_head_proxy
,
254 struct udevice
*k3_nav_ringacc_get_dev(struct k3_nav_ringacc
*ringacc
)
259 struct k3_nav_ring
*k3_nav_ringacc_request_ring(struct k3_nav_ringacc
*ringacc
,
262 int proxy_id
= K3_RINGACC_PROXY_NOT_USED
;
264 if (id
== K3_NAV_RINGACC_RING_ID_ANY
) {
265 /* Request for any general purpose ring */
266 struct ti_sci_resource_desc
*gp_rings
=
267 &ringacc
->rm_gp_range
->desc
[0];
270 size
= gp_rings
->start
+ gp_rings
->num
;
271 id
= find_next_zero_bit(ringacc
->rings_inuse
,
272 size
, gp_rings
->start
);
279 if (test_bit(id
, ringacc
->rings_inuse
) &&
280 !(ringacc
->rings
[id
].flags
& K3_NAV_RING_FLAG_SHARED
))
282 else if (ringacc
->rings
[id
].flags
& K3_NAV_RING_FLAG_SHARED
)
285 if (flags
& K3_NAV_RINGACC_RING_USE_PROXY
) {
286 proxy_id
= find_next_zero_bit(ringacc
->proxy_inuse
,
287 ringacc
->num_proxies
, 0);
288 if (proxy_id
== ringacc
->num_proxies
)
292 if (!try_module_get(ringacc
->dev
->driver
->owner
))
295 if (proxy_id
!= K3_RINGACC_PROXY_NOT_USED
) {
296 set_bit(proxy_id
, ringacc
->proxy_inuse
);
297 ringacc
->rings
[id
].proxy_id
= proxy_id
;
298 pr_debug("Giving ring#%d proxy#%d\n",
301 pr_debug("Giving ring#%d\n", id
);
304 set_bit(id
, ringacc
->rings_inuse
);
306 ringacc
->rings
[id
].use_count
++;
307 return &ringacc
->rings
[id
];
313 static void k3_ringacc_ring_reset_sci(struct k3_nav_ring
*ring
)
315 struct k3_nav_ringacc
*ringacc
= ring
->parent
;
318 ret
= ringacc
->tisci_ring_ops
->config(
320 TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID
,
321 ringacc
->tisci_dev_id
,
330 dev_err(ringacc
->dev
, "TISCI reset ring fail (%d) ring_idx %d\n",
334 void k3_nav_ringacc_ring_reset(struct k3_nav_ring
*ring
)
336 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
344 k3_ringacc_ring_reset_sci(ring
);
347 static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring
*ring
,
348 enum k3_nav_ring_mode mode
)
350 struct k3_nav_ringacc
*ringacc
= ring
->parent
;
353 ret
= ringacc
->tisci_ring_ops
->config(
355 TI_SCI_MSG_VALUE_RM_RING_MODE_VALID
,
356 ringacc
->tisci_dev_id
,
365 dev_err(ringacc
->dev
, "TISCI reconf qmode fail (%d) ring_idx %d\n",
369 void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring
*ring
, u32 occ
)
371 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
374 if (!ring
->parent
->dma_ring_reset_quirk
) {
375 k3_nav_ringacc_ring_reset(ring
);
380 occ
= ringacc_readl(&ring
->rt
->occ
);
383 u32 db_ring_cnt
, db_ring_cnt_cur
;
385 pr_debug("%s %u occ: %u\n", __func__
,
387 /* 2. Reset the ring */
388 k3_ringacc_ring_reset_sci(ring
);
391 * 3. Setup the ring in ring/doorbell mode
392 * (if not already in this mode)
394 if (ring
->mode
!= K3_NAV_RINGACC_RING_MODE_RING
)
395 k3_ringacc_ring_reconfig_qmode_sci(
396 ring
, K3_NAV_RINGACC_RING_MODE_RING
);
398 * 4. Ring the doorbell 2**22 – ringOcc times.
399 * This will wrap the internal UDMAP ring state occupancy
400 * counter (which is 21-bits wide) to 0.
402 db_ring_cnt
= (1U << 22) - occ
;
404 while (db_ring_cnt
!= 0) {
406 * Ring the doorbell with the maximum count each
407 * iteration if possible to minimize the total
410 if (db_ring_cnt
> KNAV_RINGACC_MAX_DB_RING_CNT
)
411 db_ring_cnt_cur
= KNAV_RINGACC_MAX_DB_RING_CNT
;
413 db_ring_cnt_cur
= db_ring_cnt
;
415 writel(db_ring_cnt_cur
, &ring
->rt
->db
);
416 db_ring_cnt
-= db_ring_cnt_cur
;
419 /* 5. Restore the original ring mode (if not ring mode) */
420 if (ring
->mode
!= K3_NAV_RINGACC_RING_MODE_RING
)
421 k3_ringacc_ring_reconfig_qmode_sci(ring
, ring
->mode
);
424 /* 2. Reset the ring */
425 k3_nav_ringacc_ring_reset(ring
);
428 static void k3_ringacc_ring_free_sci(struct k3_nav_ring
*ring
)
430 struct k3_nav_ringacc
*ringacc
= ring
->parent
;
433 ret
= ringacc
->tisci_ring_ops
->config(
435 TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER
,
436 ringacc
->tisci_dev_id
,
445 dev_err(ringacc
->dev
, "TISCI ring free fail (%d) ring_idx %d\n",
449 int k3_nav_ringacc_ring_free(struct k3_nav_ring
*ring
)
451 struct k3_nav_ringacc
*ringacc
;
456 ringacc
= ring
->parent
;
458 pr_debug("%s flags: 0x%08x\n", __func__
, ring
->flags
);
460 if (!test_bit(ring
->ring_id
, ringacc
->rings_inuse
))
463 if (--ring
->use_count
)
466 if (!(ring
->flags
& KNAV_RING_FLAG_BUSY
))
469 k3_ringacc_ring_free_sci(ring
);
471 dma_free_coherent(ringacc
->dev
,
472 ring
->size
* (4 << ring
->elm_size
),
473 ring
->ring_mem_virt
, ring
->ring_mem_dma
);
474 ring
->flags
&= ~KNAV_RING_FLAG_BUSY
;
476 if (ring
->proxy_id
!= K3_RINGACC_PROXY_NOT_USED
) {
477 clear_bit(ring
->proxy_id
, ringacc
->proxy_inuse
);
479 ring
->proxy_id
= K3_RINGACC_PROXY_NOT_USED
;
483 clear_bit(ring
->ring_id
, ringacc
->rings_inuse
);
485 module_put(ringacc
->dev
->driver
->owner
);
491 u32
k3_nav_ringacc_get_ring_id(struct k3_nav_ring
*ring
)
496 return ring
->ring_id
;
499 static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring
*ring
)
501 struct k3_nav_ringacc
*ringacc
= ring
->parent
;
508 ring_idx
= ring
->ring_id
;
509 ret
= ringacc
->tisci_ring_ops
->config(
511 TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER
,
512 ringacc
->tisci_dev_id
,
514 lower_32_bits(ring
->ring_mem_dma
),
515 upper_32_bits(ring
->ring_mem_dma
),
521 dev_err(ringacc
->dev
, "TISCI config ring fail (%d) ring_idx %d\n",
527 int k3_nav_ringacc_ring_cfg(struct k3_nav_ring
*ring
,
528 struct k3_nav_ring_cfg
*cfg
)
530 struct k3_nav_ringacc
*ringacc
= ring
->parent
;
535 if (cfg
->elm_size
> K3_NAV_RINGACC_RING_ELSIZE_256
||
536 cfg
->mode
> K3_NAV_RINGACC_RING_MODE_QM
||
537 cfg
->size
& ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK
||
538 !test_bit(ring
->ring_id
, ringacc
->rings_inuse
))
541 if (ring
->use_count
!= 1)
544 ring
->size
= cfg
->size
;
545 ring
->elm_size
= cfg
->elm_size
;
546 ring
->mode
= cfg
->mode
;
552 if (ring
->proxy_id
!= K3_RINGACC_PROXY_NOT_USED
)
553 ring
->proxy
= ringacc
->proxy_target_base
+
554 ring
->proxy_id
* K3_RINGACC_PROXY_TARGET_STEP
;
556 switch (ring
->mode
) {
557 case K3_NAV_RINGACC_RING_MODE_RING
:
558 ring
->ops
= &k3_nav_mode_ring_ops
;
560 case K3_NAV_RINGACC_RING_MODE_QM
:
562 * In Queue mode elm_size can be 8 only and each operation
563 * uses 2 element slots
565 if (cfg
->elm_size
!= K3_NAV_RINGACC_RING_ELSIZE_8
||
568 case K3_NAV_RINGACC_RING_MODE_MESSAGE
:
570 ring
->ops
= &k3_nav_mode_proxy_ops
;
572 ring
->ops
= &k3_nav_mode_msg_ops
;
580 ring
->ring_mem_virt
=
581 dma_zalloc_coherent(ringacc
->dev
,
582 ring
->size
* (4 << ring
->elm_size
),
583 &ring
->ring_mem_dma
, GFP_KERNEL
);
584 if (!ring
->ring_mem_virt
) {
585 dev_err(ringacc
->dev
, "Failed to alloc ring mem\n");
590 ret
= k3_nav_ringacc_ring_cfg_sci(ring
);
595 ring
->flags
|= KNAV_RING_FLAG_BUSY
;
596 ring
->flags
|= (cfg
->flags
& K3_NAV_RINGACC_RING_SHARED
) ?
597 K3_NAV_RING_FLAG_SHARED
: 0;
602 dma_free_coherent(ringacc
->dev
,
603 ring
->size
* (4 << ring
->elm_size
),
613 u32
k3_nav_ringacc_ring_get_size(struct k3_nav_ring
*ring
)
615 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
621 u32
k3_nav_ringacc_ring_get_free(struct k3_nav_ring
*ring
)
623 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
627 ring
->free
= ring
->size
- ringacc_readl(&ring
->rt
->occ
);
632 u32
k3_nav_ringacc_ring_get_occ(struct k3_nav_ring
*ring
)
634 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
637 return ringacc_readl(&ring
->rt
->occ
);
640 u32
k3_nav_ringacc_ring_is_full(struct k3_nav_ring
*ring
)
642 return !k3_nav_ringacc_ring_get_free(ring
);
/* Logical access modes used to pick the FIFO window and transfer direction. */
enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};
654 static int k3_ringacc_ring_cfg_proxy(struct k3_nav_ring
*ring
,
655 enum k3_ringacc_proxy_access_mode mode
)
661 val
|= ring
->elm_size
<< 24;
662 ringacc_writel(val
, &ring
->proxy
->control
);
666 static int k3_nav_ringacc_ring_access_proxy(
667 struct k3_nav_ring
*ring
, void *elem
,
668 enum k3_ringacc_access_mode access_mode
)
672 ptr
= (void __iomem
*)&ring
->proxy
->data
;
674 switch (access_mode
) {
675 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD
:
676 case K3_RINGACC_ACCESS_MODE_POP_HEAD
:
677 k3_ringacc_ring_cfg_proxy(ring
, PROXY_ACCESS_MODE_HEAD
);
679 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL
:
680 case K3_RINGACC_ACCESS_MODE_POP_TAIL
:
681 k3_ringacc_ring_cfg_proxy(ring
, PROXY_ACCESS_MODE_TAIL
);
687 ptr
+= k3_nav_ringacc_ring_get_fifo_pos(ring
);
689 switch (access_mode
) {
690 case K3_RINGACC_ACCESS_MODE_POP_HEAD
:
691 case K3_RINGACC_ACCESS_MODE_POP_TAIL
:
692 pr_debug("proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n",
694 memcpy_fromio(elem
, ptr
, (4 << ring
->elm_size
));
697 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL
:
698 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD
:
699 pr_debug("proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n",
701 memcpy_toio(ptr
, elem
, (4 << ring
->elm_size
));
708 pr_debug("proxy: free%d occ%d\n",
709 ring
->free
, ring
->occ
);
713 static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring
*ring
, void *elem
)
715 return k3_nav_ringacc_ring_access_proxy(
716 ring
, elem
, K3_RINGACC_ACCESS_MODE_PUSH_HEAD
);
719 static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring
*ring
, void *elem
)
721 return k3_nav_ringacc_ring_access_proxy(
722 ring
, elem
, K3_RINGACC_ACCESS_MODE_PUSH_TAIL
);
725 static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring
*ring
, void *elem
)
727 return k3_nav_ringacc_ring_access_proxy(
728 ring
, elem
, K3_RINGACC_ACCESS_MODE_POP_HEAD
);
731 static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring
*ring
, void *elem
)
733 return k3_nav_ringacc_ring_access_proxy(
734 ring
, elem
, K3_RINGACC_ACCESS_MODE_POP_HEAD
);
737 static int k3_nav_ringacc_ring_access_io(
738 struct k3_nav_ring
*ring
, void *elem
,
739 enum k3_ringacc_access_mode access_mode
)
743 switch (access_mode
) {
744 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD
:
745 case K3_RINGACC_ACCESS_MODE_POP_HEAD
:
746 ptr
= (void __iomem
*)&ring
->fifos
->head_data
;
748 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL
:
749 case K3_RINGACC_ACCESS_MODE_POP_TAIL
:
750 ptr
= (void __iomem
*)&ring
->fifos
->tail_data
;
756 ptr
+= k3_nav_ringacc_ring_get_fifo_pos(ring
);
758 switch (access_mode
) {
759 case K3_RINGACC_ACCESS_MODE_POP_HEAD
:
760 case K3_RINGACC_ACCESS_MODE_POP_TAIL
:
761 pr_debug("memcpy_fromio(x): --> ptr(%p), mode:%d\n",
763 memcpy_fromio(elem
, ptr
, (4 << ring
->elm_size
));
766 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL
:
767 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD
:
768 pr_debug("memcpy_toio(x): --> ptr(%p), mode:%d\n",
770 memcpy_toio(ptr
, elem
, (4 << ring
->elm_size
));
777 pr_debug("free%d index%d occ%d index%d\n",
778 ring
->free
, ring
->windex
, ring
->occ
, ring
->rindex
);
782 static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring
*ring
,
785 return k3_nav_ringacc_ring_access_io(
786 ring
, elem
, K3_RINGACC_ACCESS_MODE_PUSH_HEAD
);
789 static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring
*ring
, void *elem
)
791 return k3_nav_ringacc_ring_access_io(
792 ring
, elem
, K3_RINGACC_ACCESS_MODE_PUSH_TAIL
);
795 static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring
*ring
, void *elem
)
797 return k3_nav_ringacc_ring_access_io(
798 ring
, elem
, K3_RINGACC_ACCESS_MODE_POP_HEAD
);
801 static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring
*ring
, void *elem
)
803 return k3_nav_ringacc_ring_access_io(
804 ring
, elem
, K3_RINGACC_ACCESS_MODE_POP_HEAD
);
807 static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring
*ring
, void *elem
)
811 elem_ptr
= k3_nav_ringacc_get_elm_addr(ring
, ring
->windex
);
813 memcpy(elem_ptr
, elem
, (4 << ring
->elm_size
));
815 flush_dcache_range((unsigned long)ring
->ring_mem_virt
,
816 ALIGN((unsigned long)ring
->ring_mem_virt
+
817 ring
->size
* (4 << ring
->elm_size
),
820 ring
->windex
= (ring
->windex
+ 1) % ring
->size
;
822 ringacc_writel(1, &ring
->rt
->db
);
824 pr_debug("ring_push_mem: free%d index%d\n",
825 ring
->free
, ring
->windex
);
830 static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring
*ring
, void *elem
)
834 elem_ptr
= k3_nav_ringacc_get_elm_addr(ring
, ring
->rindex
);
836 invalidate_dcache_range((unsigned long)ring
->ring_mem_virt
,
837 ALIGN((unsigned long)ring
->ring_mem_virt
+
838 ring
->size
* (4 << ring
->elm_size
),
841 memcpy(elem
, elem_ptr
, (4 << ring
->elm_size
));
843 ring
->rindex
= (ring
->rindex
+ 1) % ring
->size
;
845 ringacc_writel(-1, &ring
->rt
->db
);
847 pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n",
848 ring
->occ
, ring
->rindex
, elem_ptr
);
852 int k3_nav_ringacc_ring_push(struct k3_nav_ring
*ring
, void *elem
)
854 int ret
= -EOPNOTSUPP
;
856 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
859 pr_debug("ring_push%d: free%d index%d\n",
860 ring
->ring_id
, ring
->free
, ring
->windex
);
862 if (k3_nav_ringacc_ring_is_full(ring
))
865 if (ring
->ops
&& ring
->ops
->push_tail
)
866 ret
= ring
->ops
->push_tail(ring
, elem
);
871 int k3_nav_ringacc_ring_push_head(struct k3_nav_ring
*ring
, void *elem
)
873 int ret
= -EOPNOTSUPP
;
875 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
878 pr_debug("ring_push_head: free%d index%d\n",
879 ring
->free
, ring
->windex
);
881 if (k3_nav_ringacc_ring_is_full(ring
))
884 if (ring
->ops
&& ring
->ops
->push_head
)
885 ret
= ring
->ops
->push_head(ring
, elem
);
890 int k3_nav_ringacc_ring_pop(struct k3_nav_ring
*ring
, void *elem
)
892 int ret
= -EOPNOTSUPP
;
894 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
898 ring
->occ
= k3_nav_ringacc_ring_get_occ(ring
);
900 pr_debug("ring_pop%d: occ%d index%d\n",
901 ring
->ring_id
, ring
->occ
, ring
->rindex
);
906 if (ring
->ops
&& ring
->ops
->pop_head
)
907 ret
= ring
->ops
->pop_head(ring
, elem
);
912 int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring
*ring
, void *elem
)
914 int ret
= -EOPNOTSUPP
;
916 if (!ring
|| !(ring
->flags
& KNAV_RING_FLAG_BUSY
))
920 ring
->occ
= k3_nav_ringacc_ring_get_occ(ring
);
922 pr_debug("ring_pop_tail: occ%d index%d\n",
923 ring
->occ
, ring
->rindex
);
928 if (ring
->ops
&& ring
->ops
->pop_tail
)
929 ret
= ring
->ops
->pop_tail(ring
, elem
);
934 static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc
*ringacc
)
936 struct udevice
*dev
= ringacc
->dev
;
937 struct udevice
*tisci_dev
= NULL
;
940 ringacc
->num_rings
= dev_read_u32_default(dev
, "ti,num-rings", 0);
941 if (!ringacc
->num_rings
) {
942 dev_err(dev
, "ti,num-rings read failure %d\n", ret
);
946 ringacc
->dma_ring_reset_quirk
=
947 dev_read_bool(dev
, "ti,dma-ring-reset-quirk");
949 ret
= uclass_get_device_by_phandle(UCLASS_FIRMWARE
, dev
,
950 "ti,sci", &tisci_dev
);
952 pr_debug("TISCI RA RM get failed (%d)\n", ret
);
953 ringacc
->tisci
= NULL
;
956 ringacc
->tisci
= (struct ti_sci_handle
*)
957 (ti_sci_get_handle_from_sysfw(tisci_dev
));
959 ret
= dev_read_u32_default(dev
, "ti,sci", 0);
961 dev_err(dev
, "TISCI RA RM disabled\n");
962 ringacc
->tisci
= NULL
;
966 ret
= dev_read_u32(dev
, "ti,sci-dev-id", &ringacc
->tisci_dev_id
);
968 dev_err(dev
, "ti,sci-dev-id read failure %d\n", ret
);
969 ringacc
->tisci
= NULL
;
973 ringacc
->rm_gp_range
= devm_ti_sci_get_of_resource(
975 ringacc
->tisci_dev_id
,
976 "ti,sci-rm-range-gp-rings");
977 if (IS_ERR(ringacc
->rm_gp_range
))
978 ret
= PTR_ERR(ringacc
->rm_gp_range
);
983 static int k3_nav_ringacc_probe(struct udevice
*dev
)
985 struct k3_nav_ringacc
*ringacc
;
986 void __iomem
*base_fifo
, *base_rt
;
989 ringacc
= dev_get_priv(dev
);
995 ret
= k3_nav_ringacc_probe_dt(ringacc
);
999 base_rt
= (uint32_t *)devfdt_get_addr_name(dev
, "rt");
1000 pr_debug("rt %p\n", base_rt
);
1001 if (IS_ERR(base_rt
))
1002 return PTR_ERR(base_rt
);
1004 base_fifo
= (uint32_t *)devfdt_get_addr_name(dev
, "fifos");
1005 pr_debug("fifos %p\n", base_fifo
);
1006 if (IS_ERR(base_fifo
))
1007 return PTR_ERR(base_fifo
);
1009 ringacc
->proxy_gcfg
= (struct k3_ringacc_proxy_gcfg_regs __iomem
*)
1010 devfdt_get_addr_name(dev
, "proxy_gcfg");
1011 if (IS_ERR(ringacc
->proxy_gcfg
))
1012 return PTR_ERR(ringacc
->proxy_gcfg
);
1013 ringacc
->proxy_target_base
=
1014 (struct k3_ringacc_proxy_gcfg_regs __iomem
*)
1015 devfdt_get_addr_name(dev
, "proxy_target");
1016 if (IS_ERR(ringacc
->proxy_target_base
))
1017 return PTR_ERR(ringacc
->proxy_target_base
);
1019 ringacc
->num_proxies
= ringacc_readl(&ringacc
->proxy_gcfg
->config
) &
1020 K3_RINGACC_PROXY_CFG_THREADS_MASK
;
1022 ringacc
->rings
= devm_kzalloc(dev
,
1023 sizeof(*ringacc
->rings
) *
1026 ringacc
->rings_inuse
= devm_kcalloc(dev
,
1027 BITS_TO_LONGS(ringacc
->num_rings
),
1028 sizeof(unsigned long), GFP_KERNEL
);
1029 ringacc
->proxy_inuse
= devm_kcalloc(dev
,
1030 BITS_TO_LONGS(ringacc
->num_proxies
),
1031 sizeof(unsigned long), GFP_KERNEL
);
1033 if (!ringacc
->rings
|| !ringacc
->rings_inuse
|| !ringacc
->proxy_inuse
)
1036 for (i
= 0; i
< ringacc
->num_rings
; i
++) {
1037 ringacc
->rings
[i
].rt
= base_rt
+
1038 KNAV_RINGACC_RT_REGS_STEP
* i
;
1039 ringacc
->rings
[i
].fifos
= base_fifo
+
1040 KNAV_RINGACC_FIFO_REGS_STEP
* i
;
1041 ringacc
->rings
[i
].parent
= ringacc
;
1042 ringacc
->rings
[i
].ring_id
= i
;
1043 ringacc
->rings
[i
].proxy_id
= K3_RINGACC_PROXY_NOT_USED
;
1045 dev_set_drvdata(dev
, ringacc
);
1047 ringacc
->tisci_ring_ops
= &ringacc
->tisci
->ops
.rm_ring_ops
;
1049 list_add_tail(&ringacc
->list
, &k3_nav_ringacc_list
);
1051 dev_info(dev
, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
1053 ringacc
->rm_gp_range
->desc
[0].start
,
1054 ringacc
->rm_gp_range
->desc
[0].num
,
1055 ringacc
->tisci_dev_id
);
1056 dev_info(dev
, "dma-ring-reset-quirk: %s\n",
1057 ringacc
->dma_ring_reset_quirk
? "enabled" : "disabled");
1058 dev_info(dev
, "RA Proxy rev. %08x, num_proxies:%u\n",
1059 ringacc_readl(&ringacc
->proxy_gcfg
->revision
),
1060 ringacc
->num_proxies
);
1064 static const struct udevice_id knav_ringacc_ids
[] = {
1065 { .compatible
= "ti,am654-navss-ringacc" },
1069 U_BOOT_DRIVER(k3_navss_ringacc
) = {
1070 .name
= "k3-navss-ringacc",
1072 .of_match
= knav_ringacc_ids
,
1073 .probe
= k3_nav_ringacc_probe
,
1074 .priv_auto_alloc_size
= sizeof(struct k3_nav_ringacc
),