// SPDX-License-Identifier: GPL-2.0+
/*
 * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 */

#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <asm/bitops.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/uclass.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>

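/*
 * Compatibility shims: the Linux-style bitmap and DMA helpers used in this
 * (Linux-derived) driver are mapped onto their U-Boot equivalents below.
 * U-Boot's dma_free_coherent() only needs the CPU address, and there is no
 * dma_zalloc_coherent(), so it is emulated with dma_alloc_coherent()
 * followed by a memset() of the returned buffer.
 */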
#define set_bit(bit, bitmap)	__set_bit(bit, bitmap)
#define clear_bit(bit, bitmap)	__clear_bit(bit, bitmap)
#define dma_free_coherent(dev, size, cpu_addr, dma_handle) \
	dma_free_coherent(cpu_addr)
#define dma_zalloc_coherent(dev, size, dma_handle, flag) \
({ \
	void *ring_mem_virt; \
	ring_mem_virt = dma_alloc_coherent((size), \
					   (unsigned long *)(dma_handle)); \
	if (ring_mem_virt) \
		memset(ring_mem_virt, 0, (size)); \
	ring_mem_virt; \
})

static LIST_HEAD(k3_nav_ringacc_list);

static void ringacc_writel(u32 v, void __iomem *reg)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v, reg);
	writel(v, reg);
}

static u32 ringacc_readl(void __iomem *reg)
{
	u32 v;

	v = readl(reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, reg);
	return v;
}

#define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK	GENMASK(19, 0)

/**
 * struct k3_nav_ring_rt_regs - The RA Control/Status Registers region
 */
struct k3_nav_ring_rt_regs {
	u32	resv_16[4];
	u32	db;		/* RT Ring N Doorbell Register */
	u32	resv_4[1];
	u32	occ;		/* RT Ring N Occupancy Register */
	u32	indx;		/* RT Ring N Current Index Register */
	u32	hwocc;		/* RT Ring N Hardware Occupancy Register */
	u32	hwindx;		/* RT Ring N Hardware Current Index Register */
};

#define KNAV_RINGACC_RT_REGS_STEP	0x1000

/**
 * struct k3_nav_ring_fifo_regs - The Ring Accelerator Queues Registers region
 */
struct k3_nav_ring_fifo_regs {
	u32	head_data[128];		/* Ring Head Entry Data Registers */
	u32	tail_data[128];		/* Ring Tail Entry Data Registers */
	u32	peek_head_data[128];	/* Ring Peek Head Entry Data Regs */
	u32	peek_tail_data[128];	/* Ring Peek Tail Entry Data Regs */
};

/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;	/* Revision Register */
	u32	config;		/* Config Register */
};

#define K3_RINGACC_PROXY_CFG_THREADS_MASK	GENMASK(15, 0)

/**
 * struct k3_ringacc_proxy_target_regs - RA Proxy Datapath MMIO Region
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;	/* Proxy Control Register */
	u32	status;		/* Proxy Status Register */
	u8	resv_512[504];
	u32	data[128];	/* Proxy Data Register */
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES	(512U)
#define KNAV_RINGACC_FIFO_REGS_STEP		0x1000
#define KNAV_RINGACC_MAX_DB_RING_CNT		(127U)

/**
 * struct k3_nav_ring_ops - Ring operations
 */
struct k3_nav_ring_ops {
	int (*push_tail)(struct k3_nav_ring *ring, void *elm);
	int (*push_head)(struct k3_nav_ring *ring, void *elm);
	int (*pop_tail)(struct k3_nav_ring *ring, void *elm);
	int (*pop_head)(struct k3_nav_ring *ring, void *elm);
};

/**
 * struct k3_nav_ring - RA Ring descriptor
 *
 * @rt - Ring control/status registers
 * @fifos - Ring queues registers
 * @proxy - Ring Proxy Datapath registers
 * @ring_mem_dma - Ring buffer dma address
 * @ring_mem_virt - Ring buffer virt address
 * @ops - Ring operations
 * @size - Ring size in elements
 * @elm_size - Size of the ring element
 * @mode - Ring mode
 * @flags - flags
 * @free - Number of free elements
 * @occ - Ring occupancy
 * @windex - Write index (only for @K3_NAV_RINGACC_RING_MODE_RING)
 * @rindex - Read index (only for @K3_NAV_RINGACC_RING_MODE_RING)
 * @ring_id - Ring Id
 * @parent - Pointer to the parent struct @k3_nav_ringacc
 * @use_count - Use count for shared rings
 * @proxy_id - RA Ring Proxy Id (only if @K3_NAV_RINGACC_RING_USE_PROXY)
 */
struct k3_nav_ring {
	struct k3_nav_ring_rt_regs __iomem *rt;
	struct k3_nav_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_nav_ring_ops *ops;
	u32		size;
	enum k3_nav_ring_size elm_size;
	enum k3_nav_ring_mode mode;
	u32		flags;
#define KNAV_RING_FLAG_BUSY	BIT(1)
#define K3_NAV_RING_FLAG_SHARED	BIT(2)
	u32		free;
	u32		occ;
	u32		windex;
	u32		rindex;
	u32		ring_id;
	struct k3_nav_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
};

/**
 * struct k3_nav_ringacc - Rings accelerator descriptor
 *
 * @dev - pointer to the RA device
 * @proxy_gcfg - RA proxy global config registers
 * @proxy_target_base - RA proxy datapath region
 * @num_rings - number of rings in RA
 * @rings_inuse - bitmap for ring usage tracking
 * @rm_gp_range - general purpose rings range from tisci
 * @dma_ring_reset_quirk - DMA ring reset workaround enable
 * @num_proxies - number of RA proxies
 * @proxy_inuse - bitmap for proxy usage tracking
 * @rings - array of rings descriptors (struct @k3_nav_ring)
 * @list - list of RAs in the system
 * @tisci - pointer to the ti-sci handle
 * @tisci_ring_ops - ti-sci rings ops
 * @tisci_dev_id - ti-sci device id
 */
struct k3_nav_ringacc {
	struct udevice *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;
	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	struct k3_nav_ring *rings;
	struct list_head list;

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;
};

static long k3_nav_ringacc_ring_get_fifo_pos(struct k3_nav_ring *ring)
{
	return KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES -
	       (4 << ring->elm_size);
}

static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring *ring, u32 idx)
{
	return (idx * (4 << ring->elm_size) + ring->ring_mem_virt);
}

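/*
 * Note on the element-size arithmetic used by the helpers above (the
 * concrete numbers are only an example): an element occupies
 * (4 << elm_size) bytes, so elm_size == 2 means 16-byte elements.
 * k3_nav_ringacc_get_elm_addr() then resolves index idx to
 * ring_mem_virt + idx * 16, while k3_nav_ringacc_ring_get_fifo_pos()
 * selects the last element-sized slot of the 512-byte FIFO window:
 * 512 - 16 = 496.
 */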
static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem);

static struct k3_nav_ring_ops k3_nav_mode_ring_ops = {
	.push_tail = k3_nav_ringacc_ring_push_mem,
	.pop_head = k3_nav_ringacc_ring_pop_mem,
};

static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
					    void *elem);
static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring,
					   void *elem);

static struct k3_nav_ring_ops k3_nav_mode_msg_ops = {
	.push_tail = k3_nav_ringacc_ring_push_io,
	.push_head = k3_nav_ringacc_ring_push_head_io,
	.pop_tail = k3_nav_ringacc_ring_pop_tail_io,
	.pop_head = k3_nav_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring,
					   void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring,
					   void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem);

static struct k3_nav_ring_ops k3_nav_mode_proxy_ops = {
	.push_tail = k3_ringacc_ring_push_tail_proxy,
	.push_head = k3_ringacc_ring_push_head_proxy,
	.pop_tail = k3_ringacc_ring_pop_tail_proxy,
	.pop_head = k3_ringacc_ring_pop_head_proxy,
};

struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc)
{
	return ringacc->dev;
}

struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc,
						int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	if (id == K3_NAV_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
					&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse,
					size, gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_NAV_RINGACC_RING_USE_PROXY) {
		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
					      ringacc->num_proxies, 0);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (!try_module_get(ringacc->dev->driver->owner))
		goto error;

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		pr_debug("Giving ring#%d proxy#%d\n",
			 id, proxy_id);
	} else {
		pr_debug("Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	return &ringacc->rings[id];

error:
	return NULL;
}

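/*
 * Typical client usage (a hypothetical sketch, not taken from the original
 * source): a consumer such as a DMA driver asks for any free general
 * purpose ring and releases it again when done:
 *
 *	struct k3_nav_ring *ring;
 *
 *	ring = k3_nav_ringacc_request_ring(ringacc,
 *					   K3_NAV_RINGACC_RING_ID_ANY, 0);
 *	if (!ring)
 *		return -EBUSY;
 *	...
 *	k3_nav_ringacc_ring_free(ring);
 *
 * A NULL return means either no free general purpose ring was found or the
 * requested id is already in use and not shared.
 */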
static void k3_ringacc_ring_reset_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			ring->size,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return;

	ring->occ = 0;
	ring->free = 0;
	ring->rindex = 0;
	ring->windex = 0;

	k3_ringacc_ring_reset_sci(ring);
}

static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring *ring,
					       enum k3_nav_ring_mode mode)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			mode,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk) {
		k3_nav_ringacc_ring_reset(ring);
		return;
	}

	if (!occ)
		occ = ringacc_readl(&ring->rt->occ);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		pr_debug("%s %u occ: %u\n", __func__,
			 ring->ring_id, occ);
		/* 2. Reset the ring */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * 3. Setup the ring in ring/doorbell mode
		 * (if not already in this mode)
		 */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_NAV_RINGACC_RING_MODE_RING);
		/*
		 * 4. Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * number of writes
			 */
			if (db_ring_cnt > KNAV_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = KNAV_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* 5. Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

	/* 2. Reset the ring */
	k3_nav_ringacc_ring_reset(ring);
}

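/*
 * Worked example for the quirk path above (the numbers are illustrative
 * only): with occ == 3 the loop must ring the doorbell
 * 2^22 - 3 = 4194301 times. At most 127 increments can be signalled per
 * write (KNAV_RINGACC_MAX_DB_RING_CNT), so this takes 33025 writes of 127
 * plus one final write of 126, which wraps the internal occupancy counter
 * back to 0 as described in the comment above.
 */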
static void k3_ringacc_ring_free_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	pr_debug("%s flags: 0x%08x\n", __func__, ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (--ring->use_count)
		goto out;

	if (!(ring->flags & KNAV_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags &= ~KNAV_RING_FLAG_BUSY;
	ring->ops = NULL;
	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

	module_put(ringacc->dev->driver->owner);

out:
	return 0;
}

u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}

static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	u32 ring_idx;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_idx = ring->ring_id;
	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring_idx,
			lower_32_bits(ring->ring_mem_dma),
			upper_32_bits(ring->ring_mem_dma),
			ring->size,
			ring->mode,
			ring->elm_size,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring_idx);

	return ret;
}

int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring,
			    struct k3_nav_ring_cfg *cfg)
{
	struct k3_nav_ringacc *ringacc;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;

	ringacc = ring->parent;

	if (cfg->elm_size > K3_NAV_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode > K3_NAV_RINGACC_RING_MODE_QM ||
	    cfg->size & ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	ring->occ = 0;
	ring->free = 0;
	ring->rindex = 0;
	ring->windex = 0;

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_NAV_RINGACC_RING_MODE_RING:
		ring->ops = &k3_nav_mode_ring_ops;
		break;
	case K3_NAV_RINGACC_RING_MODE_QM:
		/*
		 * In Queue mode elm_size can be 8 only and each operation
		 * uses 2 element slots
		 */
		if (cfg->elm_size != K3_NAV_RINGACC_RING_ELSIZE_8 ||
		    cfg->size % 2) {
			ret = -EINVAL;
			goto err_free_proxy;
		}
		/* fallthrough: QM mode uses the same IO/proxy ops as MESSAGE */
	case K3_NAV_RINGACC_RING_MODE_MESSAGE:
		if (ring->proxy)
			ring->ops = &k3_nav_mode_proxy_ops;
		else
			ring->ops = &k3_nav_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt =
			dma_zalloc_coherent(ringacc->dev,
					    ring->size * (4 << ring->elm_size),
					    &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_nav_ringacc_ring_cfg_sci(ring);

	if (ret)
		goto err_free_mem;

	ring->flags |= KNAV_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_NAV_RINGACC_RING_SHARED) ?
			K3_NAV_RING_FLAG_SHARED : 0;

	return 0;

err_free_mem:
	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}

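/*
 * Hypothetical configuration sketch (not from the original source): once a
 * ring has been requested, a client fills a struct k3_nav_ring_cfg and
 * configures the ring before using it:
 *
 *	struct k3_nav_ring_cfg ring_cfg = {
 *		.size = 16,
 *		.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8,
 *		.mode = K3_NAV_RINGACC_RING_MODE_RING,
 *		.flags = 0,
 *	};
 *	int ret;
 *
 *	ret = k3_nav_ringacc_ring_cfg(ring, &ring_cfg);
 *	if (ret)
 *		return ret;
 *
 * The field names mirror the cfg members read by this function; the element
 * count used here is a placeholder.
 */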
u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	return ring->size;
}

u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->free)
		ring->free = ring->size - ringacc_readl(&ring->rt->occ);

	return ring->free;
}

u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	return ringacc_readl(&ring->rt->occ);
}

u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring)
{
	return !k3_nav_ringacc_ring_get_free(ring);
}

enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};

static int k3_ringacc_ring_cfg_proxy(struct k3_nav_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= mode << 16;
	val |= ring->elm_size << 24;
	ringacc_writel(val, &ring->proxy->control);
	return 0;
}

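/*
 * Field layout of the proxy control word written above, as implied by the
 * shifts in k3_ringacc_ring_cfg_proxy() (not taken from a register manual):
 *   bits [15:0]  - ring id accessed through this proxy thread
 *   bits [23:16] - proxy access mode (head/tail/peek)
 *   bits [31:24] - ring element size encoding
 */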
static int k3_nav_ringacc_ring_access_proxy(
			struct k3_nav_ring *ring, void *elem,
			enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		pr_debug("proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		pr_debug("proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->free--;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("proxy: free%d occ%d\n",
		 ring->free, ring->occ);
	return 0;
}

static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

static int k3_nav_ringacc_ring_access_io(
		struct k3_nav_ring *ring, void *elem,
		enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		pr_debug("memcpy_fromio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		pr_debug("memcpy_toio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->free--;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("free%d index%d occ%d index%d\n",
		 ring->free, ring->windex, ring->occ, ring->rindex);
	return 0;
}

static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
					    void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));

	flush_dcache_range((unsigned long)ring->ring_mem_virt,
			   ALIGN((unsigned long)ring->ring_mem_virt +
				 ring->size * (4 << ring->elm_size),
				 ARCH_DMA_MINALIGN));

	ring->windex = (ring->windex + 1) % ring->size;
	ring->free--;
	ringacc_writel(1, &ring->rt->db);

	pr_debug("ring_push_mem: free%d index%d\n",
		 ring->free, ring->windex);

	return 0;
}

static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->rindex);

	invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
				ALIGN((unsigned long)ring->ring_mem_virt +
				      ring->size * (4 << ring->elm_size),
				      ARCH_DMA_MINALIGN));

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->rindex = (ring->rindex + 1) % ring->size;
	ring->occ--;
	ringacc_writel(-1, &ring->rt->db);

	pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n",
		 ring->occ, ring->rindex, elem_ptr);
	return 0;
}

int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	pr_debug("ring_push%d: free%d index%d\n",
		 ring->ring_id, ring->free, ring->windex);

	if (k3_nav_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);

	return ret;
}

int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	pr_debug("ring_push_head: free%d index%d\n",
		 ring->free, ring->windex);

	if (k3_nav_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_head)
		ret = ring->ops->push_head(ring, elem);

	return ret;
}

int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->occ)
		ring->occ = k3_nav_ringacc_ring_get_occ(ring);

	pr_debug("ring_pop%d: occ%d index%d\n",
		 ring->ring_id, ring->occ, ring->rindex);

	if (!ring->occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_head)
		ret = ring->ops->pop_head(ring, elem);

	return ret;
}

int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->occ)
		ring->occ = k3_nav_ringacc_ring_get_occ(ring);

	pr_debug("ring_pop_tail: occ%d index%d\n",
		 ring->occ, ring->rindex);

	if (!ring->occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_tail)
		ret = ring->ops->pop_tail(ring, elem);

	return ret;
}

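/*
 * Illustrative producer/consumer flow (hypothetical, not part of the
 * original source), assuming the ring was configured with 16-byte elements:
 *
 *	u8 elem[16];
 *	int ret;
 *
 *	ret = k3_nav_ringacc_ring_push(ring, elem);
 *	(-ENOMEM means the ring is currently full)
 *
 *	ret = k3_nav_ringacc_ring_pop(ring, elem);
 *	(-ENODATA means the ring is currently empty)
 *
 * The buffer passed in must be at least (4 << elm_size) bytes, since the
 * ops copy exactly one element per call.
 */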
static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc *ringacc)
{
	struct udevice *dev = ringacc->dev;
	struct udevice *tisci_dev = NULL;
	int ret;

	ringacc->num_rings = dev_read_u32_default(dev, "ti,num-rings", 0);
	if (!ringacc->num_rings) {
		dev_err(dev, "ti,num-rings read failure\n");
		return -EINVAL;
	}

	ringacc->dma_ring_reset_quirk =
			dev_read_bool(dev, "ti,dma-ring-reset-quirk");

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		pr_debug("TISCI RA RM get failed (%d)\n", ret);
		ringacc->tisci = NULL;
		return -ENODEV;
	}
	ringacc->tisci = (struct ti_sci_handle *)
			 (ti_sci_get_handle_from_sysfw(tisci_dev));

	ret = dev_read_u32_default(dev, "ti,sci", 0);
	if (!ret) {
		dev_err(dev, "TISCI RA RM disabled\n");
		ringacc->tisci = NULL;
		return -ENODEV;
	}

	ret = dev_read_u32(dev, "ti,sci-dev-id", &ringacc->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		ringacc->tisci = NULL;
		return ret;
	}

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(
					ringacc->tisci, dev,
					ringacc->tisci_dev_id,
					"ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range))
		return PTR_ERR(ringacc->rm_gp_range);

	return 0;
}

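/*
 * Sketch of the device tree properties consumed by this driver (the
 * property and reg names come from this file and the match table below;
 * every value is a placeholder, not copied from a real board dts):
 *
 *	ringacc {
 *		compatible = "ti,am654-navss-ringacc";
 *		reg = <...>;
 *		reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
 *		ti,num-rings = <818>;
 *		ti,sci-rm-range-gp-rings = <0x1>;
 *		ti,sci = <&dmsc>;
 *		ti,sci-dev-id = <187>;
 *		ti,dma-ring-reset-quirk;
 *	};
 *
 * The &dmsc phandle and the numeric values are illustrative only.
 */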
static int k3_nav_ringacc_probe(struct udevice *dev)
{
	struct k3_nav_ringacc *ringacc;
	void __iomem *base_fifo, *base_rt;
	int ret, i;

	ringacc = dev_get_priv(dev);
	if (!ringacc)
		return -ENOMEM;

	ringacc->dev = dev;

	ret = k3_nav_ringacc_probe_dt(ringacc);
	if (ret)
		return ret;

	base_rt = (uint32_t *)devfdt_get_addr_name(dev, "rt");
	pr_debug("rt %p\n", base_rt);
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	base_fifo = (uint32_t *)devfdt_get_addr_name(dev, "fifos");
	pr_debug("fifos %p\n", base_fifo);
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	ringacc->proxy_gcfg = (struct k3_ringacc_proxy_gcfg_regs __iomem *)
		devfdt_get_addr_name(dev, "proxy_gcfg");
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);
	ringacc->proxy_target_base =
		(struct k3_ringacc_proxy_gcfg_regs __iomem *)
		devfdt_get_addr_name(dev, "proxy_target");
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	ringacc->num_proxies = ringacc_readl(&ringacc->proxy_gcfg->config) &
					     K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_rings),
					    sizeof(unsigned long), GFP_KERNEL);
	ringacc->proxy_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_proxies),
					    sizeof(unsigned long), GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
		return -ENOMEM;

	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       KNAV_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  KNAV_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}
	dev_set_drvdata(dev, ringacc);

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	list_add_tail(&ringacc->list, &k3_nav_ringacc_list);

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 ringacc_readl(&ringacc->proxy_gcfg->revision),
		 ringacc->num_proxies);
	return 0;
}

static const struct udevice_id knav_ringacc_ids[] = {
	{ .compatible = "ti,am654-navss-ringacc" },
	{},
};

U_BOOT_DRIVER(k3_navss_ringacc) = {
	.name = "k3-navss-ringacc",
	.id = UCLASS_MISC,
	.of_match = knav_ringacc_ids,
	.probe = k3_nav_ringacc_probe,
	.priv_auto_alloc_size = sizeof(struct k3_nav_ringacc),
};