// SPDX-License-Identifier: GPL-2.0+
/*
 * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 */

#include <common.h>
#include <cpu_func.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <malloc.h>
#include <asm/bitops.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/uclass.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>

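/*
 * The definitions below are small compatibility shims, not new
 * functionality: the driver body closely follows the Linux k3-ringacc
 * driver and uses Linux-style set_bit()/clear_bit() and the four-argument
 * dma_free_coherent()/dma_zalloc_coherent() calls, which these macros map
 * onto the __set_bit()/__clear_bit() and the simpler dma_alloc_coherent()/
 * dma_free_coherent() helpers available in U-Boot.
 */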
#define set_bit(bit, bitmap)	__set_bit(bit, bitmap)
#define clear_bit(bit, bitmap)	__clear_bit(bit, bitmap)
#define dma_free_coherent(dev, size, cpu_addr, dma_handle) \
	dma_free_coherent(cpu_addr)
#define dma_zalloc_coherent(dev, size, dma_handle, flag) \
({ \
	void *ring_mem_virt; \
	ring_mem_virt = dma_alloc_coherent((size), \
					   (unsigned long *)(dma_handle)); \
	if (ring_mem_virt) \
		memset(ring_mem_virt, 0, (size)); \
	ring_mem_virt; \
})

static LIST_HEAD(k3_nav_ringacc_list);

static void ringacc_writel(u32 v, void __iomem *reg)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v, reg);
	writel(v, reg);
}

static u32 ringacc_readl(void __iomem *reg)
{
	u32 v;

	v = readl(reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, reg);
	return v;
}

#define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK	GENMASK(19, 0)

/**
 * struct k3_nav_ring_rt_regs - The RA Control/Status Registers region
 */
struct k3_nav_ring_rt_regs {
	u32	resv_16[4];
	u32	db;		/* RT Ring N Doorbell Register */
	u32	resv_4[1];
	u32	occ;		/* RT Ring N Occupancy Register */
	u32	indx;		/* RT Ring N Current Index Register */
	u32	hwocc;		/* RT Ring N Hardware Occupancy Register */
	u32	hwindx;		/* RT Ring N Hardware Current Index Register */
};

#define KNAV_RINGACC_RT_REGS_STEP	0x1000

/**
 * struct k3_nav_ring_fifo_regs - The Ring Accelerator Queues Registers region
 */
struct k3_nav_ring_fifo_regs {
	u32	head_data[128];		/* Ring Head Entry Data Registers */
	u32	tail_data[128];		/* Ring Tail Entry Data Registers */
	u32	peek_head_data[128];	/* Ring Peek Head Entry Data Regs */
	u32	peek_tail_data[128];	/* Ring Peek Tail Entry Data Regs */
};

/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;	/* Revision Register */
	u32	config;		/* Config Register */
};

#define K3_RINGACC_PROXY_CFG_THREADS_MASK	GENMASK(15, 0)

/**
 * struct k3_ringacc_proxy_target_regs - RA Proxy Datapath MMIO Region
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;	/* Proxy Control Register */
	u32	status;		/* Proxy Status Register */
	u8	resv_512[504];
	u32	data[128];	/* Proxy Data Register */
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES	(512U)
#define KNAV_RINGACC_FIFO_REGS_STEP	0x1000
#define KNAV_RINGACC_MAX_DB_RING_CNT	(127U)

/**
 * struct k3_nav_ring_ops - Ring operations
 */
struct k3_nav_ring_ops {
	int (*push_tail)(struct k3_nav_ring *ring, void *elm);
	int (*push_head)(struct k3_nav_ring *ring, void *elm);
	int (*pop_tail)(struct k3_nav_ring *ring, void *elm);
	int (*pop_head)(struct k3_nav_ring *ring, void *elm);
};

/**
 * struct k3_nav_ring - RA Ring descriptor
 *
 * @rt - Ring control/status registers
 * @fifos - Ring queues registers
 * @proxy - Ring Proxy Datapath registers
 * @ring_mem_dma - Ring buffer dma address
 * @ring_mem_virt - Ring buffer virt address
 * @ops - Ring operations
 * @size - Ring size in elements
 * @elm_size - Size of the ring element
 * @mode - Ring mode
 * @flags - flags
 * @free - Number of free elements
 * @occ - Ring occupancy
 * @windex - Write index (only for @K3_NAV_RINGACC_RING_MODE_RING)
 * @rindex - Read index (only for @K3_NAV_RINGACC_RING_MODE_RING)
 * @ring_id - Ring Id
 * @parent - Pointer to the parent struct @k3_nav_ringacc
 * @use_count - Use count for shared rings
 * @proxy_id - RA Ring Proxy Id (only if @K3_NAV_RINGACC_RING_USE_PROXY)
 */
struct k3_nav_ring {
	struct k3_nav_ring_rt_regs __iomem *rt;
	struct k3_nav_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_nav_ring_ops *ops;
	u32		size;
	enum k3_nav_ring_size elm_size;
	enum k3_nav_ring_mode mode;
	u32		flags;
#define KNAV_RING_FLAG_BUSY	BIT(1)
#define K3_NAV_RING_FLAG_SHARED	BIT(2)
	u32		free;
	u32		occ;
	u32		windex;
	u32		rindex;
	u32		ring_id;
	struct k3_nav_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
};

/**
 * struct k3_nav_ringacc - Rings accelerator descriptor
 *
 * @dev - pointer to the RA device
 * @proxy_gcfg - RA proxy global config registers
 * @proxy_target_base - RA proxy datapath region
 * @num_rings - number of rings in RA
 * @rings_inuse - bitmap of rings already requested
 * @rm_gp_range - general purpose rings range from tisci
 * @dma_ring_reset_quirk - DMA reset workaround enable
 * @num_proxies - number of RA proxies
 * @proxy_inuse - bitmap of proxies already requested
 * @rings - array of rings descriptors (struct @k3_nav_ring)
 * @list - list of RAs in the system
 * @tisci - pointer to the ti-sci handle
 * @tisci_ring_ops - ti-sci rings ops
 * @tisci_dev_id - ti-sci device id
 */
struct k3_nav_ringacc {
	struct udevice *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;
	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	struct k3_nav_ring *rings;
	struct list_head list;

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;
};

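/*
 * Each ring's FIFO access window is KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES
 * (512 bytes) wide and an element is written to / read from the last
 * (4 << elm_size) bytes of that window, which is the offset computed by the
 * helper below.  For example, with 8-byte elements (4 << elm_size == 8) the
 * access starts at byte 504 of the 512-byte window.
 */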
static long k3_nav_ringacc_ring_get_fifo_pos(struct k3_nav_ring *ring)
{
	return KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES -
	       (4 << ring->elm_size);
}

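/*
 * Ring elements are (4 << elm_size) bytes each and are stored back to back
 * in the coherent ring buffer, so the address of element @idx is
 * ring_mem_virt + idx * (4 << elm_size).  For instance, with 8-byte
 * elements, element 3 lives at offset 24 from the start of the ring memory.
 */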
static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring *ring, u32 idx)
{
	return (idx * (4 << ring->elm_size) + ring->ring_mem_virt);
}

static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem);

static struct k3_nav_ring_ops k3_nav_mode_ring_ops = {
		.push_tail = k3_nav_ringacc_ring_push_mem,
		.pop_head = k3_nav_ringacc_ring_pop_mem,
};

static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
					    void *elem);
static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring,
					   void *elem);

static struct k3_nav_ring_ops k3_nav_mode_msg_ops = {
		.push_tail = k3_nav_ringacc_ring_push_io,
		.push_head = k3_nav_ringacc_ring_push_head_io,
		.pop_tail = k3_nav_ringacc_ring_pop_tail_io,
		.pop_head = k3_nav_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring,
					   void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring,
					   void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem);

static struct k3_nav_ring_ops k3_nav_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
};

struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc)
{
	return ringacc->dev;
}

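/*
 * A client (for example the K3 UDMA driver) obtains a ring with
 * k3_nav_ringacc_request_ring(), either by a fixed ring index or with
 * K3_NAV_RINGACC_RING_ID_ANY to pick any free general purpose ring.
 * A minimal, illustrative call sequence (not lifted from a real client):
 *
 *	ring = k3_nav_ringacc_request_ring(ringacc,
 *					   K3_NAV_RINGACC_RING_ID_ANY, 0);
 *	if (!ring)
 *		return -ENODEV;
 *
 * The ring must then be configured with k3_nav_ringacc_ring_cfg() before
 * any push/pop and released with k3_nav_ringacc_ring_free() when done.
 */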
struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc,
						int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	if (id == K3_NAV_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
					&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse,
					size, gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_NAV_RINGACC_RING_USE_PROXY) {
		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
					      ringacc->num_proxies, 0);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (!try_module_get(ringacc->dev->driver->owner))
		goto error;

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		pr_debug("Giving ring#%d proxy#%d\n",
			 id, proxy_id);
	} else {
		pr_debug("Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	return &ringacc->rings[id];

error:
	return NULL;
}

static void k3_ringacc_ring_reset_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			ring->size,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return;

	ring->occ = 0;
	ring->free = 0;
	ring->rindex = 0;
	ring->windex = 0;

	k3_ringacc_ring_reset_sci(ring);
}

static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring *ring,
					       enum k3_nav_ring_mode mode)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			mode,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

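/*
 * DMA ring reset workaround, only taken when the "ti,dma-ring-reset-quirk"
 * DT property is set: if the ring still reports occupancy at reset time it
 * is reset via TISCI, temporarily put into ring/doorbell mode, and the
 * doorbell is rung (2^22 - occ) times in chunks of at most
 * KNAV_RINGACC_MAX_DB_RING_CNT (127) so that the UDMAP's internal occupancy
 * counter wraps back to zero.  As a rough illustration: with occ == 5 the
 * doorbell is rung 4194299 times in total.
 */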
void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk) {
		k3_nav_ringacc_ring_reset(ring);
		return;
	}

	if (!occ)
		occ = ringacc_readl(&ring->rt->occ);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		pr_debug("%s %u occ: %u\n", __func__,
			 ring->ring_id, occ);
		/* 2. Reset the ring */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * 3. Setup the ring in ring/doorbell mode
		 * (if not already in this mode)
		 */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_NAV_RINGACC_RING_MODE_RING);
		/*
		 * 4. Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21 bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * number of writes
			 */
			if (db_ring_cnt > KNAV_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = KNAV_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* 5. Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

	/* Reset the ring */
	k3_nav_ringacc_ring_reset(ring);
}

static void k3_ringacc_ring_free_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	pr_debug("%s flags: 0x%08x\n", __func__, ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (--ring->use_count)
		goto out;

	if (!(ring->flags & KNAV_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags &= ~KNAV_RING_FLAG_BUSY;
	ring->ops = NULL;
	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

	module_put(ringacc->dev->driver->owner);

out:
	return 0;
}

u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}

static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	u32 ring_idx;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_idx = ring->ring_id;
	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring_idx,
			lower_32_bits(ring->ring_mem_dma),
			upper_32_bits(ring->ring_mem_dma),
			ring->size,
			ring->mode,
			ring->elm_size,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring_idx);

	return ret;
}

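/*
 * Illustrative only (not taken from an in-tree caller): a typical
 * configuration for a 16-element ring of 8-byte elements operated in ring
 * mode could look like this, assuming the ring was obtained with
 * k3_nav_ringacc_request_ring() above:
 *
 *	struct k3_nav_ring_cfg cfg = {
 *		.size = 16,
 *		.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8,
 *		.mode = K3_NAV_RINGACC_RING_MODE_RING,
 *		.flags = 0,
 *	};
 *
 *	ret = k3_nav_ringacc_ring_cfg(ring, &cfg);
 *
 * Elements are then queued/dequeued with k3_nav_ringacc_ring_push() and
 * k3_nav_ringacc_ring_pop().
 */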
int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring,
			    struct k3_nav_ring_cfg *cfg)
{
	struct k3_nav_ringacc *ringacc;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;

	ringacc = ring->parent;

	if (cfg->elm_size > K3_NAV_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode > K3_NAV_RINGACC_RING_MODE_QM ||
	    cfg->size & ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	ring->occ = 0;
	ring->free = 0;
	ring->rindex = 0;
	ring->windex = 0;

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_NAV_RINGACC_RING_MODE_RING:
		ring->ops = &k3_nav_mode_ring_ops;
		break;
	case K3_NAV_RINGACC_RING_MODE_QM:
		/*
		 * In Queue mode elm_size can be 8 only and each operation
		 * uses 2 element slots
		 */
		if (cfg->elm_size != K3_NAV_RINGACC_RING_ELSIZE_8 ||
		    cfg->size % 2)
			goto err_free_proxy;
		/* fall through - QM rings use the message/proxy ops as well */
	case K3_NAV_RINGACC_RING_MODE_MESSAGE:
		if (ring->proxy)
			ring->ops = &k3_nav_mode_proxy_ops;
		else
			ring->ops = &k3_nav_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt =
			dma_zalloc_coherent(ringacc->dev,
					    ring->size * (4 << ring->elm_size),
					    &ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_nav_ringacc_ring_cfg_sci(ring);

	if (ret)
		goto err_free_mem;

	ring->flags |= KNAV_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_NAV_RINGACC_RING_SHARED) ?
			K3_NAV_RING_FLAG_SHARED : 0;

	return 0;

err_free_mem:
	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}

u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	return ring->size;
}

u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->free)
		ring->free = ring->size - ringacc_readl(&ring->rt->occ);

	return ring->free;
}

u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	return ringacc_readl(&ring->rt->occ);
}

u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring)
{
	return !k3_nav_ringacc_ring_get_free(ring);
}

enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};

static int k3_ringacc_ring_cfg_proxy(struct k3_nav_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= mode << 16;
	val |= ring->elm_size << 24;
	ringacc_writel(val, &ring->proxy->control);
	return 0;
}

static int k3_nav_ringacc_ring_access_proxy(
			struct k3_nav_ring *ring, void *elem,
			enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		pr_debug("proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		pr_debug("proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->free--;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("proxy: free%d occ%d\n",
		 ring->free, ring->occ);
	return 0;
}

static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_proxy(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_nav_ringacc_ring_access_io(
			struct k3_nav_ring *ring, void *elem,
			enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		pr_debug("memcpy_fromio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		pr_debug("memcpy_toio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->free--;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("free%d index%d occ%d index%d\n",
		 ring->free, ring->windex, ring->occ, ring->rindex);
	return 0;
}

static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
					    void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring, void *elem)
{
	return k3_nav_ringacc_ring_access_io(
			ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

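/*
 * In K3_NAV_RINGACC_RING_MODE_RING mode the element is copied to/from the
 * coherent ring buffer directly and the hardware is informed through the
 * doorbell: a positive count announces that many new elements, a negative
 * count consumes them (push_mem/pop_mem below ring it with +1 and -1).
 * The explicit flush/invalidate of the ring memory keeps the CPU cache and
 * the DMA view of the ring consistent, since the buffer is not guaranteed
 * to be mapped uncached here.
 */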
static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));

	flush_dcache_range((unsigned long)ring->ring_mem_virt,
			   ALIGN((unsigned long)ring->ring_mem_virt +
				 ring->size * (4 << ring->elm_size),
				 ARCH_DMA_MINALIGN));

	ring->windex = (ring->windex + 1) % ring->size;
	ring->free--;
	ringacc_writel(1, &ring->rt->db);

	pr_debug("ring_push_mem: free%d index%d\n",
		 ring->free, ring->windex);

	return 0;
}

static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->rindex);

	invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
				ALIGN((unsigned long)ring->ring_mem_virt +
				      ring->size * (4 << ring->elm_size),
				      ARCH_DMA_MINALIGN));

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->rindex = (ring->rindex + 1) % ring->size;
	ring->occ--;
	ringacc_writel(-1, &ring->rt->db);

	pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n",
		 ring->occ, ring->rindex, elem_ptr);
	return 0;
}

int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	pr_debug("ring_push%d: free%d index%d\n",
		 ring->ring_id, ring->free, ring->windex);

	if (k3_nav_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);

	return ret;
}

int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	pr_debug("ring_push_head: free%d index%d\n",
		 ring->free, ring->windex);

	if (k3_nav_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_head)
		ret = ring->ops->push_head(ring, elem);

	return ret;
}

int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->occ)
		ring->occ = k3_nav_ringacc_ring_get_occ(ring);

	pr_debug("ring_pop%d: occ%d index%d\n",
		 ring->ring_id, ring->occ, ring->rindex);

	if (!ring->occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_head)
		ret = ring->ops->pop_head(ring, elem);

	return ret;
}

int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->occ)
		ring->occ = k3_nav_ringacc_ring_get_occ(ring);

	pr_debug("ring_pop_tail: occ%d index%d\n",
		 ring->occ, ring->rindex);

	if (!ring->occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_tail)
		ret = ring->ops->pop_tail(ring, elem);

	return ret;
}

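/*
 * Device tree inputs consumed below and in the probe routine: the
 * "ti,num-rings" count, the optional "ti,dma-ring-reset-quirk" flag, the
 * "ti,sci" phandle to the system firmware, "ti,sci-dev-id" and the
 * "ti,sci-rm-range-gp-rings" general purpose ring range, plus the "rt",
 * "fifos", "proxy_gcfg" and "proxy_target" register regions looked up by
 * name in k3_nav_ringacc_probe().
 */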
static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc *ringacc)
{
	struct udevice *dev = ringacc->dev;
	struct udevice *tisci_dev = NULL;
	int ret;

	ringacc->num_rings = dev_read_u32_default(dev, "ti,num-rings", 0);
	if (!ringacc->num_rings) {
		dev_err(dev, "ti,num-rings read failure\n");
		return -EINVAL;
	}

	ringacc->dma_ring_reset_quirk =
			dev_read_bool(dev, "ti,dma-ring-reset-quirk");

	ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
					   "ti,sci", &tisci_dev);
	if (ret) {
		pr_debug("TISCI RA RM get failed (%d)\n", ret);
		ringacc->tisci = NULL;
		return -ENODEV;
	}
	ringacc->tisci = (struct ti_sci_handle *)
			 (ti_sci_get_handle_from_sysfw(tisci_dev));

	ret = dev_read_u32_default(dev, "ti,sci", 0);
	if (!ret) {
		dev_err(dev, "TISCI RA RM disabled\n");
		ringacc->tisci = NULL;
		return -ENODEV;
	}

	ret = dev_read_u32(dev, "ti,sci-dev-id", &ringacc->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		ringacc->tisci = NULL;
		return ret;
	}

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(
					ringacc->tisci, dev,
					ringacc->tisci_dev_id,
					"ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range))
		return PTR_ERR(ringacc->rm_gp_range);

	return 0;
}

static int k3_nav_ringacc_probe(struct udevice *dev)
{
	struct k3_nav_ringacc *ringacc;
	void __iomem *base_fifo, *base_rt;
	int ret, i;

	ringacc = dev_get_priv(dev);
	if (!ringacc)
		return -ENOMEM;

	ringacc->dev = dev;

	ret = k3_nav_ringacc_probe_dt(ringacc);
	if (ret)
		return ret;

	base_rt = (uint32_t *)devfdt_get_addr_name(dev, "rt");
	pr_debug("rt %p\n", base_rt);
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	base_fifo = (uint32_t *)devfdt_get_addr_name(dev, "fifos");
	pr_debug("fifos %p\n", base_fifo);
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	ringacc->proxy_gcfg = (struct k3_ringacc_proxy_gcfg_regs __iomem *)
		devfdt_get_addr_name(dev, "proxy_gcfg");
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);
	ringacc->proxy_target_base =
		(struct k3_ringacc_proxy_gcfg_regs __iomem *)
		devfdt_get_addr_name(dev, "proxy_target");
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	ringacc->num_proxies = ringacc_readl(&ringacc->proxy_gcfg->config) &
					     K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_rings),
					    sizeof(unsigned long), GFP_KERNEL);
	ringacc->proxy_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_proxies),
					    sizeof(unsigned long), GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
		return -ENOMEM;

	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       KNAV_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  KNAV_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}
	dev_set_drvdata(dev, ringacc);

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	list_add_tail(&ringacc->list, &k3_nav_ringacc_list);

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 ringacc_readl(&ringacc->proxy_gcfg->revision),
		 ringacc->num_proxies);
	return 0;
}

static const struct udevice_id knav_ringacc_ids[] = {
	{ .compatible = "ti,am654-navss-ringacc" },
	{},
};

U_BOOT_DRIVER(k3_navss_ringacc) = {
	.name = "k3-navss-ringacc",
	.id = UCLASS_MISC,
	.of_match = knav_ringacc_ids,
	.probe = k3_nav_ringacc_probe,
	.priv_auto_alloc_size = sizeof(struct k3_nav_ringacc),
};