// SPDX-License-Identifier: GPL-2.0
/*
 * IPU remoteproc driver for various SoCs
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 *	Angela Stegmaier <angelabaker@ti.com>
 *	Venkateswara Rao Mandela <venkat.mandela@ti.com>
 *	Keerthy <j-keerthy@ti.com>
 */

#include <common.h>
#include <hang.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <elf.h>
#include <env.h>
#include <dm/of_access.h>
#include <fs_loader.h>
#include <remoteproc.h>
#include <errno.h>
#include <clk.h>
#include <reset.h>
#include <regmap.h>
#include <syscon.h>
#include <asm/io.h>
#include <misc.h>
#include <power-domain.h>
#include <timer.h>
#include <fs.h>
#include <spl.h>
#include <linux/bitmap.h>

#define IPU1_LOAD_ADDR		(0xa17ff000)
#define MAX_REMOTECORE_BIN_SIZE	(8 * 0x100000)

enum ipu_num {
	IPU1 = 0,
	IPU2,
	RPROC_END_ENUMS,
};

#define IPU2_LOAD_ADDR		(IPU1_LOAD_ADDR + MAX_REMOTECORE_BIN_SIZE)

#define PAGE_SHIFT		12
#define PAGESIZE_1M		0x0
#define PAGESIZE_64K		0x1
#define PAGESIZE_4K		0x2
#define PAGESIZE_16M		0x3
#define LE			0
#define BE			1
#define ELEMSIZE_8		0x0
#define ELEMSIZE_16		0x1
#define ELEMSIZE_32		0x2
#define MIXED_TLB		0x0
#define MIXED_CPU		0x1

#define PGT_SMALLPAGE_SIZE		0x00001000
#define PGT_LARGEPAGE_SIZE		0x00010000
#define PGT_SECTION_SIZE		0x00100000
#define PGT_SUPERSECTION_SIZE		0x01000000

#define PGT_L1_DESC_PAGE		0x00001
#define PGT_L1_DESC_SECTION		0x00002
#define PGT_L1_DESC_SUPERSECTION	0x40002

#define PGT_L1_DESC_PAGE_MASK		0xfffffC00
#define PGT_L1_DESC_SECTION_MASK	0xfff00000
#define PGT_L1_DESC_SUPERSECTION_MASK	0xff000000

#define PGT_L1_DESC_SMALLPAGE_INDEX_SHIFT	12
#define PGT_L1_DESC_LARGEPAGE_INDEX_SHIFT	16
#define PGT_L1_DESC_SECTION_INDEX_SHIFT		20
#define PGT_L1_DESC_SUPERSECTION_INDEX_SHIFT	24

#define PGT_L2_DESC_SMALLPAGE		0x02
#define PGT_L2_DESC_LARGEPAGE		0x01

#define PGT_L2_DESC_SMALLPAGE_MASK	0xfffff000
#define PGT_L2_DESC_LARGEPAGE_MASK	0xffff0000

/*
 * The memory for the page tables (256 KB per IPU) is placed just before
 * the carveout memories for the remote processors. 16 KB of memory is
 * needed for the L1 page table (4096 entries * 4 bytes per 1 MB section).
 * Any smaller page (64 KB or 4 KB) entries are supported through L2 page
 * tables (1 KB per table). The remaining 240 KB can provide support for
 * 240 L2 page tables. Any remoteproc firmware image requiring more than
 * 240 L2 page tables would need more memory to be reserved.
 */
#define PAGE_TABLE_SIZE_L1	(0x00004000)
#define PAGE_TABLE_SIZE_L2	(0x400)
#define MAX_NUM_L2_PAGE_TABLES	(240)
#define PAGE_TABLE_SIZE_L2_TOTAL (MAX_NUM_L2_PAGE_TABLES * PAGE_TABLE_SIZE_L2)
#define PAGE_TABLE_SIZE (PAGE_TABLE_SIZE_L1 + (PAGE_TABLE_SIZE_L2_TOTAL))

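/*
 * Sanity check on the sizes above: 0x4000 bytes of L1 table plus
 * 240 * 0x400 bytes of L2 tables gives PAGE_TABLE_SIZE = 0x40000,
 * i.e. the 256 KB per IPU described in the comment. A minimal
 * compile-time guard for that assumption could look like this:
 */
_Static_assert(PAGE_TABLE_SIZE == 0x40000,
	       "page-table region is expected to be 256 KB per IPU");
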
/**
 * struct omap_rproc_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: bus address used to access the memory region
 * @dev_addr: device address of the memory region from the IPU's view
 * @size: size of the memory region
 */
struct omap_rproc_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

struct ipu_privdata {
	struct omap_rproc_mem mem;
	struct list_head mappings;
	const char *fw_name;
	u32 bootaddr;
	int id;
	struct udevice *rdev;
};

typedef int (*handle_resource_t) (void *, int offset, int avail);

unsigned int *page_table_l1 = (unsigned int *)0x0;
unsigned int *page_table_l2 = (unsigned int *)0x0;

/*
 * Set maximum carveout size to 96 MB
 */
#define DRA7_RPROC_MAX_CO_SIZE (96 * 0x100000)

/*
 * These global variables are used for deriving the MMU page tables. They
 * are initialized for each core with the appropriate values. The length
 * of the array mem_bitmap is set as per a 96 MB carveout, which is the
 * maximum set aside in the current memory map.
 */
unsigned long mem_base;
unsigned long mem_size;
unsigned long mem_bitmap[BITS_TO_LONGS(DRA7_RPROC_MAX_CO_SIZE >> PAGE_SHIFT)];
unsigned long mem_count;

unsigned int pgtable_l2_map[MAX_NUM_L2_PAGE_TABLES];
unsigned int pgtable_l2_cnt;

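/*
 * ipu_alloc_mem() hands out chunks of the remote core's carveout. @len is
 * rounded up to whole 4 KB pages, (1 << align) - 1 is used as the
 * page-index alignment mask, and bitmap_find_next_zero_area() locates a
 * free run of pages in mem_bitmap. On success the run is marked used and
 * its address inside the carveout (mem_base + page offset) is returned;
 * NULL means the carveout is exhausted.
 */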
void *ipu_alloc_mem(struct udevice *dev, unsigned long len, unsigned long align)
{
	unsigned long mask;
	unsigned long pageno;
	int count;

	count = ((len + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
	mask = (1 << align) - 1;
	pageno = bitmap_find_next_zero_area(mem_bitmap, mem_count, 0, count,
					    mask);
	debug("%s: count %d mask %#lx pageno %#lx\n", __func__, count, mask,
	      pageno);

	if (pageno >= mem_count) {
		debug("%s: %s Error allocating memory; Please check carveout size\n",
		      __FILE__, __func__);
		return NULL;
	}

	bitmap_set(mem_bitmap, pageno, count);
	return (void *)(mem_base + (pageno << PAGE_SHIFT));
}

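/*
 * find_pagesz() picks the largest MMU page size usable for the next chunk
 * of a mapping: both @virt and @phys must be naturally aligned to the
 * candidate size and @len must still cover it. The checks run from 16 MB
 * down to 4 KB and return a PAGESIZE_* code, or -1 if not even a 4 KB
 * page fits. As a worked example (addresses chosen purely for
 * illustration): virt = 0x60000000, phys = 0xa1800000, len = 0x100000
 * gives an alignment of 2^23 bytes (limited by phys), which rules out a
 * 16 MB supersection but allows a 1 MB section, so PAGESIZE_1M is
 * returned.
 */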
int find_pagesz(unsigned int virt, unsigned int phys, unsigned int len)
{
	int pg_sz_ind = -1;
	unsigned int min_align = __ffs(virt);

	if (min_align > __ffs(phys))
		min_align = __ffs(phys);

	if (min_align >= PGT_L1_DESC_SUPERSECTION_INDEX_SHIFT &&
	    len >= 0x1000000) {
		pg_sz_ind = PAGESIZE_16M;
		goto ret_block;
	}
	if (min_align >= PGT_L1_DESC_SECTION_INDEX_SHIFT &&
	    len >= 0x100000) {
		pg_sz_ind = PAGESIZE_1M;
		goto ret_block;
	}
	if (min_align >= PGT_L1_DESC_LARGEPAGE_INDEX_SHIFT &&
	    len >= 0x10000) {
		pg_sz_ind = PAGESIZE_64K;
		goto ret_block;
	}
	if (min_align >= PGT_L1_DESC_SMALLPAGE_INDEX_SHIFT &&
	    len >= 0x1000) {
		pg_sz_ind = PAGESIZE_4K;
		goto ret_block;
	}

ret_block:
	return pg_sz_ind;
}

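/*
 * get_l2_pg_tbl_addr() returns the L2 page table covering the 1 MB region
 * that contains @virt. The region base (virt & PGT_L1_DESC_SECTION_MASK)
 * is used as a tag into pgtable_l2_map: an existing table is reused when
 * the tag matches, otherwise the next free 1 KB slot after page_table_l2
 * is claimed, up to MAX_NUM_L2_PAGE_TABLES. Returns 0 on success, -1 when
 * no slot is left.
 */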
int get_l2_pg_tbl_addr(unsigned int virt, unsigned int *pg_tbl_addr)
{
	int ret = -1;
	int i = 0;
	int match_found = 0;
	unsigned int tag = (virt & PGT_L1_DESC_SECTION_MASK);

	*pg_tbl_addr = 0;
	for (i = 0; (i < pgtable_l2_cnt) && (match_found == 0); i++) {
		if (tag == pgtable_l2_map[i]) {
			*pg_tbl_addr = ((unsigned int)page_table_l2) +
				       (i * PAGE_TABLE_SIZE_L2);
			match_found = 1;
			ret = 0;
		}
	}

	if (match_found == 0 && i < MAX_NUM_L2_PAGE_TABLES) {
		pgtable_l2_map[i] = tag;
		pgtable_l2_cnt++;
		*pg_tbl_addr = ((unsigned int)page_table_l2) +
			       (i * PAGE_TABLE_SIZE_L2);
		ret = 0;
	}

	return ret;
}

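/*
 * config_l2_pagetable() fills in second-level descriptors. Bits 19:12 of
 * @virt index the 256-entry table at @pg_tbl_addr; a 4 KB small page
 * programs a single entry, while a 64 KB large page is written into 16
 * consecutive entries as the ARM short-descriptor format requires. Any
 * other page size is rejected with -1.
 */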
int
config_l2_pagetable(unsigned int virt, unsigned int phys,
		    unsigned int pg_sz, unsigned int pg_tbl_addr)
{
	int ret = -1;
	unsigned int desc = 0;
	int i = 0;
	unsigned int *pg_tbl = (unsigned int *)pg_tbl_addr;

	/*
	 * Pick bits 19:12 of the virtual address as the index
	 */
	unsigned int index = (virt & (~PGT_L1_DESC_SECTION_MASK)) >> PAGE_SHIFT;

	switch (pg_sz) {
	case PAGESIZE_64K:
		desc = (phys & PGT_L2_DESC_LARGEPAGE_MASK) |
		       PGT_L2_DESC_LARGEPAGE;
		for (i = 0; i < 16; i++)
			pg_tbl[index + i] = desc;
		ret = 0;
		break;
	case PAGESIZE_4K:
		desc = (phys & PGT_L2_DESC_SMALLPAGE_MASK) |
		       PGT_L2_DESC_SMALLPAGE;
		pg_tbl[index] = desc;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

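/*
 * ipu_config_pagetable() maps a physically contiguous region into the IPU
 * MMU. The loop is greedy: find_pagesz() picks the largest page that fits
 * at the current position, then either an L1 descriptor is written
 * directly (1 MB section, or a 16 MB supersection replicated across 16 L1
 * entries) or the 4 KB / 64 KB mapping is routed through an L2 table
 * obtained from get_l2_pg_tbl_addr(). @len must be a multiple of 4 KB;
 * the function returns @len on success and 0 on failure.
 */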
unsigned int
ipu_config_pagetable(struct udevice *dev, unsigned int virt, unsigned int phys,
		     unsigned int len)
{
	unsigned int index;
	unsigned int l = len;
	unsigned int desc;
	int pg_sz = 0;
	int i = 0, err = 0;
	unsigned int pg_tbl_l2_addr = 0;
	unsigned int tmp_pgsz;

	if ((len & 0x0FFF) != 0)
		return 0;

	while (l > 0) {
		pg_sz = find_pagesz(virt, phys, l);
		index = virt >> PGT_L1_DESC_SECTION_INDEX_SHIFT;
		switch (pg_sz) {
		/*
		 * 16 MB supersection
		 */
		case PAGESIZE_16M:
			/*
			 * Program the next 16 descriptors
			 */
			desc = (phys & PGT_L1_DESC_SUPERSECTION_MASK) |
			       PGT_L1_DESC_SUPERSECTION;
			for (i = 0; i < 16; i++)
				page_table_l1[index + i] = desc;
			l -= PGT_SUPERSECTION_SIZE;
			phys += PGT_SUPERSECTION_SIZE;
			virt += PGT_SUPERSECTION_SIZE;
			break;
		/*
		 * 1 MB section
		 */
		case PAGESIZE_1M:
			desc = (phys & PGT_L1_DESC_SECTION_MASK) |
			       PGT_L1_DESC_SECTION;
			page_table_l1[index] = desc;
			l -= PGT_SECTION_SIZE;
			phys += PGT_SECTION_SIZE;
			virt += PGT_SECTION_SIZE;
			break;
		/*
		 * 64 KB large page or 4 KB small page
		 */
		case PAGESIZE_64K:
		case PAGESIZE_4K:
			if (pg_sz == PAGESIZE_64K)
				tmp_pgsz = 0x10000;
			else
				tmp_pgsz = 0x1000;

			err = get_l2_pg_tbl_addr(virt, &pg_tbl_l2_addr);
			if (err != 0) {
				debug("Unable to get level 2 PT address\n");
				hang();
			}
			err = config_l2_pagetable(virt, phys, pg_sz,
						  pg_tbl_l2_addr);
			desc = (pg_tbl_l2_addr & PGT_L1_DESC_PAGE_MASK) |
			       PGT_L1_DESC_PAGE;
			page_table_l1[index] = desc;
			l -= tmp_pgsz;
			phys += tmp_pgsz;
			virt += tmp_pgsz;
			break;
		case -1:
		default:
			return 0;
		}
	}

	return len;
}

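/*
 * da_to_pa() translates a device address used by the IPU firmware into a
 * physical address using the carveout mappings recorded for this device.
 * A return value of 0 means no mapping covers @da, and callers then use
 * the address unchanged.
 */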
int da_to_pa(struct udevice *dev, int da)
{
	struct rproc_mem_entry *maps = NULL;
	struct ipu_privdata *priv = dev_get_priv(dev);

	list_for_each_entry(maps, &priv->mappings, node) {
		if (da >= maps->da && da < (maps->da + maps->len))
			return maps->dma + (da - maps->da);
	}

	return 0;
}

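/*
 * ipu_config_mmu() points each of the core's IOMMUs at the freshly
 * cleared page table and enables translation. The hard-coded offsets
 * appear to follow the OMAP IOMMU register map: 0x4c is the page-table
 * base (TTB) register, 0x88 the GP_REG used here for bus-error back,
 * 0x1c the IRQENABLE register (0x1E unmasks the fault interrupts listed
 * in the comment below) and 0x44 the CNTL register (0x6 sets TWLENABLE
 * and MMUENABLE).
 */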
u32 ipu_config_mmu(u32 core_id, struct rproc *cfg)
{
	u32 i = 0;
	u32 reg = 0;

	/*
	 * Clear the entire pagetable location before programming the
	 * address into the MMU
	 */
	memset((void *)cfg->page_table_addr, 0x00, PAGE_TABLE_SIZE);

	for (i = 0; i < cfg->num_iommus; i++) {
		u32 mmu_base = cfg->mmu_base_addr[i];

		__raw_writel((int)cfg->page_table_addr, mmu_base + 0x4c);
		reg = __raw_readl(mmu_base + 0x88);

		/*
		 * enable bus-error back
		 */
		__raw_writel(reg | 0x1, mmu_base + 0x88);

		/*
		 * Enable the MMU IRQs during MMU programming for the
		 * late attach case. This is to allow the MMU fault to be
		 * detected by the kernel.
		 *
		 * MULTIHITFAULT|EMUMISS|TRANSLATIONFAULT|TABLEWALKFAULT
		 */
		__raw_writel(0x1E, mmu_base + 0x1c);

		/*
		 * emutlbupdate|TWLENABLE|MMUENABLE
		 */
		__raw_writel(0x6, mmu_base + 0x44);
	}

	return 0;
}

/**
 * enum ipu_mem - IPU core memory range identifiers
 */
enum ipu_mem {
	PRU_MEM_IRAM = 0,
	PRU_MEM_CTRL,
	PRU_MEM_DEBUG,
	PRU_MEM_MAX,
};

static int ipu_start(struct udevice *dev)
{
	struct ipu_privdata *priv;
	struct reset_ctl reset;
	struct rproc *cfg = NULL;
	int ret;

	priv = dev_get_priv(dev);

	cfg = rproc_cfg_arr[priv->id];
	if (cfg->config_peripherals)
		cfg->config_peripherals(priv->id, cfg);

	/*
	 * Start running the remote core
	 */
	ret = reset_get_by_index(dev, 0, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 0);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 0);
		return ret;
	}

	ret = reset_get_by_index(dev, 1, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 1);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 1);
		return ret;
	}

	return 0;
}

static int ipu_stop(struct udevice *dev)
{
	return 0;
}

/**
 * ipu_init() - Initialize the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 if all went OK, else an appropriate error
 */
static int ipu_init(struct udevice *dev)
{
	return 0;
}

static int ipu_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
{
	struct ipu_privdata *priv = dev_get_priv(dev);

	list_add_tail(&mapping->node, &priv->mappings);
	return 0;
}

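/*
 * ipu_load() is a minimal ELF32 segment loader: for each PT_LOAD program
 * header it translates the segment address through da_to_pa(), copies
 * p_filesz bytes from the image and flushes p_memsz bytes of cache so the
 * remote core sees the data. The gap between p_memsz and p_filesz (.bss)
 * is flushed but not zeroed here.
 */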
static int ipu_load(struct udevice *dev, ulong addr, ulong size)
{
	Elf32_Ehdr *ehdr;	/* Elf header structure pointer */
	Elf32_Phdr *phdr;	/* Program header structure pointer */
	Elf32_Phdr proghdr;
	int va;
	int pa;
	int i;

	ehdr = (Elf32_Ehdr *)addr;
	phdr = (Elf32_Phdr *)(addr + ehdr->e_phoff);
	/*
	 * Load each program header
	 */
	for (i = 0; i < ehdr->e_phnum; ++i) {
		memcpy(&proghdr, phdr, sizeof(Elf32_Phdr));

		if (proghdr.p_type != PT_LOAD) {
			++phdr;
			continue;
		}

		va = proghdr.p_paddr;
		pa = da_to_pa(dev, va);
		if (pa)
			proghdr.p_paddr = pa;

		void *dst = (void *)(uintptr_t)proghdr.p_paddr;
		void *src = (void *)addr + proghdr.p_offset;

		debug("Loading phdr %i to 0x%p (%i bytes)\n", i, dst,
		      proghdr.p_filesz);
		if (proghdr.p_filesz)
			memcpy(dst, src, proghdr.p_filesz);

		flush_cache((unsigned long)dst, proghdr.p_memsz);

		++phdr;
	}

	return 0;
}

static const struct dm_rproc_ops ipu_ops = {
	.init = ipu_init,
	.start = ipu_start,
	.stop = ipu_stop,
	.load = ipu_load,
	.add_res = ipu_add_res,
	.config_pagetable = ipu_config_pagetable,
	.alloc_mem = ipu_alloc_mem,
};

/*
 * If the remotecore binary expects any peripherals to be set up before it
 * has booted, configure them here.
 *
 * These functions are left empty by default as their operation is
 * use-case specific.
 */

u32 ipu1_config_peripherals(u32 core_id, struct rproc *cfg)
{
	return 0;
}

u32 ipu2_config_peripherals(u32 core_id, struct rproc *cfg)
{
	return 0;
}

struct rproc_intmem_to_l3_mapping ipu1_intmem_to_l3_mapping = {
	.num_entries = 1,
	.mappings = {
		/*
		 * L2 SRAM
		 */
		{
			.priv_addr = 0x55020000,
			.l3_addr = 0x58820000,
			.len = (64 * 1024)
		},
	}
};

struct rproc_intmem_to_l3_mapping ipu2_intmem_to_l3_mapping = {
	.num_entries = 1,
	.mappings = {
		/*
		 * L2 SRAM
		 */
		{
			.priv_addr = 0x55020000,
			.l3_addr = 0x55020000,
			.len = (64 * 1024)
		},
	}
};

struct rproc ipu1_config = {
	.num_iommus = 1,
	.mmu_base_addr = {0x58882000, 0},
	.load_addr = IPU1_LOAD_ADDR,
	.core_name = "IPU1",
	.firmware_name = "dra7-ipu1-fw.xem4",
	.config_mmu = ipu_config_mmu,
	.config_peripherals = ipu1_config_peripherals,
	.intmem_to_l3_mapping = &ipu1_intmem_to_l3_mapping
};

struct rproc ipu2_config = {
	.num_iommus = 1,
	.mmu_base_addr = {0x55082000, 0},
	.load_addr = IPU2_LOAD_ADDR,
	.core_name = "IPU2",
	.firmware_name = "dra7-ipu2-fw.xem4",
	.config_mmu = ipu_config_mmu,
	.config_peripherals = ipu2_config_peripherals,
	.intmem_to_l3_mapping = &ipu2_intmem_to_l3_mapping
};

struct rproc *rproc_cfg_arr[2] = {
	[IPU2] = &ipu2_config,
	[IPU1] = &ipu1_config,
};

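/*
 * spl_pre_boot_core() does the per-core boot preparation: it validates
 * the ELF image already placed at the core's load address, checks for a
 * resource table, programs the MMU when one is present, resets the
 * carveout allocator and the L2 page-table bookkeeping for this core, and
 * finally calls rproc_parse_resource_table(), which (per the comments
 * below) also drives the ELF load and fills the first IOMMU's page table.
 * Returns 0 on success, 1 on any failure.
 */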
u32 spl_pre_boot_core(struct udevice *dev, u32 core_id)
{
	struct rproc *cfg = NULL;
	unsigned long load_elf_status = 0;
	int tablesz;

	cfg = rproc_cfg_arr[core_id];
	/*
	 * Check for a valid ELF image
	 */
	if (!valid_elf_image(cfg->load_addr))
		return 1;

	if (rproc_find_resource_table(dev, cfg->load_addr, &tablesz))
		cfg->has_rsc_table = 1;
	else
		cfg->has_rsc_table = 0;

	/*
	 * Configure the MMU
	 */
	if (cfg->config_mmu && cfg->has_rsc_table)
		cfg->config_mmu(core_id, cfg);

	/*
	 * Load the remote core. Fill the page table of the first (possibly
	 * only) IOMMU during ELF loading. Copy the page table to the second
	 * IOMMU before running the remote core.
	 */

	page_table_l1 = (unsigned int *)cfg->page_table_addr;
	page_table_l2 =
	    (unsigned int *)(cfg->page_table_addr + PAGE_TABLE_SIZE_L1);
	mem_base = cfg->cma_base;
	mem_size = cfg->cma_size;
	memset(mem_bitmap, 0x00, sizeof(mem_bitmap));
	mem_count = (cfg->cma_size >> PAGE_SHIFT);

	/*
	 * Clear variables used for level 2 page table allocation
	 */
	memset(pgtable_l2_map, 0x00, sizeof(pgtable_l2_map));
	pgtable_l2_cnt = 0;

	load_elf_status = rproc_parse_resource_table(dev, cfg);
	if (load_elf_status == 0) {
		debug("rproc_parse_resource_table failed for core %s\n",
		      cfg->core_name);
		return 1;
	}

	flush_cache(cfg->page_table_addr, PAGE_TABLE_SIZE);

	return 0;
}

static fdt_addr_t ipu_parse_mem_nodes(struct udevice *dev, char *name,
				      int privid, fdt_size_t *sizep)
{
	int ret;
	u32 sp;
	ofnode mem_node;

	ret = ofnode_read_u32(dev_ofnode(dev), name, &sp);
	if (ret) {
		dev_err(dev, "memory-region node fetch failed %d\n", ret);
		return ret;
	}

	mem_node = ofnode_get_by_phandle(sp);
	if (!ofnode_valid(mem_node))
		return -EINVAL;

	return ofnode_get_addr_size_index(mem_node, 0, sizep);
}

/**
 * ipu_probe() - Basic probe
 * @dev: corresponding remote processor device
 *
 * Return: 0 if all goes well, else an appropriate error.
 */
static int ipu_probe(struct udevice *dev)
{
	struct ipu_privdata *priv;
	struct rproc *cfg = NULL;
	struct reset_ctl reset;
	static const char *const ipu_mem_names[] = { "l2ram" };
	int ret;
	fdt_size_t sizep;

	priv = dev_get_priv(dev);

	priv->mem.bus_addr =
		devfdt_get_addr_size_name(dev, ipu_mem_names[0],
					  (fdt_addr_t *)&priv->mem.size);

	ret = reset_get_by_index(dev, 2, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 2);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 2);
		return ret;
	}

	if (priv->mem.bus_addr == FDT_ADDR_T_NONE) {
		dev_err(dev, "%s bus address not found\n", ipu_mem_names[0]);
		return -EINVAL;
	}
	priv->mem.cpu_addr = map_physmem(priv->mem.bus_addr,
					 priv->mem.size, MAP_NOCACHE);

	if (devfdt_get_addr(dev) == 0x58820000)
		priv->id = 0;
	else
		priv->id = 1;

	cfg = rproc_cfg_arr[priv->id];
	cfg->cma_base = ipu_parse_mem_nodes(dev, "memory-region", priv->id,
					    &sizep);
	cfg->cma_size = sizep;

	cfg->page_table_addr = ipu_parse_mem_nodes(dev, "pg-tbl", priv->id,
						   &sizep);

	dev_info(dev,
		 "ID %d memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
		 priv->id, ipu_mem_names[0], &priv->mem.bus_addr,
		 priv->mem.size, priv->mem.cpu_addr, priv->mem.dev_addr);

	INIT_LIST_HEAD(&priv->mappings);
	if (spl_pre_boot_core(dev, priv->id))
		return -EINVAL;

	return 0;
}

static const struct udevice_id ipu_ids[] = {
	{ .compatible = "ti,dra7-ipu" },
	{}
};

U_BOOT_DRIVER(ipu) = {
	.name = "ipu",
	.of_match = ipu_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &ipu_ops,
	.probe = ipu_probe,
	.priv_auto = sizeof(struct ipu_privdata),
};