]> git.ipfire.org Git - thirdparty/u-boot.git/commitdiff
mach-k3: add runtime memory carveouts for MMU table
authorAnshul Dalal <anshuld@ti.com>
Wed, 18 Jun 2025 12:42:08 +0000 (18:12 +0530)
committerTom Rini <trini@konsulko.com>
Fri, 27 Jun 2025 16:02:18 +0000 (10:02 -0600)
In U-Boot we only provide a single MMU table for all K3 platforms;
this does not scale for devices with reserved memory outside the range
0x9e780000 - 0xa0000000 or for devices with < 2GiB of memory (e.g.
AM62-SIP with 512MiB of RAM).

To properly configure the MMU on various k3 platforms, the
reserved-memory regions need to be queried at runtime from the
device-tree and the MMU table should be updated accordingly.

This patch adds the required fixups to the MMU table (during the
proper U-Boot stage) by marking the reserved regions as non-cacheable
and keeping the remaining area cacheable.

For the A-core SPL, the 128MiB region starting from SPL_TEXT_BASE
is marked as cacheable, i.e. 0x80080000 to 0x88080000.

The 128MiB size is chosen to allow for future use cases such as falcon
boot from the A-core SPL, which would require loading the kernel image
from the SPL stage. This change also ensures the reserved memory
regions, which all exist past 0x88080000, are non-cacheable, preventing
speculative accesses to those addresses.

Signed-off-by: Anshul Dalal <anshuld@ti.com>
arch/arm/mach-k3/arm64/arm64-mmu.c
arch/arm/mach-k3/include/mach/k3-ddr.h
board/beagle/beagleplay/beagleplay.c
board/ti/common/k3-ddr.c

index 0e07b1b7ce0fa1ddd2be295ff2a9d1e868c74fee..2fdd106fec4912344cceae152a3d39cb4e879d73 100644 (file)
@@ -9,11 +9,19 @@
  *
  */
 
+#include <linux/sizes.h>
 #include <asm/system.h>
 #include <asm/armv8/mmu.h>
+#include <mach/k3-ddr.h>
+#include <sort.h>
 
-struct mm_region k3_mem_map[] = {
+#include "../common_fdt.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct mm_region k3_mem_map[K3_MMU_REGIONS_COUNT] = {
        {
+               /* Peripherals */
                .virt = 0x0UL,
                .phys = 0x0UL,
                .size = 0x80000000UL,
@@ -21,30 +29,27 @@ struct mm_region k3_mem_map[] = {
                         PTE_BLOCK_NON_SHARE |
                         PTE_BLOCK_PXN | PTE_BLOCK_UXN
        }, {
-               .virt = 0x80000000UL,
-               .phys = 0x80000000UL,
-               .size = 0x1e780000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
-                        PTE_BLOCK_INNER_SHARE
-       }, {
-               .virt = 0xa0000000UL,
-               .phys = 0xa0000000UL,
-               .size = 0x60000000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
-                        PTE_BLOCK_INNER_SHARE
-       }, {
+               /* Higher DDR banks */
                .virt = 0x880000000UL,
                .phys = 0x880000000UL,
                .size = 0x80000000UL,
                .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                         PTE_BLOCK_INNER_SHARE
        }, {
+               /* Flash peripherals */
                .virt = 0x500000000UL,
                .phys = 0x500000000UL,
                .size = 0x380000000UL,
                .attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
                         PTE_BLOCK_NON_SHARE |
                         PTE_BLOCK_PXN | PTE_BLOCK_UXN
+       }, {
+               /* Map SPL load region and the next 128MiB as cacheable */
+               .virt = CONFIG_SPL_TEXT_BASE,
+               .phys = CONFIG_SPL_TEXT_BASE,
+               .size = SZ_128M,
+               .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+                        PTE_BLOCK_INNER_SHARE
        }, {
                /* List terminator */
                0,
@@ -52,3 +57,185 @@ struct mm_region k3_mem_map[] = {
 };
 
 struct mm_region *mem_map = k3_mem_map;
+
+/*
+ * qsort() comparator ordering struct fdt_resource entries by ascending
+ * start address.
+ *
+ * Compare explicitly instead of returning (a->start - b->start): the
+ * difference of two 64-bit fdt_addr_t values truncated to int can wrap
+ * and report the wrong ordering for addresses more than 2GiB apart.
+ */
+static int dt_reserved_cmp(const void *a, const void *b)
+{
+       const struct fdt_resource *val_a = a, *val_b = b;
+
+       if (val_a->start < val_b->start)
+               return -1;
+
+       return val_a->start > val_b->start;
+}
+
+/*
+ * k3_mem_map_init() - rebuild the DDR portion of k3_mem_map at runtime
+ *
+ * Queries the memory node and the "no-map" reserved-memory carveouts
+ * from the device-tree, then rewrites the DDR entries of k3_mem_map so
+ * that the carveouts are mapped non-cacheable while the rest of DDR
+ * stays cacheable.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int k3_mem_map_init(void)
+{
+       fdt_addr_t mem_base;
+       fdt_size_t mem_size;
+       struct fdt_resource dt_reserved[K3_MMU_REGIONS_COUNT],
+               coalesced[K3_MMU_REGIONS_COUNT];
+       int k3_map_idx = -EINVAL, ret, nodeoffset, subnode;
+       void *blob = (void *)gd->fdt_blob;
+       unsigned int carveout_len, i, j;
+
+       /* Keep the TF-A and OP-TEE carveouts in sync with the load addresses */
+       ret = fdt_fixup_reserved(blob, "tfa", CONFIG_K3_ATF_LOAD_ADDR, 0x80000);
+       if (ret) {
+               pr_err("%s: Failed to fixup reserved node for tfa [%d]\n",
+                      __func__, ret);
+               return ret;
+       }
+
+       ret = fdt_fixup_reserved(blob, "optee", CONFIG_K3_OPTEE_LOAD_ADDR,
+                                0x1800000);
+       if (ret) {
+               pr_err("%s: Failed to fixup reserved node for optee [%d]\n",
+                      __func__, ret);
+               return ret;
+       }
+
+       nodeoffset = fdt_subnode_offset(blob, 0, "memory");
+       if (nodeoffset < 0) {
+               pr_err("%s: Failed to get memory data: %s\n", __func__,
+                      fdt_strerror(nodeoffset));
+               return nodeoffset;
+       }
+
+       mem_base = fdtdec_get_addr_size(blob, nodeoffset, "reg", &mem_size);
+       if (mem_base != CFG_SYS_SDRAM_BASE)
+               return -EINVAL;
+
+       /* Locate the static SPL DDR entry that the runtime regions replace */
+       for (i = 0; i < K3_MMU_REGIONS_COUNT; i++) {
+               if (k3_mem_map[i].virt == CONFIG_SPL_TEXT_BASE) {
+                       k3_map_idx = i;
+                       break;
+               }
+       }
+
+       if (k3_map_idx == -EINVAL) {
+               pr_err("%s: Failed to find DDR region in MMU memory map\n",
+                      __func__);
+               return -EINVAL;
+       }
+
+       /* Collect the no-map carveouts that intersect the DDR bank */
+       i = 0;
+       nodeoffset = fdt_subnode_offset(blob, 0, "reserved-memory");
+       fdt_for_each_subnode(subnode, blob, nodeoffset) {
+               const char *name;
+               fdt_addr_t addr, end_addr;
+               fdt_size_t size;
+
+               if (i >= K3_MMU_REGIONS_COUNT) {
+                       /*
+                        * This is a recoverable error if the regions can be
+                        * coalesced, the required logic can be implemented once
+                        * requirement arises.
+                        */
+                       pr_err("%s: Not enough space in MMU map for carveouts\n",
+                              __func__);
+                       return -ENOMEM;
+               }
+
+               name = fdt_get_name(blob, subnode, NULL);
+               addr = fdtdec_get_addr_size(blob, subnode, "reg", &size);
+
+               if (addr == FDT_ADDR_T_NONE)
+                       continue;
+
+               /* Only no-map regions must be made non-cacheable */
+               if (!fdtdec_get_bool(blob, subnode, "no-map"))
+                       continue;
+
+               if (addr >= mem_base + mem_size)
+                       continue;
+
+               end_addr = addr + size;
+
+               if (end_addr <= mem_base)
+                       continue;
+
+               /* Clamp the carveout to the DDR bank before recording it */
+               addr = max(addr, mem_base);
+               end_addr = min(end_addr, mem_base + mem_size);
+               size = end_addr - addr;
+
+               debug("Added memory carveout at 0x%llx, size: 0x%llx for '%s'\n",
+                     addr, size, name);
+
+               dt_reserved[i].start = addr;
+               dt_reserved[i].end = end_addr;
+               i++;
+       }
+       carveout_len = i;
+
+       if (!carveout_len)
+               return 0;
+
+       /* sort carveout regions by address required for creating carveouts */
+       qsort(dt_reserved, carveout_len, sizeof(dt_reserved[0]),
+             dt_reserved_cmp);
+
+       /* coalesce overlapping or adjacent regions */
+       struct fdt_resource coalescing_temp = dt_reserved[0];
+
+       j = 0;
+       for (i = 1; i < carveout_len; i++) {
+               struct fdt_resource current = dt_reserved[i];
+
+               if (coalescing_temp.end >= current.start) {
+                       /*
+                        * Extend, but never shrink: the list is sorted only
+                        * by start address, so 'current' may lie entirely
+                        * inside the accumulated region.
+                        */
+                       coalescing_temp.end = max(coalescing_temp.end,
+                                                 current.end);
+                       continue;
+               }
+               coalesced[j] = coalescing_temp;
+               coalescing_temp = current;
+               j++;
+       }
+
+       coalesced[j] = coalescing_temp;
+       carveout_len = j + 1;
+
+       /*
+        * Worst case we emit one cacheable region per gap (carveout_len + 1)
+        * plus one non-cacheable region per carveout plus the terminator;
+        * bail out instead of overrunning k3_mem_map.
+        */
+       if ((unsigned int)k3_map_idx + 2 * carveout_len + 2 >
+           K3_MMU_REGIONS_COUNT) {
+               pr_err("%s: Not enough space in MMU map for carveouts\n",
+                      __func__);
+               return -ENOMEM;
+       }
+
+       /* Cacheable region before the first carveout, if any */
+       if (coalesced[0].start != mem_base) {
+               k3_mem_map[k3_map_idx].virt = mem_base;
+               k3_mem_map[k3_map_idx].phys = mem_base;
+               k3_mem_map[k3_map_idx].size = coalesced[0].start - mem_base;
+               k3_mem_map[k3_map_idx].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+                                              PTE_BLOCK_INNER_SHARE;
+               k3_map_idx++;
+       }
+
+       /* Cacheable gaps between consecutive carveouts */
+       for (i = 1; i < carveout_len; i++) {
+               k3_mem_map[k3_map_idx].virt = coalesced[i - 1].end;
+               k3_mem_map[k3_map_idx].phys = coalesced[i - 1].end;
+               k3_mem_map[k3_map_idx].size =
+                       coalesced[i].start - coalesced[i - 1].end;
+               k3_mem_map[k3_map_idx].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+                                              PTE_BLOCK_INNER_SHARE;
+               k3_map_idx++;
+       }
+
+       /* Cacheable tail from the last carveout to the end of the bank */
+       k3_mem_map[k3_map_idx].virt = coalesced[carveout_len - 1].end;
+       k3_mem_map[k3_map_idx].phys = coalesced[carveout_len - 1].end;
+       k3_mem_map[k3_map_idx].size =
+               mem_base + mem_size - coalesced[carveout_len - 1].end;
+       k3_mem_map[k3_map_idx].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+                                      PTE_BLOCK_INNER_SHARE;
+       k3_map_idx++;
+
+       /* Map the reserved-memory carveouts themselves as non-cacheable */
+       for (i = 0; i < carveout_len; i++) {
+               k3_mem_map[k3_map_idx].virt = coalesced[i].start;
+               k3_mem_map[k3_map_idx].phys = coalesced[i].start;
+               k3_mem_map[k3_map_idx].size =
+                       coalesced[i].end - coalesced[i].start;
+               k3_mem_map[k3_map_idx].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL_NC) |
+                                              PTE_BLOCK_INNER_SHARE;
+               k3_map_idx++;
+       }
+
+       debug("%s: MMU Table configured as:\n", __func__);
+       debug("   |virt start\t\t|virt end\t|phys\t\t|size\t\t|attrs:\n");
+       for (i = 0; i < (unsigned int)k3_map_idx; i++) {
+               debug("%2d: 0x%-12llx\t0x%-12llx\t0x%-12llx\t0x%-12llx\t0x%llx\n",
+                     i, k3_mem_map[i].virt,
+                     k3_mem_map[i].virt + k3_mem_map[i].size,
+                     k3_mem_map[i].phys, k3_mem_map[i].size,
+                     k3_mem_map[i].attrs);
+       }
+
+       /* Terminate the table so the MMU setup code stops iterating here */
+       k3_mem_map[k3_map_idx] = (const struct mm_region){ 0 };
+
+       return 0;
+}
index 39e6725bb9bcd7e733ceda655c1c894ab65712b4..0b164ebf5e648f4a68223c87d8290b674bc1bdf9 100644 (file)
@@ -8,10 +8,19 @@
 
 #include <spl.h>
 
+/* Number of mappable regions in the MMU page table */
+#define K3_MMU_REGIONS_COUNT 32
+
 int dram_init(void);
 int dram_init_banksize(void);
 
 void fixup_ddr_driver_for_ecc(struct spl_image_info *spl_image);
 void fixup_memory_node(struct spl_image_info *spl_image);
 
+/*
+ * Modifies the MMU memory map based on DDR size and reserved-memory
+ * nodes in DT
+ */
+int k3_mem_map_init(void);
+
 #endif /* _K3_DDR_H_ */
index 786358105854582d9c43868d5e6e1668a3dca78b..031040fe70510a561fde65de0fc9977662826a1a 100644 (file)
@@ -13,6 +13,7 @@
 #include <spl.h>
 
 #include <asm/arch/hardware.h>
+#include <mach/k3-ddr.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -48,6 +49,17 @@ int board_init(void)
 
 int dram_init(void)
 {
+       int ret;
+
+       /*
+        * Rebuild the MMU memory map from the DT reserved-memory nodes.
+        * Guarded to 64-bit builds outside the SPL phase, matching where
+        * k3_mem_map_init() is intended to run (proper U-Boot stage).
+        */
+       if (IS_ENABLED(CONFIG_ARM64) && xpl_phase() != PHASE_SPL) {
+               ret = k3_mem_map_init();
+               if (ret) {
+                       printf("%s: Error fixing up MMU memory map: %d\n",
+                              __func__, ret);
+                       return ret;
+               }
+       }
+
        return fdtdec_setup_mem_size_base();
 }
 
index a8425da8de5ca4f8ddfbb0d43a1147e8eabdf2dc..ee882f621099c9d40ff7b8b617a7e2aa1565d284 100644 (file)
@@ -7,6 +7,7 @@
 #include <dm/uclass.h>
 #include <k3-ddrss.h>
 #include <spl.h>
+#include <mach/k3-ddr.h>
 
 #include "k3-ddr.h"
 
@@ -14,6 +15,15 @@ int dram_init(void)
 {
        s32 ret;
 
+       if (IS_ENABLED(CONFIG_ARM64) && xpl_phase() != PHASE_SPL) {
+               ret = k3_mem_map_init();
+               if (ret) {
+                       printf("%s: Error fixing up MMU memory map: %d\n",
+                              __func__, ret);
+                       return ret;
+               }
+       }
+
        ret = fdtdec_setup_mem_size_base_lowest();
        if (ret)
                printf("Error setting up mem size and base. %d\n", ret);