memunmap(timings);
}
-static const struct reserved_mem_ops tegra210_emc_table_ops = {
- .device_init = tegra210_emc_table_device_init,
- .device_release = tegra210_emc_table_device_release,
-};
-
static int tegra210_emc_table_init(unsigned long node,
struct reserved_mem *rmem)
{
pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
(unsigned long)rmem->size);
- rmem->ops = &tegra210_emc_table_ops;
-
return 0;
}
+
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+ .node_init = tegra210_emc_table_init,
+ .device_init = tegra210_emc_table_device_init,
+ .device_release = tegra210_emc_table_device_release,
+};
+
RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
- tegra210_emc_table_init);
+ &tegra210_emc_table_ops);
static const struct of_device_id __rmem_of_table_sentinel
__used __section("__reservedmem_of_table_end");
-/*
- * __reserved_mem_init_node() - call region specific reserved memory init code
+/**
+ * __reserved_mem_init_node() - initialize a reserved memory region
+ * @rmem: reserved_mem structure to initialize
+ * @node: FDT node describing the reserved memory region
+ *
+ * This function iterates through the reserved memory drivers and calls the
+ * node_init callback for the compatible entry matching the node. On success,
+ * the operations pointer is stored in the reserved_mem structure.
+ *
+ * Return: 0 on success, -ENODEV if no compatible entry matches the node,
+ *         or a negative error code propagated from the node_init callback
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem,
unsigned long node)
for (i = __reservedmem_of_table; ret == -ENODEV &&
i < &__rmem_of_table_sentinel; i++) {
- reservedmem_of_init_fn initfn = i->data;
+ const struct reserved_mem_ops *ops = i->data;
const char *compat = i->compatible;
if (!of_flat_dt_is_compatible(node, compat))
continue;
- ret = initfn(node, rmem);
+ ret = ops->node_init(node, rmem);
if (ret == 0) {
+ rmem->ops = ops;
pr_info("initialized node %s, compatible id %s\n",
rmem->name, compat);
break;
};
struct reserved_mem_ops {
+ int (*node_init)(unsigned long fdt_node, struct reserved_mem *rmem);
int (*device_init)(struct reserved_mem *rmem,
struct device *dev);
void (*device_release)(struct reserved_mem *rmem,
struct device *dev);
};
-typedef int (*reservedmem_of_init_fn)(unsigned long node,
- struct reserved_mem *rmem);
-
#ifdef CONFIG_OF_RESERVED_MEM
-#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
- _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
+#define RESERVEDMEM_OF_DECLARE(name, compat, ops) \
+ _OF_DECLARE(reservedmem, name, compat, ops, struct reserved_mem_ops *)
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
#else
-#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
- _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+#define RESERVEDMEM_OF_DECLARE(name, compat, ops) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, ops, \
+ struct reserved_mem_ops *)
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
dev->dma_mem = NULL;
}
-static const struct reserved_mem_ops rmem_dma_ops = {
- .device_init = rmem_dma_device_init,
- .device_release = rmem_dma_device_release,
-};
static int __init rmem_dma_setup(unsigned long node, struct reserved_mem *rmem)
{
}
#endif
- rmem->ops = &rmem_dma_ops;
pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */
-RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+static const struct reserved_mem_ops rmem_dma_ops = {
+ .node_init = rmem_dma_setup,
+ .device_init = rmem_dma_device_init,
+ .device_release = rmem_dma_device_release,
+};
+
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", &rmem_dma_ops);
#endif
dev->cma_area = NULL;
}
-static const struct reserved_mem_ops rmem_cma_ops = {
- .device_init = rmem_cma_device_init,
- .device_release = rmem_cma_device_release,
-};
-
static int __init rmem_cma_setup(unsigned long node, struct reserved_mem *rmem)
{
bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
if (default_cma)
dma_contiguous_default_area = cma;
- rmem->ops = &rmem_cma_ops;
rmem->priv = cma;
pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
return 0;
}
-RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+
+static const struct reserved_mem_ops rmem_cma_ops = {
+ .node_init = rmem_cma_setup,
+ .device_init = rmem_cma_device_init,
+ .device_release = rmem_cma_device_release,
+};
+
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", &rmem_cma_ops);
#endif
dev->dma_io_tlb_mem = &io_tlb_default_mem;
}
-static const struct reserved_mem_ops rmem_swiotlb_ops = {
- .device_init = rmem_swiotlb_device_init,
- .device_release = rmem_swiotlb_device_release,
-};
-
static int __init rmem_swiotlb_setup(unsigned long node,
struct reserved_mem *rmem)
{
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
- rmem->ops = &rmem_swiotlb_ops;
pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
}
-RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
+static const struct reserved_mem_ops rmem_swiotlb_ops = {
+ .node_init = rmem_swiotlb_setup,
+ .device_init = rmem_swiotlb_device_init,
+ .device_release = rmem_swiotlb_device_release,
+};
+
+RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", &rmem_swiotlb_ops);
#endif /* CONFIG_DMA_RESTRICTED_POOL */