#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/efi.h>
+#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
+#include <linux/mem_encrypt.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
+#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
struct its_node *its;
struct event_lpi_map event_map;
void *itt;
+ u32 itt_sz;
u32 nr_ites;
u32 device_id;
bool shared;
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
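+/*
+ * Page allocation wrappers for ITS-owned memory (tables, command queue,
+ * pending/property tables). When the guest runs with memory encryption,
+ * this memory must be shared with the ITS, so pages are switched to the
+ * decrypted state after allocation and back to encrypted before freeing.
+ */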
+static struct page *its_alloc_pages_node(int node, gfp_t gfp,
+ unsigned int order)
+{
+ struct page *page;
+ int ret = 0;
+
+ page = alloc_pages_node(node, gfp, order);
+
+ if (!page)
+ return NULL;
+
+ ret = set_memory_decrypted((unsigned long)page_address(page),
+ 1 << order);
+ /*
+ * If set_memory_decrypted() fails then we don't know what state the
+ * page is in, so we can't free it. Instead we leak it.
+ * set_memory_decrypted() will already have WARNed.
+ */
+ if (ret)
+ return NULL;
+
+ return page;
+}
+
+static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
+{
+ return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
+}
+
+static void its_free_pages(void *addr, unsigned int order)
+{
+ /*
+ * If the memory cannot be encrypted again then we must leak the pages.
+ * set_memory_encrypted() will already have WARNed.
+ */
+ if (set_memory_encrypted((unsigned long)addr, 1 << order))
+ return;
+ free_pages((unsigned long)addr, order);
+}
+
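+/* Pool of already-decrypted pages from which sub-page ITTs are carved out. */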
+static struct gen_pool *itt_pool;
+
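+/*
+ * Allocate @size bytes of ITT storage on @node. Page-sized and larger ITTs
+ * get decrypted pages of their own; smaller ones are served from itt_pool,
+ * which is grown one decrypted page at a time when it runs dry.
+ */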
+static void *itt_alloc_pool(int node, int size)
+{
+ unsigned long addr;
+ struct page *page;
+
+ if (size >= PAGE_SIZE) {
+ page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
+
+ return page ? page_address(page) : NULL;
+ }
+
+ do {
+ addr = gen_pool_alloc(itt_pool, size);
+ if (addr)
+ break;
+
+ /* Only PAGE_SIZE is handed to gen_pool_add() below, so allocate a single page */
+ page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page)
+ break;
+
+ gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
+ } while (!addr);
+
+ return (void *)addr;
+}
+
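+/*
+ * Release ITT storage obtained from itt_alloc_pool(): whole pages are
+ * re-encrypted and freed, sub-page allocations are returned to itt_pool.
+ */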
+static void itt_free_pool(void *addr, int size)
+{
+ if (!addr)
+ return;
+
+ if (size >= PAGE_SIZE) {
+ its_free_pages(addr, get_order(size));
+ return;
+ }
+
+ gen_pool_free(itt_pool, (unsigned long)addr, size);
+}
+
/*
* Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
* always have vSGIs mapped.
{
struct page *prop_page;
- prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+ prop_page = its_alloc_pages(gfp_flags,
+ get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;
static void its_free_prop_table(struct page *prop_page)
{
- free_pages((unsigned long)page_address(prop_page),
- get_order(LPI_PROPBASE_SZ));
+ its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+ page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
/* 52bit PA is supported only when PageSize=64K */
if (psz != SZ_64K) {
pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
- free_pages((unsigned long)base, order);
+ its_free_pages(base, order);
return -ENXIO;
}
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
val, tmp);
- free_pages((unsigned long)base, order);
+ its_free_pages(base, order);
return -ENXIO;
}
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (its->tables[i].base) {
- free_pages((unsigned long)its->tables[i].base,
- its->tables[i].order);
+ its_free_pages(its->tables[i].base, its->tables[i].order);
its->tables[i].base = NULL;
}
}
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+ page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
- page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
{
struct page *pend_page;
- pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(LPI_PENDBASE_SZ));
+ pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+ its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(baser->psz));
+ page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
if (!page)
return false;
if (WARN_ON(!is_power_of_2(nvecs)))
nvecs = roundup_pow_of_two(nvecs);
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
* Even if the device wants a single LPI, the ITT must be
* sized as a power of two (and you need at least one bit...).
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
+ itt = itt_alloc_pool(its->numa_node, sz);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
lpi_base = 0;
}
- if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
+ if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
+ itt_free_pool(itt, sz);
bitmap_free(lpi_map);
kfree(col_map);
return NULL;
dev->its = its;
dev->itt = itt;
+ dev->itt_sz = sz;
dev->nr_ites = nr_ites;
dev->event_map.lpi_map = lpi_map;
dev->event_map.col_map = col_map;
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
- kfree(its_dev->itt);
+ itt_free_pool(its_dev->itt, its_dev->itt_sz);
kfree(its_dev);
}
}
}
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(ITS_CMD_QUEUE_SZ));
+ page = its_alloc_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
goto out_unmap_sgir;
out_free_tables:
its_free_tables(its);
out_free_cmd:
- free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+ its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
bool has_v4_1 = false;
int err;
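+ /* Pool backing the sub-page ITT allocations made in itt_alloc_pool() */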
+ itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
+ if (!itt_pool)
+ return -ENOMEM;
+
gic_rdists = rdists;
lpi_prop_prio = irq_prio;