git.ipfire.org Git - thirdparty/openwrt.git - blob 92e9bf6c868d84e8dda6ac56bd28716a2783444e [thirdparty/openwrt.git]
1 From b08e2f42e86b5848add254da45b56fc672e2bced Mon Sep 17 00:00:00 2001
2 From: Steven Price <steven.price@arm.com>
3 Date: Wed, 2 Oct 2024 15:16:29 +0100
4 Subject: [PATCH] irqchip/gic-v3-its: Share ITS tables with a non-trusted
5 hypervisor
6
7 Within a realm guest the ITS is emulated by the host. This means the
8 allocations must have been made available to the host by a call to
9 set_memory_decrypted(). Introduce an allocation function which performs
10 this extra call.
11
12 For the ITT use a custom genpool-based allocator that calls
13 set_memory_decrypted() for each page allocated, but then suballocates the
14 size needed for each ITT. Note that there is no mechanism implemented to
15 return pages from the genpool, but it is unlikely that the peak number of
16 devices will be much larger than the normal level - so this isn't expected
17 to be an issue.
18
19 Co-developed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
20 Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
21 Signed-off-by: Steven Price <steven.price@arm.com>
22 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
23 Tested-by: Will Deacon <will@kernel.org>
24 Reviewed-by: Marc Zyngier <maz@kernel.org>
25 Link: https://lore.kernel.org/all/20241002141630.433502-2-steven.price@arm.com
26 ---
27 drivers/irqchip/irq-gic-v3-its.c | 138 +++++++++++++++++++++++++------
28 1 file changed, 115 insertions(+), 23 deletions(-)
29
30 --- a/drivers/irqchip/irq-gic-v3-its.c
31 +++ b/drivers/irqchip/irq-gic-v3-its.c
32 @@ -12,12 +12,14 @@
33 #include <linux/crash_dump.h>
34 #include <linux/delay.h>
35 #include <linux/efi.h>
36 +#include <linux/genalloc.h>
37 #include <linux/interrupt.h>
38 #include <linux/iommu.h>
39 #include <linux/iopoll.h>
40 #include <linux/irqdomain.h>
41 #include <linux/list.h>
42 #include <linux/log2.h>
43 +#include <linux/mem_encrypt.h>
44 #include <linux/memblock.h>
45 #include <linux/mm.h>
46 #include <linux/msi.h>
47 @@ -27,6 +29,7 @@
48 #include <linux/of_pci.h>
49 #include <linux/of_platform.h>
50 #include <linux/percpu.h>
51 +#include <linux/set_memory.h>
52 #include <linux/slab.h>
53 #include <linux/syscore_ops.h>
54
55 @@ -163,6 +166,7 @@ struct its_device {
56 struct its_node *its;
57 struct event_lpi_map event_map;
58 void *itt;
59 + u32 itt_sz;
60 u32 nr_ites;
61 u32 device_id;
62 bool shared;
63 @@ -198,6 +202,87 @@ static DEFINE_IDA(its_vpeid_ida);
64 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
65 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
66
67 +static struct page *its_alloc_pages_node(int node, gfp_t gfp,
68 + unsigned int order)
69 +{
70 + struct page *page;
71 + int ret = 0;
72 +
73 + page = alloc_pages_node(node, gfp, order);
74 +
75 + if (!page)
76 + return NULL;
77 +
78 + ret = set_memory_decrypted((unsigned long)page_address(page),
79 + 1 << order);
80 + /*
81 + * If set_memory_decrypted() fails then we don't know what state the
82 + * page is in, so we can't free it. Instead we leak it.
83 + * set_memory_decrypted() will already have WARNed.
84 + */
85 + if (ret)
86 + return NULL;
87 +
88 + return page;
89 +}
90 +
91 +static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
92 +{
93 + return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
94 +}
95 +
96 +static void its_free_pages(void *addr, unsigned int order)
97 +{
98 + /*
99 + * If the memory cannot be encrypted again then we must leak the pages.
100 + * set_memory_encrypted() will already have WARNed.
101 + */
102 + if (set_memory_encrypted((unsigned long)addr, 1 << order))
103 + return;
104 + free_pages((unsigned long)addr, order);
105 +}
106 +
107 +static struct gen_pool *itt_pool;
108 +
109 +static void *itt_alloc_pool(int node, int size)
110 +{
111 + unsigned long addr;
112 + struct page *page;
113 +
114 + if (size >= PAGE_SIZE) {
115 + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
116 +
117 + return page ? page_address(page) : NULL;
118 + }
119 +
120 + do {
121 + addr = gen_pool_alloc(itt_pool, size);
122 + if (addr)
123 + break;
124 +
125 + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 1);
126 + if (!page)
127 + break;
128 +
129 + gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
130 + } while (!addr);
131 +
132 + return (void *)addr;
133 +}
134 +
135 +static void itt_free_pool(void *addr, int size)
136 +{
137 + if (!addr)
138 + return;
139 +
140 + if (size >= PAGE_SIZE) {
141 + its_free_pages(addr, get_order(size));
142 + return;
143 + }
144 +
145 + gen_pool_free(itt_pool, (unsigned long)addr, size);
146 +}
147 +
148 /*
149 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
150 * always have vSGIs mapped.
151 @@ -2192,7 +2277,8 @@ static struct page *its_allocate_prop_ta
152 {
153 struct page *prop_page;
154
155 - prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
156 + prop_page = its_alloc_pages(gfp_flags,
157 + get_order(LPI_PROPBASE_SZ));
158 if (!prop_page)
159 return NULL;
160
161 @@ -2203,8 +2289,7 @@ static struct page *its_allocate_prop_ta
162
163 static void its_free_prop_table(struct page *prop_page)
164 {
165 - free_pages((unsigned long)page_address(prop_page),
166 - get_order(LPI_PROPBASE_SZ));
167 + its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
168 }
169
170 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
171 @@ -2326,7 +2411,7 @@ static int its_setup_baser(struct its_no
172 order = get_order(GITS_BASER_PAGES_MAX * psz);
173 }
174
175 - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
176 + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
177 if (!page)
178 return -ENOMEM;
179
180 @@ -2339,7 +2424,7 @@ static int its_setup_baser(struct its_no
181 /* 52bit PA is supported only when PageSize=64K */
182 if (psz != SZ_64K) {
183 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
184 - free_pages((unsigned long)base, order);
185 + its_free_pages(base, order);
186 return -ENXIO;
187 }
188
189 @@ -2395,7 +2480,7 @@ retry_baser:
190 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
191 &its->phys_base, its_base_type_string[type],
192 val, tmp);
193 - free_pages((unsigned long)base, order);
194 + its_free_pages(base, order);
195 return -ENXIO;
196 }
197
198 @@ -2534,8 +2619,7 @@ static void its_free_tables(struct its_n
199
200 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
201 if (its->tables[i].base) {
202 - free_pages((unsigned long)its->tables[i].base,
203 - its->tables[i].order);
204 + its_free_pages(its->tables[i].base, its->tables[i].order);
205 its->tables[i].base = NULL;
206 }
207 }
208 @@ -2801,7 +2885,7 @@ static bool allocate_vpe_l2_table(int cp
209
210 /* Allocate memory for 2nd level table */
211 if (!table[idx]) {
212 - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
213 + page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
214 if (!page)
215 return false;
216
217 @@ -2920,7 +3004,7 @@ static int allocate_vpe_l1_table(void)
218
219 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
220 np, npg, psz, epp, esz);
221 - page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
222 + page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
223 if (!page)
224 return -ENOMEM;
225
226 @@ -2966,8 +3050,7 @@ static struct page *its_allocate_pending
227 {
228 struct page *pend_page;
229
230 - pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
231 - get_order(LPI_PENDBASE_SZ));
232 + pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
233 if (!pend_page)
234 return NULL;
235
236 @@ -2979,7 +3062,7 @@ static struct page *its_allocate_pending
237
238 static void its_free_pending_table(struct page *pt)
239 {
240 - free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
241 + its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
242 }
243
244 /*
245 @@ -3314,8 +3397,8 @@ static bool its_alloc_table_entry(struct
246
247 /* Allocate memory for 2nd level table */
248 if (!table[idx]) {
249 - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
250 - get_order(baser->psz));
251 + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
252 + get_order(baser->psz));
253 if (!page)
254 return false;
255
256 @@ -3410,7 +3493,6 @@ static struct its_device *its_create_dev
257 if (WARN_ON(!is_power_of_2(nvecs)))
258 nvecs = roundup_pow_of_two(nvecs);
259
260 - dev = kzalloc(sizeof(*dev), GFP_KERNEL);
261 /*
262 * Even if the device wants a single LPI, the ITT must be
263 * sized as a power of two (and you need at least one bit...).
264 @@ -3418,7 +3500,11 @@ static struct its_device *its_create_dev
265 nr_ites = max(2, nvecs);
266 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
267 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
268 - itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
269 +
270 + itt = itt_alloc_pool(its->numa_node, sz);
271 +
272 + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
273 +
274 if (alloc_lpis) {
275 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
276 if (lpi_map)
277 @@ -3430,9 +3516,9 @@ static struct its_device *its_create_dev
278 lpi_base = 0;
279 }
280
281 - if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
282 + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
283 kfree(dev);
284 - kfree(itt);
285 + itt_free_pool(itt, sz);
286 bitmap_free(lpi_map);
287 kfree(col_map);
288 return NULL;
289 @@ -3442,6 +3528,7 @@ static struct its_device *its_create_dev
290
291 dev->its = its;
292 dev->itt = itt;
293 + dev->itt_sz = sz;
294 dev->nr_ites = nr_ites;
295 dev->event_map.lpi_map = lpi_map;
296 dev->event_map.col_map = col_map;
297 @@ -3469,7 +3556,7 @@ static void its_free_device(struct its_d
298 list_del(&its_dev->entry);
299 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
300 kfree(its_dev->event_map.col_map);
301 - kfree(its_dev->itt);
302 + itt_free_pool(its_dev->itt, its_dev->itt_sz);
303 kfree(its_dev);
304 }
305
306 @@ -5112,8 +5199,9 @@ static int __init its_probe_one(struct i
307 }
308 }
309
310 - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
311 - get_order(ITS_CMD_QUEUE_SZ));
312 + page = its_alloc_pages_node(its->numa_node,
313 + GFP_KERNEL | __GFP_ZERO,
314 + get_order(ITS_CMD_QUEUE_SZ));
315 if (!page) {
316 err = -ENOMEM;
317 goto out_unmap_sgir;
318 @@ -5177,7 +5265,7 @@ static int __init its_probe_one(struct i
319 out_free_tables:
320 its_free_tables(its);
321 out_free_cmd:
322 - free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
323 + its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
324 out_unmap_sgir:
325 if (its->sgir_base)
326 iounmap(its->sgir_base);
327 @@ -5659,6 +5747,10 @@ int __init its_init(struct fwnode_handle
328 bool has_v4_1 = false;
329 int err;
330
331 + itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
332 + if (!itt_pool)
333 + return -ENOMEM;
334 +
335 gic_rdists = rdists;
336
337 its_parent = parent_domain;