/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

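/*
 * Reference-counted enable/disable of the devmap_managed_key static
 * branch: the key is switched on for the first pgmap that needs it and
 * switched back off when the last such pgmap is torn down.
 */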
static void devmap_managed_enable_put(void)
{
        if (atomic_dec_and_test(&devmap_managed_enable))
                static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
            (!pgmap->ops || !pgmap->ops->page_free)) {
                WARN(1, "Missing page_free method\n");
                return -EINVAL;
        }

        if (atomic_inc_return(&devmap_managed_enable) == 1)
                static_branch_enable(&devmap_managed_key);
        return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

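/*
 * Remove @res from the pfn-indexed pgmap_array and wait for concurrent
 * get_dev_pagemap() lookups under rcu_read_lock() to finish.
 */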
static void pgmap_array_delete(struct resource *res)
{
        xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
}

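/* First device pfn of the mapping, skipping any pfns reserved for the altmap. */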
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
        return PHYS_PFN(pgmap->res.start) +
                vmem_altmap_offset(pgmap_altmap(pgmap));
}

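/* One past the last device pfn covered by the pgmap's resource. */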
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
        const struct resource *res = &pgmap->res;

        return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

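/* Advance to the next pfn, rescheduling every 1024 pfns so long walks don't hog the CPU. */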
static unsigned long pfn_next(unsigned long pfn)
{
        if (pfn % 1024 == 0)
                cond_resched();
        return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
        for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

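/*
 * Begin tearing down the pgmap's page reference count: prefer the
 * driver-supplied ->kill() callback, otherwise kill the percpu_ref
 * directly.
 */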
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->kill)
                pgmap->ops->kill(pgmap);
        else
                percpu_ref_kill(pgmap->ref);
}

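/*
 * Wait for the last page reference to drop and release the percpu_ref,
 * either via the driver's ->cleanup() callback or the internal
 * completion.
 */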
static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->cleanup) {
                pgmap->ops->cleanup(pgmap);
        } else {
                wait_for_completion(&pgmap->done);
                percpu_ref_exit(pgmap->ref);
        }
        /*
         * Undo the pgmap ref assignment for the internal case as the
         * caller may re-enable the same pgmap.
         */
        if (pgmap->ref == &pgmap->internal_ref)
                pgmap->ref = NULL;
}

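/*
 * Undo memremap_pages(): wait for all pages to be released, then remove
 * the range from its zone, tear down the arch mapping / memmap, and drop
 * the range from the pgmap_array.
 */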
void memunmap_pages(struct dev_pagemap *pgmap)
{
        struct resource *res = &pgmap->res;
        struct page *first_page;
        unsigned long pfn;
        int nid;

        dev_pagemap_kill(pgmap);
        for_each_device_pfn(pfn, pgmap)
                put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap));

        /* pages are dead and unused, undo the arch mapping */
        nid = page_to_nid(first_page);

        mem_hotplug_begin();
        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
                                   PHYS_PFN(resource_size(res)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(res->start),
                               PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
                                   pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(res->start), resource_size(res));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
        pgmap_array_delete(res);
        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

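/* devm action callback: tear down the mapping when the hosting device goes away. */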
static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}

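/*
 * percpu_ref release callback for the internally managed reference
 * count; signals dev_pagemap_cleanup() that the last reference is gone.
 */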
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap =
                container_of(ref, struct dev_pagemap, internal_ref);

        complete(&pgmap->done);
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a
 * struct device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct resource *res = &pgmap->res;
        struct dev_pagemap *conflict_pgmap;
        struct mhp_params params = {
                /*
                 * We do not want any optional features, only our own memmap.
                 */
                .altmap = pgmap_altmap(pgmap),
                .pgprot = PAGE_KERNEL,
        };
        int error, is_ram;
        bool need_devmap_managed = true;

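        /*
         * Validate the per-type requirements and work out whether this
         * mapping needs the devmap_managed page-free infrastructure.
         */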
        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
                    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_DEVDAX:
                need_devmap_managed = false;
                break;
        case MEMORY_DEVICE_PCI_P2PDMA:
                params.pgprot = pgprot_noncached(params.pgprot);
                need_devmap_managed = false;
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

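        /*
         * Use the internally managed percpu_ref and completion unless the
         * caller supplied its own ref together with matching kill/cleanup
         * callbacks.
         */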
        if (!pgmap->ref) {
                if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
                        return ERR_PTR(-EINVAL);

                init_completion(&pgmap->done);
                error = percpu_ref_init(&pgmap->internal_ref,
                                dev_pagemap_percpu_release, 0, GFP_KERNEL);
                if (error)
                        return ERR_PTR(error);
                pgmap->ref = &pgmap->internal_ref;
        } else {
                if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
                        WARN(1, "Missing reference count teardown definition\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (need_devmap_managed) {
                error = devmap_managed_enable_get(pgmap);
                if (error)
                        return ERR_PTR(error);
        }

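        /* Refuse to overlap an existing dev_pagemap at either end of the range. */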
        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                error = -ENOMEM;
                goto err_array;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                error = -ENOMEM;
                goto err_array;
        }

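        /* ZONE_DEVICE ranges must not overlap regions already handled as System RAM. */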
        is_ram = region_intersects(res->start, resource_size(res),
                                   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
                          is_ram == REGION_MIXED ? "mixed" : "ram", res);
                error = -ENXIO;
                goto err_array;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
                                PHYS_PFN(res->end), pgmap, GFP_KERNEL));
        if (error)
                goto err_array;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
                                0, resource_size(res));
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();

        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover, the device memory is not accessible by the CPU, so we
         * do not want to create a linear mapping for it the way
         * arch_add_memory() would.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                error = add_pages(nid, PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), &params);
        } else {
                error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, res->start, resource_size(res),
                                        &params);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), params.altmap);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), pgmap);
        percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
        return __va(res->start);

 err_add_memory:
        kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
        pgmap_array_delete(res);
 err_array:
        dev_pagemap_kill(pgmap);
        dev_pagemap_cleanup(pgmap);
        devmap_managed_enable_put();
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                        pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        if (altmap)
                return altmap->reserve + altmap->free;
        return 0;
}

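/* Account @nr_pfns freed back to the altmap. */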
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->res.start && phys <= pgmap->res.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

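/*
 * Final free path for pages whose lifetime is managed by a dev_pagemap:
 * DAX pages only need page-idle waiters woken, while device private
 * pages are cleaned up here and handed back to the driver via
 * ->page_free().
 */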
#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page)
{
        /* notify page idle for dax */
        if (!is_device_private_page(page)) {
                wake_up_var(&page->_refcount);
                return;
        }

        /* Clear Active bit in case of parallel mark_page_accessed */
        __ClearPageActive(page);
        __ClearPageWaiters(page);

        mem_cgroup_uncharge(page);

        /*
         * When a device_private page is freed, the page->mapping field
         * may still contain a (stale) mapping value. For example, the
         * lower bits of page->mapping may still identify the page as an
         * anonymous page. Ultimately, this entire field is just stale
         * and wrong, and it will cause errors if not cleared. One
         * example is:
         *
         *  migrate_vma_pages()
         *    migrate_vma_insert_page()
         *      page_add_new_anon_rmap()
         *        __page_set_anon_rmap()
         *          ...checks page->mapping, via PageAnon(page) call,
         *          and incorrectly concludes that the page is an
         *          anonymous page. Therefore, it incorrectly,
         *          silently fails to set up the new anon rmap.
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
         * to clear page->mapping.
         */
        page->mapping = NULL;
        page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */