// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
				  resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p, bool skip_children)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children) \
	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))

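/*
 * Illustrative sketch (editor's example, not part of the original file):
 * any walk with for_each_resource() must hold resource_lock, since the
 * iterator chases parent/sibling/child pointers that can change under it.
 *
 *	static void dump_iomem_tree(void)
 *	{
 *		struct resource *p;
 *
 *		read_lock(&resource_lock);
 *		for_each_resource(&iomem_resource, p, false)
 *			pr_info("%s: %pR\n", p->name ?: "<unnamed>", p);
 *		read_unlock(&resource_lock);
 *	}
 */
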
#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL(request_resource);

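/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a driver might reserve a fixed MMIO window like this; the name, address
 * and size below are made up for the example.
 *
 *	static struct resource tpm_res = {
 *		.name  = "example-tpm",
 *		.start = 0xfed40000,
 *		.end   = 0xfed40000 + 0x5000 - 1,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &tpm_res))
 *		return -EBUSY;	// someone else already owns part of it
 */
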
/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL(release_resource);

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);

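/*
 * Illustrative sketch (editor's example, not part of the original file):
 * counting how many bytes of a range are backed by resources of a given
 * descriptor. The callback sees each matching sub-range clipped to
 * [start..end].
 *
 *	static int count_bytes(struct resource *res, void *arg)
 *	{
 *		u64 *total = arg;
 *
 *		*total += resource_size(res);
 *		return 0;	// non-zero would stop the walk
 *	}
 *
 *	u64 total = 0;
 *	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM,
 *			    0, -1, &total, count_bytes);
 */
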
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * This function is only for System RAM; it deals with full ranges rather
 * than PFNs. If resources are not PFN-aligned, dealing with PFNs can
 * truncate ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges
 * marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

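/*
 * Illustrative sketch (editor's example, not part of the original file;
 * start_pfn and nr_pages are placeholders): summing the number of System
 * RAM pages in a PFN window. The callback receives a PFN and a page count
 * for each busy System RAM chunk.
 *
 *	static int add_pages(unsigned long pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		unsigned long *total = arg;
 *
 *		*total += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &total, add_pages);
 */
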
static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	struct resource res;
	int type = 0; int other = 0;
	struct resource *p;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (resource_overlaps(p, &res))
			is_type ? type++ : other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM; it is vastly faster than walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);

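/*
 * Illustrative sketch (editor's example, not part of the original file;
 * phys_addr and size are placeholders): a remapping helper can refuse RAM
 * before touching page tables.
 *
 *	if (region_intersects(phys_addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;	// range touches System RAM, do not remap
 */
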
void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
							 size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 * The resource will be relocated if the new size cannot be accommodated at
 * the current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 * The resource will be reallocated with a new size if it was already
 * allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * The resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}
EXPORT_SYMBOL(allocate_resource);

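/*
 * Illustrative sketch (editor's example, not part of the original file):
 * carving a 1 MiB, 1 MiB-aligned window for a hypothetical device out of
 * the first 4 GiB of iomem space.
 *
 *	static struct resource my_win = {
 *		.name  = "example-window",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	int err = allocate_resource(&iomem_resource, &my_win,
 *				    SZ_1M,		// size
 *				    0, 0xffffffff,	// min/max boundaries
 *				    SZ_1M,		// alignment
 *				    NULL, NULL);	// default alignf
 *	if (err)
 *		return err;	// no free slot satisfied the constraints
 */
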
/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);

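/*
 * Illustrative sketch (editor's example, not part of the original file):
 * unlike request_resource(), insert_resource() tolerates existing entries
 * that fit entirely inside the new range - they simply become children.
 * Useful when firmware describes a bridge window after its devices; the
 * name and addresses below are made up.
 *
 *	static struct resource fw_window = {
 *		.name  = "example-fw-window",
 *		.start = 0x90000000,
 *		.end   = 0x9fffffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_window))
 *		pr_warn("window partially overlaps an existing resource\n");
 */
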
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Removes a resource from the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);

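/*
 * Illustrative sketch (editor's example, not part of the original file;
 * res, new_size and dev are placeholders): growing a previously requested
 * region in place. The call fails with -EBUSY if a sibling already
 * occupies the extra space.
 *
 *	// res currently covers [base, base + old_size - 1]
 *	if (adjust_resource(res, res->start, new_size))
 *		dev_warn(dev, "cannot grow %pR\n", res);
 */
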
static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, i.e. before any iomem_get_mapping()
	 * user could have established a mapping.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

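/*
 * Illustrative sketch (editor's example, not part of the original file):
 * drivers normally reach this through the request_mem_region() /
 * request_region() wrappers in <linux/ioport.h> rather than calling
 * __request_region() directly.
 *
 *	if (!request_mem_region(pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0), "example-drv"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(pci_resource_start(pdev, 0),
 *			   pci_resource_len(pdev, 0));
 */
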
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (especially
	 * the memmap), just before releasing the region. This is highly
	 * unlikely to fail - let's play safe and make it never fail as the
	 * caller cannot perform any error handling (e.g., trying to re-add
	 * memory will fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if this one does not fit */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

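/*
 * Illustrative sketch (editor's example, not part of the original file):
 * in a driver's probe path the devres framework releases the resource
 * automatically on unbind, so no matching release call is needed.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res =
 *			platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *
 *		if (!res)
 *			return -ENODEV;
 *		return devm_request_resource(&pdev->dev, &iomem_resource, res);
 *	}
 */
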
static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

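/*
 * Illustrative sketch (editor's example, not part of the original file):
 * each "reserve=" entry on the kernel command line is a start,size pair;
 * the addresses below are made up.
 *
 *	reserve=0x320,32		// claims I/O ports 0x320-0x33f
 *	reserve=0xfed00000,0x1000	// claims one page of iomem
 */
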
/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
		    || p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

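/*
 * Illustrative sketch (editor's example, not part of the original file;
 * the window address is made up): building a list of windows, much as
 * host-bridge code does, and freeing it again with resource_list_free().
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	*entry->res = (struct resource)DEFINE_RES_MEM(0xa0000000, SZ_256M);
 *	resource_list_add_tail(entry, &resources);
 *	...
 *	resource_list_free(&resources);
 */
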
void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end,
			    (1ULL << MAX_PHYSMEM_BITS) - 1);
		return end - size + 1;
	}

	return ALIGN(base->start, align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascend case be careful that the last increment by
	 * @size did not wrap 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end,
			     (1ULL << MAX_PHYSMEM_BITS) - 1);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, size, flags);
	     addr = gfr_next(addr, size, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
					      struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
					 unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span of
 * @base that is not already claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
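
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a ZONE_DEVICE user typically pairs devm_request_free_mem_region() with
 * memremap_pages(); the pgmap setup shown here is abbreviated.
 *
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_128M);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 */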
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish iomem revocation inode initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);