kernel/resource.c (Linux 4.20.17, thirdparty/kernel/stable.git)
1 /*
2 * linux/kernel/resource.c
3 *
4 * Copyright (C) 1999 Linus Torvalds
5 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
6 *
7 * Arbitrary resource management.
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/export.h>
13 #include <linux/errno.h>
14 #include <linux/ioport.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/proc_fs.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/device.h>
23 #include <linux/pfn.h>
24 #include <linux/mm.h>
25 #include <linux/resource_ext.h>
26 #include <asm/io.h>
27
28
29 struct resource ioport_resource = {
30 .name = "PCI IO",
31 .start = 0,
32 .end = IO_SPACE_LIMIT,
33 .flags = IORESOURCE_IO,
34 };
35 EXPORT_SYMBOL(ioport_resource);
36
37 struct resource iomem_resource = {
38 .name = "PCI mem",
39 .start = 0,
40 .end = -1,
41 .flags = IORESOURCE_MEM,
42 };
43 EXPORT_SYMBOL(iomem_resource);
44
45 /* constraints to be met while allocating resources */
46 struct resource_constraint {
47 resource_size_t min, max, align;
48 resource_size_t (*alignf)(void *, const struct resource *,
49 resource_size_t, resource_size_t);
50 void *alignf_data;
51 };
52
53 static DEFINE_RWLOCK(resource_lock);
54
55 /*
56 * For memory hotplug, there is no way to free resource entries allocated
57  * by boot mem after the system is up. So to allow such entries to be
58  * reused, we keep them on a free list.
59 */
60 static struct resource *bootmem_resource_free;
61 static DEFINE_SPINLOCK(bootmem_resource_lock);
62
63 static struct resource *next_resource(struct resource *p, bool sibling_only)
64 {
65 /* Caller wants to traverse through siblings only */
66 if (sibling_only)
67 return p->sibling;
68
69 if (p->child)
70 return p->child;
71 while (!p->sibling && p->parent)
72 p = p->parent;
73 return p->sibling;
74 }
75
76 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
77 {
78 struct resource *p = v;
79 (*pos)++;
80 return (void *)next_resource(p, false);
81 }
82
83 #ifdef CONFIG_PROC_FS
84
85 enum { MAX_IORES_LEVEL = 5 };
86
87 static void *r_start(struct seq_file *m, loff_t *pos)
88 __acquires(resource_lock)
89 {
90 struct resource *p = PDE_DATA(file_inode(m->file));
91 loff_t l = 0;
92 read_lock(&resource_lock);
93 for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
94 ;
95 return p;
96 }
97
98 static void r_stop(struct seq_file *m, void *v)
99 __releases(resource_lock)
100 {
101 read_unlock(&resource_lock);
102 }
103
104 static int r_show(struct seq_file *m, void *v)
105 {
106 struct resource *root = PDE_DATA(file_inode(m->file));
107 struct resource *r = v, *p;
108 unsigned long long start, end;
109 int width = root->end < 0x10000 ? 4 : 8;
110 int depth;
111
112 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
113 if (p->parent == root)
114 break;
115
116 if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
117 start = r->start;
118 end = r->end;
119 } else {
120 start = end = 0;
121 }
122
123 seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
124 depth * 2, "",
125 width, start,
126 width, end,
127 r->name ? r->name : "<BAD>");
128 return 0;
129 }
130
131 static const struct seq_operations resource_op = {
132 .start = r_start,
133 .next = r_next,
134 .stop = r_stop,
135 .show = r_show,
136 };
137
138 static int __init ioresources_init(void)
139 {
140 proc_create_seq_data("ioports", 0, NULL, &resource_op,
141 &ioport_resource);
142 proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
143 return 0;
144 }
145 __initcall(ioresources_init);
146
147 #endif /* CONFIG_PROC_FS */
148
149 static void free_resource(struct resource *res)
150 {
151 if (!res)
152 return;
153
154 if (!PageSlab(virt_to_head_page(res))) {
155 spin_lock(&bootmem_resource_lock);
156 res->sibling = bootmem_resource_free;
157 bootmem_resource_free = res;
158 spin_unlock(&bootmem_resource_lock);
159 } else {
160 kfree(res);
161 }
162 }
163
164 static struct resource *alloc_resource(gfp_t flags)
165 {
166 struct resource *res = NULL;
167
168 spin_lock(&bootmem_resource_lock);
169 if (bootmem_resource_free) {
170 res = bootmem_resource_free;
171 bootmem_resource_free = res->sibling;
172 }
173 spin_unlock(&bootmem_resource_lock);
174
175 if (res)
176 memset(res, 0, sizeof(struct resource));
177 else
178 res = kzalloc(sizeof(struct resource), flags);
179
180 return res;
181 }
182
183 /* Return the conflict entry if you can't request it */
184 static struct resource * __request_resource(struct resource *root, struct resource *new)
185 {
186 resource_size_t start = new->start;
187 resource_size_t end = new->end;
188 struct resource *tmp, **p;
189
190 if (end < start)
191 return root;
192 if (start < root->start)
193 return root;
194 if (end > root->end)
195 return root;
196 p = &root->child;
197 for (;;) {
198 tmp = *p;
199 if (!tmp || tmp->start > end) {
200 new->sibling = tmp;
201 *p = new;
202 new->parent = root;
203 return NULL;
204 }
205 p = &tmp->sibling;
206 if (tmp->end < start)
207 continue;
208 return tmp;
209 }
210 }
211
212 static int __release_resource(struct resource *old, bool release_child)
213 {
214 struct resource *tmp, **p, *chd;
215
216 p = &old->parent->child;
217 for (;;) {
218 tmp = *p;
219 if (!tmp)
220 break;
221 if (tmp == old) {
222 if (release_child || !(tmp->child)) {
223 *p = tmp->sibling;
224 } else {
225 for (chd = tmp->child;; chd = chd->sibling) {
226 chd->parent = tmp->parent;
227 if (!(chd->sibling))
228 break;
229 }
230 *p = tmp->child;
231 chd->sibling = tmp->sibling;
232 }
233 old->parent = NULL;
234 return 0;
235 }
236 p = &tmp->sibling;
237 }
238 return -EINVAL;
239 }
240
241 static void __release_child_resources(struct resource *r)
242 {
243 struct resource *tmp, *p;
244 resource_size_t size;
245
246 p = r->child;
247 r->child = NULL;
248 while (p) {
249 tmp = p;
250 p = p->sibling;
251
252 tmp->parent = NULL;
253 tmp->sibling = NULL;
254 __release_child_resources(tmp);
255
256 printk(KERN_DEBUG "release child resource %pR\n", tmp);
257 /* need to restore size, and keep flags */
258 size = resource_size(tmp);
259 tmp->start = 0;
260 tmp->end = size - 1;
261 }
262 }
263
264 void release_child_resources(struct resource *r)
265 {
266 write_lock(&resource_lock);
267 __release_child_resources(r);
268 write_unlock(&resource_lock);
269 }
270
271 /**
272 * request_resource_conflict - request and reserve an I/O or memory resource
273 * @root: root resource descriptor
274 * @new: resource descriptor desired by caller
275 *
276  * Returns NULL on success, the conflicting resource on error.
277 */
278 struct resource *request_resource_conflict(struct resource *root, struct resource *new)
279 {
280 struct resource *conflict;
281
282 write_lock(&resource_lock);
283 conflict = __request_resource(root, new);
284 write_unlock(&resource_lock);
285 return conflict;
286 }
287
288 /**
289 * request_resource - request and reserve an I/O or memory resource
290 * @root: root resource descriptor
291 * @new: resource descriptor desired by caller
292 *
293 * Returns 0 for success, negative error code on error.
294 */
295 int request_resource(struct resource *root, struct resource *new)
296 {
297 struct resource *conflict;
298
299 conflict = request_resource_conflict(root, new);
300 return conflict ? -EBUSY : 0;
301 }
302
303 EXPORT_SYMBOL(request_resource);
304
305 /**
306 * release_resource - release a previously reserved resource
307 * @old: resource pointer
308 */
309 int release_resource(struct resource *old)
310 {
311 int retval;
312
313 write_lock(&resource_lock);
314 retval = __release_resource(old, true);
315 write_unlock(&resource_lock);
316 return retval;
317 }
318
319 EXPORT_SYMBOL(release_resource);
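
/*
 * Illustrative sketch, not part of the original file: a driver that claims a
 * fixed MMIO window directly against iomem_resource and later gives it back.
 * The "foo" names and addresses are made up for the example; most drivers use
 * the request_mem_region() wrapper instead.
 *
 *	static struct resource foo_window = {
 *		.name	= "foo-regs",
 *		.start	= 0xfed40000,
 *		.end	= 0xfed40fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	static int foo_reserve(void)
 *	{
 *		if (request_resource(&iomem_resource, &foo_window))
 *			return -EBUSY;
 *		return 0;
 *	}
 *
 *	static void foo_unreserve(void)
 *	{
 *		release_resource(&foo_window);
 *	}
 */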
320
321 /**
322 * Finds the lowest iomem resource that covers part of [@start..@end]. The
323 * caller must specify @start, @end, @flags, and @desc (which may be
324 * IORES_DESC_NONE).
325 *
326 * If a resource is found, returns 0 and @*res is overwritten with the part
327  * of the resource that's within [@start..@end]; if none is found, returns
328  * -1. Returns -EINVAL for invalid parameters.
329 *
330  * This function walks the whole tree (not just the first-level children)
331  * unless @first_lvl is true.
332 *
333 * @start: start address of the resource searched for
334 * @end: end address of same resource
335 * @flags: flags which the resource must have
336 * @desc: descriptor the resource must have
337 * @first_lvl: walk only the first level children, if set
338 * @res: return ptr, if resource found
339 */
340 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
341 unsigned long flags, unsigned long desc,
342 bool first_lvl, struct resource *res)
343 {
344 struct resource *p;
345
346 if (!res)
347 return -EINVAL;
348
349 if (start >= end)
350 return -EINVAL;
351
352 read_lock(&resource_lock);
353
354 for (p = iomem_resource.child; p; p = next_resource(p, first_lvl)) {
355 if ((p->flags & flags) != flags)
356 continue;
357 if ((desc != IORES_DESC_NONE) && (desc != p->desc))
358 continue;
359 if (p->start > end) {
360 p = NULL;
361 break;
362 }
363 if ((p->end >= start) && (p->start <= end))
364 break;
365 }
366
367 read_unlock(&resource_lock);
368 if (!p)
369 return -1;
370
371 /* copy data */
372 res->start = max(start, p->start);
373 res->end = min(end, p->end);
374 res->flags = p->flags;
375 res->desc = p->desc;
376 return 0;
377 }
378
379 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
380 unsigned long flags, unsigned long desc,
381 bool first_lvl, void *arg,
382 int (*func)(struct resource *, void *))
383 {
384 struct resource res;
385 int ret = -1;
386
387 while (start < end &&
388 !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
389 ret = (*func)(&res, arg);
390 if (ret)
391 break;
392
393 start = res.end + 1;
394 }
395
396 return ret;
397 }
398
399 /**
400 * Walks through iomem resources and calls func() with matching resource
401  * ranges. This walks through the whole tree and not just the first-level
402  * children. All memory ranges that overlap [@start..@end] and also match
403  * @flags and @desc are valid candidates.
404 *
405 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
406 * @flags: I/O resource flags
407 * @start: start addr
408 * @end: end addr
409 * @arg: function argument for the callback @func
410 * @func: callback function that is called for each qualifying resource area
411 *
412 * NOTE: For a new descriptor search, define a new IORES_DESC in
413 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
414 */
415 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
416 u64 end, void *arg, int (*func)(struct resource *, void *))
417 {
418 return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
419 }
420 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
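
/*
 * Illustrative sketch, not part of the original file: summing up all busy
 * System RAM below 4 GiB with walk_iomem_res_desc().  The callback and
 * variable names are made up for the example.
 *
 *	static int foo_sum_ram(struct resource *res, void *arg)
 *	{
 *		u64 *total = arg;
 *
 *		*total += resource_size(res);
 *		return 0;	// keep walking
 *	}
 *
 *	u64 total = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_NONE,
 *			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
 *			    0, SZ_4G - 1, &total, foo_sum_ram);
 */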
421
422 /*
423 * This function calls the @func callback against all memory ranges of type
424  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
425  * This function is only for System RAM; it deals with full ranges and
426 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
427 * ranges.
428 */
429 int walk_system_ram_res(u64 start, u64 end, void *arg,
430 int (*func)(struct resource *, void *))
431 {
432 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
433
434 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
435 arg, func);
436 }
437
438 /*
439  * This function calls the @func callback against all memory ranges that are
440  * marked as IORESOURCE_MEM and IORESOURCE_BUSY.
441 */
442 int walk_mem_res(u64 start, u64 end, void *arg,
443 int (*func)(struct resource *, void *))
444 {
445 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
446
447 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
448 arg, func);
449 }
450
451 #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
452
453 /*
454 * This function calls the @func callback against all memory ranges of type
455  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
456 * It is to be used only for System RAM.
457 */
458 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
459 void *arg, int (*func)(unsigned long, unsigned long, void *))
460 {
461 resource_size_t start, end;
462 unsigned long flags;
463 struct resource res;
464 unsigned long pfn, end_pfn;
465 int ret = -1;
466
467 start = (u64) start_pfn << PAGE_SHIFT;
468 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
469 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
470 while (start < end &&
471 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
472 true, &res)) {
473 pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
474 end_pfn = (res.end + 1) >> PAGE_SHIFT;
475 if (end_pfn > pfn)
476 ret = (*func)(pfn, end_pfn - pfn, arg);
477 if (ret)
478 break;
479 start = res.end + 1;
480 }
481 return ret;
482 }
483
484 #endif
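
/*
 * Illustrative sketch, not part of the original file: a PFN-based walk over
 * busy System RAM, similar to what memory hotplug code does.  The callback
 * name is made up for the example.
 *
 *	static int foo_range_cb(unsigned long start_pfn, unsigned long nr_pages,
 *				void *arg)
 *	{
 *		pr_info("RAM range: %lu pages starting at PFN %lu\n",
 *			nr_pages, start_pfn);
 *		return 0;
 *	}
 *
 *	walk_system_ram_range(start_pfn, nr_pages, NULL, foo_range_cb);
 */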
485
486 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
487 {
488 return 1;
489 }
490
491 /*
492  * This generic page_is_ram() returns true if the specified address is
493  * registered as System RAM in the iomem_resource list.
494 */
495 int __weak page_is_ram(unsigned long pfn)
496 {
497 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
498 }
499 EXPORT_SYMBOL_GPL(page_is_ram);
500
501 /**
502 * region_intersects() - determine intersection of region with known resources
503 * @start: region start address
504 * @size: size of region
505 * @flags: flags of resource (in iomem_resource)
506 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
507 *
508 * Check if the specified region partially overlaps or fully eclipses a
509 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
510 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
511 * return REGION_MIXED if the region overlaps @flags/@desc and another
512 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
513 * and no other defined resource. Note that REGION_INTERSECTS is also
514 * returned in the case when the specified region overlaps RAM and undefined
515 * memory holes.
516 *
517  * region_intersects() is used by memory remapping functions to ensure
518  * the user is not remapping RAM and is a vast speed-up over walking
519 * through the resource table page by page.
520 */
521 int region_intersects(resource_size_t start, size_t size, unsigned long flags,
522 unsigned long desc)
523 {
524 resource_size_t end = start + size - 1;
525 int type = 0; int other = 0;
526 struct resource *p;
527
528 read_lock(&resource_lock);
529 for (p = iomem_resource.child; p ; p = p->sibling) {
530 bool is_type = (((p->flags & flags) == flags) &&
531 ((desc == IORES_DESC_NONE) ||
532 (desc == p->desc)));
533
534 if (start >= p->start && start <= p->end)
535 is_type ? type++ : other++;
536 if (end >= p->start && end <= p->end)
537 is_type ? type++ : other++;
538 if (p->start >= start && p->end <= end)
539 is_type ? type++ : other++;
540 }
541 read_unlock(&resource_lock);
542
543 if (other == 0)
544 return type ? REGION_INTERSECTS : REGION_DISJOINT;
545
546 if (type)
547 return REGION_MIXED;
548
549 return REGION_DISJOINT;
550 }
551 EXPORT_SYMBOL_GPL(region_intersects);
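
/*
 * Illustrative sketch, not part of the original file: the typical use is to
 * refuse a remap request that touches System RAM.  The variable names are
 * made up for the example.
 *
 *	if (region_intersects(phys_addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;	// region overlaps (or mixes with) RAM
 */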
552
553 void __weak arch_remove_reservations(struct resource *avail)
554 {
555 }
556
557 static resource_size_t simple_align_resource(void *data,
558 const struct resource *avail,
559 resource_size_t size,
560 resource_size_t align)
561 {
562 return avail->start;
563 }
564
565 static void resource_clip(struct resource *res, resource_size_t min,
566 resource_size_t max)
567 {
568 if (res->start < min)
569 res->start = min;
570 if (res->end > max)
571 res->end = max;
572 }
573
574 /*
575 * Find empty slot in the resource tree with the given range and
576 * alignment constraints
577 */
578 static int __find_resource(struct resource *root, struct resource *old,
579 struct resource *new,
580 resource_size_t size,
581 struct resource_constraint *constraint)
582 {
583 struct resource *this = root->child;
584 struct resource tmp = *new, avail, alloc;
585
586 tmp.start = root->start;
587 /*
588 * Skip past an allocated resource that starts at 0, since the assignment
589 * of this->start - 1 to tmp->end below would cause an underflow.
590 */
591 if (this && this->start == root->start) {
592 tmp.start = (this == old) ? old->start : this->end + 1;
593 this = this->sibling;
594 }
595 for(;;) {
596 if (this)
597 tmp.end = (this == old) ? this->end : this->start - 1;
598 else
599 tmp.end = root->end;
600
601 if (tmp.end < tmp.start)
602 goto next;
603
604 resource_clip(&tmp, constraint->min, constraint->max);
605 arch_remove_reservations(&tmp);
606
607 /* Check for overflow after ALIGN() */
608 avail.start = ALIGN(tmp.start, constraint->align);
609 avail.end = tmp.end;
610 avail.flags = new->flags & ~IORESOURCE_UNSET;
611 if (avail.start >= tmp.start) {
612 alloc.flags = avail.flags;
613 alloc.start = constraint->alignf(constraint->alignf_data, &avail,
614 size, constraint->align);
615 alloc.end = alloc.start + size - 1;
616 if (alloc.start <= alloc.end &&
617 resource_contains(&avail, &alloc)) {
618 new->start = alloc.start;
619 new->end = alloc.end;
620 return 0;
621 }
622 }
623
624 next: if (!this || this->end == root->end)
625 break;
626
627 if (this != old)
628 tmp.start = this->end + 1;
629 this = this->sibling;
630 }
631 return -EBUSY;
632 }
633
634 /*
635 * Find empty slot in the resource tree given range and alignment.
636 */
637 static int find_resource(struct resource *root, struct resource *new,
638 resource_size_t size,
639 struct resource_constraint *constraint)
640 {
641 return __find_resource(root, NULL, new, size, constraint);
642 }
643
644 /**
645 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
646  * The resource will be relocated if the new size cannot be accommodated at
647  * the current location.
648 *
649 * @root: root resource descriptor
650 * @old: resource descriptor desired by caller
651 * @newsize: new size of the resource descriptor
652 * @constraint: the size and alignment constraints to be met.
653 */
654 static int reallocate_resource(struct resource *root, struct resource *old,
655 resource_size_t newsize,
656 struct resource_constraint *constraint)
657 {
658 int err=0;
659 struct resource new = *old;
660 struct resource *conflict;
661
662 write_lock(&resource_lock);
663
664 if ((err = __find_resource(root, old, &new, newsize, constraint)))
665 goto out;
666
667 if (resource_contains(&new, old)) {
668 old->start = new.start;
669 old->end = new.end;
670 goto out;
671 }
672
673 if (old->child) {
674 err = -EBUSY;
675 goto out;
676 }
677
678 if (resource_contains(old, &new)) {
679 old->start = new.start;
680 old->end = new.end;
681 } else {
682 __release_resource(old, true);
683 *old = new;
684 conflict = __request_resource(root, old);
685 BUG_ON(conflict);
686 }
687 out:
688 write_unlock(&resource_lock);
689 return err;
690 }
691
692
693 /**
694 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
695  * If the resource was already allocated, it is reallocated with the new size.
696 * @root: root resource descriptor
697 * @new: resource descriptor desired by caller
698 * @size: requested resource region size
699 * @min: minimum boundary to allocate
700 * @max: maximum boundary to allocate
701 * @align: alignment requested, in bytes
702 * @alignf: alignment function, optional, called if not NULL
703 * @alignf_data: arbitrary data to pass to the @alignf function
704 */
705 int allocate_resource(struct resource *root, struct resource *new,
706 resource_size_t size, resource_size_t min,
707 resource_size_t max, resource_size_t align,
708 resource_size_t (*alignf)(void *,
709 const struct resource *,
710 resource_size_t,
711 resource_size_t),
712 void *alignf_data)
713 {
714 int err;
715 struct resource_constraint constraint;
716
717 if (!alignf)
718 alignf = simple_align_resource;
719
720 constraint.min = min;
721 constraint.max = max;
722 constraint.align = align;
723 constraint.alignf = alignf;
724 constraint.alignf_data = alignf_data;
725
726 if ( new->parent ) {
727 /* resource is already allocated, try reallocating with
728 the new constraints */
729 return reallocate_resource(root, new, size, &constraint);
730 }
731
732 write_lock(&resource_lock);
733 err = find_resource(root, new, size, &constraint);
734 if (err >= 0 && __request_resource(root, new))
735 err = -EBUSY;
736 write_unlock(&resource_lock);
737 return err;
738 }
739
740 EXPORT_SYMBOL(allocate_resource);
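
/*
 * Illustrative sketch, not part of the original file: asking for any free,
 * page-aligned 64 KiB window below 4 GiB, using the default alignf.  The
 * resource and bounds are made up for the example.
 *
 *	static struct resource foo_buf = {
 *		.name	= "foo-buffer",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &foo_buf, SZ_64K,
 *				0, 0xffffffff, PAGE_SIZE, NULL, NULL);
 *	if (err)
 *		return err;	// no free slot satisfies the constraints
 */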
741
742 /**
743 * lookup_resource - find an existing resource by a resource start address
744 * @root: root resource descriptor
745 * @start: resource start address
746 *
747 * Returns a pointer to the resource if found, NULL otherwise
748 */
749 struct resource *lookup_resource(struct resource *root, resource_size_t start)
750 {
751 struct resource *res;
752
753 read_lock(&resource_lock);
754 for (res = root->child; res; res = res->sibling) {
755 if (res->start == start)
756 break;
757 }
758 read_unlock(&resource_lock);
759
760 return res;
761 }
762
763 /*
764 * Insert a resource into the resource tree. If successful, return NULL,
765 * otherwise return the conflicting resource (compare to __request_resource())
766 */
767 static struct resource * __insert_resource(struct resource *parent, struct resource *new)
768 {
769 struct resource *first, *next;
770
771 for (;; parent = first) {
772 first = __request_resource(parent, new);
773 if (!first)
774 return first;
775
776 if (first == parent)
777 return first;
778 if (WARN_ON(first == new)) /* duplicated insertion */
779 return first;
780
781 if ((first->start > new->start) || (first->end < new->end))
782 break;
783 if ((first->start == new->start) && (first->end == new->end))
784 break;
785 }
786
787 for (next = first; ; next = next->sibling) {
788 /* Partial overlap? Bad, and unfixable */
789 if (next->start < new->start || next->end > new->end)
790 return next;
791 if (!next->sibling)
792 break;
793 if (next->sibling->start > new->end)
794 break;
795 }
796
797 new->parent = parent;
798 new->sibling = next->sibling;
799 new->child = first;
800
801 next->sibling = NULL;
802 for (next = first; next; next = next->sibling)
803 next->parent = new;
804
805 if (parent->child == first) {
806 parent->child = new;
807 } else {
808 next = parent->child;
809 while (next->sibling != first)
810 next = next->sibling;
811 next->sibling = new;
812 }
813 return NULL;
814 }
815
816 /**
817 * insert_resource_conflict - Inserts resource in the resource tree
818 * @parent: parent of the new resource
819 * @new: new resource to insert
820 *
821  * Returns NULL on success, the conflicting resource if the resource can't be inserted.
822 *
823 * This function is equivalent to request_resource_conflict when no conflict
824 * happens. If a conflict happens, and the conflicting resources
825 * entirely fit within the range of the new resource, then the new
826 * resource is inserted and the conflicting resources become children of
827 * the new resource.
828 *
829 * This function is intended for producers of resources, such as FW modules
830 * and bus drivers.
831 */
832 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
833 {
834 struct resource *conflict;
835
836 write_lock(&resource_lock);
837 conflict = __insert_resource(parent, new);
838 write_unlock(&resource_lock);
839 return conflict;
840 }
841
842 /**
843 * insert_resource - Inserts a resource in the resource tree
844 * @parent: parent of the new resource
845 * @new: new resource to insert
846 *
847 * Returns 0 on success, -EBUSY if the resource can't be inserted.
848 *
849 * This function is intended for producers of resources, such as FW modules
850 * and bus drivers.
851 */
852 int insert_resource(struct resource *parent, struct resource *new)
853 {
854 struct resource *conflict;
855
856 conflict = insert_resource_conflict(parent, new);
857 return conflict ? -EBUSY : 0;
858 }
859 EXPORT_SYMBOL_GPL(insert_resource);
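
/*
 * Illustrative sketch, not part of the original file: firmware glue code
 * publishing a reserved range it discovered; any existing entries that fit
 * entirely inside it simply become its children.  The name and addresses are
 * made up for the example.
 *
 *	static struct resource fw_region = {
 *		.name	= "fw-reserved",
 *		.start	= 0x80000000,
 *		.end	= 0x80ffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_region))
 *		pr_warn("could not insert fw-reserved range\n");
 */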
860
861 /**
862 * insert_resource_expand_to_fit - Insert a resource into the resource tree
863 * @root: root resource descriptor
864 * @new: new resource to insert
865 *
866 * Insert a resource into the resource tree, possibly expanding it in order
867 * to make it encompass any conflicting resources.
868 */
869 void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
870 {
871 if (new->parent)
872 return;
873
874 write_lock(&resource_lock);
875 for (;;) {
876 struct resource *conflict;
877
878 conflict = __insert_resource(root, new);
879 if (!conflict)
880 break;
881 if (conflict == root)
882 break;
883
884 /* Ok, expand resource to cover the conflict, then try again .. */
885 if (conflict->start < new->start)
886 new->start = conflict->start;
887 if (conflict->end > new->end)
888 new->end = conflict->end;
889
890 printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
891 }
892 write_unlock(&resource_lock);
893 }
894
895 /**
896 * remove_resource - Remove a resource in the resource tree
897 * @old: resource to remove
898 *
899 * Returns 0 on success, -EINVAL if the resource is not valid.
900 *
901 * This function removes a resource previously inserted by insert_resource()
902 * or insert_resource_conflict(), and moves the children (if any) up to
903 * where they were before. insert_resource() and insert_resource_conflict()
904 * insert a new resource, and move any conflicting resources down to the
905 * children of the new resource.
906 *
907 * insert_resource(), insert_resource_conflict() and remove_resource() are
908 * intended for producers of resources, such as FW modules and bus drivers.
909 */
910 int remove_resource(struct resource *old)
911 {
912 int retval;
913
914 write_lock(&resource_lock);
915 retval = __release_resource(old, false);
916 write_unlock(&resource_lock);
917 return retval;
918 }
919 EXPORT_SYMBOL_GPL(remove_resource);
920
921 static int __adjust_resource(struct resource *res, resource_size_t start,
922 resource_size_t size)
923 {
924 struct resource *tmp, *parent = res->parent;
925 resource_size_t end = start + size - 1;
926 int result = -EBUSY;
927
928 if (!parent)
929 goto skip;
930
931 if ((start < parent->start) || (end > parent->end))
932 goto out;
933
934 if (res->sibling && (res->sibling->start <= end))
935 goto out;
936
937 tmp = parent->child;
938 if (tmp != res) {
939 while (tmp->sibling != res)
940 tmp = tmp->sibling;
941 if (start <= tmp->end)
942 goto out;
943 }
944
945 skip:
946 for (tmp = res->child; tmp; tmp = tmp->sibling)
947 if ((tmp->start < start) || (tmp->end > end))
948 goto out;
949
950 res->start = start;
951 res->end = end;
952 result = 0;
953
954 out:
955 return result;
956 }
957
958 /**
959 * adjust_resource - modify a resource's start and size
960 * @res: resource to modify
961 * @start: new start value
962 * @size: new size
963 *
964 * Given an existing resource, change its start and size to match the
965 * arguments. Returns 0 on success, -EBUSY if it can't fit.
966 * Existing children of the resource are assumed to be immutable.
967 */
968 int adjust_resource(struct resource *res, resource_size_t start,
969 resource_size_t size)
970 {
971 int result;
972
973 write_lock(&resource_lock);
974 result = __adjust_resource(res, start, size);
975 write_unlock(&resource_lock);
976 return result;
977 }
978 EXPORT_SYMBOL(adjust_resource);
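
/*
 * Illustrative sketch, not part of the original file: shrinking a previously
 * requested resource in place; the start is kept and only the size changes.
 * The resource name is made up for the example.
 *
 *	err = adjust_resource(&foo_window, foo_window.start,
 *			      resource_size(&foo_window) / 2);
 */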
979
980 static void __init
981 __reserve_region_with_split(struct resource *root, resource_size_t start,
982 resource_size_t end, const char *name)
983 {
984 struct resource *parent = root;
985 struct resource *conflict;
986 struct resource *res = alloc_resource(GFP_ATOMIC);
987 struct resource *next_res = NULL;
988 int type = resource_type(root);
989
990 if (!res)
991 return;
992
993 res->name = name;
994 res->start = start;
995 res->end = end;
996 res->flags = type | IORESOURCE_BUSY;
997 res->desc = IORES_DESC_NONE;
998
999 while (1) {
1000
1001 conflict = __request_resource(parent, res);
1002 if (!conflict) {
1003 if (!next_res)
1004 break;
1005 res = next_res;
1006 next_res = NULL;
1007 continue;
1008 }
1009
1010 /* conflict covered whole area */
1011 if (conflict->start <= res->start &&
1012 conflict->end >= res->end) {
1013 free_resource(res);
1014 WARN_ON(next_res);
1015 break;
1016 }
1017
1018 /* failed, split and try again */
1019 if (conflict->start > res->start) {
1020 end = res->end;
1021 res->end = conflict->start - 1;
1022 if (conflict->end < end) {
1023 next_res = alloc_resource(GFP_ATOMIC);
1024 if (!next_res) {
1025 free_resource(res);
1026 break;
1027 }
1028 next_res->name = name;
1029 next_res->start = conflict->end + 1;
1030 next_res->end = end;
1031 next_res->flags = type | IORESOURCE_BUSY;
1032 next_res->desc = IORES_DESC_NONE;
1033 }
1034 } else {
1035 res->start = conflict->end + 1;
1036 }
1037 }
1038
1039 }
1040
1041 void __init
1042 reserve_region_with_split(struct resource *root, resource_size_t start,
1043 resource_size_t end, const char *name)
1044 {
1045 int abort = 0;
1046
1047 write_lock(&resource_lock);
1048 if (root->start > start || root->end < end) {
1049 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1050 (unsigned long long)start, (unsigned long long)end,
1051 root);
1052 if (start > root->end || end < root->start)
1053 abort = 1;
1054 else {
1055 if (end > root->end)
1056 end = root->end;
1057 if (start < root->start)
1058 start = root->start;
1059 pr_err("fixing request to [0x%llx-0x%llx]\n",
1060 (unsigned long long)start,
1061 (unsigned long long)end);
1062 }
1063 dump_stack();
1064 }
1065 if (!abort)
1066 __reserve_region_with_split(root, start, end, name);
1067 write_unlock(&resource_lock);
1068 }
1069
1070 /**
1071 * resource_alignment - calculate resource's alignment
1072 * @res: resource pointer
1073 *
1074 * Returns alignment on success, 0 (invalid alignment) on failure.
1075 */
1076 resource_size_t resource_alignment(struct resource *res)
1077 {
1078 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1079 case IORESOURCE_SIZEALIGN:
1080 return resource_size(res);
1081 case IORESOURCE_STARTALIGN:
1082 return res->start;
1083 default:
1084 return 0;
1085 }
1086 }
1087
1088 /*
1089 * This is compatibility stuff for IO resources.
1090 *
1091 * Note how this, unlike the above, knows about
1092 * the IO flag meanings (busy etc).
1093 *
1094 * request_region creates a new busy region.
1095 *
1096 * release_region releases a matching busy region.
1097 */
1098
1099 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1100
1101 /**
1102 * __request_region - create a new busy resource region
1103 * @parent: parent resource descriptor
1104 * @start: resource start address
1105 * @n: resource region size
1106 * @name: reserving caller's ID string
1107 * @flags: IO resource flags
1108 */
1109 struct resource * __request_region(struct resource *parent,
1110 resource_size_t start, resource_size_t n,
1111 const char *name, int flags)
1112 {
1113 DECLARE_WAITQUEUE(wait, current);
1114 struct resource *res = alloc_resource(GFP_KERNEL);
1115
1116 if (!res)
1117 return NULL;
1118
1119 res->name = name;
1120 res->start = start;
1121 res->end = start + n - 1;
1122
1123 write_lock(&resource_lock);
1124
1125 for (;;) {
1126 struct resource *conflict;
1127
1128 res->flags = resource_type(parent) | resource_ext_type(parent);
1129 res->flags |= IORESOURCE_BUSY | flags;
1130 res->desc = parent->desc;
1131
1132 conflict = __request_resource(parent, res);
1133 if (!conflict)
1134 break;
1135 if (conflict != parent) {
1136 if (!(conflict->flags & IORESOURCE_BUSY)) {
1137 parent = conflict;
1138 continue;
1139 }
1140 }
1141 if (conflict->flags & flags & IORESOURCE_MUXED) {
1142 add_wait_queue(&muxed_resource_wait, &wait);
1143 write_unlock(&resource_lock);
1144 set_current_state(TASK_UNINTERRUPTIBLE);
1145 schedule();
1146 remove_wait_queue(&muxed_resource_wait, &wait);
1147 write_lock(&resource_lock);
1148 continue;
1149 }
1150 /* Uhhuh, that didn't work out.. */
1151 free_resource(res);
1152 res = NULL;
1153 break;
1154 }
1155 write_unlock(&resource_lock);
1156 return res;
1157 }
1158 EXPORT_SYMBOL(__request_region);
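
/*
 * Illustrative sketch, not part of the original file: drivers normally use
 * the request_region()/request_mem_region() wrappers from <linux/ioport.h>,
 * which call __request_region() against the appropriate root resource.  The
 * PCI device and name below are made up for the example.
 *
 *	if (!request_mem_region(pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0), "foo"))
 *		return -EBUSY;
 *
 *	...
 *
 *	release_mem_region(pci_resource_start(pdev, 0),
 *			   pci_resource_len(pdev, 0));
 */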
1159
1160 /**
1161 * __release_region - release a previously reserved resource region
1162 * @parent: parent resource descriptor
1163 * @start: resource start address
1164 * @n: resource region size
1165 *
1166 * The described resource region must match a currently busy region.
1167 */
1168 void __release_region(struct resource *parent, resource_size_t start,
1169 resource_size_t n)
1170 {
1171 struct resource **p;
1172 resource_size_t end;
1173
1174 p = &parent->child;
1175 end = start + n - 1;
1176
1177 write_lock(&resource_lock);
1178
1179 for (;;) {
1180 struct resource *res = *p;
1181
1182 if (!res)
1183 break;
1184 if (res->start <= start && res->end >= end) {
1185 if (!(res->flags & IORESOURCE_BUSY)) {
1186 p = &res->child;
1187 continue;
1188 }
1189 if (res->start != start || res->end != end)
1190 break;
1191 *p = res->sibling;
1192 write_unlock(&resource_lock);
1193 if (res->flags & IORESOURCE_MUXED)
1194 wake_up(&muxed_resource_wait);
1195 free_resource(res);
1196 return;
1197 }
1198 p = &res->sibling;
1199 }
1200
1201 write_unlock(&resource_lock);
1202
1203 printk(KERN_WARNING "Trying to free nonexistent resource "
1204 "<%016llx-%016llx>\n", (unsigned long long)start,
1205 (unsigned long long)end);
1206 }
1207 EXPORT_SYMBOL(__release_region);
1208
1209 #ifdef CONFIG_MEMORY_HOTREMOVE
1210 /**
1211 * release_mem_region_adjustable - release a previously reserved memory region
1212 * @parent: parent resource descriptor
1213 * @start: resource start address
1214 * @size: resource region size
1215 *
1216 * This interface is intended for memory hot-delete. The requested region
1217 * is released from a currently busy memory resource. The requested region
1218 * must either match exactly or fit into a single busy resource entry. In
1219 * the latter case, the remaining resource is adjusted accordingly.
1220 * Existing children of the busy memory resource must be immutable in the
1221 * request.
1222 *
1223 * Note:
1224 * - Additional release conditions, such as overlapping region, can be
1225 * supported after they are confirmed as valid cases.
1226 * - When a busy memory resource gets split into two entries, the code
1227 * assumes that all children remain in the lower address entry for
1228 * simplicity. Enhance this logic when necessary.
1229 */
1230 int release_mem_region_adjustable(struct resource *parent,
1231 resource_size_t start, resource_size_t size)
1232 {
1233 struct resource **p;
1234 struct resource *res;
1235 struct resource *new_res;
1236 resource_size_t end;
1237 int ret = -EINVAL;
1238
1239 end = start + size - 1;
1240 if ((start < parent->start) || (end > parent->end))
1241 return ret;
1242
1243 /* The alloc_resource() result gets checked later */
1244 new_res = alloc_resource(GFP_KERNEL);
1245
1246 p = &parent->child;
1247 write_lock(&resource_lock);
1248
1249 while ((res = *p)) {
1250 if (res->start >= end)
1251 break;
1252
1253 		/* look for the next resource if the region does not fit into this one */
1254 if (res->start > start || res->end < end) {
1255 p = &res->sibling;
1256 continue;
1257 }
1258
1259 if (!(res->flags & IORESOURCE_MEM))
1260 break;
1261
1262 if (!(res->flags & IORESOURCE_BUSY)) {
1263 p = &res->child;
1264 continue;
1265 }
1266
1267 /* found the target resource; let's adjust accordingly */
1268 if (res->start == start && res->end == end) {
1269 /* free the whole entry */
1270 *p = res->sibling;
1271 free_resource(res);
1272 ret = 0;
1273 } else if (res->start == start && res->end != end) {
1274 /* adjust the start */
1275 ret = __adjust_resource(res, end + 1,
1276 res->end - end);
1277 } else if (res->start != start && res->end == end) {
1278 /* adjust the end */
1279 ret = __adjust_resource(res, res->start,
1280 start - res->start);
1281 } else {
1282 /* split into two entries */
1283 if (!new_res) {
1284 ret = -ENOMEM;
1285 break;
1286 }
1287 new_res->name = res->name;
1288 new_res->start = end + 1;
1289 new_res->end = res->end;
1290 new_res->flags = res->flags;
1291 new_res->desc = res->desc;
1292 new_res->parent = res->parent;
1293 new_res->sibling = res->sibling;
1294 new_res->child = NULL;
1295
1296 ret = __adjust_resource(res, res->start,
1297 start - res->start);
1298 if (ret)
1299 break;
1300 res->sibling = new_res;
1301 new_res = NULL;
1302 }
1303
1304 break;
1305 }
1306
1307 write_unlock(&resource_lock);
1308 free_resource(new_res);
1309 return ret;
1310 }
1311 #endif /* CONFIG_MEMORY_HOTREMOVE */
1312
1313 /*
1314 * Managed region resource
1315 */
1316 static void devm_resource_release(struct device *dev, void *ptr)
1317 {
1318 struct resource **r = ptr;
1319
1320 release_resource(*r);
1321 }
1322
1323 /**
1324 * devm_request_resource() - request and reserve an I/O or memory resource
1325 * @dev: device for which to request the resource
1326 * @root: root of the resource tree from which to request the resource
1327 * @new: descriptor of the resource to request
1328 *
1329 * This is a device-managed version of request_resource(). There is usually
1330 * no need to release resources requested by this function explicitly since
1331 * that will be taken care of when the device is unbound from its driver.
1332 * If for some reason the resource needs to be released explicitly, because
1333 * of ordering issues for example, drivers must call devm_release_resource()
1334 * rather than the regular release_resource().
1335 *
1336 * When a conflict is detected between any existing resources and the newly
1337 * requested resource, an error message will be printed.
1338 *
1339 * Returns 0 on success or a negative error code on failure.
1340 */
1341 int devm_request_resource(struct device *dev, struct resource *root,
1342 struct resource *new)
1343 {
1344 struct resource *conflict, **ptr;
1345
1346 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1347 if (!ptr)
1348 return -ENOMEM;
1349
1350 *ptr = new;
1351
1352 conflict = request_resource_conflict(root, new);
1353 if (conflict) {
1354 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1355 new, conflict->name, conflict);
1356 devres_free(ptr);
1357 return -EBUSY;
1358 }
1359
1360 devres_add(dev, ptr);
1361 return 0;
1362 }
1363 EXPORT_SYMBOL(devm_request_resource);
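
/*
 * Illustrative sketch, not part of the original file: a probe() routine
 * claiming a fixed range with the device-managed variant, so the reservation
 * is dropped automatically when the device is unbound.  The structure and
 * names are made up for the example.
 *
 *	static struct resource foo_regs = {
 *		.name	= "foo-regs",
 *		.start	= 0xfed80000,
 *		.end	= 0xfed80fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_request_resource(&pdev->dev, &iomem_resource,
 *					    &foo_regs);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */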
1364
1365 static int devm_resource_match(struct device *dev, void *res, void *data)
1366 {
1367 struct resource **ptr = res;
1368
1369 return *ptr == data;
1370 }
1371
1372 /**
1373 * devm_release_resource() - release a previously requested resource
1374 * @dev: device for which to release the resource
1375 * @new: descriptor of the resource to release
1376 *
1377 * Releases a resource previously requested using devm_request_resource().
1378 */
1379 void devm_release_resource(struct device *dev, struct resource *new)
1380 {
1381 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1382 new));
1383 }
1384 EXPORT_SYMBOL(devm_release_resource);
1385
1386 struct region_devres {
1387 struct resource *parent;
1388 resource_size_t start;
1389 resource_size_t n;
1390 };
1391
1392 static void devm_region_release(struct device *dev, void *res)
1393 {
1394 struct region_devres *this = res;
1395
1396 __release_region(this->parent, this->start, this->n);
1397 }
1398
1399 static int devm_region_match(struct device *dev, void *res, void *match_data)
1400 {
1401 struct region_devres *this = res, *match = match_data;
1402
1403 return this->parent == match->parent &&
1404 this->start == match->start && this->n == match->n;
1405 }
1406
1407 struct resource *
1408 __devm_request_region(struct device *dev, struct resource *parent,
1409 resource_size_t start, resource_size_t n, const char *name)
1410 {
1411 struct region_devres *dr = NULL;
1412 struct resource *res;
1413
1414 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1415 GFP_KERNEL);
1416 if (!dr)
1417 return NULL;
1418
1419 dr->parent = parent;
1420 dr->start = start;
1421 dr->n = n;
1422
1423 res = __request_region(parent, start, n, name, 0);
1424 if (res)
1425 devres_add(dev, dr);
1426 else
1427 devres_free(dr);
1428
1429 return res;
1430 }
1431 EXPORT_SYMBOL(__devm_request_region);
1432
1433 void __devm_release_region(struct device *dev, struct resource *parent,
1434 resource_size_t start, resource_size_t n)
1435 {
1436 struct region_devres match_data = { parent, start, n };
1437
1438 __release_region(parent, start, n);
1439 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1440 &match_data));
1441 }
1442 EXPORT_SYMBOL(__devm_release_region);
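
/*
 * Illustrative sketch, not part of the original file: the usual entry points
 * are the devm_request_region()/devm_request_mem_region() wrappers from
 * <linux/ioport.h>, e.g. in a probe() path (names made up for the example):
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res),
 *				     dev_name(&pdev->dev)))
 *		return -EBUSY;
 */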
1443
1444 /*
1445 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1446 */
1447 #define MAXRESERVE 4
1448 static int __init reserve_setup(char *str)
1449 {
1450 static int reserved;
1451 static struct resource reserve[MAXRESERVE];
1452
1453 for (;;) {
1454 unsigned int io_start, io_num;
1455 int x = reserved;
1456 struct resource *parent;
1457
1458 if (get_option(&str, &io_start) != 2)
1459 break;
1460 if (get_option(&str, &io_num) == 0)
1461 break;
1462 if (x < MAXRESERVE) {
1463 struct resource *res = reserve + x;
1464
1465 /*
1466 * If the region starts below 0x10000, we assume it's
1467 * I/O port space; otherwise assume it's memory.
1468 */
1469 if (io_start < 0x10000) {
1470 res->flags = IORESOURCE_IO;
1471 parent = &ioport_resource;
1472 } else {
1473 res->flags = IORESOURCE_MEM;
1474 parent = &iomem_resource;
1475 }
1476 res->name = "reserved";
1477 res->start = io_start;
1478 res->end = io_start + io_num - 1;
1479 res->flags |= IORESOURCE_BUSY;
1480 res->desc = IORES_DESC_NONE;
1481 res->child = NULL;
1482 if (request_resource(parent, res) == 0)
1483 reserved = x+1;
1484 }
1485 }
1486 return 1;
1487 }
1488 __setup("reserve=", reserve_setup);
1489
1490 /*
1491  * Check if the requested addr and size span more than one slot in the
1492 * iomem resource tree.
1493 */
1494 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1495 {
1496 struct resource *p = &iomem_resource;
1497 int err = 0;
1498 loff_t l;
1499
1500 read_lock(&resource_lock);
1501 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1502 /*
1503 * We can probably skip the resources without
1504 * IORESOURCE_IO attribute?
1505 */
1506 if (p->start >= addr + size)
1507 continue;
1508 if (p->end < addr)
1509 continue;
1510 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1511 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
1512 continue;
1513 /*
1514 * if a resource is "BUSY", it's not a hardware resource
1515 * but a driver mapping of such a resource; we don't want
1516 * to warn for those; some drivers legitimately map only
1517 * partial hardware resources. (example: vesafb)
1518 */
1519 if (p->flags & IORESOURCE_BUSY)
1520 continue;
1521
1522 printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
1523 (unsigned long long)addr,
1524 (unsigned long long)(addr + size - 1),
1525 p->name, p);
1526 err = -1;
1527 break;
1528 }
1529 read_unlock(&resource_lock);
1530
1531 return err;
1532 }
1533
1534 #ifdef CONFIG_STRICT_DEVMEM
1535 static int strict_iomem_checks = 1;
1536 #else
1537 static int strict_iomem_checks;
1538 #endif
1539
1540 /*
1541  * Check if an address is reserved in the iomem resource tree;
1542  * returns true if reserved, false if not.
1543 */
1544 bool iomem_is_exclusive(u64 addr)
1545 {
1546 struct resource *p = &iomem_resource;
1547 bool err = false;
1548 loff_t l;
1549 int size = PAGE_SIZE;
1550
1551 if (!strict_iomem_checks)
1552 return false;
1553
1554 addr = addr & PAGE_MASK;
1555
1556 read_lock(&resource_lock);
1557 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1558 /*
1559 * We can probably skip the resources without
1560 * IORESOURCE_IO attribute?
1561 */
1562 if (p->start >= addr + size)
1563 break;
1564 if (p->end < addr)
1565 continue;
1566 /*
1567 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
1568 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
1569 * resource is busy.
1570 */
1571 if ((p->flags & IORESOURCE_BUSY) == 0)
1572 continue;
1573 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1574 || p->flags & IORESOURCE_EXCLUSIVE) {
1575 err = true;
1576 break;
1577 }
1578 }
1579 read_unlock(&resource_lock);
1580
1581 return err;
1582 }
1583
1584 struct resource_entry *resource_list_create_entry(struct resource *res,
1585 size_t extra_size)
1586 {
1587 struct resource_entry *entry;
1588
1589 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1590 if (entry) {
1591 INIT_LIST_HEAD(&entry->node);
1592 entry->res = res ? res : &entry->__res;
1593 }
1594
1595 return entry;
1596 }
1597 EXPORT_SYMBOL(resource_list_create_entry);
1598
1599 void resource_list_free(struct list_head *head)
1600 {
1601 struct resource_entry *entry, *tmp;
1602
1603 list_for_each_entry_safe(entry, tmp, head, node)
1604 resource_list_destroy_entry(entry);
1605 }
1606 EXPORT_SYMBOL(resource_list_free);
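
/*
 * Illustrative sketch, not part of the original file: building up and tearing
 * down a resource list, as host bridge and ACPI code does.  The names and
 * range are made up for the example.
 *
 *	LIST_HEAD(foo_resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0xfe000000;
 *	entry->res->end   = 0xfeffffff;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &foo_resources);
 *
 *	...
 *
 *	resource_list_free(&foo_resources);
 */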
1607
1608 static int __init strict_iomem(char *str)
1609 {
1610 if (strstr(str, "relaxed"))
1611 strict_iomem_checks = 0;
1612 if (strstr(str, "strict"))
1613 strict_iomem_checks = 1;
1614 return 1;
1615 }
1616
1617 __setup("iomem=", strict_iomem);