// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

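/*
 * When CONFIG_HAVE_ARCH_HUGE_VMAP is enabled, ioremap_page_range() may use
 * PMD/PUD/P4D sized mappings instead of individual PTEs.  These flags record
 * which sizes the architecture reported as supported at boot, and whether the
 * "nohugeiomap" command line parameter disabled huge mappings altogether.
 */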
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

static int __init set_nohugeiomap(char *str)
{
        ioremap_huge_disabled = 1;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

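/*
 * Called once at boot to ask the architecture which huge mapping sizes it
 * supports for ioremap.  Nothing is enabled if "nohugeiomap" was given.
 */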
void __init ioremap_huge_init(void)
{
        if (!ioremap_huge_disabled) {
                if (arch_ioremap_pud_supported())
                        ioremap_pud_capable = 1;
                if (arch_ioremap_pmd_supported())
                        ioremap_pmd_capable = 1;
        }
}

static inline int ioremap_p4d_enabled(void)
{
        return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
        return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
        return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

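/*
 * Base case: map the range with individual PAGE_SIZE PTEs.  Every slot is
 * expected to be empty (see the BUG_ON); remapping over an existing mapping
 * is a caller bug.
 */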
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

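/*
 * Try to use a single huge PMD entry for this range.  Returns non-zero on
 * success so the caller can skip the PTE-level walk.  The mapping is only
 * attempted when huge PMDs are enabled, the range covers exactly PMD_SIZE,
 * the physical address is suitably aligned, and any stale page-table page
 * left behind by a previous (now unmapped) PTE mapping can be freed.
 */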
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pmd_enabled())
                return 0;

        if ((end - addr) != PMD_SIZE)
                return 0;

        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;

        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
}

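/*
 * Walk the PMDs covering [addr, end): try a huge mapping for each PMD-sized
 * chunk first, and fall back to a PTE-level mapping when that is not
 * possible.
 */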
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

                if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

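/*
 * The PUD and P4D helpers below mirror ioremap_try_huge_pmd() and
 * ioremap_pmd_range() one and two levels further up the page-table
 * hierarchy, with the same size, alignment and stale-table checks.
 */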
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pud_enabled())
                return 0;

        if ((end - addr) != PUD_SIZE)
                return 0;

        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;

        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
                return 0;

        return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

                if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_p4d_enabled())
                return 0;

        if ((end - addr) != P4D_SIZE)
                return 0;

        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
                return 0;

        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
                return 0;

        return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_alloc(&init_mm, pgd, addr);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

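/*
 * ioremap_page_range - install kernel page-table mappings for an IO range
 * @addr: start of the virtual range to map
 * @end: end of the virtual range (exclusive)
 * @phys_addr: physical address to map at @addr
 * @prot: page protection flags for the mapping
 *
 * Walks the kernel page tables for [addr, end) and points them at
 * @phys_addr, using huge entries where the architecture allows it.  May
 * sleep while allocating page tables; returns 0 on success or -ENOMEM.
 * Callers are expected to have reserved the virtual range beforehand
 * (typically an architecture's ioremap() path using a vmalloc area);
 * that is an assumption about the callers, not something enforced here.
 */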
int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pgd_t *pgd;
        unsigned long start;
        unsigned long next;
        int err;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
                if (err)
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

        flush_cache_vmap(start, end);

        return err;
}