// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
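/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * maps its register window once and then only touches it through the MMIO
 * accessors on the returned cookie.  The resource handling and the
 * CTRL_REG offset below are hypothetical:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(BIT(0), regs + CTRL_REG);
 *	(void)readl(regs + CTRL_REG);
 *	iounmap(regs);
 */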
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"
LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
void __check_vmalloc_seq(struct mm_struct *mm)
{
	int seq;

	do {
		seq = atomic_read(&init_mm.context.vmalloc_seq);
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		/*
		 * Use a store-release so that other CPUs that observe the
		 * counter's new value are guaranteed to see the results of the
		 * memcpy as well.
		 */
		atomic_set_release(&mm->context.vmalloc_seq, seq);
	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}
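/*
 * For reference, the consumer side of this store-release lives in
 * <asm/mmu_context.h>: check_vmalloc_seq() compares the two counters and
 * re-syncs on a mismatch, roughly (sketch, not the verbatim helper):
 *
 *	if (unlikely(atomic_read(&mm->context.vmalloc_seq) !=
 *		     atomic_read(&init_mm.context.vmalloc_seq)))
 *		__check_vmalloc_seq(mm);
 */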
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			atomic_inc_return_release(&init_mm.context.vmalloc_seq);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					caller);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
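/*
 * Example of the offset handling described above (hypothetical numbers,
 * sketch only): mapping 8 bytes at physical 0x10000004 page-aligns to a
 * one-page mapping of 0x10000000, and the returned cookie is the mapped
 * base plus the intra-page offset of 4, so the caller never sees the
 * alignment fix-up:
 *
 *	void __iomem *p = __arm_ioremap_pfn(0x10000, 0x4, 8, MT_DEVICE);
 */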
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
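/*
 * Sketch of a caller (SRAM_BASE, SRAM_SIZE and my_suspend_fn are
 * hypothetical): code that must run while SDRAM is unavailable is copied
 * into the executable mapping with fncpy() and invoked from there:
 *
 *	void __iomem *sram = __arm_ioremap_exec(SRAM_BASE, SRAM_SIZE, false);
 *
 *	if (sram)
 *		suspend_fn = fncpy(sram, &my_suspend_fn, SRAM_SIZE);
 */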
void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);
#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
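/*
 * Sketch of a host-bridge caller (resource names hypothetical): once the
 * bridge's IORESOURCE_IO window and its CPU physical base are known,
 * mapping the window lets inb()/outb() port numbers reach the right bus
 * addresses:
 *
 *	if (pci_remap_iospace(io_res, io_window_phys_base))
 *		dev_warn(dev, "unable to map PCI I/O space\n");
 */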
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return memblock_is_map_memory(__pfn_to_phys(pfn));
}