/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/page.h>

/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/driver-api/io-mapping.rst
 */

struct io_mapping {
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;
	void __iomem *iomem;
};
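
/*
 * Illustrative usage sketch; pdev, the BAR index, offset and value are
 * hypothetical placeholders:
 *
 *	struct io_mapping *iomap;
 *	void __iomem *vaddr;
 *
 *	iomap = io_mapping_create_wc(pci_resource_start(pdev, 2),
 *				     pci_resource_len(pdev, 2));
 *	if (!iomap)
 *		return -ENOMEM;
 *
 *	vaddr = io_mapping_map_local_wc(iomap, offset);
 *	writel(value, vaddr);
 *	io_mapping_unmap_local(vaddr);
 *
 *	io_mapping_free(iomap);
 */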

#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <linux/pfn.h>
#include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */

static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	if (iomap_create_wc(base, size, &prot))
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		migrate_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		migrate_enable();
}
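
/*
 * The _atomic pair above disables page faults and, depending on
 * CONFIG_PREEMPT_RT, either preemption or migration: the code between
 * map and unmap must not sleep, and nested mappings must be released
 * in reverse order. A minimal sketch (iomap, offset and value are
 * placeholders):
 *
 *	vaddr = io_mapping_map_atomic_wc(iomap, offset);
 *	writel(value, vaddr);
 *	io_mapping_unmap_atomic(vaddr);
 */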

static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}

static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
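
/*
 * The _local variants above do not disable preemption; the returned
 * address is only valid in the calling context, and unmaps must nest
 * in reverse map order, as with kmap_local_page().
 */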

static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return ioremap_wc(phys_addr, size);
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}

#else /* HAVE_ATOMIC_IOMAP */

#include <linux/uaccess.h>
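
/*
 * Without fixmap-based atomic iomaps, the whole resource is ioremap()ed
 * once at init time; the map/unmap helpers below then reduce to pointer
 * arithmetic into that persistent mapping, so io_mapping_unmap() has
 * nothing to undo.
 */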
/* Create the io_mapping object */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->iomem = ioremap_wc(base, size);
	if (!iomap->iomem)
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);

	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}

/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		migrate_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		migrate_enable();
}

static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}

#endif /* !HAVE_ATOMIC_IOMAP */
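
/*
 * Allocating wrapper around io_mapping_init_wc(): returns a kmalloc()ed
 * io_mapping, or NULL if allocation or initialization fails.
 */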
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
		     unsigned long size)
{
	struct io_mapping *iomap;

	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
	if (!iomap)
		return NULL;

	if (!io_mapping_init_wc(iomap, base, size)) {
		kfree(iomap);
		return NULL;
	}

	return iomap;
}

static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
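
/*
 * Map a contiguous pfn range of the io_mapping into a userspace vma
 * (defined in mm/io-mapping.c), e.g. from a driver's mmap or fault
 * path.
 */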
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size);

#endif /* _LINUX_IO_MAPPING_H */