]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob - tools/testing/nvdimm/test/iomap.c
mm/devm_memremap_pages: fix final page put race
[thirdparty/kernel/stable.git] / tools / testing / nvdimm / test / iomap.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 */
5 #include <linux/memremap.h>
6 #include <linux/rculist.h>
7 #include <linux/export.h>
8 #include <linux/ioport.h>
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/pfn_t.h>
12 #include <linux/acpi.h>
13 #include <linux/io.h>
14 #include <linux/mm.h>
15 #include "nfit_test.h"
16
/* Registration list for the test override hooks; readers walk it under RCU. */
static LIST_HEAD(iomap_head);

/*
 * Override hooks supplied by the nfit_test module.  A single static
 * instance is published on iomap_head by nfit_test_setup() and retired
 * by nfit_test_teardown().
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;	/* address -> emulated resource */
	nfit_test_evaluate_dsm_fn evaluate_dsm;	/* mock ACPI _DSM evaluation */
	struct list_head list;			/* link on iomap_head */
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
26
/*
 * nfit_test_setup - install the test override hooks.
 * @lookup: resolves an address to its struct nfit_test_resource (or NULL)
 * @evaluate: mock _DSM evaluator used by __wrap_acpi_evaluate_dsm()
 *
 * The hook pointers are assigned before list_add_rcu() publishes the
 * ops on iomap_head, so concurrent RCU readers never see a published
 * entry with unset callbacks.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
35
/*
 * nfit_test_teardown - retire the test override hooks.
 *
 * Unlink the ops from iomap_head, then wait for all in-flight RCU
 * readers to drain before the caller tears down the state those
 * callbacks operate on.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
42
43 static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
44 {
45 struct iomap_ops *ops;
46
47 ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
48 if (ops)
49 return ops->nfit_test_lookup(resource);
50 return NULL;
51 }
52
53 struct nfit_test_resource *get_nfit_res(resource_size_t resource)
54 {
55 struct nfit_test_resource *res;
56
57 rcu_read_lock();
58 res = __get_nfit_res(resource);
59 rcu_read_unlock();
60
61 return res;
62 }
63 EXPORT_SYMBOL(get_nfit_res);
64
65 void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
66 void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
67 {
68 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
69
70 if (nfit_res)
71 return (void __iomem *) nfit_res->buf + offset
72 - nfit_res->res.start;
73 return fallback_fn(offset, size);
74 }
75
76 void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
77 resource_size_t offset, unsigned long size)
78 {
79 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
80
81 if (nfit_res)
82 return (void __iomem *) nfit_res->buf + offset
83 - nfit_res->res.start;
84 return devm_ioremap_nocache(dev, offset, size);
85 }
86 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
87
88 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
89 size_t size, unsigned long flags)
90 {
91 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
92
93 if (nfit_res)
94 return nfit_res->buf + offset - nfit_res->res.start;
95 return devm_memremap(dev, offset, size, flags);
96 }
97 EXPORT_SYMBOL(__wrap_devm_memremap);
98
99 static void nfit_test_kill(void *_pgmap)
100 {
101 struct dev_pagemap *pgmap = _pgmap;
102
103 WARN_ON(!pgmap || !pgmap->ref || !pgmap->kill || !pgmap->cleanup);
104 pgmap->kill(pgmap->ref);
105 pgmap->cleanup(pgmap->ref);
106 }
107
108 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
109 {
110 resource_size_t offset = pgmap->res.start;
111 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
112
113 if (nfit_res) {
114 int rc;
115
116 rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
117 if (rc)
118 return ERR_PTR(rc);
119 return nfit_res->buf + offset - nfit_res->res.start;
120 }
121 return devm_memremap_pages(dev, pgmap);
122 }
123 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
124
125 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
126 {
127 struct nfit_test_resource *nfit_res = get_nfit_res(addr);
128
129 if (nfit_res)
130 flags &= ~PFN_MAP;
131 return phys_to_pfn_t(addr, flags);
132 }
133 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
134
135 void *__wrap_memremap(resource_size_t offset, size_t size,
136 unsigned long flags)
137 {
138 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
139
140 if (nfit_res)
141 return nfit_res->buf + offset - nfit_res->res.start;
142 return memremap(offset, size, flags);
143 }
144 EXPORT_SYMBOL(__wrap_memremap);
145
/*
 * Interpose devm_memunmap(): pointers into the test buffer were never
 * really mapped, so there is nothing to undo for them.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	if (get_nfit_res((long) addr))
		return;

	devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
155
156 void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
157 {
158 return __nfit_test_ioremap(offset, size, ioremap_nocache);
159 }
160 EXPORT_SYMBOL(__wrap_ioremap_nocache);
161
162 void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
163 {
164 return __nfit_test_ioremap(offset, size, ioremap_wc);
165 }
166 EXPORT_SYMBOL(__wrap_ioremap_wc);
167
168 void __wrap_iounmap(volatile void __iomem *addr)
169 {
170 struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
171 if (nfit_res)
172 return;
173 return iounmap(addr);
174 }
175 EXPORT_SYMBOL(__wrap_iounmap);
176
/*
 * Interpose memunmap(): skip the real unmap for pointers that live in
 * the test buffer.
 */
void __wrap_memunmap(void *addr)
{
	if (get_nfit_res((long) addr))
		return;

	memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
186
/* Forward declaration: nfit_devres_release() below calls the release
 * helper, which is defined after it. */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);
190
191 static void nfit_devres_release(struct device *dev, void *data)
192 {
193 struct resource *res = *((struct resource **) data);
194
195 WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
196 resource_size(res)));
197 }
198
199 static int match(struct device *dev, void *__res, void *match_data)
200 {
201 struct resource *res = *((struct resource **) __res);
202 resource_size_t start = *((resource_size_t *) match_data);
203
204 return res->start == start;
205 }
206
/*
 * nfit_test_release_region - release a region claimed from an emulated
 * resource.
 * @dev: non-NULL for the managed (devres) release path
 * @parent: resource tree; only &iomem_resource ranges are emulated
 * @start: start of the region to release
 * @n: size of the region to release
 *
 * Returns true when the range belongs to a test resource and was
 * handled here (the caller must then skip the real release), false
 * when the real implementation should run instead.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/*
				 * Managed path: drop the matching devres
				 * entry; nfit_devres_release() re-enters
				 * this function with dev == NULL to do
				 * the list removal below.
				 */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			/* Unlink the matching request, if present. */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			/*
			 * An unknown or mis-sized release is a test bug;
			 * warn but still report the range as handled.
			 */
			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
					__func__, start, n, res);
			/* res points into req, which is now unlinked. */
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
243
244 static struct resource *nfit_test_request_region(struct device *dev,
245 struct resource *parent, resource_size_t start,
246 resource_size_t n, const char *name, int flags)
247 {
248 struct nfit_test_resource *nfit_res;
249
250 if (parent == &iomem_resource) {
251 nfit_res = get_nfit_res(start);
252 if (nfit_res) {
253 struct nfit_test_request *req;
254 struct resource *res = NULL;
255
256 if (start + n > nfit_res->res.start
257 + resource_size(&nfit_res->res)) {
258 pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
259 __func__, start, n,
260 &nfit_res->res);
261 return NULL;
262 }
263
264 spin_lock(&nfit_res->lock);
265 list_for_each_entry(req, &nfit_res->requests, list)
266 if (start == req->res.start) {
267 res = &req->res;
268 break;
269 }
270 spin_unlock(&nfit_res->lock);
271
272 if (res) {
273 WARN(1, "%pr already busy\n", res);
274 return NULL;
275 }
276
277 req = kzalloc(sizeof(*req), GFP_KERNEL);
278 if (!req)
279 return NULL;
280 INIT_LIST_HEAD(&req->list);
281 res = &req->res;
282
283 res->start = start;
284 res->end = start + n - 1;
285 res->name = name;
286 res->flags = resource_type(parent);
287 res->flags |= IORESOURCE_BUSY | flags;
288 spin_lock(&nfit_res->lock);
289 list_add(&req->list, &nfit_res->requests);
290 spin_unlock(&nfit_res->lock);
291
292 if (dev) {
293 struct resource **d;
294
295 d = devres_alloc(nfit_devres_release,
296 sizeof(struct resource *),
297 GFP_KERNEL);
298 if (!d)
299 return NULL;
300 *d = res;
301 devres_add(dev, d);
302 }
303
304 pr_debug("%s: %pr\n", __func__, res);
305 return res;
306 }
307 }
308 if (dev)
309 return __devm_request_region(dev, parent, start, n, name);
310 return __request_region(parent, start, n, name, flags);
311 }
312
313 struct resource *__wrap___request_region(struct resource *parent,
314 resource_size_t start, resource_size_t n, const char *name,
315 int flags)
316 {
317 return nfit_test_request_region(NULL, parent, start, n, name, flags);
318 }
319 EXPORT_SYMBOL(__wrap___request_region);
320
321 int __wrap_insert_resource(struct resource *parent, struct resource *res)
322 {
323 if (get_nfit_res(res->start))
324 return 0;
325 return insert_resource(parent, res);
326 }
327 EXPORT_SYMBOL(__wrap_insert_resource);
328
329 int __wrap_remove_resource(struct resource *res)
330 {
331 if (get_nfit_res(res->start))
332 return 0;
333 return remove_resource(res);
334 }
335 EXPORT_SYMBOL(__wrap_remove_resource);
336
337 struct resource *__wrap___devm_request_region(struct device *dev,
338 struct resource *parent, resource_size_t start,
339 resource_size_t n, const char *name)
340 {
341 if (!dev)
342 return NULL;
343 return nfit_test_request_region(dev, parent, start, n, name, 0);
344 }
345 EXPORT_SYMBOL(__wrap___devm_request_region);
346
347 void __wrap___release_region(struct resource *parent, resource_size_t start,
348 resource_size_t n)
349 {
350 if (!nfit_test_release_region(NULL, parent, start, n))
351 __release_region(parent, start, n);
352 }
353 EXPORT_SYMBOL(__wrap___release_region);
354
355 void __wrap___devm_release_region(struct device *dev, struct resource *parent,
356 resource_size_t start, resource_size_t n)
357 {
358 if (!nfit_test_release_region(dev, parent, start, n))
359 __devm_release_region(dev, parent, start, n);
360 }
361 EXPORT_SYMBOL(__wrap___devm_release_region);
362
363 acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
364 struct acpi_object_list *p, struct acpi_buffer *buf)
365 {
366 struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
367 union acpi_object **obj;
368
369 if (!nfit_res || strcmp(path, "_FIT") || !buf)
370 return acpi_evaluate_object(handle, path, p, buf);
371
372 obj = nfit_res->buf;
373 buf->length = sizeof(union acpi_object);
374 buf->pointer = *obj;
375 return AE_OK;
376 }
377 EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
378
/*
 * __wrap_acpi_evaluate_dsm - route _DSM evaluation through the test's
 * mock evaluator.
 *
 * Under rcu_read_lock(), look up the installed ops and call its
 * evaluate_dsm() hook.  When no ops are installed, or the hook
 * declines by returning an ERR_PTR(), fall back to the real
 * acpi_evaluate_dsm().
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
396
397 MODULE_LICENSE("GPL v2");