mm/gup_test.c
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include "gup_test.h"

static void put_back_pages(unsigned int cmd, struct page **pages,
			   unsigned long nr_pages, unsigned int gup_test_flags)
{
	unsigned long i;

	switch (cmd) {
	case GUP_FAST_BENCHMARK:
	case GUP_BASIC_TEST:
		for (i = 0; i < nr_pages; i++)
			put_page(pages[i]);
		break;

	case PIN_FAST_BENCHMARK:
	case PIN_BASIC_TEST:
	case PIN_LONGTERM_BENCHMARK:
		unpin_user_pages(pages, nr_pages);
		break;
	case DUMP_USER_PAGES_TEST:
		if (gup_test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) {
			unpin_user_pages(pages, nr_pages);
		} else {
			for (i = 0; i < nr_pages; i++)
				put_page(pages[i]);
		}
		break;
	}
}

static void verify_dma_pinned(unsigned int cmd, struct page **pages,
			      unsigned long nr_pages)
{
	unsigned long i;
	struct folio *folio;

	switch (cmd) {
	case PIN_FAST_BENCHMARK:
	case PIN_BASIC_TEST:
	case PIN_LONGTERM_BENCHMARK:
		for (i = 0; i < nr_pages; i++) {
			folio = page_folio(pages[i]);

			if (WARN(!folio_maybe_dma_pinned(folio),
				 "pages[%lu] is NOT dma-pinned\n", i)) {
				dump_page(&folio->page, "gup_test failure");
				break;
			} else if (cmd == PIN_LONGTERM_BENCHMARK &&
				   WARN(!folio_is_longterm_pinnable(folio),
					"pages[%lu] is NOT pinnable but pinned\n",
					i)) {
				dump_page(&folio->page, "gup_test failure");
				break;
			}
		}
		break;
	}
}

static void dump_pages_test(struct gup_test *gup, struct page **pages,
			    unsigned long nr_pages)
{
	unsigned int index_to_dump;
	unsigned int i;

	/*
	 * Zero out any user-supplied page index that is out of range. Remember:
	 * .which_pages[] contains a 1-based set of page indices.
	 */
	for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) {
		if (gup->which_pages[i] > nr_pages) {
			pr_warn("ZEROING due to out of range: .which_pages[%u]: %u\n",
				i, gup->which_pages[i]);
			gup->which_pages[i] = 0;
		}
	}

	for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) {
		index_to_dump = gup->which_pages[i];

		if (index_to_dump) {
			index_to_dump--; /* decode from 1-based to 0-based */
			pr_info("---- page #%u, starting from user virt addr: 0x%llx\n",
				index_to_dump, gup->addr);
			dump_page(pages[index_to_dump],
				  "gup_test: dump_pages() test");
		}
	}
}

static int __gup_test_ioctl(unsigned int cmd,
			    struct gup_test *gup)
{
	ktime_t start_time, end_time;
	unsigned long i, nr_pages, addr, next;
	long nr;
	struct page **pages;
	int ret = 0;
	bool needs_mmap_lock =
		cmd != GUP_FAST_BENCHMARK && cmd != PIN_FAST_BENCHMARK;

	if (gup->size > ULONG_MAX)
		return -EINVAL;

	nr_pages = gup->size / PAGE_SIZE;
	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (needs_mmap_lock && mmap_read_lock_killable(current->mm)) {
		ret = -EINTR;
		goto free_pages;
	}

	i = 0;
	nr = gup->nr_pages_per_call;
	start_time = ktime_get();
	for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {
		if (nr != gup->nr_pages_per_call)
			break;

		next = addr + nr * PAGE_SIZE;
		if (next > gup->addr + gup->size) {
			next = gup->addr + gup->size;
			nr = (next - addr) / PAGE_SIZE;
		}

		switch (cmd) {
		case GUP_FAST_BENCHMARK:
			nr = get_user_pages_fast(addr, nr, gup->gup_flags,
						 pages + i);
			break;
		case GUP_BASIC_TEST:
			nr = get_user_pages(addr, nr, gup->gup_flags, pages + i);
			break;
		case PIN_FAST_BENCHMARK:
			nr = pin_user_pages_fast(addr, nr, gup->gup_flags,
						 pages + i);
			break;
		case PIN_BASIC_TEST:
			nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i);
			break;
		case PIN_LONGTERM_BENCHMARK:
			nr = pin_user_pages(addr, nr,
					    gup->gup_flags | FOLL_LONGTERM,
					    pages + i);
			break;
		case DUMP_USER_PAGES_TEST:
			if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN)
				nr = pin_user_pages(addr, nr, gup->gup_flags,
						    pages + i);
			else
				nr = get_user_pages(addr, nr, gup->gup_flags,
						    pages + i);
			break;
		default:
			ret = -EINVAL;
			goto unlock;
		}

		if (nr <= 0)
			break;
		i += nr;
	}
	end_time = ktime_get();

	/* Shifting the meaning of nr_pages: now it is the actual number pinned: */
	nr_pages = i;

	gup->get_delta_usec = ktime_us_delta(end_time, start_time);
	gup->size = addr - gup->addr;

	/*
	 * Take an un-benchmark-timed moment to verify DMA pinned
	 * state: print a warning if any non-dma-pinned pages are found:
	 */
	verify_dma_pinned(cmd, pages, nr_pages);

	if (cmd == DUMP_USER_PAGES_TEST)
		dump_pages_test(gup, pages, nr_pages);

	start_time = ktime_get();

	put_back_pages(cmd, pages, nr_pages, gup->test_flags);

	end_time = ktime_get();
	gup->put_delta_usec = ktime_us_delta(end_time, start_time);

unlock:
	if (needs_mmap_lock)
		mmap_read_unlock(current->mm);
free_pages:
	kvfree(pages);
	return ret;
}

static DEFINE_MUTEX(pin_longterm_test_mutex);
static struct page **pin_longterm_test_pages;
static unsigned long pin_longterm_test_nr_pages;

static inline void pin_longterm_test_stop(void)
{
	if (pin_longterm_test_pages) {
		if (pin_longterm_test_nr_pages)
			unpin_user_pages(pin_longterm_test_pages,
					 pin_longterm_test_nr_pages);
		kvfree(pin_longterm_test_pages);
		pin_longterm_test_pages = NULL;
		pin_longterm_test_nr_pages = 0;
	}
}

static inline int pin_longterm_test_start(unsigned long arg)
{
	long nr_pages, cur_pages, addr, remaining_pages;
	int gup_flags = FOLL_LONGTERM;
	struct pin_longterm_test args;
	struct page **pages;
	int ret = 0;
	bool fast;

	if (pin_longterm_test_pages)
		return -EINVAL;

	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;

	if (args.flags &
	    ~(PIN_LONGTERM_TEST_FLAG_USE_WRITE|PIN_LONGTERM_TEST_FLAG_USE_FAST))
		return -EINVAL;
	if (!IS_ALIGNED(args.addr | args.size, PAGE_SIZE))
		return -EINVAL;
	if (args.size > LONG_MAX)
		return -EINVAL;
	nr_pages = args.size / PAGE_SIZE;
	if (!nr_pages)
		return -EINVAL;

	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (args.flags & PIN_LONGTERM_TEST_FLAG_USE_WRITE)
		gup_flags |= FOLL_WRITE;
	fast = !!(args.flags & PIN_LONGTERM_TEST_FLAG_USE_FAST);

	if (!fast && mmap_read_lock_killable(current->mm)) {
		kvfree(pages);
		return -EINTR;
	}

	pin_longterm_test_pages = pages;
	pin_longterm_test_nr_pages = 0;

	while (nr_pages - pin_longterm_test_nr_pages) {
		remaining_pages = nr_pages - pin_longterm_test_nr_pages;
		addr = args.addr + pin_longterm_test_nr_pages * PAGE_SIZE;

		if (fast)
			cur_pages = pin_user_pages_fast(addr, remaining_pages,
							gup_flags, pages);
		else
			cur_pages = pin_user_pages(addr, remaining_pages,
						   gup_flags, pages);
		if (cur_pages < 0) {
			pin_longterm_test_stop();
			ret = cur_pages;
			break;
		}
		pin_longterm_test_nr_pages += cur_pages;
		pages += cur_pages;
	}

	if (!fast)
		mmap_read_unlock(current->mm);
	return ret;
}

static inline int pin_longterm_test_read(unsigned long arg)
{
	__u64 user_addr;
	unsigned long i;

	if (!pin_longterm_test_pages)
		return -EINVAL;

	if (copy_from_user(&user_addr, (void __user *)arg, sizeof(user_addr)))
		return -EFAULT;

	for (i = 0; i < pin_longterm_test_nr_pages; i++) {
		void *addr = kmap_local_page(pin_longterm_test_pages[i]);
		unsigned long ret;

		ret = copy_to_user((void __user *)(unsigned long)user_addr, addr,
				   PAGE_SIZE);
		kunmap_local(addr);
		if (ret)
			return -EFAULT;
		user_addr += PAGE_SIZE;
	}
	return 0;
}

static long pin_longterm_test_ioctl(struct file *filep, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;

	if (mutex_lock_killable(&pin_longterm_test_mutex))
		return -EINTR;

	switch (cmd) {
	case PIN_LONGTERM_TEST_START:
		ret = pin_longterm_test_start(arg);
		break;
	case PIN_LONGTERM_TEST_STOP:
		pin_longterm_test_stop();
		ret = 0;
		break;
	case PIN_LONGTERM_TEST_READ:
		ret = pin_longterm_test_read(arg);
		break;
	}

	mutex_unlock(&pin_longterm_test_mutex);
	return ret;
}

static long gup_test_ioctl(struct file *filep, unsigned int cmd,
			   unsigned long arg)
{
	struct gup_test gup;
	int ret;

	switch (cmd) {
	case GUP_FAST_BENCHMARK:
	case PIN_FAST_BENCHMARK:
	case PIN_LONGTERM_BENCHMARK:
	case GUP_BASIC_TEST:
	case PIN_BASIC_TEST:
	case DUMP_USER_PAGES_TEST:
		break;
	case PIN_LONGTERM_TEST_START:
	case PIN_LONGTERM_TEST_STOP:
	case PIN_LONGTERM_TEST_READ:
		return pin_longterm_test_ioctl(filep, cmd, arg);
	default:
		return -EINVAL;
	}

	if (copy_from_user(&gup, (void __user *)arg, sizeof(gup)))
		return -EFAULT;

	ret = __gup_test_ioctl(cmd, &gup);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)arg, &gup, sizeof(gup)))
		return -EFAULT;

	return 0;
}

static int gup_test_release(struct inode *inode, struct file *file)
{
	pin_longterm_test_stop();

	return 0;
}

static const struct file_operations gup_test_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = gup_test_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = gup_test_release,
};

static int __init gup_test_init(void)
{
	debugfs_create_file_unsafe("gup_test", 0600, NULL, NULL,
				   &gup_test_fops);

	return 0;
}

late_initcall(gup_test_init);
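
/*
 * Usage sketch (illustrative only, not part of the upstream file): user space
 * drives this code through the debugfs node registered above. Since the
 * debugfs parent is NULL, the node sits at the debugfs root, typically
 * /sys/kernel/debug/gup_test on a kernel built with CONFIG_GUP_TEST. The
 * ioctl command macros and the struct gup_test layout come from gup_test.h
 * (its include path from user space depends on where the header lives in the
 * tree); the fields used below (addr, size, nr_pages_per_call, gup_flags,
 * get_delta_usec, put_delta_usec) are exactly the ones this file reads and
 * writes back. A minimal, hypothetical caller might look roughly like this:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include "gup_test.h"	// kernel header defining the ioctl macros
 *
 *	int main(void)
 *	{
 *		size_t size = 16 * 4096;	// 16 pages, page-aligned via mmap
 *		void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		struct gup_test gup = {
 *			.addr = (unsigned long)buf,
 *			.size = size,
 *			.nr_pages_per_call = 16,
 *			.gup_flags = 0,	// FOLL_* bits, passed straight to GUP
 *		};
 *		int fd = open("/sys/kernel/debug/gup_test", O_RDWR);
 *
 *		if (fd >= 0 && ioctl(fd, GUP_BASIC_TEST, &gup) == 0)
 *			printf("get: %llu us, put: %llu us\n",
 *			       (unsigned long long)gup.get_delta_usec,
 *			       (unsigned long long)gup.put_delta_usec);
 *		return 0;
 *	}
 *
 * The in-tree selftest (tools/testing/selftests/mm/gup_test.c in recent
 * kernels) exercises these ioctls, including the PIN_LONGTERM_TEST_* path,
 * more completely.
 */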