1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * fs/dax.c - Direct Access filesystem code
4 * Copyright (c) 2013-2014 Intel Corporation
5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7 */
8
9 #include <linux/atomic.h>
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/dax.h>
13 #include <linux/fs.h>
14 #include <linux/highmem.h>
15 #include <linux/memcontrol.h>
16 #include <linux/mm.h>
17 #include <linux/mutex.h>
18 #include <linux/pagevec.h>
19 #include <linux/sched.h>
20 #include <linux/sched/signal.h>
21 #include <linux/uio.h>
22 #include <linux/vmstat.h>
23 #include <linux/pfn_t.h>
24 #include <linux/sizes.h>
25 #include <linux/mmu_notifier.h>
26 #include <linux/iomap.h>
27 #include <linux/rmap.h>
28 #include <asm/pgalloc.h>
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/fs_dax.h>
32
33 static inline unsigned int pe_order(enum page_entry_size pe_size)
34 {
35 if (pe_size == PE_SIZE_PTE)
36 return PAGE_SHIFT - PAGE_SHIFT;
37 if (pe_size == PE_SIZE_PMD)
38 return PMD_SHIFT - PAGE_SHIFT;
39 if (pe_size == PE_SIZE_PUD)
40 return PUD_SHIFT - PAGE_SHIFT;
41 return ~0;
42 }
43
44 /* We choose 4096 entries - same as per-zone page wait tables */
45 #define DAX_WAIT_TABLE_BITS 12
46 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
47
48 /* The 'colour' (ie low bits) within a PMD of a page offset. */
49 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
50 #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
51
52 /* The order of a PMD entry */
53 #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
54
55 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
56
57 static int __init init_dax_wait_table(void)
58 {
59 int i;
60
61 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
62 init_waitqueue_head(wait_table + i);
63 return 0;
64 }
65 fs_initcall(init_dax_wait_table);
66
67 /*
68 * DAX pagecache entries use XArray value entries so they can't be mistaken
69 * for pages. We use one bit for locking, one bit for the entry size (PMD)
70 * and two more to tell us if the entry is a zero page or an empty entry that
71 * is just used for locking. In total four special bits.
72 *
73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
75 * block allocation.
76 */
77 #define DAX_SHIFT (4)
78 #define DAX_LOCKED (1UL << 0)
79 #define DAX_PMD (1UL << 1)
80 #define DAX_ZERO_PAGE (1UL << 2)
81 #define DAX_EMPTY (1UL << 3)
82
83 static unsigned long dax_to_pfn(void *entry)
84 {
85 return xa_to_value(entry) >> DAX_SHIFT;
86 }
87
88 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
89 {
90 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
91 }
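/*
 * Illustrative example (not part of the original source): with the encoding
 * above, a PMD-sized entry for pfn 0x1234 is built and decoded as
 *
 *	entry = dax_make_entry(pfn_to_pfn_t(0x1234), DAX_PMD);
 *			// xa_mk_value(0x1234 << DAX_SHIFT | DAX_PMD), value 0x12342
 *	dax_to_pfn(entry);		// 0x12342 >> DAX_SHIFT == 0x1234
 *	dax_is_pmd_entry(entry);	// DAX_PMD bit is set, so non-zero
 *
 * i.e. the pfn lives above DAX_SHIFT and the low four bits carry
 * DAX_LOCKED / DAX_PMD / DAX_ZERO_PAGE / DAX_EMPTY.
 */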
92
93 static bool dax_is_locked(void *entry)
94 {
95 return xa_to_value(entry) & DAX_LOCKED;
96 }
97
98 static unsigned int dax_entry_order(void *entry)
99 {
100 if (xa_to_value(entry) & DAX_PMD)
101 return PMD_ORDER;
102 return 0;
103 }
104
105 static unsigned long dax_is_pmd_entry(void *entry)
106 {
107 return xa_to_value(entry) & DAX_PMD;
108 }
109
110 static bool dax_is_pte_entry(void *entry)
111 {
112 return !(xa_to_value(entry) & DAX_PMD);
113 }
114
115 static int dax_is_zero_entry(void *entry)
116 {
117 return xa_to_value(entry) & DAX_ZERO_PAGE;
118 }
119
120 static int dax_is_empty_entry(void *entry)
121 {
122 return xa_to_value(entry) & DAX_EMPTY;
123 }
124
125 /*
126 * true if the entry that was found is of a smaller order than the entry
127 * we were looking for
128 */
129 static bool dax_is_conflict(void *entry)
130 {
131 return entry == XA_RETRY_ENTRY;
132 }
133
134 /*
135 * DAX page cache entry locking
136 */
137 struct exceptional_entry_key {
138 struct xarray *xa;
139 pgoff_t entry_start;
140 };
141
142 struct wait_exceptional_entry_queue {
143 wait_queue_entry_t wait;
144 struct exceptional_entry_key key;
145 };
146
147 /**
148 * enum dax_wake_mode - waitqueue wakeup behaviour
149 * @WAKE_ALL: wake all waiters in the waitqueue
150 * @WAKE_NEXT: wake only the first waiter in the waitqueue
151 */
152 enum dax_wake_mode {
153 WAKE_ALL,
154 WAKE_NEXT,
155 };
156
157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
158 void *entry, struct exceptional_entry_key *key)
159 {
160 unsigned long hash;
161 unsigned long index = xas->xa_index;
162
163 /*
164 * If 'entry' is a PMD, align the 'index' that we use for the wait
165 * queue to the start of that PMD. This ensures that all offsets in
166 * the range covered by the PMD map to the same bit lock.
167 */
168 if (dax_is_pmd_entry(entry))
169 index &= ~PG_PMD_COLOUR;
170 key->xa = xas->xa;
171 key->entry_start = index;
172
173 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
174 return wait_table + hash;
175 }
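/*
 * Hedged example of the alignment above, assuming 4K pages and 2M PMDs
 * (so PG_PMD_COLOUR == 511): for a PMD entry covering file indices
 * 0x200-0x3ff, a waiter with xa_index 0x200 and a waiter with xa_index
 * 0x3a7 both end up with entry_start 0x200 and therefore hash to the
 * same wait_table bucket.
 */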
176
177 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
178 unsigned int mode, int sync, void *keyp)
179 {
180 struct exceptional_entry_key *key = keyp;
181 struct wait_exceptional_entry_queue *ewait =
182 container_of(wait, struct wait_exceptional_entry_queue, wait);
183
184 if (key->xa != ewait->key.xa ||
185 key->entry_start != ewait->key.entry_start)
186 return 0;
187 return autoremove_wake_function(wait, mode, sync, NULL);
188 }
189
190 /*
191 * @entry may no longer be the entry at the index in the mapping.
192 * The important information it's conveying is whether the entry at
193 * this index used to be a PMD entry.
194 */
195 static void dax_wake_entry(struct xa_state *xas, void *entry,
196 enum dax_wake_mode mode)
197 {
198 struct exceptional_entry_key key;
199 wait_queue_head_t *wq;
200
201 wq = dax_entry_waitqueue(xas, entry, &key);
202
203 /*
204 * Checking for locked entry and prepare_to_wait_exclusive() happens
205 * under the i_pages lock, ditto for entry handling in our callers.
206 * So at this point all tasks that could have seen our entry locked
207 * must be in the waitqueue and the following check will see them.
208 */
209 if (waitqueue_active(wq))
210 __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
211 }
212
213 /*
214 * Look up entry in page cache, wait for it to become unlocked if it
215 * is a DAX entry and return it. The caller must subsequently call
216 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
217 * if it did. The entry returned may have a larger order than @order.
218 * If @order is larger than the order of the entry found in i_pages, this
219 * function returns a dax_is_conflict entry.
220 *
221 * Must be called with the i_pages lock held.
222 */
223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
224 {
225 void *entry;
226 struct wait_exceptional_entry_queue ewait;
227 wait_queue_head_t *wq;
228
229 init_wait(&ewait.wait);
230 ewait.wait.func = wake_exceptional_entry_func;
231
232 for (;;) {
233 entry = xas_find_conflict(xas);
234 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
235 return entry;
236 if (dax_entry_order(entry) < order)
237 return XA_RETRY_ENTRY;
238 if (!dax_is_locked(entry))
239 return entry;
240
241 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
242 prepare_to_wait_exclusive(wq, &ewait.wait,
243 TASK_UNINTERRUPTIBLE);
244 xas_unlock_irq(xas);
245 xas_reset(xas);
246 schedule();
247 finish_wait(wq, &ewait.wait);
248 xas_lock_irq(xas);
249 }
250 }
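/*
 * Sketch of the expected caller pattern (mirroring __dax_invalidate_entry()
 * below); illustrative only, not an additional helper:
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, 0);
 *	if (entry && !dax_is_conflict(entry)) {
 *		... inspect or modify the entry ...
 *	}
 *	put_unlocked_entry(&xas, entry, WAKE_NEXT);
 *	xas_unlock_irq(&xas);
 */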
251
252 /*
253 * The only thing keeping the address space around is the i_pages lock
254 * (it's cycled in clear_inode() after removing the entries from i_pages).
255 * After we call xas_unlock_irq(), we cannot touch xas->xa.
256 */
257 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
258 {
259 struct wait_exceptional_entry_queue ewait;
260 wait_queue_head_t *wq;
261
262 init_wait(&ewait.wait);
263 ewait.wait.func = wake_exceptional_entry_func;
264
265 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
266 /*
267 * Unlike get_unlocked_entry() there is no guarantee that this
268 * path ever successfully retrieves an unlocked entry before an
269 * inode dies. Perform a non-exclusive wait in case this path
270 * never successfully performs its own wake up.
271 */
272 prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
273 xas_unlock_irq(xas);
274 schedule();
275 finish_wait(wq, &ewait.wait);
276 }
277
278 static void put_unlocked_entry(struct xa_state *xas, void *entry,
279 enum dax_wake_mode mode)
280 {
281 if (entry && !dax_is_conflict(entry))
282 dax_wake_entry(xas, entry, mode);
283 }
284
285 /*
286 * We used the xa_state to get the entry, but then we locked the entry and
287 * dropped the xa_lock, so we know the xa_state is stale and must be reset
288 * before use.
289 */
290 static void dax_unlock_entry(struct xa_state *xas, void *entry)
291 {
292 void *old;
293
294 BUG_ON(dax_is_locked(entry));
295 xas_reset(xas);
296 xas_lock_irq(xas);
297 old = xas_store(xas, entry);
298 xas_unlock_irq(xas);
299 BUG_ON(!dax_is_locked(old));
300 dax_wake_entry(xas, entry, WAKE_NEXT);
301 }
302
303 /*
304 * Return: The entry stored at this location before it was locked.
305 */
306 static void *dax_lock_entry(struct xa_state *xas, void *entry)
307 {
308 unsigned long v = xa_to_value(entry);
309 return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
310 }
311
312 static unsigned long dax_entry_size(void *entry)
313 {
314 if (dax_is_zero_entry(entry))
315 return 0;
316 else if (dax_is_empty_entry(entry))
317 return 0;
318 else if (dax_is_pmd_entry(entry))
319 return PMD_SIZE;
320 else
321 return PAGE_SIZE;
322 }
323
324 static unsigned long dax_end_pfn(void *entry)
325 {
326 return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
327 }
328
329 /*
330 * Iterate through all mapped pfns represented by an entry, i.e. skip
331 * 'empty' and 'zero' entries.
332 */
333 #define for_each_mapped_pfn(entry, pfn) \
334 for (pfn = dax_to_pfn(entry); \
335 pfn < dax_end_pfn(entry); pfn++)
336
337 static inline bool dax_mapping_is_cow(struct address_space *mapping)
338 {
339 return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
340 }
341
342 /*
343 * Set page->mapping to PAGE_MAPPING_DAX_COW and increase the refcount.
344 */
345 static inline void dax_mapping_set_cow(struct page *page)
346 {
347 if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
348 /*
349 * Reset the index if the page was already mapped
350 * regularly before.
351 */
352 if (page->mapping)
353 page->index = 1;
354 page->mapping = (void *)PAGE_MAPPING_DAX_COW;
355 }
356 page->index++;
357 }
358
359 /*
360 * When called from dax_insert_entry(), the cow flag indicates whether this
361 * entry is shared by multiple files. If so, set page->mapping to
362 * PAGE_MAPPING_DAX_COW and use page->index as the refcount.
363 */
364 static void dax_associate_entry(void *entry, struct address_space *mapping,
365 struct vm_area_struct *vma, unsigned long address, bool cow)
366 {
367 unsigned long size = dax_entry_size(entry), pfn, index;
368 int i = 0;
369
370 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
371 return;
372
373 index = linear_page_index(vma, address & ~(size - 1));
374 for_each_mapped_pfn(entry, pfn) {
375 struct page *page = pfn_to_page(pfn);
376
377 if (cow) {
378 dax_mapping_set_cow(page);
379 } else {
380 WARN_ON_ONCE(page->mapping);
381 page->mapping = mapping;
382 page->index = index + i++;
383 }
384 }
385 }
386
387 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
388 bool trunc)
389 {
390 unsigned long pfn;
391
392 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
393 return;
394
395 for_each_mapped_pfn(entry, pfn) {
396 struct page *page = pfn_to_page(pfn);
397
398 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
399 if (dax_mapping_is_cow(page->mapping)) {
400 /* keep the CoW flag if this page is still shared */
401 if (page->index-- > 0)
402 continue;
403 } else
404 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
405 page->mapping = NULL;
406 page->index = 0;
407 }
408 }
409
410 static struct page *dax_busy_page(void *entry)
411 {
412 unsigned long pfn;
413
414 for_each_mapped_pfn(entry, pfn) {
415 struct page *page = pfn_to_page(pfn);
416
417 if (page_ref_count(page) > 1)
418 return page;
419 }
420 return NULL;
421 }
422
423 /*
424 * dax_lock_page - Lock the DAX entry corresponding to a page
425 * @page: The page whose entry we want to lock
426 *
427 * Context: Process context.
428 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
429 * not be locked.
430 */
431 dax_entry_t dax_lock_page(struct page *page)
432 {
433 XA_STATE(xas, NULL, 0);
434 void *entry;
435
436 /* Ensure page->mapping isn't freed while we look at it */
437 rcu_read_lock();
438 for (;;) {
439 struct address_space *mapping = READ_ONCE(page->mapping);
440
441 entry = NULL;
442 if (!mapping || !dax_mapping(mapping))
443 break;
444
445 /*
446 * In the device-dax case there's no need to lock, a
447 * struct dev_pagemap pin is sufficient to keep the
448 * inode alive, and we assume we hold a dev_pagemap pin;
449 * otherwise we would not have a valid pfn_to_page()
450 * translation.
451 */
452 entry = (void *)~0UL;
453 if (S_ISCHR(mapping->host->i_mode))
454 break;
455
456 xas.xa = &mapping->i_pages;
457 xas_lock_irq(&xas);
458 if (mapping != page->mapping) {
459 xas_unlock_irq(&xas);
460 continue;
461 }
462 xas_set(&xas, page->index);
463 entry = xas_load(&xas);
464 if (dax_is_locked(entry)) {
465 rcu_read_unlock();
466 wait_entry_unlocked(&xas, entry);
467 rcu_read_lock();
468 continue;
469 }
470 dax_lock_entry(&xas, entry);
471 xas_unlock_irq(&xas);
472 break;
473 }
474 rcu_read_unlock();
475 return (dax_entry_t)entry;
476 }
477
478 void dax_unlock_page(struct page *page, dax_entry_t cookie)
479 {
480 struct address_space *mapping = page->mapping;
481 XA_STATE(xas, &mapping->i_pages, page->index);
482
483 if (S_ISCHR(mapping->host->i_mode))
484 return;
485
486 dax_unlock_entry(&xas, (void *)cookie);
487 }
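/*
 * Hedged usage sketch (the memory-failure path is the typical caller; the
 * body below is illustrative only):
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return;		// entry could not be locked
 *	... examine page->mapping / page->index while the entry is held ...
 *	dax_unlock_page(page, cookie);
 */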
488
489 /*
490 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
491 * @mapping: the file's mapping whose entry we want to lock
492 * @index: the offset within this file
493 * @page: output the dax page corresponding to this dax entry
494 *
495 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
496 * could not be locked.
497 */
498 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
499 struct page **page)
500 {
501 XA_STATE(xas, NULL, 0);
502 void *entry;
503
504 rcu_read_lock();
505 for (;;) {
506 entry = NULL;
507 if (!dax_mapping(mapping))
508 break;
509
510 xas.xa = &mapping->i_pages;
511 xas_lock_irq(&xas);
512 xas_set(&xas, index);
513 entry = xas_load(&xas);
514 if (dax_is_locked(entry)) {
515 rcu_read_unlock();
516 wait_entry_unlocked(&xas, entry);
517 rcu_read_lock();
518 continue;
519 }
520 if (!entry ||
521 dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
522 /*
523 * Because we look the entry up by the file's mapping
524 * and index, the entry may not have been inserted
525 * yet, or it may be a zero/empty entry. That is not
526 * an error, so return a special value and do not
527 * output @page.
528 */
529 entry = (void *)~0UL;
530 } else {
531 *page = pfn_to_page(dax_to_pfn(entry));
532 dax_lock_entry(&xas, entry);
533 }
534 xas_unlock_irq(&xas);
535 break;
536 }
537 rcu_read_unlock();
538 return (dax_entry_t)entry;
539 }
540
541 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
542 dax_entry_t cookie)
543 {
544 XA_STATE(xas, &mapping->i_pages, index);
545
546 if (cookie == ~0UL)
547 return;
548
549 dax_unlock_entry(&xas, (void *)cookie);
550 }
551
552 /*
553 * Find page cache entry at given index. If it is a DAX entry, return it
554 * with the entry locked. If the page cache doesn't contain an entry at
555 * that index, add a locked empty entry.
556 *
557 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
558 * either return that locked entry or will return VM_FAULT_FALLBACK.
559 * This will happen if there are any PTE entries within the PMD range
560 * that we are requesting.
561 *
562 * We always favor PTE entries over PMD entries. There isn't a flow where we
563 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
564 * insertion will fail if it finds any PTE entries already in the tree, and a
565 * PTE insertion will cause an existing PMD entry to be unmapped and
566 * downgraded to PTE entries. This happens for both PMD zero pages as
567 * well as PMD empty entries.
568 *
569 * The exception to this downgrade path is for PMD entries that have
570 * real storage backing them. We will leave these real PMD entries in
571 * the tree, and PTE writes will simply dirty the entire PMD entry.
572 *
573 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
574 * persistent memory the benefit is doubtful. We can add that later if we can
575 * show it helps.
576 *
577 * On error, this function does not return an ERR_PTR. Instead it returns
578 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
579 * overlap with xarray value entries.
580 */
581 static void *grab_mapping_entry(struct xa_state *xas,
582 struct address_space *mapping, unsigned int order)
583 {
584 unsigned long index = xas->xa_index;
585 bool pmd_downgrade; /* splitting PMD entry into PTE entries? */
586 void *entry;
587
588 retry:
589 pmd_downgrade = false;
590 xas_lock_irq(xas);
591 entry = get_unlocked_entry(xas, order);
592
593 if (entry) {
594 if (dax_is_conflict(entry))
595 goto fallback;
596 if (!xa_is_value(entry)) {
597 xas_set_err(xas, -EIO);
598 goto out_unlock;
599 }
600
601 if (order == 0) {
602 if (dax_is_pmd_entry(entry) &&
603 (dax_is_zero_entry(entry) ||
604 dax_is_empty_entry(entry))) {
605 pmd_downgrade = true;
606 }
607 }
608 }
609
610 if (pmd_downgrade) {
611 /*
612 * Make sure 'entry' remains valid while we drop
613 * the i_pages lock.
614 */
615 dax_lock_entry(xas, entry);
616
617 /*
618 * Besides huge zero pages the only other thing that gets
619 * downgraded are empty entries which don't need to be
620 * unmapped.
621 */
622 if (dax_is_zero_entry(entry)) {
623 xas_unlock_irq(xas);
624 unmap_mapping_pages(mapping,
625 xas->xa_index & ~PG_PMD_COLOUR,
626 PG_PMD_NR, false);
627 xas_reset(xas);
628 xas_lock_irq(xas);
629 }
630
631 dax_disassociate_entry(entry, mapping, false);
632 xas_store(xas, NULL); /* undo the PMD join */
633 dax_wake_entry(xas, entry, WAKE_ALL);
634 mapping->nrpages -= PG_PMD_NR;
635 entry = NULL;
636 xas_set(xas, index);
637 }
638
639 if (entry) {
640 dax_lock_entry(xas, entry);
641 } else {
642 unsigned long flags = DAX_EMPTY;
643
644 if (order > 0)
645 flags |= DAX_PMD;
646 entry = dax_make_entry(pfn_to_pfn_t(0), flags);
647 dax_lock_entry(xas, entry);
648 if (xas_error(xas))
649 goto out_unlock;
650 mapping->nrpages += 1UL << order;
651 }
652
653 out_unlock:
654 xas_unlock_irq(xas);
655 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
656 goto retry;
657 if (xas->xa_node == XA_ERROR(-ENOMEM))
658 return xa_mk_internal(VM_FAULT_OOM);
659 if (xas_error(xas))
660 return xa_mk_internal(VM_FAULT_SIGBUS);
661 return entry;
662 fallback:
663 xas_unlock_irq(xas);
664 return xa_mk_internal(VM_FAULT_FALLBACK);
665 }
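/*
 * Illustrative decode of the return convention described above; essentially
 * the same pattern appears in dax_iomap_pte_fault() further down:
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	// a VM_FAULT_* code
 *	... use the locked entry, then dax_unlock_entry(&xas, entry) ...
 */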
666
667 /**
668 * dax_layout_busy_page_range - find first pinned page in @mapping
669 * @mapping: address space to scan for a page with ref count > 1
670 * @start: Starting offset. Page containing 'start' is included.
671 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
672 * pages from 'start' to the end of the file are included.
673 *
674 * DAX requires ZONE_DEVICE mapped pages. These pages are never
675 * 'onlined' to the page allocator so they are considered idle when
676 * page->count == 1. A filesystem uses this interface to determine if
677 * any page in the mapping is busy, i.e. for DMA, or other
678 * get_user_pages() usages.
679 *
680 * It is expected that the filesystem is holding locks to block the
681 * establishment of new mappings in this address_space. I.e. it expects
682 * to be able to run unmap_mapping_range() and subsequently not race
683 * mapping_mapped() becoming true.
684 */
685 struct page *dax_layout_busy_page_range(struct address_space *mapping,
686 loff_t start, loff_t end)
687 {
688 void *entry;
689 unsigned int scanned = 0;
690 struct page *page = NULL;
691 pgoff_t start_idx = start >> PAGE_SHIFT;
692 pgoff_t end_idx;
693 XA_STATE(xas, &mapping->i_pages, start_idx);
694
695 /*
696 * In the 'limited' case get_user_pages() for dax is disabled.
697 */
698 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
699 return NULL;
700
701 if (!dax_mapping(mapping) || !mapping_mapped(mapping))
702 return NULL;
703
704 /* If end == LLONG_MAX, all pages from start to the end of the file */
705 if (end == LLONG_MAX)
706 end_idx = ULONG_MAX;
707 else
708 end_idx = end >> PAGE_SHIFT;
709 /*
710 * If we race get_user_pages_fast() here either we'll see the
711 * elevated page count in the iteration and wait, or
712 * get_user_pages_fast() will see that the page it took a reference
713 * against is no longer mapped in the page tables and bail to the
714 * get_user_pages() slow path. The slow path is protected by
715 * pte_lock() and pmd_lock(). New references are not taken without
716 * holding those locks, and unmap_mapping_pages() will not zero the
717 * pte or pmd without holding the respective lock, so we are
718 * guaranteed to either see new references or prevent new
719 * references from being established.
720 */
721 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
722
723 xas_lock_irq(&xas);
724 xas_for_each(&xas, entry, end_idx) {
725 if (WARN_ON_ONCE(!xa_is_value(entry)))
726 continue;
727 if (unlikely(dax_is_locked(entry)))
728 entry = get_unlocked_entry(&xas, 0);
729 if (entry)
730 page = dax_busy_page(entry);
731 put_unlocked_entry(&xas, entry, WAKE_NEXT);
732 if (page)
733 break;
734 if (++scanned % XA_CHECK_SCHED)
735 continue;
736
737 xas_pause(&xas);
738 xas_unlock_irq(&xas);
739 cond_resched();
740 xas_lock_irq(&xas);
741 }
742 xas_unlock_irq(&xas);
743 return page;
744 }
745 EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
746
747 struct page *dax_layout_busy_page(struct address_space *mapping)
748 {
749 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
750 }
751 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
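/*
 * Hedged sketch of how a filesystem might use this before truncating or
 * hole-punching a range (the retry policy is up to the caller):
 *
 *	struct page *page = dax_layout_busy_page(mapping);
 *	if (page) {
 *		// A page is still pinned, e.g. by in-flight DMA. Wait for
 *		// its reference count to drop back to 1, then rescan.
 *	}
 */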
752
753 static int __dax_invalidate_entry(struct address_space *mapping,
754 pgoff_t index, bool trunc)
755 {
756 XA_STATE(xas, &mapping->i_pages, index);
757 int ret = 0;
758 void *entry;
759
760 xas_lock_irq(&xas);
761 entry = get_unlocked_entry(&xas, 0);
762 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
763 goto out;
764 if (!trunc &&
765 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
766 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
767 goto out;
768 dax_disassociate_entry(entry, mapping, trunc);
769 xas_store(&xas, NULL);
770 mapping->nrpages -= 1UL << dax_entry_order(entry);
771 ret = 1;
772 out:
773 put_unlocked_entry(&xas, entry, WAKE_ALL);
774 xas_unlock_irq(&xas);
775 return ret;
776 }
777
778 /*
779 * Delete DAX entry at @index from @mapping. Wait for it
780 * to be unlocked before deleting it.
781 */
782 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
783 {
784 int ret = __dax_invalidate_entry(mapping, index, true);
785
786 /*
787 * This gets called from the truncate / punch hole path. As such, the caller
788 * must hold locks protecting against concurrent modifications of the
789 * page cache (usually fs-private i_mmap_sem for writing). Since the
790 * caller has seen a DAX entry for this index, we better find it
791 * at that index as well...
792 */
793 WARN_ON_ONCE(!ret);
794 return ret;
795 }
796
797 /*
798 * Invalidate DAX entry if it is clean.
799 */
800 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
801 pgoff_t index)
802 {
803 return __dax_invalidate_entry(mapping, index, false);
804 }
805
806 static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
807 {
808 return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
809 }
810
811 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
812 {
813 pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
814 void *vto, *kaddr;
815 long rc;
816 int id;
817
818 id = dax_read_lock();
819 rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
820 &kaddr, NULL);
821 if (rc < 0) {
822 dax_read_unlock(id);
823 return rc;
824 }
825 vto = kmap_atomic(vmf->cow_page);
826 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
827 kunmap_atomic(vto);
828 dax_read_unlock(id);
829 return 0;
830 }
831
832 /*
833 * MAP_SYNC on a dax mapping guarantees dirty metadata is
834 * flushed on write-faults (non-cow), but not read-faults.
835 */
836 static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
837 struct vm_area_struct *vma)
838 {
839 return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
840 (iter->iomap.flags & IOMAP_F_DIRTY);
841 }
842
843 static bool dax_fault_is_cow(const struct iomap_iter *iter)
844 {
845 return (iter->flags & IOMAP_WRITE) &&
846 (iter->iomap.flags & IOMAP_F_SHARED);
847 }
848
849 /*
850 * By this point grab_mapping_entry() has ensured that we have a locked entry
851 * of the appropriate size so we don't have to worry about downgrading PMDs to
852 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
853 * already in the tree, we will skip the insertion and just dirty the PMD as
854 * appropriate.
855 */
856 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
857 const struct iomap_iter *iter, void *entry, pfn_t pfn,
858 unsigned long flags)
859 {
860 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
861 void *new_entry = dax_make_entry(pfn, flags);
862 bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
863 bool cow = dax_fault_is_cow(iter);
864
865 if (dirty)
866 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
867
868 if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
869 unsigned long index = xas->xa_index;
870 /* we are replacing a zero page with block mapping */
871 if (dax_is_pmd_entry(entry))
872 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
873 PG_PMD_NR, false);
874 else /* pte entry */
875 unmap_mapping_pages(mapping, index, 1, false);
876 }
877
878 xas_reset(xas);
879 xas_lock_irq(xas);
880 if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
881 void *old;
882
883 dax_disassociate_entry(entry, mapping, false);
884 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
885 cow);
886 /*
887 * Only swap our new entry into the page cache if the current
888 * entry is a zero page or an empty entry. If a normal PTE or
889 * PMD entry is already in the cache, we leave it alone. This
890 * means that if we are trying to insert a PTE and the
891 * existing entry is a PMD, we will just leave the PMD in the
892 * tree and dirty it if necessary.
893 */
894 old = dax_lock_entry(xas, new_entry);
895 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
896 DAX_LOCKED));
897 entry = new_entry;
898 } else {
899 xas_load(xas); /* Walk the xa_state */
900 }
901
902 if (dirty)
903 xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
904
905 if (cow)
906 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
907
908 xas_unlock_irq(xas);
909 return entry;
910 }
911
912 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
913 struct address_space *mapping, void *entry)
914 {
915 unsigned long pfn, index, count, end;
916 long ret = 0;
917 struct vm_area_struct *vma;
918
919 /*
920 * A page got tagged dirty in DAX mapping? Something is seriously
921 * wrong.
922 */
923 if (WARN_ON(!xa_is_value(entry)))
924 return -EIO;
925
926 if (unlikely(dax_is_locked(entry))) {
927 void *old_entry = entry;
928
929 entry = get_unlocked_entry(xas, 0);
930
931 /* Entry got punched out / reallocated? */
932 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
933 goto put_unlocked;
934 /*
935 * Entry got reallocated elsewhere? No need to writeback.
936 * We have to compare pfns as we must not bail out due to
937 * difference in lockbit or entry type.
938 */
939 if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
940 goto put_unlocked;
941 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
942 dax_is_zero_entry(entry))) {
943 ret = -EIO;
944 goto put_unlocked;
945 }
946
947 /* Another fsync thread may have already done this entry */
948 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
949 goto put_unlocked;
950 }
951
952 /* Lock the entry to serialize with page faults */
953 dax_lock_entry(xas, entry);
954
955 /*
956 * We can clear the tag now but we have to be careful so that concurrent
957 * dax_writeback_one() calls for the same index cannot finish before we
958 * actually flush the caches. This is achieved as the calls will look
959 * at the entry only under the i_pages lock and once they do that
960 * they will see the entry locked and wait for it to unlock.
961 */
962 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
963 xas_unlock_irq(xas);
964
965 /*
966 * If dax_writeback_mapping_range() was given a wbc->range_start
967 * in the middle of a PMD, the 'index' we use needs to be
968 * aligned to the start of the PMD.
969 * This allows us to flush for PMD_SIZE and not have to worry about
970 * partial PMD writebacks.
971 */
972 pfn = dax_to_pfn(entry);
973 count = 1UL << dax_entry_order(entry);
974 index = xas->xa_index & ~(count - 1);
975 end = index + count - 1;
976
977 /* Walk all mappings of a given index of a file and writeprotect them */
978 i_mmap_lock_read(mapping);
979 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
980 pfn_mkclean_range(pfn, count, index, vma);
981 cond_resched();
982 }
983 i_mmap_unlock_read(mapping);
984
985 dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
986 /*
987 * After we have flushed the cache, we can clear the dirty tag. There
988 * cannot be new dirty data in the pfn after the flush has completed as
989 * the pfn mappings are writeprotected and fault waits for mapping
990 * entry lock.
991 */
992 xas_reset(xas);
993 xas_lock_irq(xas);
994 xas_store(xas, entry);
995 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
996 dax_wake_entry(xas, entry, WAKE_NEXT);
997
998 trace_dax_writeback_one(mapping->host, index, count);
999 return ret;
1000
1001 put_unlocked:
1002 put_unlocked_entry(xas, entry, WAKE_NEXT);
1003 return ret;
1004 }
1005
1006 /*
1007 * Flush the mapping to the persistent domain within the byte range of [start,
1008 * end]. This is required by data integrity operations to ensure file data is
1009 * on persistent storage prior to completion of the operation.
1010 */
1011 int dax_writeback_mapping_range(struct address_space *mapping,
1012 struct dax_device *dax_dev, struct writeback_control *wbc)
1013 {
1014 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
1015 struct inode *inode = mapping->host;
1016 pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
1017 void *entry;
1018 int ret = 0;
1019 unsigned int scanned = 0;
1020
1021 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1022 return -EIO;
1023
1024 if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
1025 return 0;
1026
1027 trace_dax_writeback_range(inode, xas.xa_index, end_index);
1028
1029 tag_pages_for_writeback(mapping, xas.xa_index, end_index);
1030
1031 xas_lock_irq(&xas);
1032 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1033 ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1034 if (ret < 0) {
1035 mapping_set_error(mapping, ret);
1036 break;
1037 }
1038 if (++scanned % XA_CHECK_SCHED)
1039 continue;
1040
1041 xas_pause(&xas);
1042 xas_unlock_irq(&xas);
1043 cond_resched();
1044 xas_lock_irq(&xas);
1045 }
1046 xas_unlock_irq(&xas);
1047 trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1048 return ret;
1049 }
1050 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
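/*
 * Hedged usage sketch: a DAX-aware ->writepages() is typically a thin
 * wrapper around this helper; foo_ names below are placeholders for the
 * filesystem's own:
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				foo_sb(mapping->host->i_sb)->dax_dev, wbc);
 *	}
 */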
1051
1052 static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1053 size_t size, void **kaddr, pfn_t *pfnp)
1054 {
1055 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1056 int id, rc = 0;
1057 long length;
1058
1059 id = dax_read_lock();
1060 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1061 DAX_ACCESS, kaddr, pfnp);
1062 if (length < 0) {
1063 rc = length;
1064 goto out;
1065 }
1066 if (!pfnp)
1067 goto out_check_addr;
1068 rc = -EINVAL;
1069 if (PFN_PHYS(length) < size)
1070 goto out;
1071 if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1072 goto out;
1073 /* For larger pages we need devmap */
1074 if (length > 1 && !pfn_t_devmap(*pfnp))
1075 goto out;
1076 rc = 0;
1077
1078 out_check_addr:
1079 if (!kaddr)
1080 goto out;
1081 if (!*kaddr)
1082 rc = -EFAULT;
1083 out:
1084 dax_read_unlock(id);
1085 return rc;
1086 }
1087
1088 /**
1089 * dax_iomap_cow_copy - Copy the data from source to destination before write
1090 * @pos: address to do copy from.
1091 * @length: size of copy operation.
1092 * @align_size: alignment granularity of the copy, either PMD_SIZE or PAGE_SIZE
1093 * @srcmap: iomap srcmap
1094 * @daddr: destination address to copy to.
1095 *
1096 * This can be called from two places: during a DAX write fault (page
1097 * aligned), to copy @length bytes of data to @daddr, or from the normal DAX
1098 * write path, where dax_iomap_iter() calls it to copy an unaligned head or
1099 * tail of the range. In the latter case the copy of the aligned portion is
1100 * taken care of by dax_iomap_iter() itself.
1101 */
1102 static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size,
1103 const struct iomap *srcmap, void *daddr)
1104 {
1105 loff_t head_off = pos & (align_size - 1);
1106 size_t size = ALIGN(head_off + length, align_size);
1107 loff_t end = pos + length;
1108 loff_t pg_end = round_up(end, align_size);
1109 bool copy_all = head_off == 0 && end == pg_end;
1110 void *saddr = NULL;
1111 int ret = 0;
1112
1113 ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1114 if (ret)
1115 return ret;
1116
1117 if (copy_all) {
1118 ret = copy_mc_to_kernel(daddr, saddr, length);
1119 return ret ? -EIO : 0;
1120 }
1121
1122 /* Copy the head part of the range */
1123 if (head_off) {
1124 ret = copy_mc_to_kernel(daddr, saddr, head_off);
1125 if (ret)
1126 return -EIO;
1127 }
1128
1129 /* Copy the tail part of the range */
1130 if (end < pg_end) {
1131 loff_t tail_off = head_off + length;
1132 loff_t tail_len = pg_end - end;
1133
1134 ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
1135 tail_len);
1136 if (ret)
1137 return -EIO;
1138 }
1139 return 0;
1140 }
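/*
 * Worked example of the arithmetic above (illustrative values): with
 * pos = 0x1200, length = 0x2c00 and align_size = PAGE_SIZE (0x1000),
 * head_off = 0x200, end = 0x3e00 and pg_end = 0x4000. The head copy
 * preserves the 0x200 bytes in front of the write inside the first page,
 * the tail copy preserves the last 0x200 bytes of the final page, and the
 * caller's write fills everything in between.
 */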
1141
1142 /*
1143 * The user has performed a load from a hole in the file. Allocating a new
1144 * page in the file would cause excessive storage usage for workloads with
1145 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1146 * If this page is ever written to we will re-fault and change the mapping to
1147 * point to real DAX storage instead.
1148 */
1149 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1150 const struct iomap_iter *iter, void **entry)
1151 {
1152 struct inode *inode = iter->inode;
1153 unsigned long vaddr = vmf->address;
1154 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1155 vm_fault_t ret;
1156
1157 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1158
1159 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1160 trace_dax_load_hole(inode, vmf, ret);
1161 return ret;
1162 }
1163
1164 #ifdef CONFIG_FS_DAX_PMD
1165 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1166 const struct iomap_iter *iter, void **entry)
1167 {
1168 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1169 unsigned long pmd_addr = vmf->address & PMD_MASK;
1170 struct vm_area_struct *vma = vmf->vma;
1171 struct inode *inode = mapping->host;
1172 pgtable_t pgtable = NULL;
1173 struct page *zero_page;
1174 spinlock_t *ptl;
1175 pmd_t pmd_entry;
1176 pfn_t pfn;
1177
1178 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1179
1180 if (unlikely(!zero_page))
1181 goto fallback;
1182
1183 pfn = page_to_pfn_t(zero_page);
1184 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1185 DAX_PMD | DAX_ZERO_PAGE);
1186
1187 if (arch_needs_pgtable_deposit()) {
1188 pgtable = pte_alloc_one(vma->vm_mm);
1189 if (!pgtable)
1190 return VM_FAULT_OOM;
1191 }
1192
1193 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1194 if (!pmd_none(*(vmf->pmd))) {
1195 spin_unlock(ptl);
1196 goto fallback;
1197 }
1198
1199 if (pgtable) {
1200 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1201 mm_inc_nr_ptes(vma->vm_mm);
1202 }
1203 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1204 pmd_entry = pmd_mkhuge(pmd_entry);
1205 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1206 spin_unlock(ptl);
1207 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1208 return VM_FAULT_NOPAGE;
1209
1210 fallback:
1211 if (pgtable)
1212 pte_free(vma->vm_mm, pgtable);
1213 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1214 return VM_FAULT_FALLBACK;
1215 }
1216 #else
1217 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1218 const struct iomap_iter *iter, void **entry)
1219 {
1220 return VM_FAULT_FALLBACK;
1221 }
1222 #endif /* CONFIG_FS_DAX_PMD */
1223
1224 static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
1225 {
1226 const struct iomap *iomap = &iter->iomap;
1227 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1228 unsigned offset = offset_in_page(pos);
1229 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1230 void *kaddr;
1231 long ret;
1232
1233 ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
1234 NULL);
1235 if (ret < 0)
1236 return ret;
1237 memset(kaddr + offset, 0, size);
1238 if (srcmap->addr != iomap->addr) {
1239 ret = dax_iomap_cow_copy(pos, size, PAGE_SIZE, srcmap,
1240 kaddr);
1241 if (ret < 0)
1242 return ret;
1243 dax_flush(iomap->dax_dev, kaddr, PAGE_SIZE);
1244 } else
1245 dax_flush(iomap->dax_dev, kaddr + offset, size);
1246 return ret;
1247 }
1248
1249 static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
1250 {
1251 const struct iomap *iomap = &iter->iomap;
1252 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1253 loff_t pos = iter->pos;
1254 u64 length = iomap_length(iter);
1255 s64 written = 0;
1256
1257 /* already zeroed? we're done. */
1258 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1259 return length;
1260
1261 do {
1262 unsigned offset = offset_in_page(pos);
1263 unsigned size = min_t(u64, PAGE_SIZE - offset, length);
1264 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1265 long rc;
1266 int id;
1267
1268 id = dax_read_lock();
1269 if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
1270 rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
1271 else
1272 rc = dax_memzero(iter, pos, size);
1273 dax_read_unlock(id);
1274
1275 if (rc < 0)
1276 return rc;
1277 pos += size;
1278 length -= size;
1279 written += size;
1280 } while (length > 0);
1281
1282 if (did_zero)
1283 *did_zero = true;
1284 return written;
1285 }
1286
1287 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1288 const struct iomap_ops *ops)
1289 {
1290 struct iomap_iter iter = {
1291 .inode = inode,
1292 .pos = pos,
1293 .len = len,
1294 .flags = IOMAP_DAX | IOMAP_ZERO,
1295 };
1296 int ret;
1297
1298 while ((ret = iomap_iter(&iter, ops)) > 0)
1299 iter.processed = dax_zero_iter(&iter, did_zero);
1300 return ret;
1301 }
1302 EXPORT_SYMBOL_GPL(dax_zero_range);
1303
1304 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1305 const struct iomap_ops *ops)
1306 {
1307 unsigned int blocksize = i_blocksize(inode);
1308 unsigned int off = pos & (blocksize - 1);
1309
1310 /* Block boundary? Nothing to do */
1311 if (!off)
1312 return 0;
1313 return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
1314 }
1315 EXPORT_SYMBOL_GPL(dax_truncate_page);
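/*
 * Small worked example (illustrative): with a 4096-byte block size and
 * pos == 5000, off == 904, so the call above zeroes the remaining
 * 3192 bytes of that block, i.e. file offsets 5000-8191.
 */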
1316
1317 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
1318 struct iov_iter *iter)
1319 {
1320 const struct iomap *iomap = &iomi->iomap;
1321 const struct iomap *srcmap = &iomi->srcmap;
1322 loff_t length = iomap_length(iomi);
1323 loff_t pos = iomi->pos;
1324 struct dax_device *dax_dev = iomap->dax_dev;
1325 loff_t end = pos + length, done = 0;
1326 bool write = iov_iter_rw(iter) == WRITE;
1327 ssize_t ret = 0;
1328 size_t xfer;
1329 int id;
1330
1331 if (!write) {
1332 end = min(end, i_size_read(iomi->inode));
1333 if (pos >= end)
1334 return 0;
1335
1336 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1337 return iov_iter_zero(min(length, end - pos), iter);
1338 }
1339
1340 /*
1341 * In DAX mode, enforce either pure overwrites of written extents, or
1342 * writes to unwritten extents as part of a copy-on-write operation.
1343 */
1344 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1345 !(iomap->flags & IOMAP_F_SHARED)))
1346 return -EIO;
1347
1348 /*
1349 * Write can allocate block for an area which has a hole page mapped
1350 * into page tables. We have to tear down these mappings so that data
1351 * written by write(2) is visible in mmap.
1352 */
1353 if (iomap->flags & IOMAP_F_NEW) {
1354 invalidate_inode_pages2_range(iomi->inode->i_mapping,
1355 pos >> PAGE_SHIFT,
1356 (end - 1) >> PAGE_SHIFT);
1357 }
1358
1359 id = dax_read_lock();
1360 while (pos < end) {
1361 unsigned offset = pos & (PAGE_SIZE - 1);
1362 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1363 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
1364 ssize_t map_len;
1365 bool recovery = false;
1366 void *kaddr;
1367
1368 if (fatal_signal_pending(current)) {
1369 ret = -EINTR;
1370 break;
1371 }
1372
1373 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1374 DAX_ACCESS, &kaddr, NULL);
1375 if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
1376 map_len = dax_direct_access(dax_dev, pgoff,
1377 PHYS_PFN(size), DAX_RECOVERY_WRITE,
1378 &kaddr, NULL);
1379 if (map_len > 0)
1380 recovery = true;
1381 }
1382 if (map_len < 0) {
1383 ret = map_len;
1384 break;
1385 }
1386
1387 if (write &&
1388 srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
1389 ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
1390 kaddr);
1391 if (ret)
1392 break;
1393 }
1394
1395 map_len = PFN_PHYS(map_len);
1396 kaddr += offset;
1397 map_len -= offset;
1398 if (map_len > end - pos)
1399 map_len = end - pos;
1400
1401 if (recovery)
1402 xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1403 map_len, iter);
1404 else if (write)
1405 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1406 map_len, iter);
1407 else
1408 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1409 map_len, iter);
1410
1411 pos += xfer;
1412 length -= xfer;
1413 done += xfer;
1414
1415 if (xfer == 0)
1416 ret = -EFAULT;
1417 if (xfer < map_len)
1418 break;
1419 }
1420 dax_read_unlock(id);
1421
1422 return done ? done : ret;
1423 }
1424
1425 /**
1426 * dax_iomap_rw - Perform I/O to a DAX file
1427 * @iocb: The control block for this I/O
1428 * @iter: The addresses to do I/O from or to
1429 * @ops: iomap ops passed from the file system
1430 *
1431 * This function performs read and write operations to directly mapped
1432 * persistent memory. The caller needs to take care of read/write exclusion
1433 * and evicting any page cache pages in the region under I/O.
1434 */
1435 ssize_t
1436 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1437 const struct iomap_ops *ops)
1438 {
1439 struct iomap_iter iomi = {
1440 .inode = iocb->ki_filp->f_mapping->host,
1441 .pos = iocb->ki_pos,
1442 .len = iov_iter_count(iter),
1443 .flags = IOMAP_DAX,
1444 };
1445 loff_t done = 0;
1446 int ret;
1447
1448 if (!iomi.len)
1449 return 0;
1450
1451 if (iov_iter_rw(iter) == WRITE) {
1452 lockdep_assert_held_write(&iomi.inode->i_rwsem);
1453 iomi.flags |= IOMAP_WRITE;
1454 } else {
1455 lockdep_assert_held(&iomi.inode->i_rwsem);
1456 }
1457
1458 if (iocb->ki_flags & IOCB_NOWAIT)
1459 iomi.flags |= IOMAP_NOWAIT;
1460
1461 while ((ret = iomap_iter(&iomi, ops)) > 0)
1462 iomi.processed = dax_iomap_iter(&iomi, iter);
1463
1464 done = iomi.pos - iocb->ki_pos;
1465 iocb->ki_pos = iomi.pos;
1466 return done ? done : ret;
1467 }
1468 EXPORT_SYMBOL_GPL(dax_iomap_rw);
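/*
 * Hedged usage sketch: a filesystem's DAX read path is typically a thin
 * wrapper like the one below (foo_* names are placeholders, and real
 * callers also honour IOCB_NOWAIT with a trylock):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */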
1469
1470 static vm_fault_t dax_fault_return(int error)
1471 {
1472 if (error == 0)
1473 return VM_FAULT_NOPAGE;
1474 return vmf_error(error);
1475 }
1476
1477 /*
1478 * When handling a synchronous page fault and the inode needs fsync, we can
1479 * insert the PTE/PMD into page tables only after that fsync happened. Skip
1480 * insertion for now and return the pfn so that caller can insert it after the
1481 * fsync is done.
1482 */
1483 static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1484 {
1485 if (WARN_ON_ONCE(!pfnp))
1486 return VM_FAULT_SIGBUS;
1487 *pfnp = pfn;
1488 return VM_FAULT_NEEDDSYNC;
1489 }
1490
1491 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1492 const struct iomap_iter *iter)
1493 {
1494 vm_fault_t ret;
1495 int error = 0;
1496
1497 switch (iter->iomap.type) {
1498 case IOMAP_HOLE:
1499 case IOMAP_UNWRITTEN:
1500 clear_user_highpage(vmf->cow_page, vmf->address);
1501 break;
1502 case IOMAP_MAPPED:
1503 error = copy_cow_page_dax(vmf, iter);
1504 break;
1505 default:
1506 WARN_ON_ONCE(1);
1507 error = -EIO;
1508 break;
1509 }
1510
1511 if (error)
1512 return dax_fault_return(error);
1513
1514 __SetPageUptodate(vmf->cow_page);
1515 ret = finish_fault(vmf);
1516 if (!ret)
1517 return VM_FAULT_DONE_COW;
1518 return ret;
1519 }
1520
1521 /**
1522 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
1523 * @vmf: vm fault instance
1524 * @iter: iomap iter
1525 * @pfnp: pfn to be returned
1526 * @xas: the dax mapping tree of a file
1527 * @entry: an unlocked dax entry to be inserted
1528 * @pmd: distinguish whether it is a pmd fault
1529 */
1530 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1531 const struct iomap_iter *iter, pfn_t *pfnp,
1532 struct xa_state *xas, void **entry, bool pmd)
1533 {
1534 const struct iomap *iomap = &iter->iomap;
1535 const struct iomap *srcmap = &iter->srcmap;
1536 size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1537 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
1538 bool write = iter->flags & IOMAP_WRITE;
1539 unsigned long entry_flags = pmd ? DAX_PMD : 0;
1540 int err = 0;
1541 pfn_t pfn;
1542 void *kaddr;
1543
1544 if (!pmd && vmf->cow_page)
1545 return dax_fault_cow_page(vmf, iter);
1546
1547 /* if we are reading UNWRITTEN or HOLE, return a hole. */
1548 if (!write &&
1549 (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1550 if (!pmd)
1551 return dax_load_hole(xas, vmf, iter, entry);
1552 return dax_pmd_load_hole(xas, vmf, iter, entry);
1553 }
1554
1555 if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
1556 WARN_ON_ONCE(1);
1557 return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1558 }
1559
1560 err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
1561 if (err)
1562 return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1563
1564 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1565
1566 if (write &&
1567 srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
1568 err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
1569 if (err)
1570 return dax_fault_return(err);
1571 }
1572
1573 if (dax_fault_is_synchronous(iter, vmf->vma))
1574 return dax_fault_synchronous_pfnp(pfnp, pfn);
1575
1576 /* insert PMD pfn */
1577 if (pmd)
1578 return vmf_insert_pfn_pmd(vmf, pfn, write);
1579
1580 /* insert PTE pfn */
1581 if (write)
1582 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1583 return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1584 }
1585
1586 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1587 int *iomap_errp, const struct iomap_ops *ops)
1588 {
1589 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1590 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1591 struct iomap_iter iter = {
1592 .inode = mapping->host,
1593 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT,
1594 .len = PAGE_SIZE,
1595 .flags = IOMAP_DAX | IOMAP_FAULT,
1596 };
1597 vm_fault_t ret = 0;
1598 void *entry;
1599 int error;
1600
1601 trace_dax_pte_fault(iter.inode, vmf, ret);
1602 /*
1603 * Check whether offset isn't beyond end of file now. Caller is supposed
1604 * to hold locks serializing us with truncate / punch hole so this is
1605 * a reliable test.
1606 */
1607 if (iter.pos >= i_size_read(iter.inode)) {
1608 ret = VM_FAULT_SIGBUS;
1609 goto out;
1610 }
1611
1612 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1613 iter.flags |= IOMAP_WRITE;
1614
1615 entry = grab_mapping_entry(&xas, mapping, 0);
1616 if (xa_is_internal(entry)) {
1617 ret = xa_to_internal(entry);
1618 goto out;
1619 }
1620
1621 /*
1622 * It is possible, particularly with mixed reads & writes to private
1623 * mappings, that we have raced with a PMD fault that overlaps with
1624 * the PTE we need to set up. If so just return and the fault will be
1625 * retried.
1626 */
1627 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1628 ret = VM_FAULT_NOPAGE;
1629 goto unlock_entry;
1630 }
1631
1632 while ((error = iomap_iter(&iter, ops)) > 0) {
1633 if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
1634 iter.processed = -EIO; /* fs corruption? */
1635 continue;
1636 }
1637
1638 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1639 if (ret != VM_FAULT_SIGBUS &&
1640 (iter.iomap.flags & IOMAP_F_NEW)) {
1641 count_vm_event(PGMAJFAULT);
1642 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1643 ret |= VM_FAULT_MAJOR;
1644 }
1645
1646 if (!(ret & VM_FAULT_ERROR))
1647 iter.processed = PAGE_SIZE;
1648 }
1649
1650 if (iomap_errp)
1651 *iomap_errp = error;
1652 if (!ret && error)
1653 ret = dax_fault_return(error);
1654
1655 unlock_entry:
1656 dax_unlock_entry(&xas, entry);
1657 out:
1658 trace_dax_pte_fault_done(iter.inode, vmf, ret);
1659 return ret;
1660 }
1661
1662 #ifdef CONFIG_FS_DAX_PMD
1663 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1664 pgoff_t max_pgoff)
1665 {
1666 unsigned long pmd_addr = vmf->address & PMD_MASK;
1667 bool write = vmf->flags & FAULT_FLAG_WRITE;
1668
1669 /*
1670 * Make sure that the faulting address's PMD offset (color) matches
1671 * the PMD offset from the start of the file. This is necessary so
1672 * that a PMD range in the page table overlaps exactly with a PMD
1673 * range in the page cache.
1674 */
1675 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1676 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1677 return true;
1678
1679 /* Fall back to PTEs if we're going to COW */
1680 if (write && !(vmf->vma->vm_flags & VM_SHARED))
1681 return true;
1682
1683 /* If the PMD would extend outside the VMA */
1684 if (pmd_addr < vmf->vma->vm_start)
1685 return true;
1686 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1687 return true;
1688
1689 /* If the PMD would extend beyond the file size */
1690 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1691 return true;
1692
1693 return false;
1694 }
1695
1696 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1697 const struct iomap_ops *ops)
1698 {
1699 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1700 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1701 struct iomap_iter iter = {
1702 .inode = mapping->host,
1703 .len = PMD_SIZE,
1704 .flags = IOMAP_DAX | IOMAP_FAULT,
1705 };
1706 vm_fault_t ret = VM_FAULT_FALLBACK;
1707 pgoff_t max_pgoff;
1708 void *entry;
1709 int error;
1710
1711 if (vmf->flags & FAULT_FLAG_WRITE)
1712 iter.flags |= IOMAP_WRITE;
1713
1714 /*
1715 * Check whether offset isn't beyond end of file now. Caller is
1716 * supposed to hold locks serializing us with truncate / punch hole so
1717 * this is a reliable test.
1718 */
1719 max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
1720
1721 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1722
1723 if (xas.xa_index >= max_pgoff) {
1724 ret = VM_FAULT_SIGBUS;
1725 goto out;
1726 }
1727
1728 if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1729 goto fallback;
1730
1731 /*
1732 * grab_mapping_entry() will make sure we get an empty PMD entry,
1733 * a zero PMD entry or a DAX PMD. If it can't (because a PTE
1734 * entry is already in the array, for instance), it will return
1735 * VM_FAULT_FALLBACK.
1736 */
1737 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1738 if (xa_is_internal(entry)) {
1739 ret = xa_to_internal(entry);
1740 goto fallback;
1741 }
1742
1743 /*
1744 * It is possible, particularly with mixed reads & writes to private
1745 * mappings, that we have raced with a PTE fault that overlaps with
1746 * the PMD we need to set up. If so just return and the fault will be
1747 * retried.
1748 */
1749 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1750 !pmd_devmap(*vmf->pmd)) {
1751 ret = 0;
1752 goto unlock_entry;
1753 }
1754
1755 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1756 while ((error = iomap_iter(&iter, ops)) > 0) {
1757 if (iomap_length(&iter) < PMD_SIZE)
1758 continue; /* actually breaks out of the loop */
1759
1760 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1761 if (ret != VM_FAULT_FALLBACK)
1762 iter.processed = PMD_SIZE;
1763 }
1764
1765 unlock_entry:
1766 dax_unlock_entry(&xas, entry);
1767 fallback:
1768 if (ret == VM_FAULT_FALLBACK) {
1769 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1770 count_vm_event(THP_FAULT_FALLBACK);
1771 }
1772 out:
1773 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1774 return ret;
1775 }
1776 #else
1777 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1778 const struct iomap_ops *ops)
1779 {
1780 return VM_FAULT_FALLBACK;
1781 }
1782 #endif /* CONFIG_FS_DAX_PMD */
1783
1784 /**
1785 * dax_iomap_fault - handle a page fault on a DAX file
1786 * @vmf: The description of the fault
1787 * @pe_size: Size of the page to fault in
1788 * @pfnp: PFN to insert for synchronous faults if fsync is required
1789 * @iomap_errp: Storage for detailed error code in case of error
1790 * @ops: Iomap ops passed from the file system
1791 *
1792 * When a page fault occurs, filesystems may call this helper in
1793 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1794 * has done all the necessary locking for page fault to proceed
1795 * successfully.
1796 */
1797 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1798 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1799 {
1800 switch (pe_size) {
1801 case PE_SIZE_PTE:
1802 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1803 case PE_SIZE_PMD:
1804 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1805 default:
1806 return VM_FAULT_FALLBACK;
1807 }
1808 }
1809 EXPORT_SYMBOL_GPL(dax_iomap_fault);
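/*
 * Hedged sketch of a fault handler built on this helper (foo_* names are
 * placeholders; compare the ext4/xfs DAX fault handlers):
 *
 *	static vm_fault_t foo_huge_fault(struct vm_fault *vmf,
 *					 enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */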
1810
1811 /*
1812 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1813 * @vmf: The description of the fault
1814 * @pfn: PFN to insert
1815 * @order: Order of entry to insert.
1816 *
1817 * This function inserts a writeable PTE or PMD entry into the page tables
1818 * for an mmaped DAX file. It also marks the page cache entry as dirty.
1819 */
1820 static vm_fault_t
1821 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1822 {
1823 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1824 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1825 void *entry;
1826 vm_fault_t ret;
1827
1828 xas_lock_irq(&xas);
1829 entry = get_unlocked_entry(&xas, order);
1830 /* Did we race with someone splitting the entry, or similar? */
1831 if (!entry || dax_is_conflict(entry) ||
1832 (order == 0 && !dax_is_pte_entry(entry))) {
1833 put_unlocked_entry(&xas, entry, WAKE_NEXT);
1834 xas_unlock_irq(&xas);
1835 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1836 VM_FAULT_NOPAGE);
1837 return VM_FAULT_NOPAGE;
1838 }
1839 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1840 dax_lock_entry(&xas, entry);
1841 xas_unlock_irq(&xas);
1842 if (order == 0)
1843 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1844 #ifdef CONFIG_FS_DAX_PMD
1845 else if (order == PMD_ORDER)
1846 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1847 #endif
1848 else
1849 ret = VM_FAULT_FALLBACK;
1850 dax_unlock_entry(&xas, entry);
1851 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1852 return ret;
1853 }
1854
1855 /**
1856 * dax_finish_sync_fault - finish synchronous page fault
1857 * @vmf: The description of the fault
1858 * @pe_size: Size of entry to be inserted
1859 * @pfn: PFN to insert
1860 *
1861 * This function ensures that the file range touched by the page fault is
1862 * stored persistently on the media and handles inserting of appropriate page
1863 * table entry.
1864 */
1865 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1866 enum page_entry_size pe_size, pfn_t pfn)
1867 {
1868 int err;
1869 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1870 unsigned int order = pe_order(pe_size);
1871 size_t len = PAGE_SIZE << order;
1872
1873 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1874 if (err)
1875 return VM_FAULT_SIGBUS;
1876 return dax_insert_pfn_mkwrite(vmf, pfn, order);
1877 }
1878 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
1879
1880 static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
1881 struct iomap_iter *it_dest, u64 len, bool *same)
1882 {
1883 const struct iomap *smap = &it_src->iomap;
1884 const struct iomap *dmap = &it_dest->iomap;
1885 loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
1886 void *saddr, *daddr;
1887 int id, ret;
1888
1889 len = min(len, min(smap->length, dmap->length));
1890
1891 if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
1892 *same = true;
1893 return len;
1894 }
1895
1896 if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
1897 *same = false;
1898 return 0;
1899 }
1900
1901 id = dax_read_lock();
1902 ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
1903 &saddr, NULL);
1904 if (ret < 0)
1905 goto out_unlock;
1906
1907 ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
1908 &daddr, NULL);
1909 if (ret < 0)
1910 goto out_unlock;
1911
1912 *same = !memcmp(saddr, daddr, len);
1913 if (!*same)
1914 len = 0;
1915 dax_read_unlock(id);
1916 return len;
1917
1918 out_unlock:
1919 dax_read_unlock(id);
1920 return -EIO;
1921 }
1922
1923 int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1924 struct inode *dst, loff_t dstoff, loff_t len, bool *same,
1925 const struct iomap_ops *ops)
1926 {
1927 struct iomap_iter src_iter = {
1928 .inode = src,
1929 .pos = srcoff,
1930 .len = len,
1931 .flags = IOMAP_DAX,
1932 };
1933 struct iomap_iter dst_iter = {
1934 .inode = dst,
1935 .pos = dstoff,
1936 .len = len,
1937 .flags = IOMAP_DAX,
1938 };
1939 int ret;
1940
1941 while ((ret = iomap_iter(&src_iter, ops)) > 0) {
1942 while ((ret = iomap_iter(&dst_iter, ops)) > 0) {
1943 dst_iter.processed = dax_range_compare_iter(&src_iter,
1944 &dst_iter, len, same);
1945 }
1946 if (ret <= 0)
1947 src_iter.processed = ret;
1948 }
1949 return ret;
1950 }
1951
1952 int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
1953 struct file *file_out, loff_t pos_out,
1954 loff_t *len, unsigned int remap_flags,
1955 const struct iomap_ops *ops)
1956 {
1957 return __generic_remap_file_range_prep(file_in, pos_in, file_out,
1958 pos_out, len, remap_flags, ops);
1959 }
1960 EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
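/*
 * Hedged usage note: a DAX-aware remap_file_range() prep step can route
 * through the wrapper above so that dedupe comparison reads go through
 * dax_iomap_direct_access() rather than the page cache, e.g.
 *
 *	if (IS_DAX(inode_in))
 *		ret = dax_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, &len, remap_flags, &foo_iomap_ops);
 *	else
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, &len, remap_flags);
 *
 * where foo_iomap_ops stands in for the filesystem's read iomap ops.
 */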