// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

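/*
 * Map an enum page_entry_size (the PTE/PMD/PUD fault size) to the
 * corresponding page-table entry order, i.e. log2 of the number of
 * base pages covered by an entry of that size.
 */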
static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

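/*
 * The helpers below pack and unpack the value entries described above:
 * the pfn is stored in the bits above DAX_SHIFT, while the low four bits
 * carry the LOCKED/PMD/ZERO_PAGE/EMPTY flags.
 */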
static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

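/*
 * dax_entry_size() returns the number of bytes mapped by an entry (zero
 * page and empty entries map nothing); dax_end_pfn() and
 * for_each_mapped_pfn() build on it to walk every pfn backing a real
 * PTE- or PMD-sized entry.
 */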
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

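/* Release the entry lock taken by dax_lock_page(), using its cookie. */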
void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start till the end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);

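/*
 * Remove the DAX entry at @index: unconditionally when truncating, but
 * only if the entry is neither dirty nor marked for writeback otherwise.
 * Returns 1 if an entry was removed, 0 if not.
 */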
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

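/*
 * dax_iomap_pgoff() translates a file position to a page offset within the
 * iomap's DAX device; copy_cow_page_dax() uses it to copy the existing
 * on-media data into the page backing a COW fault.
 */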
static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * follow_invalidate_pte() will use the range to call
		 * mmu_notifier_invalidate_range_start() on our behalf before
		 * taking any lock.
		 */
		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
					  &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

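/*
 * Write back one dirty entry: lock it, write-protect all user mappings of
 * its pfns, flush the CPU cache for the whole mapped range, and only then
 * clear the dirty tag so a racing fault cannot redirty it unnoticed.
 */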
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

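/*
 * Resolve the pfn backing @pos in the iomap's DAX device and check that the
 * returned mapping is big enough, suitably aligned and (for sizes larger
 * than a page) devmap backed, so the caller may insert it into page tables.
 */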
static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

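/*
 * Zero a sub-page range through the DAX device's kernel mapping and flush
 * it; used when the range to zero is not a whole, page-aligned page.
 */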
static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
		unsigned int offset, size_t size)
{
	void *kaddr;
	long ret;

	ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (ret > 0) {
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
	}
	return ret;
}

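/*
 * Zero one iomap extent: holes and unwritten extents are already zero,
 * whole aligned pages go through dax_zero_page_range(), and partial pages
 * fall back to dax_memzero().  Returns the number of bytes zeroed.
 */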
static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 length = iomap_length(iter);
	s64 written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned offset = offset_in_page(pos);
		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		long rc;
		int id;

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
		dax_read_unlock(id);

		if (rc < 0)
			return rc;
		pos += size;
		length -= size;
		written += size;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);

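/*
 * The read/write actor for dax_iomap_rw(): map each chunk of the current
 * extent with dax_direct_access() and copy between the iov_iter and the
 * device's kernel mapping until the extent or the iter is exhausted.
 */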
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
		struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

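/* Convert an errno from the fault path into a VM_FAULT_* code. */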
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, const struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

/*
 * When handling a synchronous page fault and the inode needs an fsync, we
 * can insert the PTE/PMD into page tables only after that fsync happened.
 * Skip insertion for now and return the pfn so that the caller can insert
 * it after the fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}

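/*
 * Handle a write fault on a private mapping: fill vmf->cow_page either with
 * zeroes (hole or unwritten extent) or with a copy of the on-media data,
 * then let finish_fault() install it.
 */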
static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}

/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf:	vm fault instance
 * @iter:	iomap iter
 * @pfnp:	pfn to be returned
 * @xas:	the dax mapping tree of a file
 * @entry:	an unlocked dax entry to be inserted
 * @pmd:	distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const struct iomap *iomap = &iter->iomap;
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, mapping, entry, vmf);
		return dax_pmd_load_hole(xas, vmf, iomap, entry);
	}

	if (iomap->type != IOMAP_MAPPED) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
				  write && !sync);

	if (sync)
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	/* insert PMD pfn */
	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);

	/* insert PTE pfn */
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

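/*
 * Handle a PTE-sized fault: grab the mapping entry, iterate the iomap for
 * the faulting page and let dax_fault_iter() insert the pfn, the zero page
 * or a COW page as appropriate.
 */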
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
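/*
 * Decide whether a PMD fault must fall back to PTEs: the faulting address
 * and the file offset must share the same position within a PMD, the
 * mapping must be shared, and the whole PMD must fit inside both the VMA
 * and the file.
 */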
static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* If the PMD would extend beyond the file size */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.len		= PMD_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;
	int error;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue; /* actually breaks out of the loop */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);