1/*
2 * fs/dax.c - Direct Access filesystem code
3 * Copyright (c) 2013-2014 Intel Corporation
4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 */
16
17#include <linux/atomic.h>
18#include <linux/blkdev.h>
19#include <linux/buffer_head.h>
20#include <linux/dax.h>
21#include <linux/fs.h>
22#include <linux/genhd.h>
23#include <linux/highmem.h>
24#include <linux/memcontrol.h>
25#include <linux/mm.h>
26#include <linux/mutex.h>
27#include <linux/pagevec.h>
28#include <linux/pmem.h>
29#include <linux/sched.h>
30#include <linux/uio.h>
31#include <linux/vmstat.h>
32#include <linux/pfn_t.h>
33#include <linux/sizes.h>
34
35#define RADIX_DAX_MASK 0xf
36#define RADIX_DAX_SHIFT 4
37#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
38#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
39#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
40#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
41#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
42 RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
43
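/*
 * Editor's worked example (not part of fs/dax.c): how the macros above
 * pack a sector and an entry type into a single radix tree slot.  The
 * sector value is made up for illustration.
 *
 *   void *entry = RADIX_DAX_ENTRY(0x1234, false);
 *       entry == (void *)((0x1234 << RADIX_DAX_SHIFT) | RADIX_DAX_PTE)
 *   RADIX_DAX_TYPE(entry)   == RADIX_DAX_PTE
 *   RADIX_DAX_SECTOR(entry) == 0x1234
 *
 * The low RADIX_DAX_SHIFT bits carry the entry type (including the
 * RADIX_TREE_EXCEPTIONAL_ENTRY marker); the remaining bits carry the
 * sector, so a sector round-trips through the tree unchanged.
 */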
44static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
45{
46 struct request_queue *q = bdev->bd_queue;
47 long rc = -EIO;
48
49 dax->addr = (void __pmem *) ERR_PTR(-EIO);
50 if (blk_queue_enter(q, true) != 0)
51 return rc;
52
53 rc = bdev_direct_access(bdev, dax);
54 if (rc < 0) {
55 dax->addr = (void __pmem *) ERR_PTR(rc);
56 blk_queue_exit(q);
57 return rc;
58 }
59 return rc;
60}
61
62static void dax_unmap_atomic(struct block_device *bdev,
63 const struct blk_dax_ctl *dax)
64{
65 if (IS_ERR(dax->addr))
66 return;
67 blk_queue_exit(bdev->bd_queue);
68}
69
70struct page *read_dax_sector(struct block_device *bdev, sector_t n)
71{
72 struct page *page = alloc_pages(GFP_KERNEL, 0);
73 struct blk_dax_ctl dax = {
74 .size = PAGE_SIZE,
75 .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
76 };
77 long rc;
78
79 if (!page)
80 return ERR_PTR(-ENOMEM);
81
82 rc = dax_map_atomic(bdev, &dax);
83 if (rc < 0)
84 return ERR_PTR(rc);
85 memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
86 dax_unmap_atomic(bdev, &dax);
87 return page;
88}
89
90static bool buffer_written(struct buffer_head *bh)
91{
92 return buffer_mapped(bh) && !buffer_unwritten(bh);
93}
94
95/*
96 * When ext4 encounters a hole, it returns without modifying the buffer_head
97 * which means that we can't trust b_size. To cope with this, we set b_state
98 * to 0 before calling get_block and, if any bit is set, we know we can trust
99 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
100 * and would save us time calling get_block repeatedly.
101 */
102static bool buffer_size_valid(struct buffer_head *bh)
103{
104 return bh->b_state != 0;
105}
106
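/*
 * Editor's illustrative sketch (not part of fs/dax.c): the probing
 * pattern described above.  "myfs_probe_extent" is hypothetical and the
 * get_block callback is supplied by the caller; it mirrors how dax_io()
 * below primes and interprets the buffer_head.
 */
static long myfs_probe_extent(struct inode *inode, sector_t block,
			      get_block_t get_block)
{
	struct buffer_head bh;

	memset(&bh, 0, sizeof(bh));	/* b_state == 0 marks "untouched" */
	bh.b_size = PAGE_SIZE;		/* upper bound we are asking about */
	if (get_block(inode, block, &bh, 0))
		return -EIO;
	if (!buffer_size_valid(&bh))	/* fs left bh alone: assume one block */
		bh.b_size = 1 << inode->i_blkbits;
	return bh.b_size;
}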
107
108static sector_t to_sector(const struct buffer_head *bh,
109 const struct inode *inode)
110{
111 sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
112
113 return sector;
114}
115
116static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
117 loff_t start, loff_t end, get_block_t get_block,
118 struct buffer_head *bh)
119{
120 loff_t pos = start, max = start, bh_max = start;
121 bool hole = false, need_wmb = false;
122 struct block_device *bdev = NULL;
123 int rw = iov_iter_rw(iter), rc;
124 long map_len = 0;
125 struct blk_dax_ctl dax = {
126 .addr = (void __pmem *) ERR_PTR(-EIO),
127 };
128 unsigned blkbits = inode->i_blkbits;
129 sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
130 >> blkbits;
131
132 if (rw == READ)
133 end = min(end, i_size_read(inode));
134
135 while (pos < end) {
136 size_t len;
137 if (pos == max) {
138 long page = pos >> PAGE_SHIFT;
139 sector_t block = page << (PAGE_SHIFT - blkbits);
140 unsigned first = pos - (block << blkbits);
141 long size;
142
143 if (pos == bh_max) {
144 bh->b_size = PAGE_ALIGN(end - pos);
145 bh->b_state = 0;
146 rc = get_block(inode, block, bh, rw == WRITE);
147 if (rc)
148 break;
149 if (!buffer_size_valid(bh))
150 bh->b_size = 1 << blkbits;
151 bh_max = pos - first + bh->b_size;
152 bdev = bh->b_bdev;
153 /*
154 * We allow uninitialized buffers for writes
155 * beyond EOF as those cannot race with faults
156 */
157 WARN_ON_ONCE(
158 (buffer_new(bh) && block < file_blks) ||
159 (rw == WRITE && buffer_unwritten(bh)));
160 } else {
161 unsigned done = bh->b_size -
162 (bh_max - (pos - first));
163 bh->b_blocknr += done >> blkbits;
164 bh->b_size -= done;
165 }
166
167 hole = rw == READ && !buffer_written(bh);
168 if (hole) {
169 size = bh->b_size - first;
170 } else {
171 dax_unmap_atomic(bdev, &dax);
172 dax.sector = to_sector(bh, inode);
173 dax.size = bh->b_size;
174 map_len = dax_map_atomic(bdev, &dax);
175 if (map_len < 0) {
176 rc = map_len;
177 break;
178 }
179 dax.addr += first;
180 size = map_len - first;
181 }
182 max = min(pos + size, end);
183 }
184
185 if (iov_iter_rw(iter) == WRITE) {
186 len = copy_from_iter_pmem(dax.addr, max - pos, iter);
187 need_wmb = true;
188 } else if (!hole)
189 len = copy_to_iter((void __force *) dax.addr, max - pos,
190 iter);
191 else
192 len = iov_iter_zero(max - pos, iter);
193
194 if (!len) {
195 rc = -EFAULT;
196 break;
197 }
198
199 pos += len;
200 if (!IS_ERR(dax.addr))
201 dax.addr += len;
202 }
203
204 if (need_wmb)
205 wmb_pmem();
206 dax_unmap_atomic(bdev, &dax);
207
208 return (pos == start) ? rc : pos - start;
209}
210
211/**
212 * dax_do_io - Perform I/O to a DAX file
213 * @iocb: The control block for this I/O
214 * @inode: The file which the I/O is directed at
215 * @iter: The addresses to do I/O from or to
216 * @pos: The file offset where the I/O starts
217 * @get_block: The filesystem method used to translate file offsets to blocks
218 * @end_io: A filesystem callback for I/O completion
219 * @flags: See below
220 *
221 * This function uses the same locking scheme as do_blockdev_direct_IO:
222 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
223 * caller for writes. For reads, we take and release the i_mutex ourselves.
224 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
225 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
226 * is in progress.
227 */
228ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
229 struct iov_iter *iter, loff_t pos, get_block_t get_block,
230 dio_iodone_t end_io, int flags)
231{
232 struct buffer_head bh;
233 ssize_t retval = -EINVAL;
234 loff_t end = pos + iov_iter_count(iter);
235
236 memset(&bh, 0, sizeof(bh));
237 bh.b_bdev = inode->i_sb->s_bdev;
238
239 if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
240 inode_lock(inode);
241
242 /* Protects against truncate */
243 if (!(flags & DIO_SKIP_DIO_COUNT))
244 inode_dio_begin(inode);
245
246 retval = dax_io(inode, iter, pos, end, get_block, &bh);
247
248 if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
249 inode_unlock(inode);
250
251 if (end_io) {
252 int err;
253
254 err = end_io(iocb, pos, retval, bh.b_private);
255 if (err)
256 retval = err;
257 }
258
259 if (!(flags & DIO_SKIP_DIO_COUNT))
260 inode_dio_end(inode);
261 return retval;
262}
263EXPORT_SYMBOL_GPL(dax_do_io);
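/*
 * Editor's illustrative sketch (not part of fs/dax.c): how a filesystem
 * of this vintage might route DAX I/O through dax_do_io() from its
 * ->direct_IO method.  "myfs_direct_IO" and "myfs_get_block" are
 * hypothetical; the non-DAX fallback uses the ordinary direct-I/O path.
 */
static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset, myfs_get_block,
				 NULL, DIO_LOCKING);
	return blockdev_direct_IO(iocb, inode, iter, offset, myfs_get_block);
}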
264
265/*
266 * The user has performed a load from a hole in the file. Allocating
267 * a new page in the file would cause excessive storage usage for
268 * workloads with sparse files. We allocate a page cache page instead.
269 * We'll kick it out of the page cache if it's ever written to,
270 * otherwise it will simply fall out of the page cache under memory
271 * pressure without ever having been dirtied.
272 */
273static int dax_load_hole(struct address_space *mapping, struct page *page,
274 struct vm_fault *vmf)
275{
276 if (!page)
277 page = find_or_create_page(mapping, vmf->pgoff,
278 GFP_KERNEL | __GFP_ZERO);
279 if (!page)
280 return VM_FAULT_OOM;
281
282 vmf->page = page;
283 return VM_FAULT_LOCKED;
284}
285
286static int copy_user_bh(struct page *to, struct inode *inode,
287 struct buffer_head *bh, unsigned long vaddr)
288{
289 struct blk_dax_ctl dax = {
290 .sector = to_sector(bh, inode),
291 .size = bh->b_size,
292 };
293 struct block_device *bdev = bh->b_bdev;
294 void *vto;
295
296 if (dax_map_atomic(bdev, &dax) < 0)
297 return PTR_ERR(dax.addr);
298 vto = kmap_atomic(to);
299 copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
300 kunmap_atomic(vto);
301 dax_unmap_atomic(bdev, &dax);
302 return 0;
303}
304
305#define NO_SECTOR -1
306#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
307
308static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
309 sector_t sector, bool pmd_entry, bool dirty)
310{
311 struct radix_tree_root *page_tree = &mapping->page_tree;
312 pgoff_t pmd_index = DAX_PMD_INDEX(index);
313 int type, error = 0;
314 void *entry;
315
316 WARN_ON_ONCE(pmd_entry && !dirty);
317 if (dirty)
318 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
319
320 spin_lock_irq(&mapping->tree_lock);
321
322 entry = radix_tree_lookup(page_tree, pmd_index);
323 if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
324 index = pmd_index;
325 goto dirty;
326 }
327
328 entry = radix_tree_lookup(page_tree, index);
329 if (entry) {
330 type = RADIX_DAX_TYPE(entry);
331 if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
332 type != RADIX_DAX_PMD)) {
333 error = -EIO;
334 goto unlock;
335 }
336
337 if (!pmd_entry || type == RADIX_DAX_PMD)
338 goto dirty;
339
340 /*
341 * We only insert dirty PMD entries into the radix tree. This
342 * means we don't need to worry about removing a dirty PTE
343 * entry and inserting a clean PMD entry, thus reducing the
344 * range we would flush with a follow-up fsync/msync call.
345 */
346 radix_tree_delete(&mapping->page_tree, index);
347 mapping->nrexceptional--;
348 }
349
350 if (sector == NO_SECTOR) {
351 /*
352 * This can happen during correct operation if our pfn_mkwrite
353 * fault raced against a hole punch operation. If this
354 * happens the pte that was hole punched will have been
355 * unmapped and the radix tree entry will have been removed by
356 * the time we are called, but the call will still happen. We
357 * will return all the way up to wp_pfn_shared(), where the
358 * pte_same() check will fail, eventually causing page fault
359 * to be retried by the CPU.
360 */
361 goto unlock;
362 }
363
364 error = radix_tree_insert(page_tree, index,
365 RADIX_DAX_ENTRY(sector, pmd_entry));
366 if (error)
367 goto unlock;
368
369 mapping->nrexceptional++;
370 dirty:
371 if (dirty)
372 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
373 unlock:
374 spin_unlock_irq(&mapping->tree_lock);
375 return error;
376}
377
378static int dax_writeback_one(struct block_device *bdev,
379 struct address_space *mapping, pgoff_t index, void *entry)
380{
381 struct radix_tree_root *page_tree = &mapping->page_tree;
382 int type = RADIX_DAX_TYPE(entry);
383 struct radix_tree_node *node;
384 struct blk_dax_ctl dax;
385 void **slot;
386 int ret = 0;
387
388 spin_lock_irq(&mapping->tree_lock);
389 /*
390 * Regular page slots are stabilized by the page lock even
391 * without the tree itself locked. These unlocked entries
392 * need verification under the tree lock.
393 */
394 if (!__radix_tree_lookup(page_tree, index, &node, &slot))
395 goto unlock;
396 if (*slot != entry)
397 goto unlock;
398
399 /* another fsync thread may have already written back this entry */
400 if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
401 goto unlock;
402
403 if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
404 ret = -EIO;
405 goto unlock;
406 }
407
408 dax.sector = RADIX_DAX_SECTOR(entry);
409 dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
410 spin_unlock_irq(&mapping->tree_lock);
411
412 /*
413 * We cannot hold tree_lock while calling dax_map_atomic() because it
414 * eventually calls cond_resched().
415 */
416 ret = dax_map_atomic(bdev, &dax);
417 if (ret < 0)
418 return ret;
419
420 if (WARN_ON_ONCE(ret < dax.size)) {
421 ret = -EIO;
422 goto unmap;
423 }
424
425 wb_cache_pmem(dax.addr, dax.size);
426
427 spin_lock_irq(&mapping->tree_lock);
428 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
429 spin_unlock_irq(&mapping->tree_lock);
430 unmap:
431 dax_unmap_atomic(bdev, &dax);
432 return ret;
433
434 unlock:
435 spin_unlock_irq(&mapping->tree_lock);
436 return ret;
437}
438
439/*
440 * Flush the mapping to the persistent domain within the byte range of [start,
441 * end]. This is required by data integrity operations to ensure file data is
442 * on persistent storage prior to completion of the operation.
443 */
444int dax_writeback_mapping_range(struct address_space *mapping,
445 struct block_device *bdev, struct writeback_control *wbc)
446{
447 struct inode *inode = mapping->host;
448 pgoff_t start_index, end_index, pmd_index;
449 pgoff_t indices[PAGEVEC_SIZE];
450 struct pagevec pvec;
451 bool done = false;
452 int i, ret = 0;
453 void *entry;
454
455 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
456 return -EIO;
457
458 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
459 return 0;
460
461 start_index = wbc->range_start >> PAGE_SHIFT;
462 end_index = wbc->range_end >> PAGE_SHIFT;
463 pmd_index = DAX_PMD_INDEX(start_index);
464
465 rcu_read_lock();
466 entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
467 rcu_read_unlock();
468
469 /* see if the start of our range is covered by a PMD entry */
470 if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
471 start_index = pmd_index;
472
473 tag_pages_for_writeback(mapping, start_index, end_index);
474
475 pagevec_init(&pvec, 0);
476 while (!done) {
477 pvec.nr = find_get_entries_tag(mapping, start_index,
478 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
479 pvec.pages, indices);
480
481 if (pvec.nr == 0)
482 break;
483
484 for (i = 0; i < pvec.nr; i++) {
485 if (indices[i] > end_index) {
486 done = true;
487 break;
488 }
489
490 ret = dax_writeback_one(bdev, mapping, indices[i],
491 pvec.pages[i]);
492 if (ret < 0)
493 return ret;
494 }
495 }
496 wmb_pmem();
497 return 0;
498}
499EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
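/*
 * Editor's illustrative sketch (not part of fs/dax.c): calling
 * dax_writeback_mapping_range() from a filesystem's ->writepages method
 * so that fsync/msync flush dirty DAX mappings.  "myfs_writepages" is
 * hypothetical; the block device is taken from the superblock, as the
 * rest of this file does.
 */
static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				mapping->host->i_sb->s_bdev, wbc);
	return generic_writepages(mapping, wbc);
}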
500
501static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
502 struct vm_area_struct *vma, struct vm_fault *vmf)
503{
504 unsigned long vaddr = (unsigned long)vmf->virtual_address;
505 struct address_space *mapping = inode->i_mapping;
506 struct block_device *bdev = bh->b_bdev;
507 struct blk_dax_ctl dax = {
508 .sector = to_sector(bh, inode),
509 .size = bh->b_size,
510 };
511 int error;
512
513 i_mmap_lock_read(mapping);
514
515 if (dax_map_atomic(bdev, &dax) < 0) {
516 error = PTR_ERR(dax.addr);
517 goto out;
518 }
519 dax_unmap_atomic(bdev, &dax);
520
521 error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
522 vmf->flags & FAULT_FLAG_WRITE);
523 if (error)
524 goto out;
525
526 error = vm_insert_mixed(vma, vaddr, dax.pfn);
527
528 out:
529 i_mmap_unlock_read(mapping);
530
531 return error;
532}
533
534/**
535 * __dax_fault - handle a page fault on a DAX file
536 * @vma: The virtual memory area where the fault occurred
537 * @vmf: The description of the fault
538 * @get_block: The filesystem method used to translate file offsets to blocks
539 *
540 * When a page fault occurs, filesystems may call this helper in their
541 * fault handler for DAX files. __dax_fault() assumes the caller has done all
542 * the necessary locking for the page fault to proceed successfully.
543 */
544int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
545 get_block_t get_block)
546{
547 struct file *file = vma->vm_file;
548 struct address_space *mapping = file->f_mapping;
549 struct inode *inode = mapping->host;
550 struct page *page;
551 struct buffer_head bh;
552 unsigned long vaddr = (unsigned long)vmf->virtual_address;
553 unsigned blkbits = inode->i_blkbits;
554 sector_t block;
555 pgoff_t size;
556 int error;
557 int major = 0;
558
559 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
560 if (vmf->pgoff >= size)
561 return VM_FAULT_SIGBUS;
562
563 memset(&bh, 0, sizeof(bh));
564 block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
565 bh.b_bdev = inode->i_sb->s_bdev;
566 bh.b_size = PAGE_SIZE;
567
568 repeat:
569 page = find_get_page(mapping, vmf->pgoff);
570 if (page) {
571 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
572 put_page(page);
573 return VM_FAULT_RETRY;
574 }
575 if (unlikely(page->mapping != mapping)) {
576 unlock_page(page);
577 put_page(page);
578 goto repeat;
579 }
580 }
581
582 error = get_block(inode, block, &bh, 0);
583 if (!error && (bh.b_size < PAGE_SIZE))
584 error = -EIO; /* fs corruption? */
585 if (error)
586 goto unlock_page;
587
588 if (!buffer_mapped(&bh) && !vmf->cow_page) {
589 if (vmf->flags & FAULT_FLAG_WRITE) {
590 error = get_block(inode, block, &bh, 1);
591 count_vm_event(PGMAJFAULT);
592 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
593 major = VM_FAULT_MAJOR;
594 if (!error && (bh.b_size < PAGE_SIZE))
595 error = -EIO;
596 if (error)
597 goto unlock_page;
598 } else {
599 return dax_load_hole(mapping, page, vmf);
600 }
601 }
602
603 if (vmf->cow_page) {
604 struct page *new_page = vmf->cow_page;
605 if (buffer_written(&bh))
606 error = copy_user_bh(new_page, inode, &bh, vaddr);
607 else
608 clear_user_highpage(new_page, vaddr);
609 if (error)
610 goto unlock_page;
611 vmf->page = page;
612 if (!page)
613 i_mmap_lock_read(mapping);
614 return VM_FAULT_LOCKED;
615 }
616
617 /* Check we didn't race with a read fault installing a new page */
618 if (!page && major)
619 page = find_lock_page(mapping, vmf->pgoff);
620
621 if (page) {
622 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
623 PAGE_SIZE, 0);
624 delete_from_page_cache(page);
625 unlock_page(page);
626 put_page(page);
627 page = NULL;
628 }
629
630 /* Filesystem should not return unwritten buffers to us! */
631 WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
632 error = dax_insert_mapping(inode, &bh, vma, vmf);
633
634 out:
635 if (error == -ENOMEM)
636 return VM_FAULT_OOM | major;
637 /* -EBUSY is fine, somebody else faulted on the same PTE */
638 if ((error < 0) && (error != -EBUSY))
639 return VM_FAULT_SIGBUS | major;
640 return VM_FAULT_NOPAGE | major;
641
642 unlock_page:
643 if (page) {
644 unlock_page(page);
645 put_page(page);
646 }
647 goto out;
648}
649EXPORT_SYMBOL(__dax_fault);
650
651/**
652 * dax_fault - handle a page fault on a DAX file
653 * @vma: The virtual memory area where the fault occurred
654 * @vmf: The description of the fault
655 * @get_block: The filesystem method used to translate file offsets to blocks
656 *
657 * When a page fault occurs, filesystems may call this helper in their
658 * fault handler for DAX files.
659 */
660int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
661 get_block_t get_block)
662{
663 int result;
664 struct super_block *sb = file_inode(vma->vm_file)->i_sb;
665
666 if (vmf->flags & FAULT_FLAG_WRITE) {
667 sb_start_pagefault(sb);
668 file_update_time(vma->vm_file);
669 }
670 result = __dax_fault(vma, vmf, get_block);
671 if (vmf->flags & FAULT_FLAG_WRITE)
672 sb_end_pagefault(sb);
673
674 return result;
675}
676EXPORT_SYMBOL_GPL(dax_fault);
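/*
 * Editor's illustrative sketch (not part of fs/dax.c): a minimal .fault
 * handler built on dax_fault().  "myfs_dax_fault" and "myfs_get_block"
 * are hypothetical; a fuller vm_operations_struct wiring is sketched
 * after dax_pfn_mkwrite() below.
 */
static int myfs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, myfs_get_block);
}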
677
678#ifdef CONFIG_TRANSPARENT_HUGEPAGE
679/*
680 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
681 * more often than one might expect in the below function.
682 */
683#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
684
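/*
 * Editor's worked example (assuming 4 KiB pages and 2 MiB PMDs, so
 * PMD_SIZE >> PAGE_SHIFT == 512): PG_PMD_COLOUR == 0x1ff.  A page offset
 * is PMD-aligned iff (pgoff & PG_PMD_COLOUR) == 0, and the later check
 * "(pgoff | PG_PMD_COLOUR) >= size" asks whether the last page of the
 * would-be huge mapping still lies within the file.
 */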
685static void __dax_dbg(struct buffer_head *bh, unsigned long address,
686 const char *reason, const char *fn)
687{
688 if (bh) {
689 char bname[BDEVNAME_SIZE];
690 bdevname(bh->b_bdev, bname);
691 pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
692 "length %zd fallback: %s\n", fn, current->comm,
693 address, bname, bh->b_state, (u64)bh->b_blocknr,
694 bh->b_size, reason);
695 } else {
696 pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
697 current->comm, address, reason);
698 }
699}
700
701#define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd")
702
703int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
704 pmd_t *pmd, unsigned int flags, get_block_t get_block)
705{
706 struct file *file = vma->vm_file;
707 struct address_space *mapping = file->f_mapping;
708 struct inode *inode = mapping->host;
709 struct buffer_head bh;
710 unsigned blkbits = inode->i_blkbits;
711 unsigned long pmd_addr = address & PMD_MASK;
712 bool write = flags & FAULT_FLAG_WRITE;
713 struct block_device *bdev;
714 pgoff_t size, pgoff;
715 sector_t block;
716 int error, result = 0;
717 bool alloc = false;
718
719 /* dax pmd mappings require pfn_t_devmap() */
720 if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
721 return VM_FAULT_FALLBACK;
722
723 /* Fall back to PTEs if we're going to COW */
724 if (write && !(vma->vm_flags & VM_SHARED)) {
725 split_huge_pmd(vma, pmd, address);
726 dax_pmd_dbg(NULL, address, "cow write");
727 return VM_FAULT_FALLBACK;
728 }
729 /* If the PMD would extend outside the VMA */
730 if (pmd_addr < vma->vm_start) {
731 dax_pmd_dbg(NULL, address, "vma start unaligned");
732 return VM_FAULT_FALLBACK;
733 }
734 if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
735 dax_pmd_dbg(NULL, address, "vma end unaligned");
736 return VM_FAULT_FALLBACK;
737 }
738
739 pgoff = linear_page_index(vma, pmd_addr);
740 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
741 if (pgoff >= size)
742 return VM_FAULT_SIGBUS;
743 /* If the PMD would cover blocks out of the file */
744 if ((pgoff | PG_PMD_COLOUR) >= size) {
745 dax_pmd_dbg(NULL, address,
746 "offset + huge page size > file size");
747 return VM_FAULT_FALLBACK;
748 }
749
750 memset(&bh, 0, sizeof(bh));
751 bh.b_bdev = inode->i_sb->s_bdev;
752 block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
753
754 bh.b_size = PMD_SIZE;
755
756 if (get_block(inode, block, &bh, 0) != 0)
757 return VM_FAULT_SIGBUS;
758
759 if (!buffer_mapped(&bh) && write) {
760 if (get_block(inode, block, &bh, 1) != 0)
761 return VM_FAULT_SIGBUS;
762 alloc = true;
763 WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
764 }
765
766 bdev = bh.b_bdev;
767
768 /*
769 * If the filesystem isn't willing to tell us the length of a hole,
770 * just fall back to PTEs. Calling get_block 512 times in a loop
771 * would be silly.
772 */
773 if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
774 dax_pmd_dbg(&bh, address, "allocated block too small");
775 return VM_FAULT_FALLBACK;
776 }
777
778 /*
779 * If we allocated new storage, make sure no process has any
780 * zero pages covering this hole
781 */
782 if (alloc) {
783 loff_t lstart = pgoff << PAGE_SHIFT;
784 loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
785
786 truncate_pagecache_range(inode, lstart, lend);
787 }
788
789 i_mmap_lock_read(mapping);
790
791 if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
792 spinlock_t *ptl;
793 pmd_t entry;
794 struct page *zero_page = get_huge_zero_page();
795
796 if (unlikely(!zero_page)) {
797 dax_pmd_dbg(&bh, address, "no zero page");
798 goto fallback;
799 }
800
801 ptl = pmd_lock(vma->vm_mm, pmd);
802 if (!pmd_none(*pmd)) {
803 spin_unlock(ptl);
804 dax_pmd_dbg(&bh, address, "pmd already present");
805 goto fallback;
806 }
807
808 dev_dbg(part_to_dev(bdev->bd_part),
809 "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
810 __func__, current->comm, address,
811 (unsigned long long) to_sector(&bh, inode));
812
813 entry = mk_pmd(zero_page, vma->vm_page_prot);
814 entry = pmd_mkhuge(entry);
815 set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
816 result = VM_FAULT_NOPAGE;
817 spin_unlock(ptl);
818 } else {
819 struct blk_dax_ctl dax = {
820 .sector = to_sector(&bh, inode),
821 .size = PMD_SIZE,
822 };
823 long length = dax_map_atomic(bdev, &dax);
824
825 if (length < 0) {
826 dax_pmd_dbg(&bh, address, "dax-error fallback");
827 goto fallback;
828 }
829 if (length < PMD_SIZE) {
830 dax_pmd_dbg(&bh, address, "dax-length too small");
831 dax_unmap_atomic(bdev, &dax);
832 goto fallback;
833 }
834 if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
835 dax_pmd_dbg(&bh, address, "pfn unaligned");
836 dax_unmap_atomic(bdev, &dax);
837 goto fallback;
838 }
839
840 if (!pfn_t_devmap(dax.pfn)) {
841 dax_unmap_atomic(bdev, &dax);
842 dax_pmd_dbg(&bh, address, "pfn not in memmap");
843 goto fallback;
844 }
845 dax_unmap_atomic(bdev, &dax);
846
847 /*
848 * For PTE faults we insert a radix tree entry for reads, and
849 * leave it clean. Then on the first write we dirty the radix
850 * tree entry via the dax_pfn_mkwrite() path. This sequence
851 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
852 * call into get_block() to translate the pgoff to a sector in
853 * order to be able to create a new radix tree entry.
854 *
855 * The PMD path doesn't have an equivalent to
856 * dax_pfn_mkwrite(), though, so for a read followed by a
857 * write we traverse all the way through __dax_pmd_fault()
858 * twice. This means we can just skip inserting a radix tree
859 * entry completely on the initial read and just wait until
860 * the write to insert a dirty entry.
861 */
862 if (write) {
863 error = dax_radix_entry(mapping, pgoff, dax.sector,
864 true, true);
865 if (error) {
866 dax_pmd_dbg(&bh, address,
867 "PMD radix insertion failed");
868 goto fallback;
869 }
870 }
871
872 dev_dbg(part_to_dev(bdev->bd_part),
873 "%s: %s addr: %lx pfn: %lx sect: %llx\n",
874 __func__, current->comm, address,
875 pfn_t_to_pfn(dax.pfn),
876 (unsigned long long) dax.sector);
877 result |= vmf_insert_pfn_pmd(vma, address, pmd,
878 dax.pfn, write);
879 }
880
881 out:
882 i_mmap_unlock_read(mapping);
883
884 return result;
885
886 fallback:
887 count_vm_event(THP_FAULT_FALLBACK);
888 result = VM_FAULT_FALLBACK;
889 goto out;
890}
891EXPORT_SYMBOL_GPL(__dax_pmd_fault);
892
893/**
894 * dax_pmd_fault - handle a PMD fault on a DAX file
895 * @vma: The virtual memory area where the fault occurred
896 * @address: The virtual address where the fault occurred
897 * @get_block: The filesystem method used to translate file offsets to blocks
898 *
899 * When a page fault occurs, filesystems may call this helper in their
900 * pmd_fault handler for DAX files.
901 */
902int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
903 pmd_t *pmd, unsigned int flags, get_block_t get_block)
904{
905 int result;
906 struct super_block *sb = file_inode(vma->vm_file)->i_sb;
907
908 if (flags & FAULT_FLAG_WRITE) {
909 sb_start_pagefault(sb);
910 file_update_time(vma->vm_file);
911 }
912 result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
913 if (flags & FAULT_FLAG_WRITE)
914 sb_end_pagefault(sb);
915
916 return result;
917}
918EXPORT_SYMBOL_GPL(dax_pmd_fault);
919#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
920
921/**
922 * dax_pfn_mkwrite - handle first write to DAX page
923 * @vma: The virtual memory area where the fault occurred
924 * @vmf: The description of the fault
925 */
926int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
927{
928 struct file *file = vma->vm_file;
929 int error;
930
931 /*
932 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
933 * RADIX_DAX_PTE entry already exists in the radix tree from a
934 * previous call to __dax_fault(). We just want to look up that PTE
935 * entry using vmf->pgoff and make sure the dirty tag is set. This
936 * saves us from having to make a call to get_block() here to look
937 * up the sector.
938 */
939 error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
940 true);
941
942 if (error == -ENOMEM)
943 return VM_FAULT_OOM;
944 if (error)
945 return VM_FAULT_SIGBUS;
946 return VM_FAULT_NOPAGE;
947}
948EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
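/*
 * Editor's illustrative sketch (not part of fs/dax.c): wiring the DAX
 * fault handlers into a filesystem's vm_operations_struct and ->mmap.
 * Every "myfs_*" name is hypothetical; the wrappers only add the
 * filesystem's get_block callback (dax_pfn_mkwrite() needs none, so it
 * is referenced directly in this sketch).
 */
static int myfs_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
			      pmd_t *pmd, unsigned int flags)
{
	return dax_pmd_fault(vma, addr, pmd, flags, myfs_get_block);
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,	/* sketched after dax_fault() */
	.pmd_fault	= myfs_dax_pmd_fault,
	.page_mkwrite	= myfs_dax_fault,	/* write faults take the same path */
	.pfn_mkwrite	= dax_pfn_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_mmap(file, vma);

	file_accessed(file);
	vma->vm_ops = &myfs_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}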
949
950/**
951 * dax_zero_page_range - zero a range within a page of a DAX file
952 * @inode: The file being truncated
953 * @from: The file offset that is being truncated to
954 * @length: The number of bytes to zero
955 * @get_block: The filesystem method used to translate file offsets to blocks
956 *
957 * This function can be called by a filesystem when it is zeroing part of a
958 * page in a DAX file. This is intended for hole-punch operations. If
959 * you are truncating a file, the helper function dax_truncate_page() may be
960 * more convenient.
961 *
962 * We work in terms of PAGE_SIZE here for commonality with
963 * block_truncate_page(), but we could go down to the filesystem block size
964 * if the filesystem took care of disposing of the unnecessary blocks. Even
965 * if the filesystem block size is smaller than PAGE_SIZE, we have to zero
966 * the rest of the page since the file might be mmapped.
967 */
968int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
969 get_block_t get_block)
970{
971 struct buffer_head bh;
972 pgoff_t index = from >> PAGE_SHIFT;
973 unsigned offset = from & (PAGE_SIZE-1);
974 int err;
975
976 /* Block boundary? Nothing to do */
977 if (!length)
978 return 0;
979 BUG_ON((offset + length) > PAGE_SIZE);
980
981 memset(&bh, 0, sizeof(bh));
982 bh.b_bdev = inode->i_sb->s_bdev;
983 bh.b_size = PAGE_SIZE;
984 err = get_block(inode, index, &bh, 0);
985 if (err < 0)
986 return err;
987 if (buffer_written(&bh)) {
988 struct block_device *bdev = bh.b_bdev;
989 struct blk_dax_ctl dax = {
990 .sector = to_sector(&bh, inode),
991 .size = PAGE_SIZE,
992 };
993
994 if (dax_map_atomic(bdev, &dax) < 0)
995 return PTR_ERR(dax.addr);
996 clear_pmem(dax.addr + offset, length);
997 wmb_pmem();
998 dax_unmap_atomic(bdev, &dax);
999 }
1000
1001 return 0;
1002}
1003EXPORT_SYMBOL_GPL(dax_zero_page_range);
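/*
 * Editor's illustrative sketch (not part of fs/dax.c): zeroing the
 * partial pages at either edge of a hole punch with
 * dax_zero_page_range(); whole pages in the middle are handled by the
 * filesystem freeing their blocks.  The "myfs_*" names are hypothetical
 * and the edge arithmetic is the editor's own.
 */
static int myfs_dax_zero_punch_edges(struct inode *inode, loff_t start,
				     loff_t end /* exclusive */)
{
	unsigned partial_start = start & (PAGE_SIZE - 1);
	unsigned partial_end = end & (PAGE_SIZE - 1);
	int err = 0;

	if (partial_start)
		err = dax_zero_page_range(inode, start,
				min_t(loff_t, end - start,
				      PAGE_SIZE - partial_start),
				myfs_get_block);
	if (!err && partial_end && end > round_up(start, PAGE_SIZE))
		err = dax_zero_page_range(inode, end - partial_end,
				partial_end, myfs_get_block);
	return err;
}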
1004
1005/**
1006 * dax_truncate_page - handle a partial page being truncated in a DAX file
1007 * @inode: The file being truncated
1008 * @from: The file offset that is being truncated to
1009 * @get_block: The filesystem method used to translate file offsets to blocks
1010 *
1011 * Similar to block_truncate_page(), this function can be called by a
1012 * filesystem when it is truncating a DAX file to handle the partial page.
1013 *
1014 * We work in terms of PAGE_SIZE here for commonality with
1015 * block_truncate_page(), but we could go down to the filesystem block size
1016 * if the filesystem took care of disposing of the unnecessary blocks. Even
1017 * if the filesystem block size is smaller than PAGE_SIZE, we have to zero
1018 * the rest of the page since the file might be mmapped.
1019 */
1020int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
1021{
1022 unsigned length = PAGE_ALIGN(from) - from;
1023 return dax_zero_page_range(inode, from, length, get_block);
1024}
1025EXPORT_SYMBOL_GPL(dax_truncate_page);
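/*
 * Editor's illustrative sketch (not part of fs/dax.c): using
 * dax_truncate_page() from a filesystem's truncate path so the tail of
 * the last page is zeroed before i_size is reduced.  "myfs_setsize" and
 * "myfs_get_block" are hypothetical.
 */
static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	int err;

	if (IS_DAX(inode))
		err = dax_truncate_page(inode, newsize, myfs_get_block);
	else
		err = block_truncate_page(inode->i_mapping, newsize,
					  myfs_get_block);
	if (err)
		return err;

	truncate_setsize(inode, newsize);
	return 0;
}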