From e4b3448bc346fedf36db64124a664a959995b085 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <willy@infradead.org>
Date: Fri, 1 Mar 2019 11:12:41 -0800
Subject: dax: Flush partial PMDs correctly

From: Matthew Wilcox <willy@infradead.org>

commit e4b3448bc346fedf36db64124a664a959995b085 upstream.

The radix tree would rewind the index in an iterator to the lowest index
of a multi-slot entry. The XArray iterators instead leave the index
unchanged, but I overlooked that when converting DAX from the radix tree
to the XArray. Adjust the index that we use for flushing to the start
of the PMD range.

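The arithmetic the fix relies on is visible in the diff below: an
order-N entry spans 1UL << N pages, so clearing the low N bits of the
iterator index yields the entry's first page index. A minimal userspace
sketch of that mask arithmetic (the order and index values are made up
for illustration; order 9 assumes x86-64 with 4KiB pages, where a PMD
covers 2MiB):

#include <stdio.h>

int main(void)
{
	unsigned long order = 9;	/* PMD entry: 2MiB / 4KiB = 512 pages */
	unsigned long xa_index = 0x205;	/* iterator index somewhere mid-PMD */

	unsigned long count = 1UL << order;		/* pages covered by the entry */
	unsigned long index = xa_index & ~(count - 1);	/* aligned start: 0x200 */

	/* Before the fix the unaligned 0x205 was passed to
	 * dax_entry_mkclean(); the fix passes the aligned 0x200 and
	 * flushes count pages from there. */
	printf("count=%lu, start index=%#lx\n", count, index);
	return 0;
}

Masking with ~(count - 1) is valid because count is always a power of
two.
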
Fixes: 9fc747f68d49 ("dax: Convert dax writeback to XArray")
Cc: <stable@vger.kernel.org>
Reported-by: Piotr Balcer <piotr.balcer@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/dax.c |   19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

--- a/fs/dax.c
+++ b/fs/dax.c
@@ -843,9 +843,8 @@ unlock_pte:
 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 		struct address_space *mapping, void *entry)
 {
-	unsigned long pfn;
+	unsigned long pfn, index, count;
 	long ret = 0;
-	size_t size;
 
 	/*
 	 * A page got tagged dirty in DAX mapping? Something is seriously
@@ -894,17 +893,18 @@ static int dax_writeback_one(struct xa_s
 	xas_unlock_irq(xas);
 
 	/*
-	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
-	 * in the middle of a PMD, the 'index' we are given will be aligned to
-	 * the start index of the PMD, as will the pfn we pull from 'entry'.
+	 * If dax_writeback_mapping_range() was given a wbc->range_start
+	 * in the middle of a PMD, the 'index' we use needs to be
+	 * aligned to the start of the PMD.
 	 * This allows us to flush for PMD_SIZE and not have to worry about
 	 * partial PMD writebacks.
 	 */
 	pfn = dax_to_pfn(entry);
-	size = PAGE_SIZE << dax_entry_order(entry);
+	count = 1UL << dax_entry_order(entry);
+	index = xas->xa_index & ~(count - 1);
 
-	dax_entry_mkclean(mapping, xas->xa_index, pfn);
-	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
+	dax_entry_mkclean(mapping, index, pfn);
+	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
 	 * cannot be new dirty data in the pfn after the flush has completed as
@@ -917,8 +917,7 @@ static int dax_writeback_one(struct xa_s
 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
 	dax_wake_entry(xas, entry, false);
 
-	trace_dax_writeback_one(mapping->host, xas->xa_index,
-			size >> PAGE_SHIFT);
+	trace_dax_writeback_one(mapping->host, index, count);
 	return ret;
 
 put_unlocked: