From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: nfs: teach the NFS client how to treat PG_swapcache pages
Patch-mainline: No
References: FATE#303834

Replace all relevant occurrences of page->index and page->mapping in the NFS
client with the new page_file_index() and page_file_mapping() functions.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Neil Brown <neilb@suse.de>
Acked-by: Suresh Jayaraman <sjayaraman@suse.de>

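Note: the page_file_mapping(), page_file_index() and page_file_offset()
helpers used throughout this patch come from the mm side of the
swap-over-NFS series, not from this patch. As a rough sketch only of the
behaviour relied on here (page_swap_info() is an assumed helper name, and
the exact definitions live in the mm patches of the series), they boil
down to:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Sketch, not part of this patch: for a PG_swapcache page, page->mapping
 * points at swapper_space and page->index is not a file index, so resolve
 * the swap file's address_space and the offset stored in page_private()
 * instead.
 */
static inline struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_swap_info(page)->swap_file->f_mapping; /* assumed helper */
	return page->mapping;
}

static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t swap = { .val = page_private(page) };
		return swp_offset(swap);	/* page offset within the swap file */
	}
	return page->index;
}

static inline loff_t page_file_offset(struct page *page)
{
	return (loff_t)page_file_index(page) << PAGE_CACHE_SHIFT;
}

For a regular pagecache page these collapse to the old page->mapping and
page->index accesses, which is why the conversion in this patch is a plain
substitution.
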
---
 fs/nfs/file.c | 6 +++---
 fs/nfs/internal.h | 7 ++++---
 fs/nfs/pagelist.c | 6 +++---
 fs/nfs/read.c | 6 +++---
 fs/nfs/write.c | 53 +++++++++++++++++++++++++++--------------------------
 5 files changed, 40 insertions(+), 38 deletions(-)

--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -413,7 +413,7 @@ static void nfs_invalidate_page(struct p
 	if (offset != 0)
 		return;
 	/* Cancel any unstarted writes on this page */
-	nfs_wb_page_cancel(page->mapping->host, page);
+	nfs_wb_page_cancel(page_file_mapping(page)->host, page);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
@@ -426,7 +426,7 @@ static int nfs_release_page(struct page
 
 static int nfs_launder_page(struct page *page)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 
 	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
 		inode->i_ino, (long long)page_offset(page));
@@ -463,7 +463,7 @@ static int nfs_vm_page_mkwrite(struct vm
 		(long long)page_offset(page));
 
 	lock_page(page);
-	mapping = page->mapping;
+	mapping = page_file_mapping(page);
 	if (mapping != dentry->d_inode->i_mapping)
 		goto out_unlock;
 
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -253,13 +253,14 @@ void nfs_super_set_maxbytes(struct super
 static inline
 unsigned int nfs_page_length(struct page *page)
 {
-	loff_t i_size = i_size_read(page->mapping->host);
+	loff_t i_size = i_size_read(page_file_mapping(page)->host);
 
 	if (i_size > 0) {
+		pgoff_t page_index = page_file_index(page);
 		pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
-		if (page->index < end_index)
+		if (page_index < end_index)
 			return PAGE_CACHE_SIZE;
-		if (page->index == end_index)
+		if (page_index == end_index)
 			return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
 	}
 	return 0;
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -76,11 +76,11 @@ nfs_create_request(struct nfs_open_conte
 	 * update_nfs_request below if the region is not locked. */
 	req->wb_page = page;
 	atomic_set(&req->wb_complete, 0);
-	req->wb_index = page->index;
+	req->wb_index = page_file_index(page);
 	page_cache_get(page);
 	BUG_ON(PagePrivate(page));
 	BUG_ON(!PageLocked(page));
-	BUG_ON(page->mapping->host != inode);
+	BUG_ON(page_file_mapping(page)->host != inode);
 	req->wb_offset = offset;
 	req->wb_pgbase = offset;
 	req->wb_bytes = count;
@@ -376,7 +376,7 @@ void nfs_pageio_cond_complete(struct nfs
  * nfs_scan_list - Scan a list for matching requests
  * @nfsi: NFS inode
  * @dst: Destination list
- * @idx_start: lower bound of page->index to scan
+ * @idx_start: lower bound of page_file_index(page) to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -475,11 +475,11 @@ static const struct rpc_call_ops nfs_rea
 int nfs_readpage(struct file *file, struct page *page)
 {
 	struct nfs_open_context *ctx;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	int error;
 
 	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
-		page, PAGE_CACHE_SIZE, page->index);
+		page, PAGE_CACHE_SIZE, page_file_index(page));
 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
 	nfs_add_stats(inode, NFSIOS_READPAGES, 1);
 
@@ -526,7 +526,7 @@ static int
 readpage_async_filler(void *data, struct page *page)
 {
 	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *new;
 	unsigned int len;
 	int error;
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -118,7 +118,7 @@ static struct nfs_page *nfs_page_find_re
 
 static struct nfs_page *nfs_page_find_request(struct page *page)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *req = NULL;
 
 	spin_lock(&inode->i_lock);
@@ -130,16 +130,16 @@ static struct nfs_page *nfs_page_find_re
 /* Adjust the file length if we're writing beyond the end */
 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	loff_t end, i_size;
 	pgoff_t end_index;
 
 	spin_lock(&inode->i_lock);
 	i_size = i_size_read(inode);
 	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
-	if (i_size > 0 && page->index < end_index)
+	if (i_size > 0 && page_file_index(page) < end_index)
 		goto out;
-	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
+	end = page_file_offset(page) + ((loff_t)offset+count);
 	if (i_size >= end)
 		goto out;
 	i_size_write(inode, end);
@@ -152,7 +152,7 @@ out:
 static void nfs_set_pageerror(struct page *page)
 {
 	SetPageError(page);
-	nfs_zap_mapping(page->mapping->host, page->mapping);
+	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
 }
 
 /* We can set the PG_uptodate flag if we see that a write request
@@ -193,7 +193,7 @@ static int nfs_set_page_writeback(struct
 	int ret = test_set_page_writeback(page);
 
 	if (!ret) {
-		struct inode *inode = page->mapping->host;
+		struct inode *inode = page_file_mapping(page)->host;
 		struct nfs_server *nfss = NFS_SERVER(inode);
 
 		if (atomic_long_inc_return(&nfss->writeback) >
@@ -205,7 +205,7 @@ static int nfs_set_page_writeback(struct
 
 static void nfs_end_page_writeback(struct page *page)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_server *nfss = NFS_SERVER(inode);
 
 	end_page_writeback(page);
@@ -220,7 +220,7 @@ static void nfs_end_page_writeback(struc
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 				struct page *page)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *req;
 	int ret;
 
@@ -263,12 +263,12 @@ static int nfs_page_async_flush(struct n
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-	nfs_pageio_cond_complete(pgio, page->index);
+	nfs_pageio_cond_complete(pgio, page_file_index(page));
 	return nfs_page_async_flush(pgio, page);
 }
 
@@ -280,7 +280,7 @@ static int nfs_writepage_locked(struct p
 	struct nfs_pageio_descriptor pgio;
 	int err;
 
-	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+	nfs_pageio_init_write(&pgio, page_file_mapping(page)->host, wb_priority(wbc));
 	err = nfs_do_writepage(page, wbc, &pgio);
 	nfs_pageio_complete(&pgio);
 	if (err < 0)
@@ -409,7 +409,8 @@ nfs_mark_request_commit(struct nfs_page
 			NFS_PAGE_TAG_COMMIT);
 	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+	inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+			BDI_RECLAIMABLE);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 
@@ -420,7 +421,7 @@ nfs_clear_request_commit(struct nfs_page
 
 	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
 		dec_zone_page_state(page, NR_UNSTABLE_NFS);
-		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+		dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
 		return 1;
 	}
 	return 0;
@@ -526,7 +527,7 @@ static void nfs_cancel_commit_list(struc
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
- * @idx_start: lower bound of page->index to scan.
+ * @idx_start: lower bound of page_file_index(page) to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
@@ -637,7 +638,7 @@ out_err:
 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
 		struct page *page, unsigned int offset, unsigned int bytes)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *req;
 	int error;
 
@@ -692,7 +693,7 @@ int nfs_flush_incompatible(struct file *
 		nfs_release_request(req);
 		if (!do_flush)
 			return 0;
-		status = nfs_wb_page(page->mapping->host, page);
+		status = nfs_wb_page(page_file_mapping(page)->host, page);
 	} while (status == 0);
 	return status;
 }
@@ -718,7 +719,7 @@ int nfs_updatepage(struct file *file, st
 		unsigned int offset, unsigned int count)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	int status = 0;
 
 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -726,7 +727,7 @@ int nfs_updatepage(struct file *file, st
 	dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
 		file->f_path.dentry->d_parent->d_name.name,
 		file->f_path.dentry->d_name.name, count,
-		(long long)(page_offset(page) + offset));
+		(long long)(page_file_offset(page) + offset));
 
 	/* If we're not using byte range locks, and we know the page
 	 * is up to date, it may be more efficient to extend the write
@@ -1001,7 +1002,7 @@ static void nfs_writeback_release_partia
 	}
 
 	if (nfs_write_need_commit(data)) {
-		struct inode *inode = page->mapping->host;
+		struct inode *inode = page_file_mapping(page)->host;
 
 		spin_lock(&inode->i_lock);
 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
@@ -1262,7 +1263,7 @@ nfs_commit_list(struct inode *inode, str
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+		dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
 				BDI_RECLAIMABLE);
 		nfs_clear_page_tag_locked(req);
 	}
@@ -1453,10 +1454,10 @@ int nfs_wb_nocommit(struct inode *inode)
 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 {
 	struct nfs_page *req;
-	loff_t range_start = page_offset(page);
+	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
+		.bdi = page_file_mapping(page)->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = LONG_MAX,
 		.range_start = range_start,
@@ -1489,7 +1490,7 @@ int nfs_wb_page_cancel(struct inode *ino
 	}
 	if (!PagePrivate(page))
 		return 0;
-	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
+	ret = nfs_sync_mapping_wait(page_file_mapping(page), &wbc, FLUSH_INVALIDATE);
 out:
 	return ret;
 }
@@ -1497,10 +1498,10 @@ out:
 static int nfs_wb_page_priority(struct inode *inode, struct page *page,
 				int how)
 {
-	loff_t range_start = page_offset(page);
+	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
+		.bdi = page_file_mapping(page)->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = LONG_MAX,
 		.range_start = range_start,
@@ -1515,7 +1516,7 @@ static int nfs_wb_page_priority(struct i
 				goto out_error;
 		} else if (!PagePrivate(page))
 			break;
-		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
+		ret = nfs_sync_mapping_wait(page_file_mapping(page), &wbc, how);
 		if (ret < 0)
 			goto out_error;
 	} while (PagePrivate(page));