]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blobdiff - src/patches/suse-2.6.27.31/patches.suse/SoN-28-nfs-swapcache.patch
Move xen patchset to new version's subdir.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.suse / SoN-28-nfs-swapcache.patch
diff --git a/src/patches/suse-2.6.27.31/patches.suse/SoN-28-nfs-swapcache.patch b/src/patches/suse-2.6.27.31/patches.suse/SoN-28-nfs-swapcache.patch
new file mode 100644 (file)
index 0000000..20375c3
--- /dev/null
@@ -0,0 +1,336 @@
+From: Peter Zijlstra <a.p.zijlstra@chello.nl> 
+Subject: nfs: teach the NFS client how to treat PG_swapcache pages
+Patch-mainline: No
+References: FATE#303834
+
+Replace all relevant occurrences of page->index and page->mapping in the NFS
+client with the new page_file_index() and page_file_mapping() functions.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Acked-by: Neil Brown <neilb@suse.de>
+Acked-by: Suresh Jayaraman <sjayaraman@suse.de>
+
+---
+ fs/nfs/file.c     |    6 +++---
+ fs/nfs/internal.h |    7 ++++---
+ fs/nfs/pagelist.c |    6 +++---
+ fs/nfs/read.c     |    6 +++---
+ fs/nfs/write.c    |   53 +++++++++++++++++++++++++++--------------------------
+ 5 files changed, 40 insertions(+), 38 deletions(-)
+
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -413,7 +413,7 @@ static void nfs_invalidate_page(struct p
+       if (offset != 0)
+               return;
+       /* Cancel any unstarted writes on this page */
+-      nfs_wb_page_cancel(page->mapping->host, page);
++      nfs_wb_page_cancel(page_file_mapping(page)->host, page);
+ }
+ static int nfs_release_page(struct page *page, gfp_t gfp)
+@@ -426,7 +426,7 @@ static int nfs_release_page(struct page
+ static int nfs_launder_page(struct page *page)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
+               inode->i_ino, (long long)page_offset(page));
+@@ -463,7 +463,7 @@ static int nfs_vm_page_mkwrite(struct vm
+               (long long)page_offset(page));
+       lock_page(page);
+-      mapping = page->mapping;
++      mapping = page_file_mapping(page);
+       if (mapping != dentry->d_inode->i_mapping)
+               goto out_unlock;
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -253,13 +253,14 @@ void nfs_super_set_maxbytes(struct super
+ static inline
+ unsigned int nfs_page_length(struct page *page)
+ {
+-      loff_t i_size = i_size_read(page->mapping->host);
++      loff_t i_size = i_size_read(page_file_mapping(page)->host);
+       if (i_size > 0) {
++              pgoff_t page_index = page_file_index(page);
+               pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+-              if (page->index < end_index)
++              if (page_index < end_index)
+                       return PAGE_CACHE_SIZE;
+-              if (page->index == end_index)
++              if (page_index == end_index)
+                       return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+       }
+       return 0;
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -76,11 +76,11 @@ nfs_create_request(struct nfs_open_conte
+        * update_nfs_request below if the region is not locked. */
+       req->wb_page    = page;
+       atomic_set(&req->wb_complete, 0);
+-      req->wb_index   = page->index;
++      req->wb_index   = page_file_index(page);
+       page_cache_get(page);
+       BUG_ON(PagePrivate(page));
+       BUG_ON(!PageLocked(page));
+-      BUG_ON(page->mapping->host != inode);
++      BUG_ON(page_file_mapping(page)->host != inode);
+       req->wb_offset  = offset;
+       req->wb_pgbase  = offset;
+       req->wb_bytes   = count;
+@@ -376,7 +376,7 @@ void nfs_pageio_cond_complete(struct nfs
+  * nfs_scan_list - Scan a list for matching requests
+  * @nfsi: NFS inode
+  * @dst: Destination list
+- * @idx_start: lower bound of page->index to scan
++ * @idx_start: lower bound of page_file_index(page) to scan
+  * @npages: idx_start + npages sets the upper bound to scan.
+  * @tag: tag to scan for
+  *
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -477,11 +477,11 @@ static const struct rpc_call_ops nfs_rea
+ int nfs_readpage(struct file *file, struct page *page)
+ {
+       struct nfs_open_context *ctx;
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       int             error;
+       dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
+-              page, PAGE_CACHE_SIZE, page->index);
++              page, PAGE_CACHE_SIZE, page_file_index(page));
+       nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+       nfs_add_stats(inode, NFSIOS_READPAGES, 1);
+@@ -528,7 +528,7 @@ static int
+ readpage_async_filler(void *data, struct page *page)
+ {
+       struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       struct nfs_page *new;
+       unsigned int len;
+       int error;
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -120,7 +120,7 @@ static struct nfs_page *nfs_page_find_re
+ static struct nfs_page *nfs_page_find_request(struct page *page)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       struct nfs_page *req = NULL;
+       spin_lock(&inode->i_lock);
+@@ -132,16 +132,16 @@ static struct nfs_page *nfs_page_find_re
+ /* Adjust the file length if we're writing beyond the end */
+ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       loff_t end, i_size;
+       pgoff_t end_index;
+       spin_lock(&inode->i_lock);
+       i_size = i_size_read(inode);
+       end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+-      if (i_size > 0 && page->index < end_index)
++      if (i_size > 0 && page_file_index(page) < end_index)
+               goto out;
+-      end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
++      end = page_file_offset(page) + ((loff_t)offset+count);
+       if (i_size >= end)
+               goto out;
+       i_size_write(inode, end);
+@@ -154,7 +154,7 @@ out:
+ static void nfs_set_pageerror(struct page *page)
+ {
+       SetPageError(page);
+-      nfs_zap_mapping(page->mapping->host, page->mapping);
++      nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
+ }
+ /* We can set the PG_uptodate flag if we see that a write request
+@@ -195,7 +195,7 @@ static int nfs_set_page_writeback(struct
+       int ret = test_set_page_writeback(page);
+       if (!ret) {
+-              struct inode *inode = page->mapping->host;
++              struct inode *inode = page_file_mapping(page)->host;
+               struct nfs_server *nfss = NFS_SERVER(inode);
+               if (atomic_long_inc_return(&nfss->writeback) >
+@@ -207,7 +207,7 @@ static int nfs_set_page_writeback(struct
+ static void nfs_end_page_writeback(struct page *page)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       struct nfs_server *nfss = NFS_SERVER(inode);
+       end_page_writeback(page);
+@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struc
+ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+                               struct page *page)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       struct nfs_page *req;
+       int ret;
+@@ -265,12 +265,12 @@ static int nfs_page_async_flush(struct n
+ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+       nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
+-      nfs_pageio_cond_complete(pgio, page->index);
++      nfs_pageio_cond_complete(pgio, page_file_index(page));
+       return nfs_page_async_flush(pgio, page);
+ }
+@@ -282,7 +282,7 @@ static int nfs_writepage_locked(struct p
+       struct nfs_pageio_descriptor pgio;
+       int err;
+-      nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
++      nfs_pageio_init_write(&pgio, page_file_mapping(page)->host, wb_priority(wbc));
+       err = nfs_do_writepage(page, wbc, &pgio);
+       nfs_pageio_complete(&pgio);
+       if (err < 0)
+@@ -411,7 +411,8 @@ nfs_mark_request_commit(struct nfs_page
+                       NFS_PAGE_TAG_COMMIT);
+       spin_unlock(&inode->i_lock);
+       inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+-      inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
++      inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
++                      BDI_RECLAIMABLE);
+       __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ }
+@@ -422,7 +423,7 @@ nfs_clear_request_commit(struct nfs_page
+       if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
+               dec_zone_page_state(page, NR_UNSTABLE_NFS);
+-              dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
++              dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+               return 1;
+       }
+       return 0;
+@@ -528,7 +529,7 @@ static void nfs_cancel_commit_list(struc
+  * nfs_scan_commit - Scan an inode for commit requests
+  * @inode: NFS inode to scan
+  * @dst: destination list
+- * @idx_start: lower bound of page->index to scan.
++ * @idx_start: lower bound of page_file_index(page) to scan.
+  * @npages: idx_start + npages sets the upper bound to scan.
+  *
+  * Moves requests from the inode's 'commit' request list.
+@@ -639,7 +640,7 @@ out_err:
+ static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
+               struct page *page, unsigned int offset, unsigned int bytes)
+ {
+-      struct inode *inode = page->mapping->host;
++      struct inode *inode = page_file_mapping(page)->host;
+       struct nfs_page *req;
+       int error;
+@@ -694,7 +695,7 @@ int nfs_flush_incompatible(struct file *
+               nfs_release_request(req);
+               if (!do_flush)
+                       return 0;
+-              status = nfs_wb_page(page->mapping->host, page);
++              status = nfs_wb_page(page_file_mapping(page)->host, page);
+       } while (status == 0);
+       return status;
+ }
+@@ -720,7 +721,7 @@ int nfs_updatepage(struct file *file, st
+               unsigned int offset, unsigned int count)
+ {
+       struct nfs_open_context *ctx = nfs_file_open_context(file);
+-      struct inode    *inode = page->mapping->host;
++      struct inode    *inode = page_file_mapping(page)->host;
+       int             status = 0;
+       nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+@@ -728,7 +729,7 @@ int nfs_updatepage(struct file *file, st
+       dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
+               file->f_path.dentry->d_parent->d_name.name,
+               file->f_path.dentry->d_name.name, count,
+-              (long long)(page_offset(page) + offset));
++              (long long)(page_file_offset(page) + offset));
+       /* If we're not using byte range locks, and we know the page
+        * is up to date, it may be more efficient to extend the write
+@@ -1003,7 +1004,7 @@ static void nfs_writeback_release_partia
+       }
+       if (nfs_write_need_commit(data)) {
+-              struct inode *inode = page->mapping->host;
++              struct inode *inode = page_file_mapping(page)->host;
+               spin_lock(&inode->i_lock);
+               if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+@@ -1264,7 +1265,7 @@ nfs_commit_list(struct inode *inode, str
+               nfs_list_remove_request(req);
+               nfs_mark_request_commit(req);
+               dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+-              dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
++              dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+                               BDI_RECLAIMABLE);
+               nfs_clear_page_tag_locked(req);
+       }
+@@ -1455,10 +1456,10 @@ int nfs_wb_nocommit(struct inode *inode)
+ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+ {
+       struct nfs_page *req;
+-      loff_t range_start = page_offset(page);
++      loff_t range_start = page_file_offset(page);
+       loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+       struct writeback_control wbc = {
+-              .bdi = page->mapping->backing_dev_info,
++              .bdi = page_file_mapping(page)->backing_dev_info,
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = LONG_MAX,
+               .range_start = range_start,
+@@ -1491,7 +1492,7 @@ int nfs_wb_page_cancel(struct inode *ino
+       }
+       if (!PagePrivate(page))
+               return 0;
+-      ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
++      ret = nfs_sync_mapping_wait(page_file_mapping(page), &wbc, FLUSH_INVALIDATE);
+ out:
+       return ret;
+ }
+@@ -1499,10 +1500,10 @@ out:
+ static int nfs_wb_page_priority(struct inode *inode, struct page *page,
+                               int how)
+ {
+-      loff_t range_start = page_offset(page);
++      loff_t range_start = page_file_offset(page);
+       loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+       struct writeback_control wbc = {
+-              .bdi = page->mapping->backing_dev_info,
++              .bdi = page_file_mapping(page)->backing_dev_info,
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = LONG_MAX,
+               .range_start = range_start,
+@@ -1517,7 +1518,7 @@ static int nfs_wb_page_priority(struct i
+                               goto out_error;
+               } else if (!PagePrivate(page))
+                       break;
+-              ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
++              ret = nfs_sync_mapping_wait(page_file_mapping(page), &wbc, how);
+               if (ret < 0)
+                       goto out_error;
+       } while (PagePrivate(page));