if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
current_is_kswapd() || current_is_kcompactd())
return false;
- if (nfs_wb_folio(folio->mapping->host, folio) < 0)
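+ /* Fail if the flush was unsuccessful, or if the folio still
+  * carries an outstanding commit (private data is still set).
+  */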
+ if (nfs_wb_folio_reclaim(folio->mapping->host, folio) < 0 ||
+ folio_test_private(folio))
return false;
}
return nfs_fscache_release_folio(folio, gfp);
DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done);
+DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio_reclaim);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_reclaim_done);
+
DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio);
DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
return ret;
}
+/**
+ * nfs_wb_folio_reclaim - Attempt to write back all requests on one folio
+ * @inode: pointer to inode
+ * @folio: pointer to folio
+ *
+ * Assumes that the folio has been locked by the caller. Unlike
+ * nfs_wb_folio(), this variant does not wait for an ongoing writeback
+ * or commit to complete, and so may safely be called during memory
+ * reclaim.
+ */
+int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio)
+{
+ loff_t range_start = folio_pos(folio);
+ size_t len = folio_size(folio);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0,
+ .range_start = range_start,
+ .range_end = range_start + len - 1,
+ .for_sync = 1,
+ };
+ int ret;
+
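+ /* Never wait on writeback in a reclaim context */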
+ if (folio_test_writeback(folio))
+ return -EBUSY;
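+ /* Write the folio out now if it is dirty */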
+ if (folio_clear_dirty_for_io(folio)) {
+ trace_nfs_writeback_folio_reclaim(inode, range_start, len);
+ ret = nfs_writepage_locked(folio, &wbc);
+ trace_nfs_writeback_folio_reclaim_done(inode, range_start, len,
+ ret);
+ return ret;
+ }
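+ /* Folio was clean: kick off a commit of any unstable writes,
+  * but do not wait for it to complete.
+  */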
+ nfs_commit_inode(inode, 0);
+ return 0;
+}
+
/**
 * nfs_wb_folio - Write back all requests on one folio
 * @inode: pointer to inode
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+extern int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio);
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);