}
}
-static int
-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
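+/* Caller must hold the page group lock. */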
+static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
- int ret;
-
- if (!test_bit(PG_REMOVE, &req->wb_flags))
- return 0;
- ret = nfs_page_group_lock(req);
- if (ret)
- return ret;
if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
nfs_page_set_inode_ref(req, inode);
- nfs_page_group_unlock(req);
- return 0;
}
@@ ... @@ static struct nfs_page *nfs_folio_private_request(struct folio *folio)
return req;
}
-static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio)
-{
- struct inode *inode = folio_file_mapping(folio)->host;
- struct nfs_page *req, *head;
- int ret;
-
- for (;;) {
- req = nfs_folio_find_head_request(folio);
- if (!req)
- return req;
- head = nfs_page_group_lock_head(req);
- if (head != req)
- nfs_release_request(req);
- if (IS_ERR(head))
- return head;
- ret = nfs_cancel_remove_inode(head, inode);
- if (ret < 0) {
- nfs_unlock_and_release_request(head);
- return ERR_PTR(ret);
- }
- /* Ensure that nobody removed the request before we locked it */
- if (head == nfs_folio_private_request(folio))
- break;
- if (folio_test_swapcache(folio))
- break;
- nfs_unlock_and_release_request(head);
- }
- return head;
-}
-
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct folio *folio, unsigned int offset,
unsigned int count)
@@ ... @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
	struct nfs_commit_info cinfo;
int ret;
- nfs_init_cinfo_from_inode(&cinfo, inode);
/*
* A reference is taken only on the head request which acts as a
* reference to the whole page group - the group will not be destroyed
* until the head reference is released.
*/
- head = nfs_folio_find_and_lock_request(folio);
- if (IS_ERR_OR_NULL(head))
- return head;
+retry:
+ head = nfs_folio_find_head_request(folio);
+ if (!head)
+ return NULL;
+
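+	/* Wait for any other holder of the request lock, then take it ourselves. */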
+ while (!nfs_lock_request(head)) {
+ ret = nfs_wait_on_request(head);
+ if (ret < 0) {
+ nfs_release_request(head);
+ return ERR_PTR(ret);
+ }
+ }
ret = nfs_page_group_lock(head);
if (ret < 0)
goto out_unlock;
+ /* Ensure that nobody removed the request before we locked it */
+ if (head != folio->private && !folio_test_swapcache(folio)) {
+ nfs_page_group_unlock(head);
+ nfs_unlock_and_release_request(head);
+ goto retry;
+ }
+
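+	/* The folio is ours: clear PG_REMOVE and re-take the inode reference. */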
+ nfs_cancel_remove_inode(head, inode);
+
/* lock each request in the page group */
for (subreq = head->wb_this_page;
subreq != head;
@@ ... @@ static void nfs_inode_remove_request(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
- if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
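+	/*
+	 * Hold the group lock across the PG_REMOVE sync so that it cannot
+	 * race with nfs_cancel_remove_inode() clearing the bit.
+	 */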
+ nfs_page_group_lock(req);
+ if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
		struct address_space *mapping = folio_file_mapping(folio);

		spin_lock(&mapping->private_lock);
		if (likely(folio)) {
			folio->private = NULL;
			folio_clear_private(folio);
			clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
		}
spin_unlock(&mapping->private_lock);
}
+ nfs_page_group_unlock(req);
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
atomic_long_dec(&nfsi->nrequests);