return REPARSE_LINK;
}
-static struct page *ntfs_lock_new_page(struct address_space *mapping,
- pgoff_t index, gfp_t gfp)
+static struct folio *ntfs_lock_new_page(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
{
- struct folio *folio = __filemap_get_folio(
- mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
- struct page *page;
+ struct folio *folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
if (IS_ERR(folio))
- return ERR_CAST(folio);
+ return folio;
- if (!folio_test_uptodate(folio))
- return folio_file_page(folio, index);
+ if (!folio_test_uptodate(folio))
+ return folio;
/* Use a temporary page to avoid data corruption */
folio_unlock(folio);
folio_put(folio);
- page = alloc_page(gfp);
- if (!page)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
return ERR_PTR(-ENOMEM);
- __SetPageLocked(page);
- return page;
+ __folio_set_locked(folio);
+ return folio;
}
/*
u32 i, idx, frame_size, pages_per_frame;
gfp_t gfp_mask;
struct page *pg;
+ struct folio *f;
if (vbo >= i_size_read(&ni->vfs_inode)) {
folio_zero_range(folio, 0, folio_size(folio));
if (i == idx)
continue;
- pg = ntfs_lock_new_page(mapping, index, gfp_mask);
- if (IS_ERR(pg)) {
- err = PTR_ERR(pg);
+ f = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(f)) {
+ err = PTR_ERR(f);
goto out1;
}
- pages[i] = pg;
+ pages[i] = folio_page(f, 0);
}
ni_lock(ni);
}
for (i = 0; i < pages_per_frame; i++, index++) {
- struct page *pg;
+ struct folio *f;
- pg = ntfs_lock_new_page(mapping, index, gfp_mask);
- if (IS_ERR(pg)) {
+ f = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(f)) {
while (i--) {
unlock_page(pages[i]);
put_page(pages[i]);
}
- err = PTR_ERR(pg);
+ err = PTR_ERR(f);
goto out;
}
- pages[i] = pg;
+ pages[i] = folio_page(f, 0);
}
err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);