 			ret = -EBUSY;
 			goto out;
 		}
-		end = -1;
 		mapping = NULL;
 		anon_vma_lock_write(anon_vma);
 	} else {
 		 */
 		for (new_folio = folio_next(folio); new_folio != end_folio;
 		     new_folio = next) {
+			unsigned long nr_pages = folio_nr_pages(new_folio);
+
 			next = folio_next(new_folio);
 			expected_refs = folio_expected_ref_count(new_folio) + 1;
 			folio_ref_unfreeze(new_folio, expected_refs);
 			lru_add_split_folio(folio, new_folio, lruvec, list);
-			/* Some pages can be beyond EOF: drop them from cache */
-			if (new_folio->index >= end) {
-				if (shmem_mapping(mapping))
-					nr_shmem_dropped += folio_nr_pages(new_folio);
-				else if (folio_test_clear_dirty(new_folio))
-					folio_account_cleaned(new_folio,
-							inode_to_wb(mapping->host));
-				__filemap_remove_folio(new_folio, NULL);
-				folio_put_refs(new_folio,
-						folio_nr_pages(new_folio));
-			} else if (mapping) {
-				__xa_store(&mapping->i_pages, new_folio->index,
-					   new_folio, 0);
-			} else if (swap_cache) {
+			/*
+			 * Anonymous folio with swap cache.
+			 * NOTE: shmem in swap cache is not supported yet.
+			 */
+			if (swap_cache) {
 				__xa_store(&swap_cache->i_pages,
 					   swap_cache_index(new_folio->swap),
 					   new_folio, 0);
+				continue;
+			}
+
+			/* Anonymous folio without swap cache */
+			if (!mapping)
+				continue;
+
+			/* Add the new folio to the page cache. */
+			if (new_folio->index < end) {
+				__xa_store(&mapping->i_pages, new_folio->index,
+					   new_folio, 0);
+				continue;
 			}
+
+			/* Drop folio beyond EOF: ->index >= end */
+			if (shmem_mapping(mapping))
+				nr_shmem_dropped += nr_pages;
+			else if (folio_test_clear_dirty(new_folio))
+				folio_account_cleaned(new_folio,
+						inode_to_wb(mapping->host));
+			__filemap_remove_folio(new_folio, NULL);
+			folio_put_refs(new_folio, nr_pages);
 		}
 		/*
 		 * Unfreeze @folio only after all page cache entries, which