struct address_space *mapping = inode->i_mapping;
struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
struct shmem_inode_info *info = SHMEM_I(inode);
+ swp_entry_t swap, index_entry;
struct swap_info_struct *si;
struct folio *folio = NULL;
bool skip_swapcache = false;
- swp_entry_t swap;
int error, nr_pages, order, split_order;
+ pgoff_t offset;
VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
- swap = radix_to_swp_entry(*foliop);
+ index_entry = radix_to_swp_entry(*foliop);
+ swap = index_entry;
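+ /*
+ * index_entry keeps the entry exactly as stored in the mapping,
+ * while swap starts as a copy and may be adjusted below to match
+ * the entry actually used for this swapin.
+ */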
*foliop = NULL;
if (is_poisoned_swp_entry(swap))
return -EIO;
/*
- * Now swap device can only swap in order 0 folio, then we
- * should split the large swap entry stored in the pagecache
- * if necessary.
- */
- split_order = shmem_split_large_entry(inode, index, swap, gfp);
- if (split_order < 0) {
- error = split_order;
- goto failed;
- }
-
- /*
- * If the large swap entry has already been split, it is
+ * The swap device can only swap in order 0 folios here, so it is
* necessary to recalculate the new swap entry based on
- * the old order alignment.
+ * the offset, as the swapin index might be unaligned.
*/
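+ /*
+ * For example (hypothetical numbers): with an order 2 entry
+ * covering 4 slots, a swapin at index 19 gives offset
+ * 19 - round_down(19, 4) = 3, so the order 0 entry at
+ * swp_offset(swap) + 3 is used.
+ */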
- if (split_order > 0) {
- pgoff_t offset = index - round_down(index, 1 << split_order);
-
+ if (order) {
+ offset = index - round_down(index, 1 << order);
swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
}
- /* Here we actually start the io */
folio = shmem_swapin_cluster(swap, gfp, info, index);
if (!folio) {
error = -ENOMEM;
goto failed;
}
- } else if (order > folio_order(folio)) {
+ }
+alloced:
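+ /*
+ * The swapin paths converge here with a referenced folio, which
+ * may be smaller or larger than the entry stored in the mapping.
+ */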
+ if (order > folio_order(folio)) {
/*
- * Swap readahead may swap in order 0 folios into swapcache
+ * Swapin may get smaller folios for various reasons:
+ * it may fall back to order 0 due to memory pressure or a race,
+ * and swap readahead may swap in order 0 folios into swapcache
* asynchronously, while the shmem mapping can still store
* large swap entries. In such cases, we should split the
* large swap entry to prevent possible data corruption.
*/
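+ /*
+ * Note the split is done on index_entry, the entry as stored
+ * in the mapping, since the local swap value may already have
+ * been adjusted to an entry inside it.
+ */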
- split_order = shmem_split_large_entry(inode, index, swap, gfp);
+ split_order = shmem_split_large_entry(inode, index, index_entry, gfp);
if (split_order < 0) {
- folio_put(folio);
- folio = NULL;
error = split_order;
- goto failed;
+ goto failed_nolock;
}
/*
* If the large swap entry has already been split, it is
* necessary to recalculate the new swap entry based on
* the old order alignment.
*/
if (split_order > 0) {
- pgoff_t offset = index - round_down(index, 1 << split_order);
-
- swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ offset = index - round_down(index, 1 << split_order);
+ swap = swp_entry(swp_type(swap), swp_offset(index_entry) + offset);
}
} else if (order < folio_order(folio)) {
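+ /*
+ * The folio (e.g. one found in the swap cache) is larger than
+ * the stored entry and spans multiple entries, so align both
+ * swap and index to the folio's boundary before using it.
+ */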
swap.val = round_down(swap.val, 1 << folio_order(folio));
index = round_down(index, 1 << folio_order(folio));
}
-alloced:
/*
* We have to do this with the folio locked to prevent races.
* The shmem_confirm_swap below only checks if the first swap
shmem_set_folio_swapin_error(inode, index, folio, swap,
skip_swapcache);
unlock:
- if (skip_swapcache)
- swapcache_clear(si, swap, folio_nr_pages(folio));
- if (folio) {
+ if (folio)
folio_unlock(folio);
+failed_nolock:
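+ /*
+ * Paths that jump here skip the folio unlock above. Use
+ * folio->swap here, as the local swap value may have been
+ * adjusted above and may no longer match the folio.
+ */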
+ if (skip_swapcache)
+ swapcache_clear(si, folio->swap, folio_nr_pages(folio));
+ if (folio)
folio_put(folio);
- }
put_swap_device(si);
return error;