count_memcg_event_mm(fault_mm, PGMAJFAULT);
}
- /* Skip swapcache for synchronous device. */
if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
+ /* Direct swapin skipping swap cache & readahead */
folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
- if (!IS_ERR(folio)) {
- skip_swapcache = true;
- goto alloced;
+ if (IS_ERR(folio)) {
+ error = PTR_ERR(folio);
+ folio = NULL;
+ goto failed;
}
-
+ skip_swapcache = true;
+ } else {
/*
- * Direct swapin handled order 0 fallback already,
- * if it failed, abort.
+ * Cached swapin only supports order 0 folio, it is
+ * necessary to recalculate the new swap entry based on
+ * the offset, as the swapin index might be unaligned.
*/
- error = PTR_ERR(folio);
- folio = NULL;
- goto failed;
- }
-
- /*
- * Now swap device can only swap in order 0 folio, it is
- * necessary to recalculate the new swap entry based on
- * the offset, as the swapin index might be unalgined.
- */
- if (order) {
- offset = index - round_down(index, 1 << order);
- swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
- }
+ if (order) {
+ offset = index - round_down(index, 1 << order);
+ swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ }
- folio = shmem_swapin_cluster(swap, gfp, info, index);
- if (!folio) {
- error = -ENOMEM;
- goto failed;
+ folio = shmem_swapin_cluster(swap, gfp, info, index);
+ if (!folio) {
+ error = -ENOMEM;
+ goto failed;
+ }
}
}
-alloced:
if (order > folio_order(folio)) {
/*
* Swapin may get smaller folios due to various reasons: