* @folio: folio to be added.
* @gfp: memory allocation flags for charge, can be 0 if @charged is true.
* @charged: if the folio is already charged.
- * @skip_if_exists: if the slot is in a cached state, return NULL.
- * This is an old workaround that will be removed shortly.
*
* Update the swap_map and add the folio to the swap cache, typically before swapin.
* All swap slots covered by the folio must have a non-zero swap count.
*/
static struct folio *__swap_cache_prepare_and_add(swp_entry_t entry,
struct folio *folio,
- gfp_t gfp, bool charged,
- bool skip_if_exists)
+ gfp_t gfp, bool charged)
{
struct folio *swapcache = NULL;
void *shadow;
* might return a folio that is irrelevant to the faulting
* entry because @entry is aligned down. Just return NULL.
*/
- if (ret != -EEXIST || skip_if_exists || folio_test_large(folio))
+ if (ret != -EEXIST || folio_test_large(folio))
goto failed;
swapcache = swap_cache_get_folio(entry);
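As a caller-side reference, here is a minimal sketch of the contract after the parameter removal (illustrative only, not part of the patch; the return convention is taken from the hunks in this series): the function returns @folio itself when the add succeeds, a different already-cached folio on conflict, or NULL on failure.

	struct folio *ret;

	ret = __swap_cache_prepare_and_add(entry, folio, gfp, false);
	if (ret == folio) {
		/* @folio is now in the swap cache; safe to start I/O. */
	} else if (ret) {
		/* The slot was already cached; use @ret, drop @folio. */
	} else {
		/* Preparation failed, e.g. the swap count dropped to zero. */
	}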
* @mpol: NUMA memory allocation policy to be applied
* @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
* @new_page_allocated: set to true if allocation happened, false otherwise
- * @skip_if_exists: if the slot is a partially cached state, return NULL.
- * This is a workaround that would be removed shortly.
*
* Allocate a folio in the swap cache for one swap slot, typically before
* doing IO (e.g. swap in or zswap writeback). The swap slot indicated by
*/
struct folio *swap_cache_alloc_folio(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx,
- bool *new_page_allocated,
- bool skip_if_exists)
+ bool *new_page_allocated)
{
struct swap_info_struct *si = __swap_entry_to_info(entry);
struct folio *folio;
if (!folio)
return NULL;
/* Try to add the new folio; returns the existing folio or NULL on failure. */
- result = __swap_cache_prepare_and_add(entry, folio, gfp_mask,
- false, skip_if_exists);
+ result = __swap_cache_prepare_and_add(entry, folio, gfp_mask, false);
if (result == folio)
*new_page_allocated = true;
else
unsigned long nr_pages = folio_nr_pages(folio);
entry = swp_entry(swp_type(entry), round_down(offset, nr_pages));
- swapcache = __swap_cache_prepare_and_add(entry, folio, 0, true, false);
+ swapcache = __swap_cache_prepare_and_add(entry, folio, 0, true);
if (swapcache == folio)
swap_read_folio(folio, NULL);
return swapcache;
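A worked example of the alignment above, with hypothetical numbers: an order-2 folio covers four slots, so a fault on offset 0x1032 must be aligned down to the first slot the folio will cover.

	unsigned long nr_pages = 4;	/* order-2 folio */
	pgoff_t offset = 0x1032;

	/* round_down(0x1032, 4) == 0x1030, the base of the covered range. */
	entry = swp_entry(swp_type(entry), round_down(offset, nr_pages));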
mpol = get_vma_policy(vma, addr, 0, &ilx);
folio = swap_cache_alloc_folio(entry, gfp_mask, mpol, ilx,
- &page_allocated, false);
+ &page_allocated);
mpol_cond_put(mpol);
if (page_allocated)
/* Ok, do the async read-ahead now */
folio = swap_cache_alloc_folio(
swp_entry(swp_type(entry), offset), gfp_mask, mpol, ilx,
- &page_allocated, false);
+ &page_allocated);
if (!folio)
continue;
if (page_allocated) {
skip:
/* The page was likely read above, so no need for plugging here */
folio = swap_cache_alloc_folio(entry, gfp_mask, mpol, ilx,
- &page_allocated, false);
+ &page_allocated);
if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
return folio;
continue;
}
folio = swap_cache_alloc_folio(entry, gfp_mask, mpol, ilx,
- &page_allocated, false);
+ &page_allocated);
if (si)
put_swap_device(si);
if (!folio)
skip:
/* The folio was likely read above, so no need for plugging here */
folio = swap_cache_alloc_folio(targ_entry, gfp_mask, mpol, targ_ilx,
- &page_allocated, false);
+ &page_allocated);
if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
return folio;
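Taken together, the post-patch call shape looks as follows (a sketch assembled from the hunks above, not a new API): callers simply drop the trailing boolean and keep the allocation out-parameter.

	struct mempolicy *mpol;
	bool page_allocated;
	pgoff_t ilx;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = swap_cache_alloc_folio(entry, gfp_mask, mpol, ilx,
				       &page_allocated);
	mpol_cond_put(mpol);
	if (folio && page_allocated)
		swap_read_folio(folio, NULL);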