* invalidation, so there might not be
* a shadow in the swapcache (yet).
*/
- shadow = get_shadow_from_swap_cache(swp);
+ shadow = swap_cache_get_shadow(swp);
if (!shadow)
goto resched;
}
struct folio *folio = page_folio(p);
int ret;
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
folio_unlock(folio);
memcg1_swapin(entry, nr_pages);
- shadow = get_shadow_from_swap_cache(entry);
+ shadow = swap_cache_get_shadow(entry);
if (shadow)
workingset_refault(folio, shadow);
}
/*
- * The delete_from_swap_cache() below could be left for
+ * The swap_cache_del_folio() below could be left for
* shrink_folio_list()'s folio_free_swap() to dispose of;
* but I'm a little nervous about letting this folio out of
* shmem_writeout() in a hybrid half-tmpfs-half-swap state
* e.g. folio_mapping(folio) might give an unexpected answer.
*/
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
goto redirty;
}
if (nr_pages > 1)
new->swap = entry;
memcg1_swapin(entry, nr_pages);
- shadow = get_shadow_from_swap_cache(entry);
+ shadow = swap_cache_get_shadow(entry);
if (shadow)
workingset_refault(new, shadow);
folio_add_lru(new);
nr_pages = folio_nr_pages(folio);
folio_wait_writeback(folio);
if (!skip_swapcache)
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
/*
* Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
* won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
folio->swap.val = 0;
swapcache_clear(si, swap, nr_pages);
} else {
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
}
folio_mark_dirty(folio);
swap_free_nr(swap, nr_pages);
return folio_entry.val == round_down(entry.val, nr_pages);
}
+/*
+ * All swap cache helpers below require the caller to ensure the swap entries
+ * used are valid and stabilize the device in one of the following ways:
+ * - Hold a reference from get_swap_device(): this ensures a single entry is
+ *   valid and increases the swap device's refcount.
+ * - Lock a folio in the swap cache: this ensures the folio's swap entries
+ *   are valid and pinned, and also implies a reference to the device.
+ * - Lock anything referencing the swap entry: e.g. the PTL that protects
+ *   swap entries in the page table, similar to locking a swap cache folio.
+ * - See the comment of get_swap_device() for more complex usage.
+ */
+struct folio *swap_cache_get_folio(swp_entry_t entry);
+void *swap_cache_get_shadow(swp_entry_t entry);
+int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+ gfp_t gfp, void **shadow);
+void swap_cache_del_folio(struct folio *folio);
+void __swap_cache_del_folio(struct folio *folio,
+ swp_entry_t entry, void *shadow);
+void swap_cache_clear_shadow(int type, unsigned long begin,
+ unsigned long end);
+
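
As a quick illustration of the first rule, a lookup that pins the device could look like the sketch below (folio_from_entry() is a hypothetical helper, not part of this patch; only get_swap_device(), put_swap_device() and swap_cache_get_folio() are real):

struct folio *folio_from_entry(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct folio *folio;

	si = get_swap_device(entry);	/* validates @entry, pins the device */
	if (!si)
		return NULL;		/* stale or bad entry */
	folio = swap_cache_get_folio(entry);
	put_swap_device(si);
	return folio;			/* referenced but unlocked */
}
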
void show_swap_cache_info(void);
-void *get_shadow_from_swap_cache(swp_entry_t entry);
-int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
- gfp_t gfp, void **shadowp);
-void __delete_from_swap_cache(struct folio *folio,
- swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct folio *folio);
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end);
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
-struct folio *swap_cache_get_folio(swp_entry_t entry);
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug);
return NULL;
}
-static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+static inline void *swap_cache_get_shadow(swp_entry_t entry)
{
return NULL;
}
-static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
- gfp_t gfp_mask, void **shadowp)
-{
- return -1;
-}
-
-static inline void __delete_from_swap_cache(struct folio *folio,
- swp_entry_t entry, void *shadow)
+static inline int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+				       gfp_t gfp, void **shadow)
{
+ return -EINVAL;
}
-static inline void delete_from_swap_cache(struct folio *folio)
+static inline void swap_cache_del_folio(struct folio *folio)
{
}
-static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end)
+static inline void __swap_cache_del_folio(struct folio *folio,
+					   swp_entry_t entry, void *shadow)
{
}
* Context: Caller must ensure @entry is valid and protect the swap device
* with reference count or locks.
* Return: Returns the found folio on success, NULL otherwise. The caller
- * must lock and check if the folio still matches the swap entry before
- * use (e.g. with folio_matches_swap_entry).
+ * must lock and check if the folio still matches the swap entry before
+ * use (e.g., folio_matches_swap_entry).
*/
struct folio *swap_cache_get_folio(swp_entry_t entry)
{
return folio;
}
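
In practice, the re-check described above looks roughly like this (a sketch; the retry policy is up to the caller):

folio = swap_cache_get_folio(entry);
if (folio) {
	folio_lock(folio);
	/* The cache may have changed while we slept on the lock. */
	if (!folio_matches_swap_entry(folio, entry)) {
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;	/* caller can retry the lookup */
	}
}
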
-void *get_shadow_from_swap_cache(swp_entry_t entry)
+/**
+ * swap_cache_get_shadow - Looks up a shadow in the swap cache.
+ * @entry: swap entry used for the lookup.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * Return: Returns either NULL or an XA_VALUE (shadow).
+ */
+void *swap_cache_get_shadow(swp_entry_t entry)
{
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swap_cache_index(entry);
return NULL;
}
-/*
- * add_to_swap_cache resembles filemap_add_folio on swapper_space,
- * but sets SwapCache flag and 'swap' instead of mapping and index.
+/**
+ * swap_cache_add_folio - Adds a folio into the swap cache.
+ * @folio: The folio to be added.
+ * @entry: The swap entry corresponding to the folio.
+ * @gfp: gfp_mask for XArray node allocation.
+ * @shadowp: If a shadow is found, return the shadow.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap device
+ * with reference count or locks.
+ * The caller also needs to mark the corresponding swap_map slots with
+ * SWAP_HAS_CACHE to avoid races or conflicts.
+ * Return: Returns 0 on success, error code otherwise.
*/
-int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
- gfp_t gfp, void **shadowp)
+int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
+ gfp_t gfp, void **shadowp)
{
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swap_cache_index(entry);
return xas_error(&xas);
}
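
The SWAP_HAS_CACHE requirement pairs this function with swapcache_prepare() and swapcache_clear(); a minimal sketch of that protocol (the retry label, si and shadow are assumed from the caller's context):

int nr = folio_nr_pages(folio);

/* Claim the slots; fails if another task already holds SWAP_HAS_CACHE. */
if (swapcache_prepare(entry, nr))
	goto retry;
if (swap_cache_add_folio(folio, entry, GFP_KERNEL, &shadow)) {
	/* e.g. -ENOMEM from XArray node allocation: undo the claim. */
	swapcache_clear(si, entry, nr);
	goto retry;
}
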
-/*
- * This must be called only on folios that have
- * been verified to be in the swap cache.
+/**
+ * __swap_cache_del_folio - Removes a folio from the swap cache.
+ * @folio: The folio.
+ * @entry: The first swap entry that the folio corresponds to.
+ * @shadow: shadow value to be filled in the swap cache.
+ *
+ * Removes a folio from the swap cache and fills a shadow in place.
+ * This won't drop the folio's refcount; the caller has to do that.
+ *
+ * Context: Caller must hold the xa_lock, and must ensure the folio is
+ * locked and in the swap cache at the index of @entry.
*/
-void __delete_from_swap_cache(struct folio *folio,
- swp_entry_t entry, void *shadow)
+void __swap_cache_del_folio(struct folio *folio,
+ swp_entry_t entry, void *shadow)
{
struct address_space *address_space = swap_address_space(entry);
int i;
__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
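
Condensed from the reclaim hunk near the end of this patch, the documented contract reads as follows (shadow, swap and target_memcg come from the surrounding reclaim code):

xa_lock_irq(&mapping->i_pages);
/* Leave a workingset shadow behind in place of the folio. */
shadow = workingset_eviction(folio, target_memcg);
__swap_cache_del_folio(folio, swap, shadow);
xa_unlock_irq(&mapping->i_pages);
/* Release the swap cache's entry reference outside the lock. */
put_swap_folio(folio, swap);
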
-/*
- * This must be called only on folios that have
- * been verified to be in the swap cache and locked.
- * It will never put the folio into the free list,
- * the caller has a reference on the folio.
+/**
+ * swap_cache_del_folio - Removes a folio from the swap cache.
+ * @folio: The folio.
+ *
+ * Same as __swap_cache_del_folio(), but it takes the xa_lock and drops
+ * the folio's refcount itself. The caller must ensure the folio is
+ * either clean or has a swap count of zero, or data may be lost.
+ *
+ * Context: Caller must ensure the folio is locked and in the swap cache.
*/
-void delete_from_swap_cache(struct folio *folio)
+void swap_cache_del_folio(struct folio *folio)
{
swp_entry_t entry = folio->swap;
struct address_space *address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
- __delete_from_swap_cache(folio, entry, NULL);
+ __swap_cache_del_folio(folio, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
put_swap_folio(folio, entry);
folio_ref_sub(folio, folio_nr_pages(folio));
}
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end)
+/**
+ * swap_cache_clear_shadow - Clears a set of shadows in the swap cache.
+ * @type: Indicates the swap device.
+ * @begin: Beginning offset of the range.
+ * @end: Ending offset of the range.
+ *
+ * Context: Caller must ensure the range is valid and hold a reference to
+ * the swap device.
+ */
+void swap_cache_clear_shadow(int type, unsigned long begin,
+ unsigned long end)
{
unsigned long curr = begin;
void *old;
goto put_and_return;
/*
- * We might race against __delete_from_swap_cache(), and
+ * We might race against __swap_cache_del_folio(), and
* stumble across a swap_map entry whose SWAP_HAS_CACHE
* has not yet been cleared. Or race against another
* __read_swap_cache_async(), which has set SWAP_HAS_CACHE
goto fail_unlock;
/* May fail (-ENOMEM) if XArray node allocation failed. */
- if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+ if (swap_cache_add_folio(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
memcg1_swapin(entry, 1);
if (!need_reclaim)
goto out_unlock;
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
folio_set_dirty(folio);
ret = nr_pages;
out_unlock:
swap_slot_free_notify(si->bdev, offset);
offset++;
}
- clear_shadow_from_swap_cache(si->type, begin, end);
+ swap_cache_clear_shadow(si->type, begin, end);
/*
* Make sure that try_to_unuse() observes si->inuse_pages reaching 0
* TODO: this could cause a theoretical memory reclaim
* deadlock in the swap out path.
*/
- if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
+ if (swap_cache_add_folio(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
goto out_free;
return 0;
if (folio_swapped(folio))
return false;
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
folio_set_dirty(folio);
return true;
}
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
- __delete_from_swap_cache(folio, swap, shadow);
+ __swap_cache_del_folio(folio, swap, shadow);
memcg1_swapout(folio, swap);
xa_unlock_irq(&mapping->i_pages);
put_swap_folio(folio, swap);
out:
if (ret && ret != -EEXIST) {
- delete_from_swap_cache(folio);
+ swap_cache_del_folio(folio);
folio_unlock(folio);
}
folio_put(folio);