// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
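
/*
 * Worked example of the packing above (a sketch, assuming a configuration
 * with 4KiB pages, i.e. PAGE_SHIFT == 12 and so SWAP_RA_WIN_SHIFT == 6):
 *
 *	SWAP_RA_HITS_MASK == 0x03f	-> hits live in bits 0-5
 *	SWAP_RA_WIN_MASK  == 0xfc0	-> window lives in bits 6-11
 *	bits 12 and up			-> page-aligned fault address
 *
 * So SWAP_RA_VAL(0x7f1234567000, 8, 3) packs the address, an 8-page
 * readahead window and 3 readahead hits into the single value
 * 0x7f1234567203, which fits in vma->swap_readahead_info, and
 * SWAP_RA_ADDR()/SWAP_RA_WIN()/SWAP_RA_HITS() recover the three fields.
 */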
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	void *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}
/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}
/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 *
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is a MADV_FREE folio. The
	 * folio's pte could have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not done under a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}
/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
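
/*
 * A worked example of the stepping above (a sketch, assuming the usual
 * SWAP_ADDRESS_SPACE_SHIFT of 14, i.e. 16384 slots per swap address space,
 * or 64MB of swap with 4KiB pages): with curr == 20000, shifting right
 * gives 1, incrementing gives 2, and shifting left again gives 32768 -
 * the first slot of the next address space. Each loop iteration therefore
 * clears shadow entries from exactly one of the per-device address spaces
 * between begin and end.
 */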
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	lru_add_drain();
	for (int i = 0; i < nr; i++)
		free_swap_cache(encoded_page_ptr(pages[i]));
	release_pages(pages, nr);
}
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}
/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() on failure.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		if (!IS_ERR(folio)) {
			page = folio_file_page(folio, swp_offset(entry));
			goto got_page;
		}

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	page = &folio->page;
got_page:
	put_swap_device(si);
	return page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, false, plug);

	return retpage;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
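
/*
 * A worked example of the heuristic above (a sketch, not taken from the
 * source): with hits == 5 the estimate is 5 + 2 == 7 pages, rounded up to
 * the next power of two, 8, and then capped at max_pages. With hits == 0
 * the window collapses to a single page unless the faulting offset is
 * adjacent to prev_offset. And if the previous window was 8, last_ra == 4
 * keeps the new window from dropping below 4 in a single step.
 */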
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
}
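
/*
 * Cluster alignment, worked through (a sketch, not taken from the source):
 * if swapin_nr_pages() returns 8 for a fault at swap offset 1029, then
 * mask == 7, start_offset == 1029 & ~7 == 1024 and end_offset == 1029 | 7
 * == 1031, so the loop queues reads for the eight physically contiguous
 * slots 1024-1031 in one plugged batch, marking every slot except 1029
 * itself as readahead.
 */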
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
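
/*
 * Sizing example (a sketch, assuming SWAP_ADDRESS_SPACE_PAGES == 1 << 14,
 * i.e. 16384 slots, or 64MB of swap with 4KiB pages): a 4GiB swap device
 * has nr_pages == 1048576, so DIV_ROUND_UP() allocates 64 address_space
 * structures for it. Spreading one swap device across several address
 * spaces keeps the per-space XArray lock from becoming a single point of
 * contention for the whole device.
 */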
void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}
#define SWAP_RA_ORDER_CEILING	5

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
};
static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
	unsigned long start, end;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return;

	if (fpfn == pfn + 1) {
		lpfn = fpfn;
		rpfn = fpfn + win;
	} else if (pfn == fpfn + 1) {
		lpfn = fpfn - win + 1;
		rpfn = fpfn + 1;
	} else {
		unsigned int left = (win - 1) / 2;

		lpfn = fpfn - left;
		rpfn = fpfn + win - left;
	}
	start = max3(lpfn, PFN_DOWN(vma->vm_start),
		     PFN_DOWN(faddr & PMD_MASK));
	end = min3(rpfn, PFN_DOWN(vma->vm_end),
		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
}
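
/*
 * Window placement, worked through (a sketch, not taken from the source):
 * for a fault at fpfn == 1000 with win == 8 and no strict forward or
 * backward pattern, left == (8 - 1) / 2 == 3, so the candidate window
 * covers pfns 997 to 1004. That range is then clipped to the vma
 * boundaries and to the PMD containing the fault address, and
 * ra_info->offset records where the faulting page sits inside the final
 * window.
 */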
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte = NULL, pentry;
	unsigned long addr;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	addr = vmf->address - (ra_info.offset * PAGE_SIZE);

	blk_start_plug(&plug);
	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     NULL);
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. by physical
 * disk offset) or vma-based (i.e. by virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}

static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}

static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
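
/*
 * Usage note (a sketch, assuming the default sysfs mount point): because
 * the "swap" kobject is created under mm_kobj, the attribute defined above
 * is exposed as /sys/kernel/mm/swap/vma_ra_enabled, so VMA-based swap
 * readahead can be toggled at runtime with e.g.
 *
 *	echo false > /sys/kernel/mm/swap/vma_ra_enabled
 *
 * which clears enable_vma_readahead and makes swapin_readahead() fall back
 * to cluster-based readahead.
 */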