// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
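
/*
 * For illustration (assuming 4KB pages, PAGE_SHIFT == 12): the per-vma
 * readahead state is a single word laid out as
 *
 *	| fault address (bits 12+) | win (bits 6-11) | hits (bits 0-5) |
 *
 * so e.g. SWAP_RA_VAL(0x7f0000, 8, 3) == 0x7f0000 | (8 << 6) | 3.
 */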

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}
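
/*
 * The "shadow" returned above is an XArray value entry that reclaim left
 * in place of the folio; it encodes workingset eviction information and
 * is consumed by workingset_refault() when the entry is swapped back in.
 */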

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio_set_swap_entry(folio, entry);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}
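
/*
 * A sketch of the xas_nomem() retry pattern used above: if a store fails
 * for lack of memory, the loop drops the lock, xas_nomem() preallocates
 * a node with @gfp and returns true, and the whole store sequence is
 * retried. Once no allocation is pending it returns false, and the
 * final xas_error() reports -ENOMEM to the caller.
 */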

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	entry.val = 0;
	folio_set_swap_entry(folio, entry);
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE page. The
	 * page's pte could have its dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not done under a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list;
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
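
/*
 * Worked example for the loop above, assuming SWAP_ADDRESS_SPACE_SHIFT
 * from <linux/swap.h> is 14 (one address_space per 1 << 14 swap slots):
 * the shift/increment/shift sequence rounds curr up to the first slot
 * of the next address_space chunk, e.g. curr == 0x4321 becomes 0x8000.
 */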

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them. They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	lru_add_drain();
	for (int i = 0; i < nr; i++)
		free_swap_cache(encoded_page_ptr(pages[i]));
	release_pages(pages, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
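
/*
 * VMA readahead is disabled whenever any rotational swap device is in
 * use (nr_rotate_swap != 0): on spinning disks the physically
 * contiguous cluster readahead avoids seeks, which matters more than
 * virtual-address locality.
 */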

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock keeping page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() otherwise.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}
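
/*
 * Illustrative caller: mincore() uses filemap_get_incore_folio() to tell
 * whether a shmem page is resident either in the page cache or, after
 * having been swapped out, in the swap cache.
 */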

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache. Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		if (!IS_ERR(folio)) {
			page = folio_file_page(folio, swp_offset(entry));
			goto got_page;
		}

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry into the swap cache and marking the swap
		 * slot as SWAP_HAS_CACHE. That's handled later in this
		 * code, or else swapoff will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new page to read into from swap. Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared. Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	page = &folio->page;
got_page:
	put_swap_device(si);
	return page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}
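
/*
 * Summary of the claim protocol above: swapcache_prepare() atomically
 * sets the SWAP_HAS_CACHE flag for the slot in swap_map. Exactly one
 * caller succeeds and goes on to add its folio to the swap cache;
 * concurrent callers get -EEXIST and loop until the winner's folio
 * shows up in the cache (or the entry is freed). The brief window where
 * SWAP_HAS_CACHE is set but no folio is visible yet is what the
 * schedule_timeout_uninterruptible(1) above waits out.
 */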

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma,
			unsigned long addr, struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, false, plug);

	return retpage;
}
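
/*
 * A minimal usage sketch (illustrative, not from this file): swap in a
 * single entry and wait for the read by taking the folio lock, which
 * stays held until the I/O completes:
 *
 *	struct page *page = read_swap_cache_async(entry,
 *				GFP_HIGHUSER_MOVABLE, vma, addr, NULL);
 *	if (page) {
 *		lock_page(page);	(wait for the read to finish)
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */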

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
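
/*
 * Worked example of the heuristic above: with 5 readahead hits since
 * the last fault, pages = 5 + 2 = 7, rounded up to the next power of
 * two = 8, then clamped to max_pages and to at least half the previous
 * window. With no hits and a non-adjacent offset, the window collapses
 * to a single page.
 */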

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
}
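
/*
 * Cluster readahead example: with a window of 8 (mask == 7), a fault on
 * swap offset 0x1234 reads the aligned block of offsets 0x1230-0x1237,
 * skipping offset 0 (the swap header) and anything past the device's
 * last slot.
 */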

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
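
/*
 * One address_space is created per SWAP_ADDRESS_SPACE_PAGES (1 << 14)
 * swap slots, i.e. 64MB worth with 4KB pages, so that the i_pages lock
 * of a large swap device is split across many trunks to reduce
 * contention; swap_address_space(entry) selects the trunk from
 * swp_offset(entry) >> SWAP_ADDRESS_SPACE_SHIFT.
 */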

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING	5

struct vma_swap_readahead {
	unsigned short win;	/* readahead window size, in pages */
	unsigned short offset;	/* offset of the fault page within the window */
	unsigned short nr_pte;	/* number of PTEs to scan around the fault */
};

static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
	unsigned long start, end;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return;

	if (fpfn == pfn + 1) {
		lpfn = fpfn;
		rpfn = fpfn + win;
	} else if (pfn == fpfn + 1) {
		lpfn = fpfn - win + 1;
		rpfn = fpfn + 1;
	} else {
		unsigned int left = (win - 1) / 2;

		lpfn = fpfn - left;
		rpfn = fpfn + win - left;
	}
	start = max3(lpfn, PFN_DOWN(vma->vm_start),
		     PFN_DOWN(faddr & PMD_MASK));
	end = min3(rpfn, PFN_DOWN(vma->vm_end),
		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
}
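
/*
 * Window placement examples for swap_ra_info(), with win == 8: a
 * forward sequential fault (fpfn == pfn + 1) scans [fpfn, fpfn + 8); a
 * backward one (pfn == fpfn + 1) scans (fpfn - 8, fpfn]; anything else
 * centers the window, scanning [fpfn - 3, fpfn + 5). The range is then
 * clipped to the vma and to the PMD containing the fault address.
 */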

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte = NULL, pentry;
	unsigned long addr;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	addr = vmf->address - (ra_info.offset * PAGE_SIZE);

	blk_start_plug(&plug);
	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it performs either cluster-based (i.e. physical disk
 * based) or vma-based (i.e. virtual addresses around the fault address)
 * readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
	       swap_vma_readahead(entry, gfp_mask, vmf) :
	       swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
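
/*
 * The attribute above is exposed as /sys/kernel/mm/swap/vma_ra_enabled;
 * e.g. "echo false > /sys/kernel/mm/swap/vma_ra_enabled" makes all
 * swapins fall back to cluster readahead.
 */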

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif