mm: factor out a swap_readpage_bdev helper
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

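/*
 * Completion handler for swap-out bios submitted by __swap_writepage().
 * On I/O error the page is re-dirtied and PG_reclaim is cleared so that
 * reclaim does not throw away data that never reached the swap device;
 * in all cases writeback is ended and the bio released.
 */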
static void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

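/*
 * Completion handler for swap-in bios submitted by swap_readpage_bdev().
 * On success the page becomes uptodate; on error it is flagged and a
 * rate-limited message is logged.  Either way the page is unlocked and,
 * for synchronous reads, the waiting task recorded in bio->bi_private
 * is woken.
 */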
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

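/*
 * Build the swap extent tree for a regular swapfile by probing it with
 * bmap(): every PAGE_SIZE-aligned run of physically contiguous blocks
 * is added via add_swap_extent().  A hole in the file (bmap() returning
 * zero) aborts activation with -EINVAL.
 */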
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		goto out;
	}
	if (frontswap_store(&folio->page) == 0) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		goto out;
	}
	ret = __swap_writepage(&folio->page, wbc);
out:
	return ret;
}

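/* Account a swap-out in vmstat; THP pages also bump THP_SWPOUT. */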
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

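/*
 * Associate the swap-out bio with the blk-cgroup of the memcg owning
 * the page, so the I/O is charged to (and throttled against) the right
 * cgroup rather than the submitting task's.
 */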
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

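/*
 * Swap I/O for SWP_FS_OPS swapfiles (e.g. swap over NFS) goes through
 * the filesystem's ->swap_rw() rather than the block layer.  A
 * swap_iocb batches up to SWAP_CLUSTER_MAX pages into one kiocb; the
 * structures are allocated from the sio_pool mempool.
 */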
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

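/*
 * Create the shared sio_pool on first use; a caller that loses the
 * cmpxchg() race frees its pool and uses the winner's.
 */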
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

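/*
 * ->ki_complete handler for batched swap-out writes.  A short or failed
 * write re-dirties every page in the batch rather than setting
 * PageError, since the failure may be transient; writeback is ended on
 * all pages either way.
 */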
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable and rate-limit the
		 * messages, but do not flag PageError like
		 * the normal direct-to-bio case as it could
		 * be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else {
		for (p = 0; p < sio->pages; p++)
			count_swpout_vm_event(sio->bvec[p].bv_page);
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

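/*
 * Queue a page for swap-out through the filesystem.  Writes to
 * consecutive file offsets are accumulated in the swap_iocb hanging off
 * wbc->swap_plug and submitted together by swap_write_unplug().
 */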
static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;

	return 0;
}

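/*
 * Write one swap cache page to its backing store: SWP_FS_OPS swapfiles
 * go through swap_writepage_fs(); block devices try the
 * bdev_write_page() fast path first and fall back to submitting a
 * regular bio if that fails.
 */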
int __swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		return swap_writepage_fs(page, wbc);

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

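/*
 * Flush a batched swap_iocb: hand its bio_vecs to the filesystem's
 * ->swap_rw().  If the call completes synchronously, the completion
 * handler is invoked directly.
 */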
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

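/*
 * ->ki_complete handler for batched swap-in reads: on a full-length
 * read all pages become uptodate, otherwise they are flagged with an
 * error.  Every page is unlocked and the swap_iocb returned to the
 * pool.
 */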
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageError(page);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

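/*
 * Queue a page for swap-in through the filesystem, batching reads from
 * consecutive file offsets in *plug just as swap_writepage_fs() batches
 * writes.  Without a plug the read is submitted immediately.
 */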
static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

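/*
 * Read a page from a block-device swap area.  SWP_SYNCHRONOUS_IO
 * devices are tried via bdev_read_page() first; otherwise a bio is
 * submitted, and a synchronous caller parks in TASK_UNINTERRUPTIBLE
 * until end_swap_bio_read() clears bio->bi_private.
 */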
static void swap_readpage_bdev(struct page *page, bool synchronous,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
	    !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
		count_vm_event(PSWPIN);
		return;
	}

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);
}

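/*
 * Top-level swap-in entry point: account the stall via psi/delayacct,
 * then satisfy the read from frontswap, the filesystem (SWP_FS_OPS) or
 * the block device, whichever backs this swap slot.
 */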
void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
	} else if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
	} else {
		swap_readpage_bdev(page, synchronous, sis);
	}

	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

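/*
 * Read-side counterpart of swap_write_unplug(): submit the batched
 * swap_iocb through ->swap_rw() and complete it inline unless the
 * filesystem queued it asynchronously.
 */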
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}