// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

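/*
 * Completion handler for swap-out bios.  On error the page is redirtied
 * and PG_reclaim is cleared so that reclaim will retry the write later
 * rather than lose the data; writeback is ended and the bio released in
 * all cases.
 */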
static void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so that folio_rotate_reclaimable()
		 * does not move the folio to the tail of the LRU when
		 * writeback ends.
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

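/*
 * Read-side completion is split in two: __end_swap_bio_read() does the
 * page-flag updates and the unlock, so the synchronous path can call it
 * directly on an on-stack bio, while end_swap_bio_read() is the
 * ->bi_end_io for the asynchronous path and additionally drops the bio.
 */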
static void __end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

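/*
 * Build the extent tree for a block-backed swapfile by probing the file
 * block-by-block with bmap(): each PAGE_SIZE-aligned, physically
 * contiguous run becomes one swap extent.  Used as the fallback when the
 * filesystem does not supply its own ->swap_activate.  Returns the number
 * of extents added, or a negative errno (e.g. -EINVAL for a swapfile
 * with holes).
 */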
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

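/*
 * swap_writepage() is the writepage entry point for swap cache pages.
 * It either skips the write entirely (when the swap entry can simply be
 * freed, or frontswap accepts the page) or hands the page on to
 * __swap_writepage(); the page is unlocked in every case.
 */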
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}
	if (frontswap_store(&folio->page) == 0) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		return 0;
	}
	__swap_writepage(&folio->page, wbc);
	return 0;
}

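/* Account one swap-out: PSWPOUT per subpage, plus THP_SWPOUT for THP. */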
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

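/*
 * Attribute the swap-out bio to the block cgroup that corresponds to the
 * page's memory cgroup, so that cgroup IO controllers see (and can
 * throttle) swap writeback on behalf of the right group.
 */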
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

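/*
 * A swap_iocb gathers up to SWAP_CLUSTER_MAX pages into one kiocb for
 * swapfiles driven through ->swap_rw (SWP_FS_OPS): @pages is the number
 * of bvec slots in use and @len the total length in bytes.
 */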
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		/* Another thread may have installed a pool first; keep theirs. */
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

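/*
 * Completion callback for batched ->swap_rw writes.  A short or failed
 * write redirties every page in the batch; writeback is ended and the
 * swap_iocb returned to the mempool either way.
 */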
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty and clear PG_reclaim to
		 * avoid folio_rotate_reclaimable(), and rate-limit
		 * the messages.  Unlike the normal direct-to-bio
		 * case, do not flag PageError, as the failure may
		 * well be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else {
		for (p = 0; p < sio->pages; p++)
			count_swpout_vm_event(sio->bvec[p].bv_page);
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

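/*
 * Write a swap cache page through the filesystem.  Pages are appended to
 * the plugged swap_iocb as long as they are contiguous in the file; a
 * discontiguity, a full bvec array, or the absence of a plug forces the
 * batch out through swap_write_unplug().
 */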
static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

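/*
 * Submit the real I/O for a swap-out: dispatch to the filesystem path
 * for SWP_FS_OPS swapfiles, try the bdev_write_page() fast path, and
 * otherwise fall back to building and submitting a bio.
 */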
void __swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		return swap_writepage_fs(page, wbc);

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
}

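/*
 * Push out whatever is queued on a swap_iocb.  If ->swap_rw completes
 * synchronously (anything but -EIOCBQUEUED), run the completion handler
 * here; otherwise it runs from the async completion.
 */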
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

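/*
 * Completion callback for batched ->swap_rw reads: only a full-length
 * read marks the pages up to date; the pages are unlocked and the
 * swap_iocb freed in both the success and failure cases.
 */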
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageError(page);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

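/*
 * Read a swap page through the filesystem, mirroring swap_writepage_fs():
 * contiguous reads accumulate on the plug and go out in a single
 * ->swap_rw call via swap_read_unplug().
 */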
static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

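/*
 * Synchronous block-device read: for SWP_SYNCHRONOUS_IO devices try
 * bdev_read_page() first; otherwise issue an on-stack bio and wait for
 * it with submit_bio_wait(), completing it inline.
 */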
static void swap_readpage_bdev_sync(struct page *page,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
	    !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
		count_vm_event(PSWPIN);
		return;
	}

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_page_sector(page);
	bio_add_page(&bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_vm_event(PSWPIN);
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

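/*
 * Asynchronous block-device read: the same bdev_read_page() fast path,
 * otherwise a heap-allocated bio completed by end_swap_bio_read().
 */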
static void swap_readpage_bdev_async(struct page *page,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
	    !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
		count_vm_event(PSWPIN);
		return;
	}

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
}

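/*
 * Top-level swap-in entry point.  Picks between frontswap, the
 * filesystem path (SWP_FS_OPS) and the sync/async block-device paths,
 * and wraps the submission in psi/delayacct stall accounting.
 */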
void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall and delay.  When the device
	 * is congested, or the submitting cgroup is IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
	} else if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
	} else if (synchronous) {
		swap_readpage_bdev_sync(page, sis);
	} else {
		swap_readpage_bdev_async(page, sis);
	}

	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

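/*
 * Counterpart of swap_write_unplug() for reads; callers normally go
 * through the swap_read_unplug() wrapper, which only submits a non-NULL
 * swap_iocb.
 */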
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}