// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

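/*
 * Walk the swap file block by block with bmap() and add each PAGE_SIZE
 * aligned, physically contiguous run of blocks as a swap extent.  The
 * header page (page_no == 0) is excluded from the span calculation.
 */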
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

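/*
 * A swap_iocb ("sio") batches up to SWAP_CLUSTER_MAX pages destined for a
 * SWP_FS_OPS swap file so that they can be submitted to the filesystem in
 * a single ->swap_rw() call.
 */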
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
};
static mempool_t *sio_pool;

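/*
 * Allocate the sio mempool lazily on first use.  Concurrent callers may
 * race to create the pool; the cmpxchg() keeps the first one and frees
 * any duplicate.
 */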
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

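/*
 * Completion handler for writes submitted via ->swap_rw().  A short or
 * failed write redirties the pages (see the comment below) instead of
 * setting PageError, since the failure may be transient.
 */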
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != PAGE_SIZE * sio->pages) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty and avoid
		 * folio_rotate_reclaimable, and rate-limit the
		 * messages, but do not flag PageError like
		 * the normal direct-to-bio case as it could
		 * be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else
		count_vm_events(PSWPOUT, sio->pages);

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

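/*
 * Write a page to a SWP_FS_OPS swap file.  When the caller provides a plug
 * in wbc->swap_plug, pages at consecutive file offsets are accumulated in
 * one swap_iocb and only submitted (via swap_write_unplug()) once the
 * batch is full or the next page is not contiguous.  Without a plug the
 * page is submitted immediately.
 */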
static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = PAGE_SIZE;
	sio->bvec[sio->pages].bv_offset = 0;
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;

	return 0;
}

int __swap_writepage(struct page *page, struct writeback_control *wbc,
		     bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		return swap_writepage_fs(page, wbc);

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_write_func;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

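/*
 * Submit the pages accumulated in @sio with a single ->swap_rw() call.
 * If the filesystem completes the I/O synchronously (anything other than
 * -EIOCBQUEUED), run the completion handler here.
 */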
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages,
		      PAGE_SIZE * sio->pages);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

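/* Completion handler for reads submitted via ->swap_rw(). */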
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == PAGE_SIZE * sio->pages) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageError(page);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

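/*
 * Read a page from a SWP_FS_OPS swap file.  Like the write side, a
 * caller-supplied plug lets consecutive pages be gathered into one
 * swap_iocb and submitted together by swap_read_unplug().
 */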
static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = PAGE_SIZE;
	sio->bvec[sio->pages].bv_offset = 0;
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

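/*
 * Read one page of swap.  The page may be served by frontswap, read via
 * the filesystem for SWP_FS_OPS, read synchronously with bdev_read_page()
 * for SWP_SYNCHRONOUS_IO devices, or submitted as a bio.  With
 * @synchronous the caller waits here for completion, polling the bio
 * where possible.
 */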
int swap_readpage(struct page *page, bool synchronous,
		  struct swap_iocb **plug)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	if (workingset)
		psi_memstall_enter(&pflags);
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		bio->bi_opf |= REQ_POLLED;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	if (workingset)
		psi_memstall_leave(&pflags);
	delayacct_swapin_end();
	return ret;
}

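/*
 * Read-side counterpart of swap_write_unplug(): submit the accumulated
 * sio via ->swap_rw() and complete it here if the call did not return
 * -EIOCBQUEUED.
 */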
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, READ, sio->bvec, sio->pages,
		      PAGE_SIZE * sio->pages);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}