/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

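/*
 * Read completion handler: for encrypted regular-file reads the actual
 * decryption is deferred to a workqueue via f2fs_end_io_crypto_work();
 * otherwise every page in the bio is marked uptodate (or errored) and
 * unlocked here.
 */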
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

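/*
 * Write completion handler. A write error redirties the page and stops
 * checkpointing, since consistency can no longer be guaranteed. Waiters
 * on sbi->cp_wait are woken once the last writeback page is accounted.
 */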
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

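/* Submit the bio merged into @io so far, if any, and clear it. */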
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

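/*
 * Flush the merged bio of the given page type. In the checkpoint path
 * (type >= META_FLUSH), the request is upgraded to a flush, with FUA
 * unless the NOBARRIER mount option is set.
 */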
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

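/*
 * Merge one page into the per-type in-flight bio. If the new block is not
 * contiguous with the previous one, or the rw flags differ, the pending
 * bio is submitted first and a fresh one is allocated.
 */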
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

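/*
 * Look up the dnode covering @index and reserve a new block there if the
 * slot is still a hole. The dnode is put on error, and also on success
 * when this function grabbed the inode page itself (dn->inode_page was
 * NULL on entry).
 */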
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

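/*
 * Resolve the block address for @index: try the extent cache first, and
 * fall back to looking up and, if needed, reserving a block via the dnode.
 */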
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

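/*
 * Return the cached page if it is already uptodate; otherwise issue a
 * synchronous read and wait for completion without taking the page lock.
 */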
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers in dir.c and GC need to be able to tell whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC, true);
		if (IS_ERR(page))
			goto repeat;

		/* wait for read completion */
		lock_page(page);
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

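/*
 * Allocate an on-disk block for the address slot that @dn points at. A
 * free block is charged unless the slot was already reserved as NEW_ADDR,
 * and i_size is extended when the new block lands past EOF.
 */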
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));

	/* direct IO doesn't use extent cache to maximize the performance */
	f2fs_drop_largest_extent(dn->inode, fofs);

	return 0;
}

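/*
 * Preallocate all blocks covering [offset, offset + count) ahead of a
 * direct IO write, so the direct IO path itself never has to allocate.
 */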
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			if (unlikely(f2fs_cp_error(sbi)))
				goto sync_out;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}

	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto put_out;
			}
			err = __allocate_data_block(&dn);
			if (err)
				goto put_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
		} else {
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						dn.data_blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto put_out;
			}

			/*
			 * preallocated unwritten block should be mapped
			 * for fiemap.
			 */
			if (dn.data_blkaddr == NEW_ADDR)
				map->m_flags = F2FS_MAP_UNWRITTEN;
		}
	}

	map->m_flags |= F2FS_MAP_MAPPED;
	map->m_pblk = dn.data_blkaddr;
	map->m_len = 1;

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
			if (create) {
				if (unlikely(f2fs_cp_error(sbi))) {
					err = -EIO;
					goto sync_out;
				}
				err = __allocate_data_block(&dn);
				if (err)
					goto sync_out;
				allocated = true;
				map->m_flags |= F2FS_MAP_NEW;
				blkaddr = dn.data_blkaddr;
			} else {
				/*
				 * we only merge preallocated unwritten blocks
				 * for fiemap.
				 */
				if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
					goto sync_out;
			}
		}

		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
				(map->m_pblk == NEW_ADDR &&
				blkaddr == NEW_ADDR)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

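/*
 * Adapter between the buffer_head convention used by the generic block
 * helpers and f2fs_map_blocks(): m_pblk/m_len/m_flags are translated back
 * into the bh's block number, size and state bits.
 */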
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag)
{
	return __get_data_block(inode, iblock, bh_result, create, flag);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for
 * f2fs. The major change stems from block_size == page_size in f2fs by
 * default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
							F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

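/*
 * Write one data page to its on-disk location. Updated, non-cold data is
 * rewritten in place (IPU) when the in-place-update policy allows it;
 * otherwise a new block is allocated (OPU) and the extent cache updated.
 */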
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

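/*
 * ->writepage entry point. Pages entirely beyond i_size are skipped,
 * dentry pages are written under checkpoint control, and inline data is
 * written back into the inode page while it still fits.
 */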
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is that it writes cold data pages in a separate pass
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page, DATA);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

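/*
 * Called when a write fails: pagecache and blocks instantiated beyond the
 * current i_size are truncated away again.
 */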
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

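/*
 * Direct IO requires both the file offset and the user buffer to be
 * aligned to the filesystem block size; misaligned requests are rejected
 * with -EINVAL.
 */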
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE) {
		__allocate_data_blocks(inode, offset, count);
		if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
			err = -EIO;
			goto out;
		}
	}

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
out:
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

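/*
 * Invalidating a whole page drops it from the dirty-page accounting;
 * partial invalidation is ignored for non-meta/node inodes, and pages
 * holding atomic writes keep their private data.
 */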
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

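/*
 * For atomic-write files, a newly dirtied page is registered in the
 * in-memory page list via register_inmem_page() instead of going through
 * the normal dirty-page accounting; an already registered page is left
 * alone.
 */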
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has previously been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};