// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

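/*
 * Per-algorithm hooks: each compression backend (LZO, LZ4) supplies
 * context setup/teardown plus (de)compression over a whole cluster.
 */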
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

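/*
 * A compressed page carries its owning io context in page_private();
 * that context begins with F2FS_COMPRESSED_PAGE_MAGIC, which
 * distinguishes it from the atomic/dummy write markers that also use
 * page_private().
 */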
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data, refcount_t *r)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
	if (r)
		refcount_inc(r);
}

static void f2fs_put_compressed_page(struct page *page)
{
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	put_page(page);
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
				struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

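		/*
		 * Drop both the reference taken by find_get_page() above
		 * and the reference held since the page was grabbed for
		 * this cluster in prepare_compress_overwrite().
		 */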
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->nr_rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
		return -EIO;
	}
	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static struct page *f2fs_grab_page(void)
{
	struct page *page;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}

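/*
 * Compress one cluster: vmap() rpages/cpages into contiguous buffers,
 * run the backend, then free any cpages left unused by the final
 * compressed length. Returns -EAGAIN when compression would not save
 * at least one block, so the caller falls back to raw writes.
 */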
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	ret = cops->init_compress_ctx(cc);
	if (ret)
		goto out;

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_grab_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_put_compressed_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_put_compressed_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

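/*
 * Called from bio endio context for each compressed page of a cluster;
 * only the holder of the last reference performs the decompression.
 */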
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

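/*
 * Count blocks of a cluster whose first address is COMPRESS_ADDR:
 * with @compr set, only valid on-disk data blocks are counted;
 * otherwise every non-NULL_ADDR block counts.
 */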
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc, false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

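/*
 * write_begin path for a compressed cluster: grab and read in every
 * page of the cluster, keeping references so reclaim cannot drop them,
 * and preallocate blocks when the cluster has fewer valid blocks than
 * its size.
 */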
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc, false);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(cc, mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

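/*
 * Write one compressed cluster: block 0 of the cluster becomes the
 * COMPRESS_ADDR header, the compressed pages are written out of place,
 * and block addresses beyond nr_cpages are invalidated back to
 * NEW_ADDR.
 */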
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (!f2fs_trylock_op(sbi))
		return -EAGAIN;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, 1);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index,
					cic, i ? &cic->ref : NULL);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	f2fs_unlock_op(sbi);
	return -EAGAIN;
}

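/*
 * Write endio for a compressed page; the holder of the last reference
 * ends writeback on all raw pages of the cluster.
 */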
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_put_compressed_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

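/*
 * Writeback entry point for a cluster: try the compressed path first,
 * and fall back to writing raw pages when compression is not possible
 * or does not help (-EAGAIN).
 */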
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

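/*
 * Build the decompress context for a cluster read: cpages receive the
 * on-disk compressed data, while tpages fill the holes in rpages so
 * the whole cluster can be vmap()ed as the decompression target.
 */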
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, 1);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_grab_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1,
					dic, i ? &dic->ref : NULL);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i])
			continue;

		dic->tpages[i] = f2fs_grab_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->tpages[i])
			continue;
		dic->tpages[i] = cc->rpages[i];
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			unlock_page(dic->tpages[i]);
			put_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_put_compressed_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

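/*
 * Complete a cluster read: mark each raw page uptodate (after an
 * optional fs-verity check) or flag it with an error, then unlock it.
 */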
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage)) {
			ClearPageUptodate(rpage);
			ClearPageError(rpage);
		} else {
			if (!verity || fsverity_verify_page(rpage))
				SetPageUptodate(rpage);
			else
				SetPageError(rpage);
		}
		unlock_page(rpage);
	}
}