1// SPDX-License-Identifier: GPL-2.0
2/*
3 * f2fs compress support
4 *
5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6 */
7
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/writeback.h>
11#include <linux/backing-dev.h>
12#include <linux/lzo.h>
13#include <linux/lz4.h>
 14#include <linux/zstd.h>
15
16#include "f2fs.h"
17#include "node.h"
18#include <trace/events/f2fs.h>
19
20static struct kmem_cache *cic_entry_slab;
21static struct kmem_cache *dic_entry_slab;
22
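/*
 * Page-pointer arrays for one cluster are small and fixed-size, so requests
 * no larger than the per-sb slab object come from page_array_slab; anything
 * bigger falls back to f2fs_kzalloc(), and page_array_free() mirrors the
 * same size check when releasing.
 */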
23static void *page_array_alloc(struct inode *inode, int nr)
24{
25 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
26 unsigned int size = sizeof(struct page *) * nr;
27
28 if (likely(size <= sbi->page_array_slab_size))
29 return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
30 return f2fs_kzalloc(sbi, size, GFP_NOFS);
31}
32
33static void page_array_free(struct inode *inode, void *pages, int nr)
34{
35 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
36 unsigned int size = sizeof(struct page *) * nr;
37
38 if (!pages)
39 return;
40
41 if (likely(size <= sbi->page_array_slab_size))
42 kmem_cache_free(sbi->page_array_slab, pages);
43 else
44 kfree(pages);
45}
46
47struct f2fs_compress_ops {
48 int (*init_compress_ctx)(struct compress_ctx *cc);
49 void (*destroy_compress_ctx)(struct compress_ctx *cc);
50 int (*compress_pages)(struct compress_ctx *cc);
51 int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
52 void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
53 int (*decompress_pages)(struct decompress_io_ctx *dic);
54};
55
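/*
 * Cluster helpers: with a 4-page cluster (log_cluster_size == 2,
 * cluster_size == 4), page index 13 belongs to cluster 13 >> 2 == 3 and
 * sits at offset 13 & 3 == 1 within that cluster.
 */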
56static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
57{
58 return index & (cc->cluster_size - 1);
59}
60
61static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
62{
63 return index >> cc->log_cluster_size;
64}
65
66static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
67{
68 return cc->cluster_idx << cc->log_cluster_size;
69}
70
71bool f2fs_is_compressed_page(struct page *page)
72{
73 if (!PagePrivate(page))
74 return false;
75 if (!page_private(page))
76 return false;
 77 if (page_private_nonpointer(page))
 78 return false;
 79
80 f2fs_bug_on(F2FS_M_SB(page->mapping),
81 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
82 return true;
83}
84
85static void f2fs_set_compressed_page(struct page *page,
 86 struct inode *inode, pgoff_t index, void *data)
 87{
 88 attach_page_private(page, (void *)data);
89
90 /* i_crypto_info and iv index */
91 page->index = index;
92 page->mapping = inode->i_mapping;
93}
94
95static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
96{
97 int i;
98
99 for (i = 0; i < len; i++) {
100 if (!cc->rpages[i])
101 continue;
102 if (unlock)
103 unlock_page(cc->rpages[i]);
104 else
105 put_page(cc->rpages[i]);
106 }
107}
108
109static void f2fs_put_rpages(struct compress_ctx *cc)
110{
111 f2fs_drop_rpages(cc, cc->cluster_size, false);
112}
113
114static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
115{
116 f2fs_drop_rpages(cc, len, true);
117}
118
119static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
120 struct writeback_control *wbc, bool redirty, int unlock)
121{
122 unsigned int i;
123
124 for (i = 0; i < cc->cluster_size; i++) {
125 if (!cc->rpages[i])
126 continue;
127 if (redirty)
128 redirty_page_for_writepage(wbc, cc->rpages[i]);
129 f2fs_put_page(cc->rpages[i], unlock);
130 }
131}
132
133struct page *f2fs_compress_control_page(struct page *page)
134{
135 return ((struct compress_io_ctx *)page_private(page))->rpages[0];
136}
137
138int f2fs_init_compress_ctx(struct compress_ctx *cc)
139{
 140 if (cc->rpages)
141 return 0;
142
 143 cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
144 return cc->rpages ? 0 : -ENOMEM;
145}
146
 147void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 148{
 149 page_array_free(cc->inode, cc->rpages, cc->cluster_size);
150 cc->rpages = NULL;
151 cc->nr_rpages = 0;
152 cc->nr_cpages = 0;
153 if (!reuse)
154 cc->cluster_idx = NULL_CLUSTER;
155}
156
157void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
158{
159 unsigned int cluster_ofs;
160
161 if (!f2fs_cluster_can_merge_page(cc, page->index))
162 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
163
164 cluster_ofs = offset_in_cluster(cc, page->index);
165 cc->rpages[cluster_ofs] = page;
166 cc->nr_rpages++;
167 cc->cluster_idx = cluster_idx(cc, page->index);
168}
169
170#ifdef CONFIG_F2FS_FS_LZO
171static int lzo_init_compress_ctx(struct compress_ctx *cc)
172{
173 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
174 LZO1X_MEM_COMPRESS, GFP_NOFS);
175 if (!cc->private)
176 return -ENOMEM;
177
178 cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
179 return 0;
180}
181
182static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
183{
184 kvfree(cc->private);
185 cc->private = NULL;
186}
187
188static int lzo_compress_pages(struct compress_ctx *cc)
189{
190 int ret;
191
192 ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
193 &cc->clen, cc->private);
194 if (ret != LZO_E_OK) {
195 printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
196 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
197 return -EIO;
198 }
199 return 0;
200}
201
202static int lzo_decompress_pages(struct decompress_io_ctx *dic)
203{
204 int ret;
205
206 ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
207 dic->rbuf, &dic->rlen);
208 if (ret != LZO_E_OK) {
209 printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
210 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
211 return -EIO;
212 }
213
214 if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
215 printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
216 "expected:%lu\n", KERN_ERR,
217 F2FS_I_SB(dic->inode)->sb->s_id,
218 dic->rlen,
219 PAGE_SIZE << dic->log_cluster_size);
220 return -EIO;
221 }
222 return 0;
223}
224
225static const struct f2fs_compress_ops f2fs_lzo_ops = {
226 .init_compress_ctx = lzo_init_compress_ctx,
227 .destroy_compress_ctx = lzo_destroy_compress_ctx,
228 .compress_pages = lzo_compress_pages,
229 .decompress_pages = lzo_decompress_pages,
230};
231#endif
232
233#ifdef CONFIG_F2FS_FS_LZ4
234static int lz4_init_compress_ctx(struct compress_ctx *cc)
235{
236 unsigned int size = LZ4_MEM_COMPRESS;
237
238#ifdef CONFIG_F2FS_FS_LZ4HC
239 if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
240 size = LZ4HC_MEM_COMPRESS;
241#endif
242
243 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
244 if (!cc->private)
245 return -ENOMEM;
246
247 /*
 248 * we do not set cc->clen to LZ4_compressBound(inputsize) to cover the
 249 * worst-case compression, because the lz4 compressor handles the
 250 * output budget properly.
251 */
252 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
253 return 0;
254}
255
256static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
257{
258 kvfree(cc->private);
259 cc->private = NULL;
260}
261
262#ifdef CONFIG_F2FS_FS_LZ4HC
263static int lz4hc_compress_pages(struct compress_ctx *cc)
264{
265 unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
266 COMPRESS_LEVEL_OFFSET;
267 int len;
268
269 if (level)
270 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
271 cc->clen, level, cc->private);
272 else
273 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
274 cc->clen, cc->private);
275 if (!len)
276 return -EAGAIN;
277
278 cc->clen = len;
279 return 0;
280}
281#endif
282
283static int lz4_compress_pages(struct compress_ctx *cc)
284{
285 int len;
286
287#ifdef CONFIG_F2FS_FS_LZ4HC
288 return lz4hc_compress_pages(cc);
289#endif
290 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
291 cc->clen, cc->private);
292 if (!len)
293 return -EAGAIN;
294
295 cc->clen = len;
296 return 0;
297}
298
299static int lz4_decompress_pages(struct decompress_io_ctx *dic)
300{
301 int ret;
302
303 ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
304 dic->clen, dic->rlen);
305 if (ret < 0) {
306 printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
307 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
308 return -EIO;
309 }
310
311 if (ret != PAGE_SIZE << dic->log_cluster_size) {
312 printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
313 "expected:%lu\n", KERN_ERR,
314 F2FS_I_SB(dic->inode)->sb->s_id,
315 dic->rlen,
316 PAGE_SIZE << dic->log_cluster_size);
317 return -EIO;
318 }
319 return 0;
320}
321
322static const struct f2fs_compress_ops f2fs_lz4_ops = {
323 .init_compress_ctx = lz4_init_compress_ctx,
324 .destroy_compress_ctx = lz4_destroy_compress_ctx,
325 .compress_pages = lz4_compress_pages,
326 .decompress_pages = lz4_decompress_pages,
327};
328#endif
329
330#ifdef CONFIG_F2FS_FS_ZSTD
331#define F2FS_ZSTD_DEFAULT_CLEVEL 1
332
333static int zstd_init_compress_ctx(struct compress_ctx *cc)
334{
335 ZSTD_parameters params;
336 ZSTD_CStream *stream;
337 void *workspace;
338 unsigned int workspace_size;
339 unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
340 COMPRESS_LEVEL_OFFSET;
341
342 if (!level)
343 level = F2FS_ZSTD_DEFAULT_CLEVEL;
 344
 345 params = ZSTD_getParams(level, cc->rlen, 0);
346 workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
347
348 workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
349 workspace_size, GFP_NOFS);
350 if (!workspace)
351 return -ENOMEM;
352
353 stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
354 if (!stream) {
355 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
356 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
357 __func__);
358 kvfree(workspace);
359 return -EIO;
360 }
361
362 cc->private = workspace;
363 cc->private2 = stream;
364
365 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
366 return 0;
367}
368
369static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
370{
371 kvfree(cc->private);
372 cc->private = NULL;
373 cc->private2 = NULL;
374}
375
376static int zstd_compress_pages(struct compress_ctx *cc)
377{
378 ZSTD_CStream *stream = cc->private2;
379 ZSTD_inBuffer inbuf;
380 ZSTD_outBuffer outbuf;
381 int src_size = cc->rlen;
382 int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
383 int ret;
384
385 inbuf.pos = 0;
386 inbuf.src = cc->rbuf;
387 inbuf.size = src_size;
388
389 outbuf.pos = 0;
390 outbuf.dst = cc->cbuf->cdata;
391 outbuf.size = dst_size;
392
393 ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
394 if (ZSTD_isError(ret)) {
395 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
396 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
397 __func__, ZSTD_getErrorCode(ret));
398 return -EIO;
399 }
400
401 ret = ZSTD_endStream(stream, &outbuf);
402 if (ZSTD_isError(ret)) {
403 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
404 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
405 __func__, ZSTD_getErrorCode(ret));
406 return -EIO;
407 }
408
409 /*
 410 * compressed data remains in the intermediate buffer because there is
 411 * no more space left in cbuf.cdata
412 */
413 if (ret)
414 return -EAGAIN;
415
416 cc->clen = outbuf.pos;
417 return 0;
418}
419
420static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
421{
422 ZSTD_DStream *stream;
423 void *workspace;
424 unsigned int workspace_size;
425 unsigned int max_window_size =
426 MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
 427
 428 workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
429
430 workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
431 workspace_size, GFP_NOFS);
432 if (!workspace)
433 return -ENOMEM;
434
 435 stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
436 if (!stream) {
437 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
438 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
439 __func__);
440 kvfree(workspace);
441 return -EIO;
442 }
443
444 dic->private = workspace;
445 dic->private2 = stream;
446
447 return 0;
448}
449
450static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
451{
452 kvfree(dic->private);
453 dic->private = NULL;
454 dic->private2 = NULL;
455}
456
457static int zstd_decompress_pages(struct decompress_io_ctx *dic)
458{
459 ZSTD_DStream *stream = dic->private2;
460 ZSTD_inBuffer inbuf;
461 ZSTD_outBuffer outbuf;
462 int ret;
463
464 inbuf.pos = 0;
465 inbuf.src = dic->cbuf->cdata;
466 inbuf.size = dic->clen;
467
468 outbuf.pos = 0;
469 outbuf.dst = dic->rbuf;
470 outbuf.size = dic->rlen;
471
472 ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
473 if (ZSTD_isError(ret)) {
 474 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
475 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
476 __func__, ZSTD_getErrorCode(ret));
477 return -EIO;
478 }
479
480 if (dic->rlen != outbuf.pos) {
481 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
482 "expected:%lu\n", KERN_ERR,
483 F2FS_I_SB(dic->inode)->sb->s_id,
484 __func__, dic->rlen,
485 PAGE_SIZE << dic->log_cluster_size);
486 return -EIO;
487 }
488
489 return 0;
490}
491
492static const struct f2fs_compress_ops f2fs_zstd_ops = {
493 .init_compress_ctx = zstd_init_compress_ctx,
494 .destroy_compress_ctx = zstd_destroy_compress_ctx,
495 .compress_pages = zstd_compress_pages,
496 .init_decompress_ctx = zstd_init_decompress_ctx,
497 .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
498 .decompress_pages = zstd_decompress_pages,
499};
500#endif
501
502#ifdef CONFIG_F2FS_FS_LZO
503#ifdef CONFIG_F2FS_FS_LZORLE
504static int lzorle_compress_pages(struct compress_ctx *cc)
505{
506 int ret;
507
508 ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
509 &cc->clen, cc->private);
510 if (ret != LZO_E_OK) {
511 printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
512 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
513 return -EIO;
514 }
515 return 0;
516}
517
518static const struct f2fs_compress_ops f2fs_lzorle_ops = {
519 .init_compress_ctx = lzo_init_compress_ctx,
520 .destroy_compress_ctx = lzo_destroy_compress_ctx,
521 .compress_pages = lzorle_compress_pages,
522 .decompress_pages = lzo_decompress_pages,
523};
524#endif
525#endif
526
527static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
528#ifdef CONFIG_F2FS_FS_LZO
529 &f2fs_lzo_ops,
530#else
531 NULL,
532#endif
533#ifdef CONFIG_F2FS_FS_LZ4
534 &f2fs_lz4_ops,
535#else
536 NULL,
537#endif
538#ifdef CONFIG_F2FS_FS_ZSTD
539 &f2fs_zstd_ops,
540#else
541 NULL,
542#endif
543#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
544 &f2fs_lzorle_ops,
545#else
546 NULL,
547#endif
548};
549
550bool f2fs_is_compress_backend_ready(struct inode *inode)
551{
552 if (!f2fs_compressed_file(inode))
553 return true;
554 return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
555}
556
 557static mempool_t *compress_page_pool;
558static int num_compress_pages = 512;
559module_param(num_compress_pages, uint, 0444);
560MODULE_PARM_DESC(num_compress_pages,
561 "Number of intermediate compress pages to preallocate");
562
563int f2fs_init_compress_mempool(void)
564{
565 compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
566 if (!compress_page_pool)
567 return -ENOMEM;
568
569 return 0;
570}
571
572void f2fs_destroy_compress_mempool(void)
573{
574 mempool_destroy(compress_page_pool);
575}
576
577static struct page *f2fs_compress_alloc_page(void)
578{
579 struct page *page;
580
 581 page = mempool_alloc(compress_page_pool, GFP_NOFS);
 582 lock_page(page);
 583
584 return page;
585}
586
587static void f2fs_compress_free_page(struct page *page)
588{
589 if (!page)
590 return;
 591 detach_page_private(page);
592 page->mapping = NULL;
593 unlock_page(page);
594 mempool_free(page, compress_page_pool);
595}
596
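/*
 * vm_map_ram() can fail transiently when the lazy-unmap vmap space is
 * exhausted; retry a few times, flushing stale aliases with
 * vm_unmap_aliases() between attempts, before giving up and returning NULL.
 */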
597#define MAX_VMAP_RETRIES 3
598
599static void *f2fs_vmap(struct page **pages, unsigned int count)
600{
601 int i;
602 void *buf = NULL;
603
604 for (i = 0; i < MAX_VMAP_RETRIES; i++) {
605 buf = vm_map_ram(pages, count, -1);
606 if (buf)
607 break;
608 vm_unmap_aliases();
609 }
610 return buf;
611}
612
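/*
 * Compress one cluster: map the raw pages (rpages) and freshly allocated
 * compressed pages (cpages) into contiguous buffers with f2fs_vmap(), run
 * the per-algorithm ->compress_pages() hook, check that the result still
 * saves at least one block, fill in the compress header (clen, optional
 * checksum), then drop the now-unneeded tail cpages.
 */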
613static int f2fs_compress_pages(struct compress_ctx *cc)
614{
615 struct f2fs_inode_info *fi = F2FS_I(cc->inode);
616 const struct f2fs_compress_ops *cops =
617 f2fs_cops[fi->i_compress_algorithm];
618 unsigned int max_len, new_nr_cpages;
619 struct page **new_cpages;
 620 u32 chksum = 0;
621 int i, ret;
622
623 trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
624 cc->cluster_size, fi->i_compress_algorithm);
625
626 if (cops->init_compress_ctx) {
627 ret = cops->init_compress_ctx(cc);
628 if (ret)
629 goto out;
630 }
631
632 max_len = COMPRESS_HEADER_SIZE + cc->clen;
633 cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
634
 635 cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
636 if (!cc->cpages) {
637 ret = -ENOMEM;
638 goto destroy_compress_ctx;
639 }
640
641 for (i = 0; i < cc->nr_cpages; i++) {
 642 cc->cpages[i] = f2fs_compress_alloc_page();
643 if (!cc->cpages[i]) {
644 ret = -ENOMEM;
645 goto out_free_cpages;
646 }
647 }
648
 649 cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
650 if (!cc->rbuf) {
651 ret = -ENOMEM;
652 goto out_free_cpages;
653 }
654
 655 cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
656 if (!cc->cbuf) {
657 ret = -ENOMEM;
658 goto out_vunmap_rbuf;
659 }
660
661 ret = cops->compress_pages(cc);
662 if (ret)
663 goto out_vunmap_cbuf;
664
665 max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
666
667 if (cc->clen > max_len) {
668 ret = -EAGAIN;
669 goto out_vunmap_cbuf;
670 }
671
672 cc->cbuf->clen = cpu_to_le32(cc->clen);
 673
674 if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
675 chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
676 cc->cbuf->cdata, cc->clen);
677 cc->cbuf->chksum = cpu_to_le32(chksum);
678
679 for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
680 cc->cbuf->reserved[i] = cpu_to_le32(0);
681
682 new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
683
684 /* Now we're going to cut unnecessary tail pages */
685 new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
686 if (!new_cpages) {
687 ret = -ENOMEM;
688 goto out_vunmap_cbuf;
689 }
690
691 /* zero out any unused part of the last page */
692 memset(&cc->cbuf->cdata[cc->clen], 0,
693 (new_nr_cpages * PAGE_SIZE) -
694 (cc->clen + COMPRESS_HEADER_SIZE));
 695
696 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
697 vm_unmap_ram(cc->rbuf, cc->cluster_size);
 698
699 for (i = 0; i < cc->nr_cpages; i++) {
700 if (i < new_nr_cpages) {
701 new_cpages[i] = cc->cpages[i];
702 continue;
703 }
 704 f2fs_compress_free_page(cc->cpages[i]);
705 cc->cpages[i] = NULL;
706 }
707
708 if (cops->destroy_compress_ctx)
709 cops->destroy_compress_ctx(cc);
 710
711 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
712 cc->cpages = new_cpages;
713 cc->nr_cpages = new_nr_cpages;
714
715 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
716 cc->clen, ret);
717 return 0;
718
719out_vunmap_cbuf:
 720 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 721out_vunmap_rbuf:
 722 vm_unmap_ram(cc->rbuf, cc->cluster_size);
723out_free_cpages:
724 for (i = 0; i < cc->nr_cpages; i++) {
725 if (cc->cpages[i])
 726 f2fs_compress_free_page(cc->cpages[i]);
 727 }
 728 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
729 cc->cpages = NULL;
730destroy_compress_ctx:
731 if (cops->destroy_compress_ctx)
732 cops->destroy_compress_ctx(cc);
733out:
734 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
735 cc->clen, ret);
736 return ret;
737}
738
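/*
 * Decompress one cluster after all of its compressed pages have been read.
 * Temporary pages (tpages) stand in for any raw pages the caller did not
 * supply, the per-algorithm ->decompress_pages() hook fills them, and an
 * optional checksum is verified before completion is signalled through
 * f2fs_decompress_end_io().
 */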
 739static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
 740{
 741 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
 742 struct f2fs_inode_info *fi = F2FS_I(dic->inode);
743 const struct f2fs_compress_ops *cops =
744 f2fs_cops[fi->i_compress_algorithm];
745 int ret;
 746 int i;
 747
748 trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
749 dic->cluster_size, fi->i_compress_algorithm);
750
751 if (dic->failed) {
752 ret = -EIO;
 753 goto out_end_io;
754 }
755
 756 dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
757 if (!dic->tpages) {
758 ret = -ENOMEM;
 759 goto out_end_io;
760 }
761
762 for (i = 0; i < dic->cluster_size; i++) {
763 if (dic->rpages[i]) {
764 dic->tpages[i] = dic->rpages[i];
765 continue;
766 }
767
768 dic->tpages[i] = f2fs_compress_alloc_page();
769 if (!dic->tpages[i]) {
770 ret = -ENOMEM;
 771 goto out_end_io;
772 }
773 }
774
775 if (cops->init_decompress_ctx) {
776 ret = cops->init_decompress_ctx(dic);
777 if (ret)
 778 goto out_end_io;
779 }
780
 781 dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
782 if (!dic->rbuf) {
783 ret = -ENOMEM;
 784 goto out_destroy_decompress_ctx;
785 }
786
 787 dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
788 if (!dic->cbuf) {
789 ret = -ENOMEM;
790 goto out_vunmap_rbuf;
791 }
792
793 dic->clen = le32_to_cpu(dic->cbuf->clen);
794 dic->rlen = PAGE_SIZE << dic->log_cluster_size;
795
796 if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
797 ret = -EFSCORRUPTED;
798 goto out_vunmap_cbuf;
799 }
800
801 ret = cops->decompress_pages(dic);
802
 803 if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
804 u32 provided = le32_to_cpu(dic->cbuf->chksum);
805 u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
806
807 if (provided != calculated) {
808 if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
809 set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
810 printk_ratelimited(
811 "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
812 KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
813 provided, calculated);
814 }
815 set_sbi_flag(sbi, SBI_NEED_FSCK);
816 }
817 }
818
 819out_vunmap_cbuf:
 820 vm_unmap_ram(dic->cbuf, dic->nr_cpages);
 821out_vunmap_rbuf:
 822 vm_unmap_ram(dic->rbuf, dic->cluster_size);
 823out_destroy_decompress_ctx:
824 if (cops->destroy_decompress_ctx)
825 cops->destroy_decompress_ctx(dic);
 826out_end_io:
827 trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
828 dic->clen, ret);
829 f2fs_decompress_end_io(dic, ret);
830}
831
832/*
833 * This is called when a page of a compressed cluster has been read from disk
834 * (or failed to be read from disk). It checks whether this page was the last
835 * page being waited on in the cluster, and if so, it decompresses the cluster
836 * (or in the case of a failure, cleans up without actually decompressing).
837 */
838void f2fs_end_read_compressed_page(struct page *page, bool failed)
839{
840 struct decompress_io_ctx *dic =
841 (struct decompress_io_ctx *)page_private(page);
842 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
843
844 dec_page_count(sbi, F2FS_RD_DATA);
845
846 if (failed)
847 WRITE_ONCE(dic->failed, true);
848
849 if (atomic_dec_and_test(&dic->remaining_pages))
850 f2fs_decompress_cluster(dic);
851}
852
853static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
854{
855 if (cc->cluster_idx == NULL_CLUSTER)
856 return true;
857 return cc->cluster_idx == cluster_idx(cc, index);
858}
859
860bool f2fs_cluster_is_empty(struct compress_ctx *cc)
861{
862 return cc->nr_rpages == 0;
863}
864
865static bool f2fs_cluster_is_full(struct compress_ctx *cc)
866{
867 return cc->cluster_size == cc->nr_rpages;
868}
869
870bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
871{
872 if (f2fs_cluster_is_empty(cc))
873 return true;
874 return is_page_in_cluster(cc, index);
875}
876
 877static bool cluster_has_invalid_data(struct compress_ctx *cc)
 878{
879 loff_t i_size = i_size_read(cc->inode);
880 unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
881 int i;
882
883 for (i = 0; i < cc->cluster_size; i++) {
884 struct page *page = cc->rpages[i];
885
 886 f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
887
888 /* beyond EOF */
889 if (page->index >= nr_pages)
 890 return true;
 891 }
 892 return false;
893}
894
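/*
 * Returns 0 if the cluster at cc->cluster_idx is not compressed (or does not
 * exist), a negative errno on failure, otherwise 1 for the COMPRESS_ADDR
 * header plus the number of blocks matching the @compr rule: valid data
 * block addresses only, or anything that is not NULL_ADDR.
 */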
 895static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
896{
897 struct dnode_of_data dn;
898 int ret;
899
900 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
901 ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
902 LOOKUP_NODE);
903 if (ret) {
904 if (ret == -ENOENT)
905 ret = 0;
906 goto fail;
907 }
908
909 if (dn.data_blkaddr == COMPRESS_ADDR) {
910 int i;
911
912 ret = 1;
913 for (i = 1; i < cc->cluster_size; i++) {
914 block_t blkaddr;
915
 916 blkaddr = data_blkaddr(dn.inode,
 917 dn.node_page, dn.ofs_in_node + i);
918 if (compr) {
919 if (__is_valid_data_blkaddr(blkaddr))
920 ret++;
921 } else {
922 if (blkaddr != NULL_ADDR)
923 ret++;
924 }
925 }
926 }
927fail:
928 f2fs_put_dnode(&dn);
929 return ret;
930}
931
932/* return # of compressed blocks in compressed cluster */
933static int f2fs_compressed_blocks(struct compress_ctx *cc)
934{
935 return __f2fs_cluster_blocks(cc, true);
936}
937
938/* return # of valid blocks in compressed cluster */
 939static int f2fs_cluster_blocks(struct compress_ctx *cc)
940{
941 return __f2fs_cluster_blocks(cc, false);
942}
943
944int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
945{
946 struct compress_ctx cc = {
947 .inode = inode,
948 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
949 .cluster_size = F2FS_I(inode)->i_cluster_size,
950 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
951 };
952
 953 return f2fs_cluster_blocks(&cc);
954}
955
956static bool cluster_may_compress(struct compress_ctx *cc)
957{
 958 if (!f2fs_need_compress_data(cc->inode))
959 return false;
960 if (f2fs_is_atomic_file(cc->inode))
961 return false;
962 if (f2fs_is_mmap_file(cc->inode))
963 return false;
964 if (!f2fs_cluster_is_full(cc))
965 return false;
966 if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
967 return false;
 968 return !cluster_has_invalid_data(cc);
969}
970
971static void set_cluster_writeback(struct compress_ctx *cc)
972{
973 int i;
974
975 for (i = 0; i < cc->cluster_size; i++) {
976 if (cc->rpages[i])
977 set_page_writeback(cc->rpages[i]);
978 }
979}
980
981static void set_cluster_dirty(struct compress_ctx *cc)
982{
983 int i;
984
985 for (i = 0; i < cc->cluster_size; i++)
986 if (cc->rpages[i])
987 set_page_dirty(cc->rpages[i]);
988}
989
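/*
 * Pin and lock every raw page of the cluster before an overwrite: read the
 * existing compressed data in if needed, retry from scratch if a page was
 * truncated or is not yet uptodate, and preallocate blocks when the cluster
 * is only partially mapped. On success the locked rpages array is handed
 * back to the caller via *fsdata.
 */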
990static int prepare_compress_overwrite(struct compress_ctx *cc,
991 struct page **pagep, pgoff_t index, void **fsdata)
992{
993 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
994 struct address_space *mapping = cc->inode->i_mapping;
995 struct page *page;
996 struct dnode_of_data dn;
997 sector_t last_block_in_bio;
998 unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
999 pgoff_t start_idx = start_idx_of_cluster(cc);
1000 int i, ret;
1001 bool prealloc;
1002
1003retry:
 1004 ret = f2fs_cluster_blocks(cc);
1005 if (ret <= 0)
1006 return ret;
1007
1008 /* compressed case */
1009 prealloc = (ret < cc->cluster_size);
1010
1011 ret = f2fs_init_compress_ctx(cc);
1012 if (ret)
1013 return ret;
1014
1015 /* keep page reference to avoid page reclaim */
1016 for (i = 0; i < cc->cluster_size; i++) {
1017 page = f2fs_pagecache_get_page(mapping, start_idx + i,
1018 fgp_flag, GFP_NOFS);
1019 if (!page) {
1020 ret = -ENOMEM;
1021 goto unlock_pages;
1022 }
1023
1024 if (PageUptodate(page))
 1025 f2fs_put_page(page, 1);
1026 else
1027 f2fs_compress_ctx_add_page(cc, page);
1028 }
1029
1030 if (!f2fs_cluster_is_empty(cc)) {
1031 struct bio *bio = NULL;
1032
1033 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
 1034 &last_block_in_bio, false, true);
 1035 f2fs_put_rpages(cc);
 1036 f2fs_destroy_compress_ctx(cc, true);
 1037 if (ret)
 1038 goto out;
1039 if (bio)
1040 f2fs_submit_bio(sbi, bio, DATA);
1041
1042 ret = f2fs_init_compress_ctx(cc);
1043 if (ret)
 1044 goto out;
1045 }
1046
1047 for (i = 0; i < cc->cluster_size; i++) {
1048 f2fs_bug_on(sbi, cc->rpages[i]);
1049
1050 page = find_lock_page(mapping, start_idx + i);
1051 if (!page) {
1052 /* page can be truncated */
1053 goto release_and_retry;
1054 }
1055
1056 f2fs_wait_on_page_writeback(page, DATA, true, true);
 1057 f2fs_compress_ctx_add_page(cc, page);
1058
1059 if (!PageUptodate(page)) {
1060release_and_retry:
1061 f2fs_put_rpages(cc);
 1062 f2fs_unlock_rpages(cc, i + 1);
 1063 f2fs_destroy_compress_ctx(cc, true);
1064 goto retry;
1065 }
1066 }
1067
1068 if (prealloc) {
 1069 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
1070
1071 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1072
1073 for (i = cc->cluster_size - 1; i > 0; i--) {
1074 ret = f2fs_get_block(&dn, start_idx + i);
1075 if (ret) {
1076 i = cc->cluster_size;
1077 break;
1078 }
1079
1080 if (dn.data_blkaddr != NEW_ADDR)
1081 break;
1082 }
1083
 1084 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
1085 }
1086
1087 if (likely(!ret)) {
1088 *fsdata = cc->rpages;
1089 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1090 return cc->cluster_size;
1091 }
1092
1093unlock_pages:
 1094 f2fs_put_rpages(cc);
 1095 f2fs_unlock_rpages(cc, i);
 1096 f2fs_destroy_compress_ctx(cc, true);
 1097out:
1098 return ret;
1099}
1100
1101int f2fs_prepare_compress_overwrite(struct inode *inode,
1102 struct page **pagep, pgoff_t index, void **fsdata)
1103{
1104 struct compress_ctx cc = {
1105 .inode = inode,
1106 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1107 .cluster_size = F2FS_I(inode)->i_cluster_size,
1108 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1109 .rpages = NULL,
1110 .nr_rpages = 0,
1111 };
1112
1113 return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1114}
1115
1116bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1117 pgoff_t index, unsigned copied)
1118
1119{
1120 struct compress_ctx cc = {
 1121 .inode = inode,
1122 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1123 .cluster_size = F2FS_I(inode)->i_cluster_size,
1124 .rpages = fsdata,
1125 };
1126 bool first_index = (index == cc.rpages[0]->index);
1127
1128 if (copied)
1129 set_cluster_dirty(&cc);
1130
1131 f2fs_put_rpages_wbc(&cc, NULL, false, 1);
 1132 f2fs_destroy_compress_ctx(&cc, false);
1133
1134 return first_index;
1135}
1136
1137int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1138{
1139 void *fsdata = NULL;
1140 struct page *pagep;
1141 int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1142 pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1143 log_cluster_size;
1144 int err;
1145
1146 err = f2fs_is_compressed_cluster(inode, start_idx);
1147 if (err < 0)
1148 return err;
1149
1150 /* truncate normal cluster */
1151 if (!err)
1152 return f2fs_do_truncate_blocks(inode, from, lock);
1153
1154 /* truncate compressed cluster */
1155 err = f2fs_prepare_compress_overwrite(inode, &pagep,
1156 start_idx, &fsdata);
1157
1158 /* should not be a normal cluster */
1159 f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1160
1161 if (err <= 0)
1162 return err;
1163
1164 if (err > 0) {
1165 struct page **rpages = fsdata;
1166 int cluster_size = F2FS_I(inode)->i_cluster_size;
1167 int i;
1168
1169 for (i = cluster_size - 1; i >= 0; i--) {
1170 loff_t start = rpages[i]->index << PAGE_SHIFT;
1171
1172 if (from <= start) {
1173 zero_user_segment(rpages[i], 0, PAGE_SIZE);
1174 } else {
1175 zero_user_segment(rpages[i], from - start,
1176 PAGE_SIZE);
1177 break;
1178 }
1179 }
1180
1181 f2fs_compress_write_end(inode, fsdata, start_idx, true);
1182 }
1183 return 0;
1184}
1185
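/*
 * Write back one successfully compressed cluster: the cluster's first block
 * is rewritten as the COMPRESS_ADDR header, the compressed pages are written
 * out of place, and blocks beyond the new compressed length are invalidated.
 * Any failure returns -EAGAIN so the caller can fall back to raw writes.
 */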
1186static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1187 int *submitted,
1188 struct writeback_control *wbc,
1189 enum iostat_type io_type)
1190{
1191 struct inode *inode = cc->inode;
1192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1193 struct f2fs_inode_info *fi = F2FS_I(inode);
1194 struct f2fs_io_info fio = {
1195 .sbi = sbi,
1196 .ino = cc->inode->i_ino,
1197 .type = DATA,
1198 .op = REQ_OP_WRITE,
1199 .op_flags = wbc_to_write_flags(wbc),
1200 .old_blkaddr = NEW_ADDR,
1201 .page = NULL,
1202 .encrypted_page = NULL,
1203 .compressed_page = NULL,
1204 .submitted = false,
1205 .io_type = io_type,
1206 .io_wbc = wbc,
 1207 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
1208 };
1209 struct dnode_of_data dn;
1210 struct node_info ni;
1211 struct compress_io_ctx *cic;
1212 pgoff_t start_idx = start_idx_of_cluster(cc);
1213 unsigned int last_index = cc->cluster_size - 1;
1214 loff_t psize;
1215 int i, err;
1216
 1217 /* we should bypass data pages to let the kworker jobs proceed */
1218 if (unlikely(f2fs_cp_error(sbi))) {
1219 mapping_set_error(cc->rpages[0]->mapping, -EIO);
1220 goto out_free;
1221 }
1222
1223 if (IS_NOQUOTA(inode)) {
1224 /*
1225 * We need to wait for node_write to avoid block allocation during
1226 * checkpoint. This can only happen to quota writes which can cause
1227 * the below discard race condition.
1228 */
1229 down_read(&sbi->node_write);
1230 } else if (!f2fs_trylock_op(sbi)) {
 1231 goto out_free;
 1232 }
 1233
 1234 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1235
1236 err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1237 if (err)
1238 goto out_unlock_op;
1239
1240 for (i = 0; i < cc->cluster_size; i++) {
 1241 if (data_blkaddr(dn.inode, dn.node_page,
1242 dn.ofs_in_node + i) == NULL_ADDR)
1243 goto out_put_dnode;
1244 }
1245
1246 psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1247
1248 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1249 if (err)
1250 goto out_put_dnode;
1251
1252 fio.version = ni.version;
1253
 1254 cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
1255 if (!cic)
1256 goto out_put_dnode;
1257
1258 cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1259 cic->inode = inode;
 1260 atomic_set(&cic->pending_pages, cc->nr_cpages);
 1261 cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1262 if (!cic->rpages)
1263 goto out_put_cic;
1264
1265 cic->nr_rpages = cc->cluster_size;
1266
1267 for (i = 0; i < cc->nr_cpages; i++) {
1268 f2fs_set_compressed_page(cc->cpages[i], inode,
 1269 cc->rpages[i + 1]->index, cic);
 1270 fio.compressed_page = cc->cpages[i];
1271
1272 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1273 dn.ofs_in_node + i + 1);
1274
1275 /* wait for GCed page writeback via META_MAPPING */
1276 f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1277
1278 if (fio.encrypted) {
1279 fio.page = cc->rpages[i + 1];
1280 err = f2fs_encrypt_one_page(&fio);
1281 if (err)
1282 goto out_destroy_crypt;
1283 cc->cpages[i] = fio.encrypted_page;
1284 }
1285 }
1286
1287 set_cluster_writeback(cc);
1288
1289 for (i = 0; i < cc->cluster_size; i++)
1290 cic->rpages[i] = cc->rpages[i];
1291
1292 for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1293 block_t blkaddr;
1294
 1295 blkaddr = f2fs_data_blkaddr(&dn);
 1296 fio.page = cc->rpages[i];
1297 fio.old_blkaddr = blkaddr;
1298
1299 /* cluster header */
1300 if (i == 0) {
1301 if (blkaddr == COMPRESS_ADDR)
1302 fio.compr_blocks++;
1303 if (__is_valid_data_blkaddr(blkaddr))
1304 f2fs_invalidate_blocks(sbi, blkaddr);
1305 f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1306 goto unlock_continue;
1307 }
1308
1309 if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1310 fio.compr_blocks++;
1311
1312 if (i > cc->nr_cpages) {
1313 if (__is_valid_data_blkaddr(blkaddr)) {
1314 f2fs_invalidate_blocks(sbi, blkaddr);
1315 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1316 }
1317 goto unlock_continue;
1318 }
1319
1320 f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1321
1322 if (fio.encrypted)
1323 fio.encrypted_page = cc->cpages[i - 1];
1324 else
1325 fio.compressed_page = cc->cpages[i - 1];
1326
1327 cc->cpages[i - 1] = NULL;
1328 f2fs_outplace_write_data(&dn, &fio);
1329 (*submitted)++;
1330unlock_continue:
1331 inode_dec_dirty_pages(cc->inode);
1332 unlock_page(fio.page);
1333 }
1334
1335 if (fio.compr_blocks)
1336 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1337 f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
 1338 add_compr_block_stat(inode, cc->nr_cpages);
1339
1340 set_inode_flag(cc->inode, FI_APPEND_WRITE);
1341 if (cc->cluster_idx == 0)
1342 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1343
1344 f2fs_put_dnode(&dn);
1345 if (IS_NOQUOTA(inode))
1346 up_read(&sbi->node_write);
1347 else
 1348 f2fs_unlock_op(sbi);
 1349
 1350 spin_lock(&fi->i_size_lock);
1351 if (fi->last_disk_size < psize)
1352 fi->last_disk_size = psize;
 1353 spin_unlock(&fi->i_size_lock);
1354
1355 f2fs_put_rpages(cc);
1356 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1357 cc->cpages = NULL;
 1358 f2fs_destroy_compress_ctx(cc, false);
1359 return 0;
1360
1361out_destroy_crypt:
 1362 page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1363
1364 for (--i; i >= 0; i--)
1365 fscrypt_finalize_bounce_page(&cc->cpages[i]);
1366 for (i = 0; i < cc->nr_cpages; i++) {
1367 if (!cc->cpages[i])
1368 continue;
1369 f2fs_compress_free_page(cc->cpages[i]);
1370 cc->cpages[i] = NULL;
1371 }
1372out_put_cic:
 1373 kmem_cache_free(cic_entry_slab, cic);
1374out_put_dnode:
1375 f2fs_put_dnode(&dn);
1376out_unlock_op:
1377 if (IS_NOQUOTA(inode))
1378 up_read(&sbi->node_write);
1379 else
 1380 f2fs_unlock_op(sbi);
1381out_free:
1382 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1383 cc->cpages = NULL;
1384 return -EAGAIN;
1385}
1386
1387void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1388{
1389 struct f2fs_sb_info *sbi = bio->bi_private;
1390 struct compress_io_ctx *cic =
1391 (struct compress_io_ctx *)page_private(page);
1392 int i;
1393
1394 if (unlikely(bio->bi_status))
1395 mapping_set_error(cic->inode->i_mapping, -EIO);
1396
 1397 f2fs_compress_free_page(page);
1398
1399 dec_page_count(sbi, F2FS_WB_DATA);
1400
 1401 if (atomic_dec_return(&cic->pending_pages))
1402 return;
1403
1404 for (i = 0; i < cic->nr_rpages; i++) {
1405 WARN_ON(!cic->rpages[i]);
 1406 clear_page_private_gcing(cic->rpages[i]);
1407 end_page_writeback(cic->rpages[i]);
1408 }
1409
 1410 page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
 1411 kmem_cache_free(cic_entry_slab, cic);
1412}
1413
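/*
 * Fallback path: write every dirty page of the cluster individually through
 * f2fs_write_single_data_page(), redirtying the remaining pages on error so
 * writeback will retry the whole cluster later.
 */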
1414static int f2fs_write_raw_pages(struct compress_ctx *cc,
1415 int *submitted,
1416 struct writeback_control *wbc,
1417 enum iostat_type io_type)
1418{
1419 struct address_space *mapping = cc->inode->i_mapping;
1420 int _submitted, compr_blocks, ret;
1421 int i = -1, err = 0;
1422
1423 compr_blocks = f2fs_compressed_blocks(cc);
1424 if (compr_blocks < 0) {
1425 err = compr_blocks;
1426 goto out_err;
1427 }
1428
1429 for (i = 0; i < cc->cluster_size; i++) {
1430 if (!cc->rpages[i])
1431 continue;
1432retry_write:
1433 if (cc->rpages[i]->mapping != mapping) {
1434 unlock_page(cc->rpages[i]);
1435 continue;
1436 }
1437
1438 BUG_ON(!PageLocked(cc->rpages[i]));
1439
1440 ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1441 NULL, NULL, wbc, io_type,
 1442 compr_blocks, false);
1443 if (ret) {
1444 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1445 unlock_page(cc->rpages[i]);
1446 ret = 0;
1447 } else if (ret == -EAGAIN) {
1448 /*
1449 * for quota file, just redirty left pages to
1450 * avoid deadlock caused by cluster update race
1451 * from foreground operation.
1452 */
1453 if (IS_NOQUOTA(cc->inode)) {
1454 err = 0;
1455 goto out_err;
1456 }
1457 ret = 0;
1458 cond_resched();
1459 congestion_wait(BLK_RW_ASYNC,
1460 DEFAULT_IO_TIMEOUT);
 1461 lock_page(cc->rpages[i]);
1462
1463 if (!PageDirty(cc->rpages[i])) {
1464 unlock_page(cc->rpages[i]);
1465 continue;
1466 }
1467
1468 clear_page_dirty_for_io(cc->rpages[i]);
1469 goto retry_write;
1470 }
1471 err = ret;
 1472 goto out_err;
1473 }
1474
1475 *submitted += _submitted;
1476 }
1477
1478 f2fs_balance_fs(F2FS_M_SB(mapping), true);
1479
 1480 return 0;
1481out_err:
1482 for (++i; i < cc->cluster_size; i++) {
1483 if (!cc->rpages[i])
1484 continue;
1485 redirty_page_for_writepage(wbc, cc->rpages[i]);
1486 unlock_page(cc->rpages[i]);
1487 }
1488 return err;
1489}
1490
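/*
 * Entry point for cluster writeback: try the compressed path first when the
 * cluster qualifies, and fall back to writing the raw pages when compression
 * fails or does not save space.
 */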
1491int f2fs_write_multi_pages(struct compress_ctx *cc,
1492 int *submitted,
1493 struct writeback_control *wbc,
1494 enum iostat_type io_type)
1495{
1496 int err;
1497
1498 *submitted = 0;
1499 if (cluster_may_compress(cc)) {
1500 err = f2fs_compress_pages(cc);
1501 if (err == -EAGAIN) {
1502 goto write;
1503 } else if (err) {
1504 f2fs_put_rpages_wbc(cc, wbc, true, 1);
1505 goto destroy_out;
1506 }
1507
1508 err = f2fs_write_compressed_pages(cc, submitted,
1509 wbc, io_type);
1510 if (!err)
1511 return 0;
1512 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1513 }
1514write:
1515 f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1516
1517 err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1518 f2fs_put_rpages_wbc(cc, wbc, false, 0);
1519destroy_out:
 1520 f2fs_destroy_compress_ctx(cc, false);
1521 return err;
1522}
1523
1524static void f2fs_free_dic(struct decompress_io_ctx *dic);
1525
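/*
 * Allocate the per-cluster decompression context, including one compressed
 * page per cpage to be read from disk; the caller's rpages are referenced,
 * not copied. The context is freed by f2fs_free_dic() once the last
 * reference is dropped through f2fs_put_dic().
 */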
1526struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1527{
1528 struct decompress_io_ctx *dic;
1529 pgoff_t start_idx = start_idx_of_cluster(cc);
1530 int i;
1531
 1532 dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
1533 if (!dic)
1534 return ERR_PTR(-ENOMEM);
1535
 1536 dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
 1537 if (!dic->rpages) {
 1538 kmem_cache_free(dic_entry_slab, dic);
1539 return ERR_PTR(-ENOMEM);
1540 }
1541
1542 dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1543 dic->inode = cc->inode;
 1544 atomic_set(&dic->remaining_pages, cc->nr_cpages);
1545 dic->cluster_idx = cc->cluster_idx;
1546 dic->cluster_size = cc->cluster_size;
1547 dic->log_cluster_size = cc->log_cluster_size;
1548 dic->nr_cpages = cc->nr_cpages;
 1549 refcount_set(&dic->refcnt, 1);
 1550 dic->failed = false;
 1551 dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1552
1553 for (i = 0; i < dic->cluster_size; i++)
1554 dic->rpages[i] = cc->rpages[i];
1555 dic->nr_rpages = cc->cluster_size;
1556
 1557 dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1558 if (!dic->cpages)
1559 goto out_free;
1560
1561 for (i = 0; i < dic->nr_cpages; i++) {
1562 struct page *page;
1563
 1564 page = f2fs_compress_alloc_page();
1565 if (!page)
1566 goto out_free;
1567
1568 f2fs_set_compressed_page(page, cc->inode,
 1569 start_idx + i + 1, dic);
1570 dic->cpages[i] = page;
1571 }
1572
1573 return dic;
1574
1575out_free:
1576 f2fs_free_dic(dic);
1577 return ERR_PTR(-ENOMEM);
1578}
1579
 1580static void f2fs_free_dic(struct decompress_io_ctx *dic)
1581{
1582 int i;
1583
1584 if (dic->tpages) {
1585 for (i = 0; i < dic->cluster_size; i++) {
1586 if (dic->rpages[i])
1587 continue;
1588 if (!dic->tpages[i])
1589 continue;
 1590 f2fs_compress_free_page(dic->tpages[i]);
 1591 }
 1592 page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1593 }
1594
1595 if (dic->cpages) {
1596 for (i = 0; i < dic->nr_cpages; i++) {
1597 if (!dic->cpages[i])
1598 continue;
 1599 f2fs_compress_free_page(dic->cpages[i]);
 1600 }
 1601 page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1602 }
1603
 1604 page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
 1605 kmem_cache_free(dic_entry_slab, dic);
1606}
1607
1608static void f2fs_put_dic(struct decompress_io_ctx *dic)
1609{
1610 if (refcount_dec_and_test(&dic->refcnt))
1611 f2fs_free_dic(dic);
1612}
1613
1614/*
1615 * Update and unlock the cluster's pagecache pages, and release the reference to
1616 * the decompress_io_ctx that was being held for I/O completion.
1617 */
1618static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1619{
1620 int i;
1621
1622 for (i = 0; i < dic->cluster_size; i++) {
1623 struct page *rpage = dic->rpages[i];
1624
1625 if (!rpage)
1626 continue;
1627
1628 /* PG_error was set if verity failed. */
1629 if (failed || PageError(rpage)) {
1630 ClearPageUptodate(rpage);
1631 /* will re-read again later */
1632 ClearPageError(rpage);
1633 } else {
 1634 SetPageUptodate(rpage);
1635 }
1636 unlock_page(rpage);
1637 }
1638
1639 f2fs_put_dic(dic);
1640}
1641
1642static void f2fs_verify_cluster(struct work_struct *work)
1643{
1644 struct decompress_io_ctx *dic =
1645 container_of(work, struct decompress_io_ctx, verity_work);
1646 int i;
1647
1648 /* Verify the cluster's decompressed pages with fs-verity. */
1649 for (i = 0; i < dic->cluster_size; i++) {
1650 struct page *rpage = dic->rpages[i];
1651
1652 if (rpage && !fsverity_verify_page(rpage))
1653 SetPageError(rpage);
1654 }
1655
1656 __f2fs_decompress_end_io(dic, false);
1657}
1658
1659/*
1660 * This is called when a compressed cluster has been decompressed
1661 * (or failed to be read and/or decompressed).
1662 */
1663void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
1664{
1665 if (!failed && dic->need_verity) {
1666 /*
1667 * Note that to avoid deadlocks, the verity work can't be done
1668 * on the decompression workqueue. This is because verifying
1669 * the data pages can involve reading metadata pages from the
1670 * file, and these metadata pages may be compressed.
1671 */
1672 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1673 fsverity_enqueue_verify_work(&dic->verity_work);
1674 } else {
1675 __f2fs_decompress_end_io(dic, failed);
1676 }
1677}
1678
1679/*
1680 * Put a reference to a compressed page's decompress_io_ctx.
1681 *
1682 * This is called when the page is no longer needed and can be freed.
1683 */
1684void f2fs_put_page_dic(struct page *page)
1685{
1686 struct decompress_io_ctx *dic =
1687 (struct decompress_io_ctx *)page_private(page);
1688
1689 f2fs_put_dic(dic);
 1690}
1691
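/*
 * The page_array slab is sized for one full cluster worth of page pointers
 * (compress_log_size) and is created per superblock, so its name carries the
 * block device's major:minor numbers.
 */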
1692int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1693{
1694 dev_t dev = sbi->sb->s_bdev->bd_dev;
1695 char slab_name[32];
1696
1697 sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1698
1699 sbi->page_array_slab_size = sizeof(struct page *) <<
1700 F2FS_OPTION(sbi).compress_log_size;
1701
1702 sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1703 sbi->page_array_slab_size);
1704 if (!sbi->page_array_slab)
1705 return -ENOMEM;
1706 return 0;
1707}
1708
1709void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1710{
1711 kmem_cache_destroy(sbi->page_array_slab);
1712}
1713
1714static int __init f2fs_init_cic_cache(void)
1715{
1716 cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1717 sizeof(struct compress_io_ctx));
1718 if (!cic_entry_slab)
1719 return -ENOMEM;
1720 return 0;
1721}
1722
1723static void f2fs_destroy_cic_cache(void)
1724{
1725 kmem_cache_destroy(cic_entry_slab);
1726}
1727
1728static int __init f2fs_init_dic_cache(void)
1729{
1730 dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1731 sizeof(struct decompress_io_ctx));
1732 if (!dic_entry_slab)
1733 return -ENOMEM;
1734 return 0;
1735}
1736
1737static void f2fs_destroy_dic_cache(void)
1738{
1739 kmem_cache_destroy(dic_entry_slab);
1740}
1741
1742int __init f2fs_init_compress_cache(void)
1743{
1744 int err;
1745
1746 err = f2fs_init_cic_cache();
1747 if (err)
1748 goto out;
1749 err = f2fs_init_dic_cache();
1750 if (err)
1751 goto free_cic;
1752 return 0;
1753free_cic:
1754 f2fs_destroy_cic_cache();
1755out:
1756 return -ENOMEM;
1757}
1758
1759void f2fs_destroy_compress_cache(void)
1760{
1761 f2fs_destroy_dic_cache();
1762 f2fs_destroy_cic_cache();
1763}