// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static struct bio_set btrfs_compressed_bioset;

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}
static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}
static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
						   u64 start, blk_opf_t op,
						   btrfs_bio_end_io_t end_io)
{
	struct btrfs_bio *bbio;

	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
					  GFP_NOFS, &btrfs_compressed_bioset));
	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
	bbio->file_offset = start;
	return to_compressed_bio(bbio);
}
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
static int compression_compress_pages(int type, struct list_head *ws,
				      struct address_space *mapping, u64 start,
				      struct page **pages,
				      unsigned long *out_pages,
				      unsigned long *total_in,
				      unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
					   out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
					  out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
					   out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with remount setting
		 * it to 'no compress', while caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}
static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
static int compression_decompress(int type, struct list_head *ws,
				  const u8 *data_in, struct page *dest_page,
				  unsigned long dest_pgoff, size_t srclen,
				  size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
static void btrfs_free_compressed_pages(struct compressed_bio *cb)
{
	for (unsigned int i = 0; i < cb->nr_pages; i++)
		btrfs_free_compr_page(cb->compressed_pages[i]);
	kfree(cb->compressed_pages);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);
/*
 * Global cache of last unused pages for compression/decompression.
 */
static struct btrfs_compr_pool {
	struct shrinker *shrinker;
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
} compr_pool;
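/*
 * compr_pool.count tracks how many pages are currently cached on the list and
 * compr_pool.thresh is the limit above which btrfs_free_compr_page() frees a
 * page instead of recycling it; the shrinker only reports the excess above
 * that threshold, although its scan callback currently drains the whole list.
 */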
static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
{
	int ret;

	/*
	 * We must not read the values more than once if 'ret' gets expanded in
	 * the return statement so we don't accidentally return a negative
	 * number, even if the first condition finds it positive.
	 */
	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);

	return ret > 0 ? ret : 0;
}
static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	struct list_head remove;
	struct list_head *tmp, *next;
	int freed;

	if (compr_pool.count == 0)
		return SHRINK_STOP;

	INIT_LIST_HEAD(&remove);

	/* For now, just simply drain the whole list. */
	spin_lock(&compr_pool.lock);
	list_splice_init(&compr_pool.list, &remove);
	freed = compr_pool.count;
	compr_pool.count = 0;
	spin_unlock(&compr_pool.lock);

	list_for_each_safe(tmp, next, &remove) {
		struct page *page = list_entry(tmp, struct page, lru);

		ASSERT(page_ref_count(page) == 1);
		put_page(page);
	}

	return freed;
}
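/*
 * Pages parked in compr_pool keep exactly one reference, the one taken when
 * they were allocated, which is why the shrinker above and the free path
 * below can assert a refcount of 1 right before the final put_page().
 */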
/*
 * Common wrappers for page allocation from compression wrappers
 */
struct page *btrfs_alloc_compr_page(void)
{
	struct page *page = NULL;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > 0) {
		page = list_first_entry(&compr_pool.list, struct page, lru);
		list_del_init(&page->lru);
		compr_pool.count--;
	}
	spin_unlock(&compr_pool.lock);

	if (page)
		return page;

	return alloc_page(GFP_NOFS);
}
void btrfs_free_compr_page(struct page *page)
{
	bool do_free = false;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > compr_pool.thresh) {
		do_free = true;
	} else {
		list_add(&page->lru, &compr_pool.list);
		compr_pool.count++;
	}
	spin_unlock(&compr_pool.lock);

	if (!do_free)
		return;

	ASSERT(page_ref_count(page) == 1);
	put_page(page);
}
static void end_bbio_compressed_read(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	blk_status_t status = bbio->bio.bi_status;

	if (!status)
		status = errno_to_blk_status(btrfs_decompress_bio(cb));

	btrfs_free_compressed_pages(cb);
	btrfs_bio_end_io(cb->orig_bbio, status);
	bio_put(&bbio->bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
	struct inode *inode = &cb->bbio.inode->vfs_inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
	int i;
	int ret;

	if (error)
		mapping_set_error(inode->i_mapping, error);

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
					 &fbatch);

		if (ret == 0)
			return;

		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			btrfs_folio_clamp_clear_writeback(fs_info, folio,
							  cb->start, cb->len);
		}
		folio_batch_release(&fbatch);
	}
	/* the inode may be gone now */
}
static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
	struct compressed_bio *cb =
		container_of(work, struct compressed_bio, write_end_work);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now */

	btrfs_free_compressed_pages(cb);
	bio_put(&cb->bbio.bio);
}
/*
 * Do the cleanup once all the compressed pages hit the disk. This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}
static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	u32 offset = 0;

	while (offset < cb->compressed_len) {
		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);

		/* Maximum compressed extent is smaller than bio size limit. */
		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
			       len, 0);
		offset += len;
	}
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct page **compressed_pages,
				   unsigned int nr_pages,
				   blk_opf_t write_flags,
				   bool writeback)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct compressed_bio *cb;

	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));

	cb = alloc_compressed_bio(inode, ordered->file_offset,
				  REQ_OP_WRITE | write_flags,
				  end_bbio_compressed_write);
	cb->start = ordered->file_offset;
	cb->len = ordered->num_bytes;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = ordered->disk_num_bytes;
	cb->writeback = writeback;
	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
	cb->nr_pages = nr_pages;
	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
	cb->bbio.ordered = ordered;
	btrfs_add_compressed_bio_pages(cb);

	btrfs_submit_bio(&cb->bbio, 0);
}
/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extents.
 *
 * This means, if several sectors in the same page point to the same
 * on-disk compressed data, we will re-read the same extent many times and
 * this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb,
				     int *memstall, unsigned long *pflags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long end_index;
	struct bio *orig_bio = &cb->orig_bbio->bio;
	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have page for
			 * current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			/* There is already a page, skip to page end */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		if (!*memstall && PageWorkingset(page)) {
			psi_memstall_enter(pflags);
			*memstall = 1;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end, NULL);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file. But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
			}
		}

		add_size = min(em->start + em->len, page_end + 1) - cur;
		free_extent_map(em);
		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
		if (ret != add_size) {
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio we will decrease
		 * subpage::readers and to unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page_folio(page),
						   cur, add_size);
		put_page(page);
		cur += add_size;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio's end_io callbacks
 */
void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	u64 file_offset = bbio->file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	unsigned long pflags;
	int memstall = 0;
	blk_status_t ret;
	int ret2;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em) {
		ret = BLK_STS_IOERR;
		goto out;
	}

	ASSERT(extent_map_is_compressed(em));
	compressed_len = em->block_len;

	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
				  end_bbio_compressed_read);

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	cb->len = bbio->bio.bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_map_compression(em);
	cb->orig_bbio = bbio;

	free_extent_map(em);

	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!cb->compressed_pages) {
		ret = BLK_STS_RESOURCE;
		goto out_free_bio;
	}

	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages, 0);
	if (ret2) {
		ret = BLK_STS_RESOURCE;
		goto out_free_compressed_pages;
	}

	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
			 &pflags);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bbio->bio.bi_iter.bi_size;
	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
	btrfs_add_compressed_bio_pages(cb);

	if (memstall)
		psi_memstall_leave(&pflags);

	btrfs_submit_bio(&cb->bbio, 0);
	return;

out_free_compressed_pages:
	kfree(cb->compressed_pages);
out_free_bio:
	bio_put(&cb->bbio.bio);
out:
	btrfs_bio_end_io(bbio, ret);
}
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
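/*
 * Worked example with the constants above: a 128KiB (BTRFS_MAX_UNCOMPRESSED)
 * range gives 128KiB / 256 = 512 sampling positions, each contributing 16
 * bytes, ie. an 8KiB sample at most.
 */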
struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;
static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}
static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}
const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}
static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation guarantees forward progress and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs))
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
		}
		goto again;
	}
	return workspace;
}
static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}
static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is in bits 0-3
 * - the level is in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * compressed extent size.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	if (!ret)
		zero_fill_bio(&cb->orig_bbio->bio);
	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
	struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page);
	struct list_head *workspace;
	const u32 sectorsize = fs_info->sectorsize;
	int ret;

	/*
	 * The full destination page range should not exceed the page size.
	 * And the @destlen should not exceed sectorsize, as this is only called for
	 * inline file extents, which should not exceed sectorsize.
	 */
	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     dest_pgoff, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}
int __init btrfs_init_compress(void)
{
	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
			offsetof(struct compressed_bio, bbio.bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;

	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
	if (!compr_pool.shrinker)
		return -ENOMEM;

	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();

	spin_lock_init(&compr_pool.lock);
	INIT_LIST_HEAD(&compr_pool.list);
	compr_pool.count = 0;
	/* 128K / 4K = 32, for 8 threads is 256 pages. */
	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
	compr_pool.shrinker->batch = 32;
	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
	shrinker_register(compr_pool.shrinker);

	return 0;
}
void __cold btrfs_exit_compress(void)
{
	/* For now scan drains all pages and does not touch the parameters. */
	btrfs_compr_pool_scan(NULL, NULL);
	shrinker_free(compr_pool.shrinker);

	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
	bioset_exit(&btrfs_compressed_bioset);
}
/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 *			full decompressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read for
 *
 * An easier to understand graph is like below:
 *
 *		|<- orig_bio ->|     |<- orig_bio->|
 *	|<-------      full decompressed extent      ----->|
 *	|<-----------    @cb range   ---->|
 *	|			|<-- @buf_len -->|
 *	|<--- @decompressed --->|
 *
 * Note that, @cb can be a subpage of the full decompressed extent, but
 * @cb->start always has the same value as the orig_file_offset of the full
 * decompressed extent.
 *
 * When reading compressed extent, we have to read the full compressed extent,
 * while @orig_bio may only want part of the range.
 * Thus this function will ensure only data covered by @orig_bio will be copied
 * to the caller.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need to continue decompressing.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = &cb->orig_bbio->bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		u32 copy_start;
		u32 copy_len;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;

		/*
		 * Extra range check to ensure we didn't go beyond
		 * the destination buffer.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACCEPTABLE - below that threshold, sample has low byte entropy
 *			     and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACCEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)
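/*
 * Example: a sample where every byte value is equally likely needs the full
 * 8 bits per byte, ie. 100%, far above ENTROPY_LVL_HIGH, while typical text
 * needs roughly 4-5 bits per byte and lands below ENTROPY_LVL_ACCEPTABLE.
 */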
/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
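/*
 * ilog2_w(n) approximates 4 * log2(n) in fixed point, eg. ilog2_w(8192) =
 * ilog2(8192^4) = 52. shannon_entropy() below relies on this scaling, which
 * cancels out when the result is turned into a percentage of entropy_max.
 */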
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
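/*
 * Note that get4bits() returns the radix digit inverted (15 - digit), so
 * radix_sort() below produces a descending order, which is what
 * byte_core_set_size() expects when it sums the most frequent buckets first.
 */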
/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!PAGE_ALIGNED(end))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACCEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54. The heuristic would
	 *    be confused. This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...". This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH)
		ret = 5;
	else
		ret = 0;
out:
	put_workspace(0, ws_list);
	return ret;
}
/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, unrecognized string will set the default level
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}