// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "fs.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "file-item.h"
#include "super.h"

static struct bio_set btrfs_compressed_bioset;

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}

static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
						   u64 start, blk_opf_t op,
						   btrfs_bio_end_io_t end_io)
{
	struct btrfs_bio *bbio;

	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
					  GFP_NOFS, &btrfs_compressed_bioset));
	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);

	bbio->file_offset = start;
	return to_compressed_bio(bbio);
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
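
/*
 * Illustrative examples (not compiled): the check only compares the algorithm
 * name prefix, so a mount-style string keeps its level suffix:
 *
 *	btrfs_compress_is_valid_type("zlib:9", 6)	-> true ("zlib" prefix)
 *	btrfs_compress_is_valid_type("zstd", 4)		-> true
 *	btrfs_compress_is_valid_type("gzip", 4)		-> false
 */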

static int compression_compress_pages(int type, struct list_head *ws,
		struct address_space *mapping, u64 start, struct page **pages,
		unsigned long *out_pages, unsigned long *total_in,
		unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with remount setting
		 * it to 'no compress', while caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
		const u8 *data_in, struct page *dest_page,
		unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_free_compressed_pages(struct compressed_bio *cb)
{
	for (unsigned int i = 0; i < cb->nr_pages; i++)
		btrfs_free_compr_page(cb->compressed_pages[i]);
	kfree(cb->compressed_pages);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

/*
 * Global cache of last unused pages for compression/decompression.
 */
static struct btrfs_compr_pool {
	struct shrinker *shrinker;
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
} compr_pool;

static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
{
	int ret;

	/*
	 * We must not read the values more than once if 'ret' gets expanded in
	 * the return statement so we don't accidentally return a negative
	 * number, even if the first condition finds it positive.
	 */
	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);

	return ret > 0 ? ret : 0;
}

static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	struct list_head remove;
	struct list_head *tmp, *next;
	int freed;

	if (compr_pool.count == 0)
		return SHRINK_STOP;

	INIT_LIST_HEAD(&remove);

	/* For now, just simply drain the whole list. */
	spin_lock(&compr_pool.lock);
	list_splice_init(&compr_pool.list, &remove);
	freed = compr_pool.count;
	compr_pool.count = 0;
	spin_unlock(&compr_pool.lock);

	list_for_each_safe(tmp, next, &remove) {
		struct page *page = list_entry(tmp, struct page, lru);

		ASSERT(page_ref_count(page) == 1);
		put_page(page);
	}

	return freed;
}

/*
 * Common wrappers for page allocation from compression wrappers
 */
struct page *btrfs_alloc_compr_page(void)
{
	struct page *page = NULL;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > 0) {
		page = list_first_entry(&compr_pool.list, struct page, lru);
		list_del_init(&page->lru);
		compr_pool.count--;
	}
	spin_unlock(&compr_pool.lock);

	if (page)
		return page;

	return alloc_page(GFP_NOFS);
}

void btrfs_free_compr_page(struct page *page)
{
	bool do_free = false;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > compr_pool.thresh) {
		do_free = true;
	} else {
		list_add(&page->lru, &compr_pool.list);
		compr_pool.count++;
	}
	spin_unlock(&compr_pool.lock);

	if (!do_free)
		return;

	ASSERT(page_ref_count(page) == 1);
	put_page(page);
}
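
/*
 * Design note: pages released through btrfs_free_compr_page() are kept on
 * compr_pool.list (up to compr_pool.thresh entries) and handed back out by
 * btrfs_alloc_compr_page(), so steady-state compression does not go to the
 * page allocator for every extent.  Anything above the threshold is freed
 * immediately, and the registered shrinker can drain the whole cache under
 * memory pressure.
 */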

static void end_compressed_bio_read(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	blk_status_t status = bbio->bio.bi_status;

	if (!status)
		status = errno_to_blk_status(btrfs_decompress_bio(cb));

	btrfs_free_compressed_pages(cb);
	btrfs_bio_end_io(cb->orig_bbio, status);
	bio_put(&bbio->bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
	struct inode *inode = &cb->bbio.inode->vfs_inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
	int i;
	int ret;

	if (error)
		mapping_set_error(inode->i_mapping, error);

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
				&fbatch);

		if (ret == 0)
			return;

		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			btrfs_page_clamp_clear_writeback(fs_info, &folio->page,
							 cb->start, cb->len);
		}
		folio_batch_release(&fbatch);
	}
	/* the inode may be gone now */
}

static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
	struct compressed_bio *cb =
		container_of(work, struct compressed_bio, write_end_work);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now */

	btrfs_free_compressed_pages(cb);
	bio_put(&cb->bbio.bio);
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}

static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	u32 offset = 0;

	while (offset < cb->compressed_len) {
		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);

		/* Maximum compressed extent is smaller than bio size limit. */
		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
			       len, 0);
		offset += len;
	}
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct page **compressed_pages,
				   unsigned int nr_pages,
				   blk_opf_t write_flags,
				   bool writeback)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct compressed_bio *cb;

	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));

	cb = alloc_compressed_bio(inode, ordered->file_offset,
				  REQ_OP_WRITE | write_flags,
				  end_compressed_bio_write);
	cb->start = ordered->file_offset;
	cb->len = ordered->num_bytes;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = ordered->disk_num_bytes;
	cb->writeback = writeback;
	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
	cb->nr_pages = nr_pages;
	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
	cb->bbio.ordered = ordered;
	btrfs_add_compressed_bio_pages(cb);

	btrfs_submit_bio(&cb->bbio, 0);
}
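
/*
 * Usage sketch (illustrative, based on the async compression path): the caller
 * compresses the range into @compressed_pages, creates an ordered extent that
 * describes it, marks the corresponding file pages writeback, and then calls
 *
 *	btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, true);
 *
 * The file pages themselves are not added to the bio here; only the compressed
 * pages are.
 */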

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extents.
 *
 * This means, if we have several sectors in the same page points to the same
 * on-disk compressed data, we will re-read the same extent many times and
 * this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb,
				     int *memstall, unsigned long *pflags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long end_index;
	struct bio *orig_bio = &cb->orig_bbio->bio;
	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have page for
			 * current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			/* There is already a page, skip to page end */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		if (!*memstall && PageWorkingset(page)) {
			psi_memstall_enter(pflags);
			*memstall = 1;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end, NULL);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;

				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
			}
		}

		add_size = min(em->start + em->len, page_end + 1) - cur;
		free_extent_map(em);
		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
		if (ret != add_size) {
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio we will decrease
		 * subpage::readers and to unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
		put_page(page);
		cur += add_size;
	}
	return 0;
}
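
/*
 * Example (illustrative): a 4KiB read at the start of a 128KiB compressed
 * extent arrives with orig_bio covering [0, 4K).  The whole extent has to be
 * read and decompressed anyway, so add_ra_bio_pages() tries to append the page
 * cache pages for [4K, em_start + em_len) to orig_bio, stopping early once it
 * runs into already cached pages or the end of the file.
 */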

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	u64 file_offset = bbio->file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	unsigned long pflags;
	int memstall = 0;
	blk_status_t ret;
	int ret2;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em) {
		ret = BLK_STS_IOERR;
		goto out;
	}

	ASSERT(extent_map_is_compressed(em));
	compressed_len = em->block_len;

	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
				  end_compressed_bio_read);

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	cb->len = bbio->bio.bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_map_compression(em);
	cb->orig_bbio = bbio;

	free_extent_map(em);

	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!cb->compressed_pages) {
		ret = BLK_STS_RESOURCE;
		goto out_free_bio;
	}

	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
	if (ret2) {
		ret = BLK_STS_RESOURCE;
		goto out_free_compressed_pages;
	}

	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
			 &pflags);

	/* include any pages we added in add_ra-bio_pages */
	cb->len = bbio->bio.bi_iter.bi_size;
	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
	btrfs_add_compressed_bio_pages(cb);

	if (memstall)
		psi_memstall_leave(&pflags);

	btrfs_submit_bio(&cb->bbio, 0);
	return;

out_free_compressed_pages:
	kfree(cb->compressed_pages);
out_free_bio:
	bio_put(&cb->bbio.bio);
out:
	btrfs_bio_end_io(bbio, ret);
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects.  Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
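
/*
 * Worked out: BTRFS_MAX_UNCOMPRESSED is 128KiB, so
 * MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes,
 * i.e. 16 bytes from each of up to 512 sampling positions.
 */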

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantees and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs))
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
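
/*
 * Example (illustrative) of the @type_level encoding described above: zlib at
 * level 3 is requested as type_level = BTRFS_COMPRESS_ZLIB | (3 << 4) == 0x31;
 * btrfs_compress_type() then extracts the low nibble (1 == zlib) and
 * btrfs_compress_level() the high nibble (3).
 */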

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	if (!ret)
		zero_fill_bio(&cb->orig_bbio->bio);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

int __init btrfs_init_compress(void)
{
	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
			offsetof(struct compressed_bio, bbio.bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;

	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
	if (!compr_pool.shrinker)
		return -ENOMEM;

	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();

	spin_lock_init(&compr_pool.lock);
	INIT_LIST_HEAD(&compr_pool.list);
	compr_pool.count = 0;
	/* 128K / 4K = 32, for 8 threads is 256 pages. */
	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
	compr_pool.shrinker->batch = 32;
	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
	shrinker_register(compr_pool.shrinker);

	return 0;
}

void __cold btrfs_exit_compress(void)
{
	/* For now scan drains all pages and does not touch the parameters. */
	btrfs_compr_pool_scan(NULL, NULL);
	shrinker_free(compr_pool.shrinker);

	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
	bioset_exit(&btrfs_compressed_bioset);
}

/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 *			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read for
 *
 * An easier to understand graph is like below:
 *
 *		|<- orig_bio ->|     |<- orig_bio->|
 *	|<-------      full decompressed extent      ----->|
 *	|<-----------    @cb range   ---->|
 *	|			|<-- @buf_len -->|
 *	|<--- @decompressed --->|
 *
 * Note that, @cb can be a subpage of the full decompressed extent, but
 * @cb->start always has the same as the orig_file_offset value of the full
 * decompressed extent.
 *
 * When reading compressed extent, we have to read the full compressed extent,
 * while @orig_bio may only want part of the range.
 * Thus this function will ensure only data covered by @orig_bio will be copied
 * to.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need continue decompress.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = &cb->orig_bbio->bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * destination page.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
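
/*
 * Example (illustrative): get4bits() returns the complemented 4-bit digit, so
 * for shift == 0 a count of 0 maps to key 15 and a count of 15 maps to key 0.
 * Feeding these reversed digits into the counting sort below makes radix_sort()
 * order the buckets by descending count, which is what byte_core_set_size()
 * relies on.
 */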

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time.  Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values.  The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data).  Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easy fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
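
/*
 * Example (illustrative): plain ASCII text typically concentrates 90% of the
 * sample in well under 64 distinct byte values, so the first loop already
 * crosses the threshold and the heuristic treats the data as compressible.
 * Encrypted or already compressed data spreads counts almost evenly, the 90%
 * mark is not reached before BYTE_CORE_SET_HIGH, and compression is skipped.
 */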

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!PAGE_ALIGNED(end))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54.  The heuristic would
	 *    be confused.  This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...".  This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH)
		ret = 5;
	else
		ret = 0;
out:
	put_workspace(0, ws_list);
	return ret;
}
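
/*
 * Summary of the decision ladder above (callers treat the result as a
 * boolean):
 *   1 - first and second half of the sample are identical (repeated pattern)
 *   2 - fewer than BYTE_SET_THRESHOLD distinct byte values (text-like data)
 *   3 - a small core byte set covers 90% of the sample
 *   4 - Shannon entropy at or below ENTROPY_LVL_ACEPTABLE
 *   5 - entropy between the two thresholds, still worth trying
 *   0 - uniform distribution or high entropy, do not compress
 */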

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, unrecognized string will set the default level
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}