Commit | Line | Data |
---|---|---|
c1d7c514 | 1 | // SPDX-License-Identifier: GPL-2.0 |
c8b97818 CM |
2 | /* |
3 | * Copyright (C) 2008 Oracle. All rights reserved. | |
c8b97818 CM |
4 | */ |
5 | ||
6 | #include <linux/kernel.h> | |
7 | #include <linux/bio.h> | |
c8b97818 CM |
8 | #include <linux/file.h> |
9 | #include <linux/fs.h> | |
10 | #include <linux/pagemap.h> | |
a75b81c3 | 11 | #include <linux/pagevec.h> |
c8b97818 | 12 | #include <linux/highmem.h> |
e41d12f5 | 13 | #include <linux/kthread.h> |
c8b97818 CM |
14 | #include <linux/time.h> |
15 | #include <linux/init.h> | |
16 | #include <linux/string.h> | |
c8b97818 | 17 | #include <linux/backing-dev.h> |
c8b97818 | 18 | #include <linux/writeback.h> |
4088a47e | 19 | #include <linux/psi.h> |
5a0e3ad6 | 20 | #include <linux/slab.h> |
fe308533 | 21 | #include <linux/sched/mm.h> |
19562430 | 22 | #include <linux/log2.h> |
4cea422a | 23 | #include <linux/shrinker.h> |
d5178578 | 24 | #include <crypto/hash.h> |
602cbe91 | 25 | #include "misc.h" |
c8b97818 | 26 | #include "ctree.h" |
ec8eb376 | 27 | #include "fs.h" |
c8b97818 CM |
28 | #include "disk-io.h" |
29 | #include "transaction.h" | |
30 | #include "btrfs_inode.h" | |
103c1972 | 31 | #include "bio.h" |
c8b97818 | 32 | #include "ordered-data.h" |
c8b97818 CM |
33 | #include "compression.h" |
34 | #include "extent_io.h" | |
35 | #include "extent_map.h" | |
6a404910 | 36 | #include "subpage.h" |
764c7c9a | 37 | #include "zoned.h" |
7c8ede16 | 38 | #include "file-item.h" |
7f0add25 | 39 | #include "super.h" |
c8b97818 | 40 | |
e794203e | 41 | static struct bio_set btrfs_compressed_bioset; |
544fe4a9 | 42 | |
e128f9c3 DS |
43 | static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; |
44 | ||
45 | const char* btrfs_compress_type2str(enum btrfs_compression_type type) | |
46 | { | |
47 | switch (type) { | |
48 | case BTRFS_COMPRESS_ZLIB: | |
49 | case BTRFS_COMPRESS_LZO: | |
50 | case BTRFS_COMPRESS_ZSTD: | |
51 | case BTRFS_COMPRESS_NONE: | |
52 | return btrfs_compress_types[type]; | |
ce96b7ff CX |
53 | default: |
54 | break; | |
e128f9c3 DS |
55 | } |
56 | ||
57 | return NULL; | |
58 | } | |
59 | ||
544fe4a9 CH |
60 | static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio) |
61 | { | |
62 | return container_of(bbio, struct compressed_bio, bbio); | |
63 | } | |
64 | ||
65 | static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode, | |
66 | u64 start, blk_opf_t op, | |
67 | btrfs_bio_end_io_t end_io) | |
68 | { | |
69 | struct btrfs_bio *bbio; | |
70 | ||
71 | bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op, | |
72 | GFP_NOFS, &btrfs_compressed_bioset)); | |
4317ff00 QW |
73 | btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL); |
74 | bbio->inode = inode; | |
544fe4a9 CH |
75 | bbio->file_offset = start; |
76 | return to_compressed_bio(bbio); | |
77 | } | |
78 | ||
aa53e3bf JT |
79 | bool btrfs_compress_is_valid_type(const char *str, size_t len) |
80 | { | |
81 | int i; | |
82 | ||
83 | for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) { | |
84 | size_t comp_len = strlen(btrfs_compress_types[i]); | |
85 | ||
86 | if (len < comp_len) | |
87 | continue; | |
88 | ||
89 | if (!strncmp(btrfs_compress_types[i], str, comp_len)) | |
90 | return true; | |
91 | } | |
92 | return false; | |
93 | } | |
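Because only `strlen()` bytes of each name are compared, a level suffix survives validation: `"zstd:3"` matches `"zstd"`. A minimal userspace sketch of the same prefix match (an illustrative harness, not kernel code; `names` mirrors `btrfs_compress_types` above):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *const names[] = { "", "zlib", "lzo", "zstd" };

/* Same prefix-match idea as btrfs_compress_is_valid_type(): a level
 * suffix such as ":3" is tolerated because only strlen(name) bytes of
 * the candidate string are compared. */
static bool is_valid_type(const char *str, size_t len)
{
        for (size_t i = 1; i < sizeof(names) / sizeof(names[0]); i++) {
                size_t n = strlen(names[i]);

                if (len >= n && !strncmp(names[i], str, n))
                        return true;
        }
        return false;
}

int main(void)
{
        printf("%d %d %d\n", is_valid_type("zstd:3", 6),
               is_valid_type("zlib", 4), is_valid_type("gzip", 4));
        /* prints: 1 1 0 */
        return 0;
}
```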
94 | ||
1e4eb746 DS |
95 | static int compression_compress_pages(int type, struct list_head *ws, |
96 | struct address_space *mapping, u64 start, struct page **pages, | |
97 | unsigned long *out_pages, unsigned long *total_in, | |
98 | unsigned long *total_out) | |
99 | { | |
100 | switch (type) { | |
101 | case BTRFS_COMPRESS_ZLIB: | |
102 | return zlib_compress_pages(ws, mapping, start, pages, | |
103 | out_pages, total_in, total_out); | |
104 | case BTRFS_COMPRESS_LZO: | |
105 | return lzo_compress_pages(ws, mapping, start, pages, | |
106 | out_pages, total_in, total_out); | |
107 | case BTRFS_COMPRESS_ZSTD: | |
108 | return zstd_compress_pages(ws, mapping, start, pages, | |
109 | out_pages, total_in, total_out); | |
110 | case BTRFS_COMPRESS_NONE: | |
111 | default: | |
112 | /* | |
1d8ba9e7 QW |
113 | * This can happen when compression races with remount setting |
114 | * it to 'no compress', while the caller doesn't call | |
115 | * inode_need_compress() to check if we really need to | |
116 | * compress. | |
117 | * | |
118 | * Not a big deal, just need to inform caller that we | |
119 | * haven't allocated any pages yet. | |
1e4eb746 | 120 | */ |
1d8ba9e7 | 121 | *out_pages = 0; |
1e4eb746 DS |
122 | return -E2BIG; |
123 | } | |
124 | } | |
125 | ||
4a9e803e SY |
126 | static int compression_decompress_bio(struct list_head *ws, |
127 | struct compressed_bio *cb) | |
1e4eb746 | 128 | { |
4a9e803e | 129 | switch (cb->compress_type) { |
1e4eb746 DS |
130 | case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb); |
131 | case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb); | |
132 | case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb); | |
133 | case BTRFS_COMPRESS_NONE: | |
134 | default: | |
135 | /* | |
136 | * This can't happen, the type is validated several times | |
137 | * before we get here. | |
138 | */ | |
139 | BUG(); | |
140 | } | |
141 | } | |
142 | ||
143 | static int compression_decompress(int type, struct list_head *ws, | |
3e09b5b2 | 144 | const u8 *data_in, struct page *dest_page, |
1e4eb746 DS |
145 | unsigned long start_byte, size_t srclen, size_t destlen) |
146 | { | |
147 | switch (type) { | |
148 | case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, | |
149 | start_byte, srclen, destlen); | |
150 | case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, | |
151 | start_byte, srclen, destlen); | |
152 | case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, | |
153 | start_byte, srclen, destlen); | |
154 | case BTRFS_COMPRESS_NONE: | |
155 | default: | |
156 | /* | |
157 | * This can't happen, the type is validated several times | |
158 | * before we get here. | |
159 | */ | |
160 | BUG(); | |
161 | } | |
162 | } | |
163 | ||
32586c5b CH |
164 | static void btrfs_free_compressed_pages(struct compressed_bio *cb) |
165 | { | |
a959a174 | 166 | for (unsigned int i = 0; i < cb->nr_pages; i++) |
9ba965dc | 167 | btrfs_free_compr_page(cb->compressed_pages[i]); |
32586c5b CH |
168 | kfree(cb->compressed_pages); |
169 | } | |
170 | ||
8140dc30 | 171 | static int btrfs_decompress_bio(struct compressed_bio *cb); |
48a3b636 | 172 | |
4cea422a DS |
173 | /* |
174 | * Global cache of last unused pages for compression/decompression. | |
175 | */ | |
176 | static struct btrfs_compr_pool { | |
177 | struct shrinker *shrinker; | |
178 | spinlock_t lock; | |
179 | struct list_head list; | |
180 | int count; | |
181 | int thresh; | |
182 | } compr_pool; | |
183 | ||
184 | static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc) | |
185 | { | |
186 | int ret; | |
187 | ||
188 | /* | |
189 | * Read the values only once: if 'ret' were expanded inside the return | |
190 | * statement, the racy values could be re-read and we could accidentally | |
191 | * return a negative number even if the first read saw a positive one. | |
192 | */ | |
193 | ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh); | |
194 | ||
195 | return ret > 0 ? ret : 0; | |
196 | } | |
197 | ||
198 | static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc) | |
199 | { | |
200 | struct list_head remove; | |
201 | struct list_head *tmp, *next; | |
202 | int freed; | |
203 | ||
204 | if (compr_pool.count == 0) | |
205 | return SHRINK_STOP; | |
206 | ||
207 | INIT_LIST_HEAD(&remove); | |
208 | ||
209 | /* For now, just drain the whole list. */ | |
210 | spin_lock(&compr_pool.lock); | |
211 | list_splice_init(&compr_pool.list, &remove); | |
212 | freed = compr_pool.count; | |
213 | compr_pool.count = 0; | |
214 | spin_unlock(&compr_pool.lock); | |
215 | ||
216 | list_for_each_safe(tmp, next, &remove) { | |
217 | struct page *page = list_entry(tmp, struct page, lru); | |
218 | ||
219 | ASSERT(page_ref_count(page) == 1); | |
220 | put_page(page); | |
221 | } | |
222 | ||
223 | return freed; | |
224 | } | |
225 | ||
9ba965dc DS |
226 | /* |
227 | * Common page allocation wrappers used by the compression code | |
228 | */ | |
229 | struct page *btrfs_alloc_compr_page(void) | |
230 | { | |
4cea422a DS |
231 | struct page *page = NULL; |
232 | ||
233 | spin_lock(&compr_pool.lock); | |
234 | if (compr_pool.count > 0) { | |
235 | page = list_first_entry(&compr_pool.list, struct page, lru); | |
236 | list_del_init(&page->lru); | |
237 | compr_pool.count--; | |
238 | } | |
239 | spin_unlock(&compr_pool.lock); | |
240 | ||
241 | if (page) | |
242 | return page; | |
243 | ||
9ba965dc DS |
244 | return alloc_page(GFP_NOFS); |
245 | } | |
246 | ||
247 | void btrfs_free_compr_page(struct page *page) | |
248 | { | |
4cea422a DS |
249 | bool do_free = false; |
250 | ||
251 | spin_lock(&compr_pool.lock); | |
252 | if (compr_pool.count > compr_pool.thresh) { | |
253 | do_free = true; | |
254 | } else { | |
255 | list_add(&page->lru, &compr_pool.list); | |
256 | compr_pool.count++; | |
257 | } | |
258 | spin_unlock(&compr_pool.lock); | |
259 | ||
260 | if (!do_free) | |
261 | return; | |
262 | ||
9ba965dc DS |
263 | ASSERT(page_ref_count(page) == 1); |
264 | put_page(page); | |
265 | } | |
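The pair above forms a bounded free list: btrfs_free_compr_page() parks pages until the pool holds `thresh` of them, btrfs_alloc_compr_page() pops from the list before falling back to alloc_page(), and the shrinker registered in btrfs_init_compress() can drain the whole list under memory pressure. A standalone sketch of the same pattern, with malloc()/free() standing in for page allocation (illustrative, not kernel code; it assumes buffers are at least pointer-sized):

```c
#include <stdlib.h>

struct pool_entry { struct pool_entry *next; };

static struct pool_entry *pool_head;
static int pool_count;
static const int pool_thresh = 32;      /* cap on cached buffers */

/* Pop a cached buffer if one is available, else allocate fresh. */
static void *pool_alloc(size_t size)
{
        if (pool_head) {
                struct pool_entry *e = pool_head;

                pool_head = e->next;
                pool_count--;
                return e;
        }
        return malloc(size);
}

/* Park the buffer on the free list unless the pool is full. */
static void pool_free(void *p)
{
        struct pool_entry *e = p;

        if (pool_count >= pool_thresh) {
                free(p);
                return;
        }
        e->next = pool_head;
        pool_head = e;
        pool_count++;
}

int main(void)
{
        void *a = pool_alloc(4096);

        pool_free(a);                   /* parked, not freed */
        return pool_alloc(4096) != a;   /* reused: exits 0 */
}
```

The kernel version additionally takes compr_pool.lock around the list operations, since allocations and frees race from many threads.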
266 | ||
30493ff4 | 267 | static void end_compressed_bio_read(struct btrfs_bio *bbio) |
86ccbb4d | 268 | { |
544fe4a9 CH |
269 | struct compressed_bio *cb = to_compressed_bio(bbio); |
270 | blk_status_t status = bbio->bio.bi_status; | |
86ccbb4d | 271 | |
544fe4a9 CH |
272 | if (!status) |
273 | status = errno_to_blk_status(btrfs_decompress_bio(cb)); | |
81bd9328 | 274 | |
32586c5b | 275 | btrfs_free_compressed_pages(cb); |
b7d463a1 | 276 | btrfs_bio_end_io(cb->orig_bbio, status); |
917f32a2 | 277 | bio_put(&bbio->bio); |
c8b97818 CM |
278 | } |
279 | ||
280 | /* | |
281 | * Clear the writeback bits on all of the file | |
282 | * pages for a compressed write | |
283 | */ | |
544fe4a9 | 284 | static noinline void end_compressed_writeback(const struct compressed_bio *cb) |
c8b97818 | 285 | { |
544fe4a9 | 286 | struct inode *inode = &cb->bbio.inode->vfs_inode; |
741ec653 | 287 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
09cbfeaf KS |
288 | unsigned long index = cb->start >> PAGE_SHIFT; |
289 | unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; | |
a75b81c3 | 290 | struct folio_batch fbatch; |
ed164802 | 291 | const int error = blk_status_to_errno(cb->bbio.bio.bi_status); |
c8b97818 CM |
292 | int i; |
293 | int ret; | |
294 | ||
ed164802 DS |
295 | if (error) |
296 | mapping_set_error(inode->i_mapping, error); | |
7bdcefc1 | 297 | |
a75b81c3 VMO |
298 | folio_batch_init(&fbatch); |
299 | while (index <= end_index) { | |
300 | ret = filemap_get_folios(inode->i_mapping, &index, end_index, | |
301 | &fbatch); | |
302 | ||
303 | if (ret == 0) | |
304 | return; | |
305 | ||
c8b97818 | 306 | for (i = 0; i < ret; i++) { |
a75b81c3 VMO |
307 | struct folio *folio = fbatch.folios[i]; |
308 | ||
a75b81c3 | 309 | btrfs_page_clamp_clear_writeback(fs_info, &folio->page, |
741ec653 | 310 | cb->start, cb->len); |
c8b97818 | 311 | } |
a75b81c3 | 312 | folio_batch_release(&fbatch); |
c8b97818 CM |
313 | } |
314 | /* the inode may be gone now */ | |
c8b97818 CM |
315 | } |
316 | ||
f9327a70 | 317 | static void btrfs_finish_compressed_write_work(struct work_struct *work) |
c8b97818 | 318 | { |
f9327a70 CH |
319 | struct compressed_bio *cb = |
320 | container_of(work, struct compressed_bio, write_end_work); | |
321 | ||
7dd43954 CH |
322 | btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len, |
323 | cb->bbio.bio.bi_status == BLK_STS_OK); | |
c8b97818 | 324 | |
7c0c7269 | 325 | if (cb->writeback) |
544fe4a9 | 326 | end_compressed_writeback(cb); |
6853c64a | 327 | /* Note, our inode could be gone now */ |
c8b97818 | 328 | |
32586c5b | 329 | btrfs_free_compressed_pages(cb); |
544fe4a9 | 330 | bio_put(&cb->bbio.bio); |
6853c64a QW |
331 | } |
332 | ||
333 | /* | |
334 | * Do the cleanup once all the compressed pages hit the disk. This will clear | |
335 | * writeback on the file pages and free the compressed pages. | |
336 | * | |
337 | * This also calls the writeback end hooks for the file pages so that metadata | |
338 | * and checksums can be updated in the file. | |
339 | */ | |
917f32a2 | 340 | static void end_compressed_bio_write(struct btrfs_bio *bbio) |
6853c64a | 341 | { |
544fe4a9 CH |
342 | struct compressed_bio *cb = to_compressed_bio(bbio); |
343 | struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; | |
6853c64a | 344 | |
d5e4377d | 345 | queue_work(fs_info->compressed_write_workers, &cb->write_end_work); |
c8b97818 CM |
346 | } |
347 | ||
4513cb0c | 348 | static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb) |
10e924bc | 349 | { |
10e924bc | 350 | struct bio *bio = &cb->bbio.bio; |
43fa4219 | 351 | u32 offset = 0; |
10e924bc | 352 | |
43fa4219 CH |
353 | while (offset < cb->compressed_len) { |
354 | u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE); | |
10e924bc | 355 | |
43fa4219 CH |
356 | /* Maximum compressed extent is smaller than bio size limit. */ |
357 | __bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT], | |
358 | len, 0); | |
359 | offset += len; | |
10e924bc | 360 | } |
10e924bc CH |
361 | } |
362 | ||
c8b97818 CM |
363 | /* |
364 | * worker function to build and submit bios for previously compressed pages. | |
365 | * The corresponding pages in the inode should be marked for writeback | |
366 | * and the compressed pages should have a reference on them for dropping | |
367 | * when the IO is complete. | |
368 | * | |
369 | * This also checksums the file bytes and gets things ready for | |
370 | * the end io hooks. | |
371 | */ | |
d611935b CH |
372 | void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, |
373 | struct page **compressed_pages, | |
374 | unsigned int nr_pages, | |
375 | blk_opf_t write_flags, | |
376 | bool writeback) | |
c8b97818 | 377 | { |
d611935b | 378 | struct btrfs_inode *inode = BTRFS_I(ordered->inode); |
c7ee1819 | 379 | struct btrfs_fs_info *fs_info = inode->root->fs_info; |
c8b97818 | 380 | struct compressed_bio *cb; |
c8b97818 | 381 | |
d611935b CH |
382 | ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize)); |
383 | ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize)); | |
544fe4a9 | 384 | |
d611935b CH |
385 | cb = alloc_compressed_bio(inode, ordered->file_offset, |
386 | REQ_OP_WRITE | write_flags, | |
544fe4a9 | 387 | end_compressed_bio_write); |
d611935b CH |
388 | cb->start = ordered->file_offset; |
389 | cb->len = ordered->num_bytes; | |
c8b97818 | 390 | cb->compressed_pages = compressed_pages; |
d611935b | 391 | cb->compressed_len = ordered->disk_num_bytes; |
7c0c7269 | 392 | cb->writeback = writeback; |
fed8a72d | 393 | INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); |
c8b97818 | 394 | cb->nr_pages = nr_pages; |
d611935b | 395 | cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT; |
ec63b84d | 396 | cb->bbio.ordered = ordered; |
4513cb0c | 397 | btrfs_add_compressed_bio_pages(cb); |
c8b97818 | 398 | |
ae42a154 | 399 | btrfs_submit_bio(&cb->bbio, 0); |
c8b97818 CM |
400 | } |
401 | ||
6a404910 QW |
402 | /* |
403 | * Add extra pages in the same compressed file extent so that we don't need to | |
404 | * re-read the same extent again and again. | |
405 | * | |
406 | * NOTE: this won't work well for subpage, as for subpage read, we lock the | |
407 | * full page then submit a bio for each compressed/regular extent. |
408 | * | |
409 | * This means, if several sectors in the same page point to the same |
410 | * on-disk compressed data, we will re-read the same extent many times and | |
411 | * this function can only help for the next page. | |
412 | */ | |
771ed689 CM |
413 | static noinline int add_ra_bio_pages(struct inode *inode, |
414 | u64 compressed_end, | |
4088a47e | 415 | struct compressed_bio *cb, |
82e60d00 | 416 | int *memstall, unsigned long *pflags) |
771ed689 | 417 | { |
6a404910 | 418 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
771ed689 | 419 | unsigned long end_index; |
b7d463a1 CH |
420 | struct bio *orig_bio = &cb->orig_bbio->bio; |
421 | u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size; | |
771ed689 CM |
422 | u64 isize = i_size_read(inode); |
423 | int ret; | |
424 | struct page *page; | |
771ed689 CM |
425 | struct extent_map *em; |
426 | struct address_space *mapping = inode->i_mapping; | |
771ed689 CM |
427 | struct extent_map_tree *em_tree; |
428 | struct extent_io_tree *tree; | |
6a404910 | 429 | int sectors_missed = 0; |
771ed689 | 430 | |
771ed689 CM |
431 | em_tree = &BTRFS_I(inode)->extent_tree; |
432 | tree = &BTRFS_I(inode)->io_tree; | |
433 | ||
434 | if (isize == 0) | |
435 | return 0; | |
436 | ||
ca62e85d QW |
437 | /* |
438 | * For current subpage support, we only support 64K page size, | |
439 | * which means maximum compressed extent size (128K) is just 2x page | |
440 | * size. | |
441 | * This makes readahead less effective, so here disable readahead for | |
442 | * subpage for now, until full compressed write is supported. | |
443 | */ | |
444 | if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE) | |
445 | return 0; | |
446 | ||
09cbfeaf | 447 | end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; |
771ed689 | 448 | |
6a404910 QW |
449 | while (cur < compressed_end) { |
450 | u64 page_end; | |
451 | u64 pg_index = cur >> PAGE_SHIFT; | |
452 | u32 add_size; | |
771ed689 | 453 | |
306e16ce | 454 | if (pg_index > end_index) |
771ed689 CM |
455 | break; |
456 | ||
0a943c65 | 457 | page = xa_load(&mapping->i_pages, pg_index); |
3159f943 | 458 | if (page && !xa_is_value(page)) { |
6a404910 QW |
459 | sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >> |
460 | fs_info->sectorsize_bits; | |
461 | ||
462 | /* Beyond threshold, no need to continue */ | |
463 | if (sectors_missed > 4) | |
771ed689 | 464 | break; |
6a404910 QW |
465 | |
466 | /* | |
467 | * Jump to next page start as we already have page for | |
468 | * current offset. | |
469 | */ | |
470 | cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE; | |
471 | continue; | |
771ed689 CM |
472 | } |
473 | ||
c62d2555 MH |
474 | page = __page_cache_alloc(mapping_gfp_constraint(mapping, |
475 | ~__GFP_FS)); | |
771ed689 CM |
476 | if (!page) |
477 | break; | |
478 | ||
c62d2555 | 479 | if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { |
09cbfeaf | 480 | put_page(page); |
6a404910 QW |
481 | /* There is already a page, skip to page end */ |
482 | cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE; | |
483 | continue; | |
771ed689 CM |
484 | } |
485 | ||
82e60d00 | 486 | if (!*memstall && PageWorkingset(page)) { |
4088a47e | 487 | psi_memstall_enter(pflags); |
82e60d00 JW |
488 | *memstall = 1; |
489 | } | |
4088a47e | 490 | |
32443de3 QW |
491 | ret = set_page_extent_mapped(page); |
492 | if (ret < 0) { | |
493 | unlock_page(page); | |
494 | put_page(page); | |
495 | break; | |
496 | } | |
497 | ||
6a404910 | 498 | page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1; |
570eb97b | 499 | lock_extent(tree, cur, page_end, NULL); |
890871be | 500 | read_lock(&em_tree->lock); |
6a404910 | 501 | em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur); |
890871be | 502 | read_unlock(&em_tree->lock); |
771ed689 | 503 | |
6a404910 QW |
504 | /* |
505 | * At this point, we have a locked page in the page cache for | |
506 | * these bytes in the file. But, we have to make sure they map | |
507 | * to this compressed extent on disk. | |
508 | */ | |
509 | if (!em || cur < em->start || | |
510 | (cur + fs_info->sectorsize > extent_map_end(em)) || | |
29e70be2 | 511 | (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) { |
771ed689 | 512 | free_extent_map(em); |
570eb97b | 513 | unlock_extent(tree, cur, page_end, NULL); |
771ed689 | 514 | unlock_page(page); |
09cbfeaf | 515 | put_page(page); |
771ed689 CM |
516 | break; |
517 | } | |
518 | free_extent_map(em); | |
519 | ||
520 | if (page->index == end_index) { | |
7073017a | 521 | size_t zero_offset = offset_in_page(isize); |
771ed689 CM |
522 | |
523 | if (zero_offset) { | |
524 | int zeros; | |
09cbfeaf | 525 | zeros = PAGE_SIZE - zero_offset; |
d048b9c2 | 526 | memzero_page(page, zero_offset, zeros); |
771ed689 CM |
527 | } |
528 | } | |
529 | ||
6a404910 | 530 | add_size = min(em->start + em->len, page_end + 1) - cur; |
b7d463a1 | 531 | ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur)); |
6a404910 | 532 | if (ret != add_size) { |
570eb97b | 533 | unlock_extent(tree, cur, page_end, NULL); |
771ed689 | 534 | unlock_page(page); |
09cbfeaf | 535 | put_page(page); |
771ed689 CM |
536 | break; |
537 | } | |
6a404910 QW |
538 | /* |
539 | * If it's subpage, we also need to increase its | |
540 | * subpage::readers number, as at endio we will decrease | |
541 | * subpage::readers and to unlock the page. | |
542 | */ | |
543 | if (fs_info->sectorsize < PAGE_SIZE) | |
544 | btrfs_subpage_start_reader(fs_info, page, cur, add_size); | |
545 | put_page(page); | |
546 | cur += add_size; | |
771ed689 | 547 | } |
771ed689 CM |
548 | return 0; |
549 | } | |
550 | ||
c8b97818 CM |
551 | /* |
552 | * for a compressed read, the bio we get passed has all the inode pages | |
553 | * in it. We don't actually do IO on those pages but allocate new ones | |
554 | * to hold the compressed pages on disk. | |
555 | * | |
4f024f37 | 556 | * bio->bi_iter.bi_sector points to the compressed extent on disk |
c8b97818 | 557 | * bio->bi_io_vec points to all of the inode pages |
c8b97818 CM |
558 | * |
559 | * After the compressed pages are read, we copy the bytes into the | |
560 | * bio we were passed and then call the bio's end_io callback |
561 | */ | |
e1949310 | 562 | void btrfs_submit_compressed_read(struct btrfs_bio *bbio) |
c8b97818 | 563 | { |
690834e4 | 564 | struct btrfs_inode *inode = bbio->inode; |
544fe4a9 CH |
565 | struct btrfs_fs_info *fs_info = inode->root->fs_info; |
566 | struct extent_map_tree *em_tree = &inode->extent_tree; | |
c8b97818 | 567 | struct compressed_bio *cb; |
356b4a2d | 568 | unsigned int compressed_len; |
690834e4 | 569 | u64 file_offset = bbio->file_offset; |
e04ca626 CM |
570 | u64 em_len; |
571 | u64 em_start; | |
c8b97818 | 572 | struct extent_map *em; |
82e60d00 JW |
573 | unsigned long pflags; |
574 | int memstall = 0; | |
f9f15de8 | 575 | blk_status_t ret; |
dd137dd1 | 576 | int ret2; |
c8b97818 CM |
577 | |
578 | /* we need the actual starting offset of this extent in the file */ | |
890871be | 579 | read_lock(&em_tree->lock); |
557023ea | 580 | em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize); |
890871be | 581 | read_unlock(&em_tree->lock); |
f9f15de8 JB |
582 | if (!em) { |
583 | ret = BLK_STS_IOERR; | |
584 | goto out; | |
585 | } | |
c8b97818 | 586 | |
f86f7a75 | 587 | ASSERT(extent_map_is_compressed(em)); |
d20f7043 | 588 | compressed_len = em->block_len; |
6b82ce8d | 589 | |
544fe4a9 CH |
590 | cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ, |
591 | end_compressed_bio_read); | |
c8b97818 | 592 | |
ff5b7ee3 | 593 | cb->start = em->orig_start; |
e04ca626 CM |
594 | em_len = em->len; |
595 | em_start = em->start; | |
d20f7043 | 596 | |
690834e4 | 597 | cb->len = bbio->bio.bi_iter.bi_size; |
c8b97818 | 598 | cb->compressed_len = compressed_len; |
f86f7a75 | 599 | cb->compress_type = extent_map_compression(em); |
b7d463a1 | 600 | cb->orig_bbio = bbio; |
c8b97818 | 601 | |
1d8fa2e2 | 602 | free_extent_map(em); |
1d8fa2e2 | 603 | |
dd137dd1 STD |
604 | cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); |
605 | cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS); | |
f9f15de8 JB |
606 | if (!cb->compressed_pages) { |
607 | ret = BLK_STS_RESOURCE; | |
544fe4a9 | 608 | goto out_free_bio; |
f9f15de8 | 609 | } |
6b82ce8d | 610 | |
dd137dd1 STD |
611 | ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages); |
612 | if (ret2) { | |
613 | ret = BLK_STS_RESOURCE; | |
544fe4a9 | 614 | goto out_free_compressed_pages; |
c8b97818 | 615 | } |
c8b97818 | 616 | |
544fe4a9 CH |
617 | add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall, |
618 | &pflags); | |
771ed689 | 619 | |
771ed689 | 620 | /* include any pages we added in add_ra_bio_pages */ |
690834e4 | 621 | cb->len = bbio->bio.bi_iter.bi_size; |
4513cb0c CH |
622 | cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector; |
623 | btrfs_add_compressed_bio_pages(cb); | |
524bcd1e | 624 | |
82e60d00 | 625 | if (memstall) |
4088a47e CH |
626 | psi_memstall_leave(&pflags); |
627 | ||
e1949310 | 628 | btrfs_submit_bio(&cb->bbio, 0); |
cb4411dd | 629 | return; |
6b82ce8d | 630 | |
544fe4a9 | 631 | out_free_compressed_pages: |
6b82ce8d | 632 | kfree(cb->compressed_pages); |
544fe4a9 | 633 | out_free_bio: |
10e924bc | 634 | bio_put(&cb->bbio.bio); |
544fe4a9 | 635 | out: |
690834e4 | 636 | btrfs_bio_end_io(bbio, ret); |
c8b97818 | 637 | } |
261507a0 | 638 | |
17b5a6c1 TT |
639 | /* |
640 | * Heuristic uses systematic sampling to collect data from the input data | |
641 | * range, the logic can be tuned by the following constants: | |
642 | * | |
643 | * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample | |
644 | * @SAMPLING_INTERVAL - range from which the sampled data can be collected | |
645 | */ | |
646 | #define SAMPLING_READ_SIZE (16) | |
647 | #define SAMPLING_INTERVAL (256) | |
648 | ||
649 | /* | |
650 | * For statistical analysis of the input data we consider bytes that form a | |
651 | * Galois Field of 256 objects. Each object has an attribute count, ie. how | |
652 | * many times the object appeared in the sample. | |
653 | */ | |
654 | #define BUCKET_SIZE (256) | |
655 | ||
656 | /* | |
657 | * The size of the sample is based on a statistical sampling rule of thumb. | |
658 | * The common way is to perform sampling tests as long as the number of | |
659 | * elements in each cell is at least 5. | |
660 | * | |
661 | * Instead of 5, we choose 32 to obtain more accurate results. | |
662 | * If the data contain the maximum number of symbols, which is 256, we obtain a | |
663 | * sample size bounded by 8192. |
664 | * | |
665 | * For a sample of at most 8KB of data per data range: 16 consecutive bytes | |
666 | * from up to 512 locations. | |
667 | */ | |
668 | #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \ | |
669 | SAMPLING_READ_SIZE / SAMPLING_INTERVAL) | |
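Working the arithmetic through: BTRFS_MAX_UNCOMPRESSED is the 128KiB chunk size mentioned in the heuristic comments below, so the bound is 131072 * 16 / 256 = 8192 bytes, i.e. at most 512 sample reads of 16 bytes each. A compile-time restatement with the constants inlined (a sketch, not part of the kernel source):

```c
/* 128KiB chunk, 16-byte reads every 256 bytes -> 8KiB sample cap. */
_Static_assert(128 * 1024 * 16 / 256 == 8192, "sample bound is 8KiB");
_Static_assert(8192 / 16 == 512, "at most 512 sample locations");
```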
670 | ||
671 | struct bucket_item { | |
672 | u32 count; | |
673 | }; | |
4e439a0b TT |
674 | |
675 | struct heuristic_ws { | |
17b5a6c1 TT |
676 | /* Partial copy of input data */ |
677 | u8 *sample; | |
a440d48c | 678 | u32 sample_size; |
17b5a6c1 TT |
679 | /* Buckets store counters for each byte value */ |
680 | struct bucket_item *bucket; | |
440c840c TT |
681 | /* Sorting buffer */ |
682 | struct bucket_item *bucket_b; | |
4e439a0b TT |
683 | struct list_head list; |
684 | }; | |
685 | ||
92ee5530 DZ |
686 | static struct workspace_manager heuristic_wsm; |
687 | ||
4e439a0b TT |
688 | static void free_heuristic_ws(struct list_head *ws) |
689 | { | |
690 | struct heuristic_ws *workspace; | |
691 | ||
692 | workspace = list_entry(ws, struct heuristic_ws, list); | |
693 | ||
17b5a6c1 TT |
694 | kvfree(workspace->sample); |
695 | kfree(workspace->bucket); | |
440c840c | 696 | kfree(workspace->bucket_b); |
4e439a0b TT |
697 | kfree(workspace); |
698 | } | |
699 | ||
7bf49943 | 700 | static struct list_head *alloc_heuristic_ws(unsigned int level) |
4e439a0b TT |
701 | { |
702 | struct heuristic_ws *ws; | |
703 | ||
704 | ws = kzalloc(sizeof(*ws), GFP_KERNEL); | |
705 | if (!ws) | |
706 | return ERR_PTR(-ENOMEM); | |
707 | ||
17b5a6c1 TT |
708 | ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL); |
709 | if (!ws->sample) | |
710 | goto fail; | |
711 | ||
712 | ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL); | |
713 | if (!ws->bucket) | |
714 | goto fail; | |
4e439a0b | 715 | |
440c840c TT |
716 | ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL); |
717 | if (!ws->bucket_b) | |
718 | goto fail; | |
719 | ||
17b5a6c1 | 720 | INIT_LIST_HEAD(&ws->list); |
4e439a0b | 721 | return &ws->list; |
17b5a6c1 TT |
722 | fail: |
723 | free_heuristic_ws(&ws->list); | |
724 | return ERR_PTR(-ENOMEM); | |
4e439a0b TT |
725 | } |
726 | ||
ca4ac360 | 727 | const struct btrfs_compress_op btrfs_heuristic_compress = { |
be951045 | 728 | .workspace_manager = &heuristic_wsm, |
ca4ac360 DZ |
729 | }; |
730 | ||
e8c9f186 | 731 | static const struct btrfs_compress_op * const btrfs_compress_op[] = { |
ca4ac360 DZ |
732 | /* The heuristic is represented as compression type 0 */ |
733 | &btrfs_heuristic_compress, | |
261507a0 | 734 | &btrfs_zlib_compress, |
a6fa6fae | 735 | &btrfs_lzo_compress, |
5c1aab1d | 736 | &btrfs_zstd_compress, |
261507a0 LZ |
737 | }; |
738 | ||
c778df14 DS |
739 | static struct list_head *alloc_workspace(int type, unsigned int level) |
740 | { | |
741 | switch (type) { | |
742 | case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level); | |
743 | case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level); | |
744 | case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level); | |
745 | case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level); | |
746 | default: | |
747 | /* | |
748 | * This can't happen, the type is validated several times | |
749 | * before we get here. | |
750 | */ | |
751 | BUG(); | |
752 | } | |
753 | } | |
754 | ||
1e002351 DS |
755 | static void free_workspace(int type, struct list_head *ws) |
756 | { | |
757 | switch (type) { | |
758 | case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws); | |
759 | case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws); | |
760 | case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws); | |
761 | case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws); | |
762 | default: | |
763 | /* | |
764 | * This can't happen, the type is validated several times | |
765 | * before we get here. | |
766 | */ | |
767 | BUG(); | |
768 | } | |
769 | } | |
770 | ||
d5517033 | 771 | static void btrfs_init_workspace_manager(int type) |
261507a0 | 772 | { |
0cf25213 | 773 | struct workspace_manager *wsm; |
4e439a0b | 774 | struct list_head *workspace; |
261507a0 | 775 | |
0cf25213 | 776 | wsm = btrfs_compress_op[type]->workspace_manager; |
92ee5530 DZ |
777 | INIT_LIST_HEAD(&wsm->idle_ws); |
778 | spin_lock_init(&wsm->ws_lock); | |
779 | atomic_set(&wsm->total_ws, 0); | |
780 | init_waitqueue_head(&wsm->ws_wait); | |
f77dd0d6 | 781 | |
1666edab DZ |
782 | /* |
783 | * Preallocate one workspace for each compression type so we can | |
784 | * guarantee forward progress in the worst case | |
785 | */ | |
c778df14 | 786 | workspace = alloc_workspace(type, 0); |
1666edab DZ |
787 | if (IS_ERR(workspace)) { |
788 | pr_warn( | |
789 | "BTRFS: cannot preallocate compression workspace, will try later\n"); | |
790 | } else { | |
92ee5530 DZ |
791 | atomic_set(&wsm->total_ws, 1); |
792 | wsm->free_ws = 1; | |
793 | list_add(workspace, &wsm->idle_ws); | |
1666edab DZ |
794 | } |
795 | } | |
796 | ||
2510307e | 797 | static void btrfs_cleanup_workspace_manager(int type) |
1666edab | 798 | { |
2dba7143 | 799 | struct workspace_manager *wsman; |
1666edab DZ |
800 | struct list_head *ws; |
801 | ||
2dba7143 | 802 | wsman = btrfs_compress_op[type]->workspace_manager; |
1666edab DZ |
803 | while (!list_empty(&wsman->idle_ws)) { |
804 | ws = wsman->idle_ws.next; | |
805 | list_del(ws); | |
1e002351 | 806 | free_workspace(type, ws); |
1666edab | 807 | atomic_dec(&wsman->total_ws); |
261507a0 | 808 | } |
261507a0 LZ |
809 | } |
810 | ||
811 | /* | |
e721e49d DS |
812 | * This finds an available workspace or allocates a new one. |
813 | * If it's not possible to allocate a new one, it waits until one is free. | |
814 | * Preallocation makes a forward progress guarantee and we do not return | |
815 | * errors. | |
261507a0 | 816 | */ |
5907a9bb | 817 | struct list_head *btrfs_get_workspace(int type, unsigned int level) |
261507a0 | 818 | { |
5907a9bb | 819 | struct workspace_manager *wsm; |
261507a0 LZ |
820 | struct list_head *workspace; |
821 | int cpus = num_online_cpus(); | |
fe308533 | 822 | unsigned nofs_flag; |
4e439a0b TT |
823 | struct list_head *idle_ws; |
824 | spinlock_t *ws_lock; | |
825 | atomic_t *total_ws; | |
826 | wait_queue_head_t *ws_wait; | |
827 | int *free_ws; | |
828 | ||
5907a9bb | 829 | wsm = btrfs_compress_op[type]->workspace_manager; |
92ee5530 DZ |
830 | idle_ws = &wsm->idle_ws; |
831 | ws_lock = &wsm->ws_lock; | |
832 | total_ws = &wsm->total_ws; | |
833 | ws_wait = &wsm->ws_wait; | |
834 | free_ws = &wsm->free_ws; | |
261507a0 | 835 | |
261507a0 | 836 | again: |
d9187649 BL |
837 | spin_lock(ws_lock); |
838 | if (!list_empty(idle_ws)) { | |
839 | workspace = idle_ws->next; | |
261507a0 | 840 | list_del(workspace); |
6ac10a6a | 841 | (*free_ws)--; |
d9187649 | 842 | spin_unlock(ws_lock); |
261507a0 LZ |
843 | return workspace; |
844 | ||
845 | } | |
6ac10a6a | 846 | if (atomic_read(total_ws) > cpus) { |
261507a0 LZ |
847 | DEFINE_WAIT(wait); |
848 | ||
d9187649 BL |
849 | spin_unlock(ws_lock); |
850 | prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE); | |
6ac10a6a | 851 | if (atomic_read(total_ws) > cpus && !*free_ws) |
261507a0 | 852 | schedule(); |
d9187649 | 853 | finish_wait(ws_wait, &wait); |
261507a0 LZ |
854 | goto again; |
855 | } | |
6ac10a6a | 856 | atomic_inc(total_ws); |
d9187649 | 857 | spin_unlock(ws_lock); |
261507a0 | 858 | |
fe308533 DS |
859 | /* |
860 | * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have | |
861 | * to turn it off here because we might get called from the restricted | |
862 | * context of btrfs_compress_bio/btrfs_compress_pages | |
863 | */ | |
864 | nofs_flag = memalloc_nofs_save(); | |
c778df14 | 865 | workspace = alloc_workspace(type, level); |
fe308533 DS |
866 | memalloc_nofs_restore(nofs_flag); |
867 | ||
261507a0 | 868 | if (IS_ERR(workspace)) { |
6ac10a6a | 869 | atomic_dec(total_ws); |
d9187649 | 870 | wake_up(ws_wait); |
e721e49d DS |
871 | |
872 | /* | |
873 | * Do not return the error but go back to waiting. There's a | |
874 | * workspace preallocated for each type and the compression | |
875 | * time is bounded so we get to a workspace eventually. This | |
876 | * makes our caller's life easier. | |
52356716 DS |
877 | * |
878 | * To prevent silent and low-probability deadlocks (when the | |
879 | * initial preallocation fails), check if there are any | |
880 | * workspaces at all. | |
e721e49d | 881 | */ |
52356716 DS |
882 | if (atomic_read(total_ws) == 0) { |
883 | static DEFINE_RATELIMIT_STATE(_rs, | |
884 | /* once per minute */ 60 * HZ, | |
885 | /* no burst */ 1); | |
886 | ||
887 | if (__ratelimit(&_rs)) { | |
ab8d0fc4 | 888 | pr_warn("BTRFS: no compression workspaces, low memory, retrying\n"); |
52356716 DS |
889 | } |
890 | } | |
e721e49d | 891 | goto again; |
261507a0 LZ |
892 | } |
893 | return workspace; | |
894 | } | |
895 | ||
7bf49943 | 896 | static struct list_head *get_workspace(int type, int level) |
929f4baf | 897 | { |
6a0d1272 | 898 | switch (type) { |
5907a9bb | 899 | case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level); |
6a0d1272 | 900 | case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level); |
5907a9bb | 901 | case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level); |
6a0d1272 DS |
902 | case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level); |
903 | default: | |
904 | /* | |
905 | * This can't happen, the type is validated several times | |
906 | * before we get here. | |
907 | */ | |
908 | BUG(); | |
909 | } | |
929f4baf DZ |
910 | } |
911 | ||
261507a0 LZ |
912 | /* |
913 | * put a workspace struct back on the list or free it if we have enough | |
914 | * idle ones sitting around | |
915 | */ | |
a3bbd2a9 | 916 | void btrfs_put_workspace(int type, struct list_head *ws) |
261507a0 | 917 | { |
a3bbd2a9 | 918 | struct workspace_manager *wsm; |
4e439a0b TT |
919 | struct list_head *idle_ws; |
920 | spinlock_t *ws_lock; | |
921 | atomic_t *total_ws; | |
922 | wait_queue_head_t *ws_wait; | |
923 | int *free_ws; | |
924 | ||
a3bbd2a9 | 925 | wsm = btrfs_compress_op[type]->workspace_manager; |
92ee5530 DZ |
926 | idle_ws = &wsm->idle_ws; |
927 | ws_lock = &wsm->ws_lock; | |
928 | total_ws = &wsm->total_ws; | |
929 | ws_wait = &wsm->ws_wait; | |
930 | free_ws = &wsm->free_ws; | |
d9187649 BL |
931 | |
932 | spin_lock(ws_lock); | |
26b28dce | 933 | if (*free_ws <= num_online_cpus()) { |
929f4baf | 934 | list_add(ws, idle_ws); |
6ac10a6a | 935 | (*free_ws)++; |
d9187649 | 936 | spin_unlock(ws_lock); |
261507a0 LZ |
937 | goto wake; |
938 | } | |
d9187649 | 939 | spin_unlock(ws_lock); |
261507a0 | 940 | |
1e002351 | 941 | free_workspace(type, ws); |
6ac10a6a | 942 | atomic_dec(total_ws); |
261507a0 | 943 | wake: |
093258e6 | 944 | cond_wake_up(ws_wait); |
261507a0 LZ |
945 | } |
946 | ||
929f4baf DZ |
947 | static void put_workspace(int type, struct list_head *ws) |
948 | { | |
bd3a5287 | 949 | switch (type) { |
a3bbd2a9 DS |
950 | case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws); |
951 | case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws); | |
952 | case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws); | |
bd3a5287 DS |
953 | case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws); |
954 | default: | |
955 | /* | |
956 | * This can't happen, the type is validated several times | |
957 | * before we get here. | |
958 | */ | |
959 | BUG(); | |
960 | } | |
929f4baf DZ |
961 | } |
962 | ||
adbab642 AJ |
963 | /* |
964 | * Adjust @level according to the limits of the compression algorithm or | |
965 | * fallback to default | |
966 | */ | |
967 | static unsigned int btrfs_compress_set_level(int type, unsigned level) | |
968 | { | |
969 | const struct btrfs_compress_op *ops = btrfs_compress_op[type]; | |
970 | ||
971 | if (level == 0) | |
972 | level = ops->default_level; | |
973 | else | |
974 | level = min(level, ops->max_level); | |
975 | ||
976 | return level; | |
977 | } | |
978 | ||
261507a0 | 979 | /* |
38c31464 DS |
980 | * Given an address space and start and length, compress the bytes into @pages |
981 | * that are allocated on demand. | |
261507a0 | 982 | * |
f51d2b59 DS |
983 | * @type_level is encoded algorithm and level, where level 0 means whatever |
984 | * default the algorithm chooses and is opaque here; | |
985 | * - the compression algorithm is stored in bits 0-3 | |
986 | * - the level is stored in bits 4-7 | |
987 | * | |
4d3a800e DS |
988 | * @out_pages is an in/out parameter, holds maximum number of pages to allocate |
989 | * and returns number of actually allocated pages | |
261507a0 | 990 | * |
38c31464 DS |
991 | * @total_in is used to return the number of bytes actually read. It |
992 | * may be smaller than the input length if we had to exit early because we | |
261507a0 LZ |
993 | * ran out of room in the pages array or because we crossed the |
994 | * max_out threshold. | |
995 | * | |
38c31464 DS |
996 | * @total_out is an in/out parameter, must be set to the input length and will |
997 | * be also used to return the total number of compressed bytes | |
261507a0 | 998 | */ |
f51d2b59 | 999 | int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, |
38c31464 | 1000 | u64 start, struct page **pages, |
261507a0 LZ |
1001 | unsigned long *out_pages, |
1002 | unsigned long *total_in, | |
e5d74902 | 1003 | unsigned long *total_out) |
261507a0 | 1004 | { |
1972708a | 1005 | int type = btrfs_compress_type(type_level); |
7bf49943 | 1006 | int level = btrfs_compress_level(type_level); |
261507a0 LZ |
1007 | struct list_head *workspace; |
1008 | int ret; | |
1009 | ||
b0c1fe1e | 1010 | level = btrfs_compress_set_level(type, level); |
7bf49943 | 1011 | workspace = get_workspace(type, level); |
1e4eb746 DS |
1012 | ret = compression_compress_pages(type, workspace, mapping, start, pages, |
1013 | out_pages, total_in, total_out); | |
929f4baf | 1014 | put_workspace(type, workspace); |
261507a0 LZ |
1015 | return ret; |
1016 | } | |
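The @type_level packing described in the comment above (type in the low nibble, level in bits 4-7) implies pack/unpack helpers along these lines; the real btrfs_compress_type()/btrfs_compress_level() definitions live in compression.h, so treat this as an illustrative sketch:

```c
/* Hypothetical packing helper matching the layout described above:
 * bits 0-3 hold the compression type, bits 4-7 hold the level. */
static inline unsigned int make_type_level(int type, int level)
{
        return (type & 0xF) | ((level & 0xF) << 4);
}

/* Example: zlib (type 1) at level 3 packs to 0x31, so the unpack
 * helpers would yield type 1 and level 3 again. */
```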
1017 | ||
8140dc30 | 1018 | static int btrfs_decompress_bio(struct compressed_bio *cb) |
261507a0 LZ |
1019 | { |
1020 | struct list_head *workspace; | |
1021 | int ret; | |
8140dc30 | 1022 | int type = cb->compress_type; |
261507a0 | 1023 | |
7bf49943 | 1024 | workspace = get_workspace(type, 0); |
4a9e803e | 1025 | ret = compression_decompress_bio(workspace, cb); |
929f4baf | 1026 | put_workspace(type, workspace); |
e1ddce71 | 1027 | |
7edb9a3e | 1028 | if (!ret) |
b7d463a1 | 1029 | zero_fill_bio(&cb->orig_bbio->bio); |
261507a0 LZ |
1030 | return ret; |
1031 | } | |
1032 | ||
1033 | /* | |
1034 | * a less complex decompression routine. Our compressed data fits in a | |
1035 | * single page, and we want to read a single page out of it. | |
1036 | * start_byte tells us the offset into the compressed data we're interested in | |
1037 | */ | |
3e09b5b2 | 1038 | int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, |
261507a0 LZ |
1039 | unsigned long start_byte, size_t srclen, size_t destlen) |
1040 | { | |
1041 | struct list_head *workspace; | |
1042 | int ret; | |
1043 | ||
7bf49943 | 1044 | workspace = get_workspace(type, 0); |
1e4eb746 DS |
1045 | ret = compression_decompress(type, workspace, data_in, dest_page, |
1046 | start_byte, srclen, destlen); | |
929f4baf | 1047 | put_workspace(type, workspace); |
7bf49943 | 1048 | |
261507a0 LZ |
1049 | return ret; |
1050 | } | |
1051 | ||
5565b8e0 | 1052 | int __init btrfs_init_compress(void) |
1666edab | 1053 | { |
544fe4a9 CH |
1054 | if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE, |
1055 | offsetof(struct compressed_bio, bbio.bio), | |
1056 | BIOSET_NEED_BVECS)) | |
1057 | return -ENOMEM; | |
4cea422a DS |
1058 | |
1059 | compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages"); | |
1060 | if (!compr_pool.shrinker) | |
1061 | return -ENOMEM; | |
1062 | ||
d5517033 DS |
1063 | btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); |
1064 | btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); | |
1065 | btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); | |
1066 | zstd_init_workspace_manager(); | |
4cea422a DS |
1067 | |
1068 | spin_lock_init(&compr_pool.lock); | |
1069 | INIT_LIST_HEAD(&compr_pool.list); | |
1070 | compr_pool.count = 0; | |
1071 | /* 128K / 4K = 32, for 8 threads is 256 pages. */ | |
1072 | compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8; | |
1073 | compr_pool.shrinker->count_objects = btrfs_compr_pool_count; | |
1074 | compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan; | |
1075 | compr_pool.shrinker->batch = 32; | |
1076 | compr_pool.shrinker->seeks = DEFAULT_SEEKS; | |
1077 | shrinker_register(compr_pool.shrinker); | |
1078 | ||
5565b8e0 | 1079 | return 0; |
1666edab DZ |
1080 | } |
1081 | ||
e67c718b | 1082 | void __cold btrfs_exit_compress(void) |
261507a0 | 1083 | { |
4cea422a DS |
1084 | /* For now scan drains all pages and does not touch the parameters. */ |
1085 | btrfs_compr_pool_scan(NULL, NULL); | |
1086 | shrinker_free(compr_pool.shrinker); | |
1087 | ||
2510307e DS |
1088 | btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); |
1089 | btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); | |
1090 | btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); | |
1091 | zstd_cleanup_workspace_manager(); | |
544fe4a9 | 1092 | bioset_exit(&btrfs_compressed_bioset); |
261507a0 | 1093 | } |
3a39c18d LZ |
1094 | |
1095 | /* | |
1c3dc173 | 1096 | * Copy decompressed data from working buffer to pages. |
3a39c18d | 1097 | * |
1c3dc173 QW |
1098 | * @buf: The decompressed data buffer |
1099 | * @buf_len: The decompressed data length | |
1100 | * @decompressed: Number of bytes that are already decompressed inside the | |
1101 | * compressed extent | |
1102 | * @cb: The compressed extent descriptor | |
1103 | * @orig_bio: The original bio that the caller wants to read for | |
3a39c18d | 1104 | * |
1c3dc173 QW |
1105 | * An easier to understand graph is like below: |
1106 | * | |
1107 | * |<- orig_bio ->| |<- orig_bio->| | |
1108 | * |<------- full decompressed extent ----->| | |
1109 | * |<----------- @cb range ---->| | |
1110 | * | |<-- @buf_len -->| | |
1111 | * |<--- @decompressed --->| | |
1112 | * | |
1113 | * Note that, @cb can be a subpage of the full decompressed extent, but | |
1114 | * @cb->start always has the same value as the orig_file_offset of the full |
1115 | * decompressed extent. | |
1116 | * | |
1117 | * When reading a compressed extent, we have to read the full compressed | |
1118 | * extent, while @orig_bio may only want part of the range. | |
1119 | * Thus this function ensures that only the data covered by @orig_bio is | |
1120 | * copied to it. | |
1121 | * | |
1122 | * Return 0 if we have copied all needed contents for @orig_bio. | |
1123 | * Return >0 if we need to continue decompressing. | |
3a39c18d | 1124 | */ |
1c3dc173 QW |
1125 | int btrfs_decompress_buf2page(const char *buf, u32 buf_len, |
1126 | struct compressed_bio *cb, u32 decompressed) | |
3a39c18d | 1127 | { |
b7d463a1 | 1128 | struct bio *orig_bio = &cb->orig_bbio->bio; |
1c3dc173 QW |
1129 | /* Offset inside the full decompressed extent */ |
1130 | u32 cur_offset; | |
1131 | ||
1132 | cur_offset = decompressed; | |
1133 | /* The main loop to do the copy */ | |
1134 | while (cur_offset < decompressed + buf_len) { | |
1135 | struct bio_vec bvec; | |
1136 | size_t copy_len; | |
1137 | u32 copy_start; | |
1138 | /* Offset inside the full decompressed extent */ | |
1139 | u32 bvec_offset; | |
1140 | ||
1141 | bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter); | |
1142 | /* | |
1143 | * cb->start may underflow, but subtracting that value can still | |
1144 | * give us correct offset inside the full decompressed extent. | |
1145 | */ | |
1146 | bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start; | |
974b1adc | 1147 | |
1c3dc173 QW |
1148 | /* Haven't reached the bvec range, exit */ |
1149 | if (decompressed + buf_len <= bvec_offset) | |
1150 | return 1; | |
3a39c18d | 1151 | |
1c3dc173 QW |
1152 | copy_start = max(cur_offset, bvec_offset); |
1153 | copy_len = min(bvec_offset + bvec.bv_len, | |
1154 | decompressed + buf_len) - copy_start; | |
1155 | ASSERT(copy_len); | |
3a39c18d | 1156 | |
974b1adc | 1157 | /* |
1c3dc173 QW |
1158 | * Extra range check to ensure we didn't go beyond |
1159 | * @buf + @buf_len. | |
974b1adc | 1160 | */ |
1c3dc173 QW |
1161 | ASSERT(copy_start - decompressed < buf_len); |
1162 | memcpy_to_page(bvec.bv_page, bvec.bv_offset, | |
1163 | buf + copy_start - decompressed, copy_len); | |
1c3dc173 | 1164 | cur_offset += copy_len; |
3a39c18d | 1165 | |
1c3dc173 QW |
1166 | bio_advance(orig_bio, copy_len); |
1167 | /* Finished the bio */ | |
1168 | if (!orig_bio->bi_iter.bi_size) | |
1169 | return 0; | |
3a39c18d | 1170 | } |
3a39c18d LZ |
1171 | return 1; |
1172 | } | |
c2fcdcdf | 1173 | |
19562430 TT |
1174 | /* |
1175 | * Shannon Entropy calculation | |
1176 | * | |
52042d8e | 1177 | * Pure byte distribution analysis fails to determine compressibility of data. |
19562430 TT |
1178 | * Try calculating entropy to estimate the average minimum number of bits |
1179 | * needed to encode the sampled data. | |
1180 | * | |
1181 | * For convenience, return the percentage of needed bits, instead of amount of | |
1182 | * bits directly. | |
1183 | * | |
1184 | * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy | |
1185 | * and can be compressible with high probability | |
1186 | * | |
1187 | * @ENTROPY_LVL_HIGH - data are not compressible with high probability | |
1188 | * | |
1189 | * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate. | |
1190 | */ | |
1191 | #define ENTROPY_LVL_ACEPTABLE (65) | |
1192 | #define ENTROPY_LVL_HIGH (80) | |
1193 | ||
1194 | /* | |
1195 | * For increased precision in the shannon_entropy calculation, | |
1196 | * let's do pow(n, M) to save more digits after the decimal point: | |
1197 | * | |
1198 | * - maximum int bit length is 64 | |
1199 | * - ilog2(MAX_SAMPLE_SIZE) -> 13 | |
1200 | * - 13 * 4 = 52 < 64 -> M = 4 | |
1201 | * | |
1202 | * So use pow(n, 4). | |
1203 | */ | |
1204 | static inline u32 ilog2_w(u64 n) | |
1205 | { | |
1206 | return ilog2(n * n * n * n); | |
1207 | } | |
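In exact arithmetic, shannon_entropy() below computes the classic Shannon entropy scaled by the factor of 4 baked into ilog2_w(). With $c_i$ the bucket counts and $N$ the sample size:

$$
H \;=\; -\sum_i p_i \log_2 p_i \;=\; \sum_i \frac{c_i}{N}\bigl(\log_2 N - \log_2 c_i\bigr),
\qquad p_i = \frac{c_i}{N}
$$

Since ilog2_w(n) = ilog2(n^4) ≈ 4·log2(n), the accumulated entropy_sum / sample_size approximates 4H, and entropy_max = 8 * ilog2_w(2) = 32 corresponds to the maximum of 8 bits per byte, so the returned percentage is roughly 100·H/8.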
1208 | ||
1209 | static u32 shannon_entropy(struct heuristic_ws *ws) | |
1210 | { | |
1211 | const u32 entropy_max = 8 * ilog2_w(2); | |
1212 | u32 entropy_sum = 0; | |
1213 | u32 p, p_base, sz_base; | |
1214 | u32 i; | |
1215 | ||
1216 | sz_base = ilog2_w(ws->sample_size); | |
1217 | for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) { | |
1218 | p = ws->bucket[i].count; | |
1219 | p_base = ilog2_w(p); | |
1220 | entropy_sum += p * (sz_base - p_base); | |
1221 | } | |
1222 | ||
1223 | entropy_sum /= ws->sample_size; | |
1224 | return entropy_sum * 100 / entropy_max; | |
1225 | } | |
1226 | ||
440c840c TT |
1227 | #define RADIX_BASE 4U |
1228 | #define COUNTERS_SIZE (1U << RADIX_BASE) | |
1229 | ||
1230 | static u8 get4bits(u64 num, int shift) { | |
1231 | u8 low4bits; | |
1232 | ||
1233 | num >>= shift; | |
1234 | /* Reverse order */ | |
1235 | low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE); | |
1236 | return low4bits; | |
1237 | } | |
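The "Reverse order" line above is the whole trick: complementing each 4-bit digit means larger keys get smaller digits, so the ascending counting sort in radix_sort() below emits the keys in descending order, which byte_core_set_size() relies on. A quick standalone check (illustrative harness, not kernel code):

```c
#include <stdio.h>

/* Same digit extraction as get4bits(): complementing the digit maps
 * larger values to smaller digits. */
static unsigned char get4bits(unsigned long long num, int shift)
{
        num >>= shift;
        return 15 - (num % 16);
}

int main(void)
{
        printf("%u %u %u\n", get4bits(0, 0), get4bits(7, 0), get4bits(15, 0));
        /* prints "15 8 0": key order 0 < 7 < 15 reverses in digit space */
        return 0;
}
```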
1238 | ||
440c840c TT |
1239 | /* |
1240 | * Use 4 bits as radix base | |
52042d8e | 1241 | * Use 16 u32 counters for calculating new position in buf array |
440c840c TT |
1242 | * |
1243 | * @array - array that will be sorted | |
1244 | * @array_buf - buffer array to store sorting results | |
1245 | * must be equal in size to @array | |
1246 | * @num - array size | |
440c840c | 1247 | */ |
23ae8c63 | 1248 | static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf, |
36243c91 | 1249 | int num) |
858177d3 | 1250 | { |
440c840c TT |
1251 | u64 max_num; |
1252 | u64 buf_num; | |
1253 | u32 counters[COUNTERS_SIZE]; | |
1254 | u32 new_addr; | |
1255 | u32 addr; | |
1256 | int bitlen; | |
1257 | int shift; | |
1258 | int i; | |
858177d3 | 1259 | |
440c840c TT |
1260 | /* |
1261 | * Try to avoid useless loop iterations for small numbers stored in big |
1262 | * counters. Example: 48 33 4 ... in 64bit array | |
1263 | */ | |
23ae8c63 | 1264 | max_num = array[0].count; |
440c840c | 1265 | for (i = 1; i < num; i++) { |
23ae8c63 | 1266 | buf_num = array[i].count; |
440c840c TT |
1267 | if (buf_num > max_num) |
1268 | max_num = buf_num; | |
1269 | } | |
1270 | ||
1271 | buf_num = ilog2(max_num); | |
1272 | bitlen = ALIGN(buf_num, RADIX_BASE * 2); | |
1273 | ||
1274 | shift = 0; | |
1275 | while (shift < bitlen) { | |
1276 | memset(counters, 0, sizeof(counters)); | |
1277 | ||
1278 | for (i = 0; i < num; i++) { | |
23ae8c63 | 1279 | buf_num = array[i].count; |
440c840c TT |
1280 | addr = get4bits(buf_num, shift); |
1281 | counters[addr]++; | |
1282 | } | |
1283 | ||
1284 | for (i = 1; i < COUNTERS_SIZE; i++) | |
1285 | counters[i] += counters[i - 1]; | |
1286 | ||
1287 | for (i = num - 1; i >= 0; i--) { | |
23ae8c63 | 1288 | buf_num = array[i].count; |
440c840c TT |
1289 | addr = get4bits(buf_num, shift); |
1290 | counters[addr]--; | |
1291 | new_addr = counters[addr]; | |
7add17be | 1292 | array_buf[new_addr] = array[i]; |
440c840c TT |
1293 | } |
1294 | ||
1295 | shift += RADIX_BASE; | |
1296 | ||
1297 | /* | |
1298 | * A normal radix sort would move the data from the temporary |
1299 | * array back to the main one, but that costs some CPU time. |
1300 | * Avoid the memcpy() by doing another sort iteration into the |
1301 | * original array instead. |
1302 | */ | |
1303 | memset(counters, 0, sizeof(counters)); | |
1304 | ||
1305 | for (i = 0; i < num; i++) { |
23ae8c63 | 1306 | buf_num = array_buf[i].count; |
440c840c TT |
1307 | addr = get4bits(buf_num, shift); |
1308 | counters[addr]++; | |
1309 | } | |
1310 | ||
1311 | for (i = 1; i < COUNTERS_SIZE; i++) | |
1312 | counters[i] += counters[i - 1]; | |
1313 | ||
1314 | for (i = num - 1; i >= 0; i--) { | |
23ae8c63 | 1315 | buf_num = array_buf[i].count; |
440c840c TT |
1316 | addr = get4bits(buf_num, shift); |
1317 | counters[addr]--; | |
1318 | new_addr = counters[addr]; | |
7add17be | 1319 | array[new_addr] = array_buf[i]; |
440c840c TT |
1320 | } |
1321 | ||
1322 | shift += RADIX_BASE; | |
1323 | } | |
858177d3 TT |
1324 | } |
1325 | ||
1326 | /* | |
1327 | * Size of the core byte set - how many bytes cover 90% of the sample | |
1328 | * | |
1329 | * There are several types of structured binary data that use nearly all byte | |
1330 | * values. The distribution can be uniform and counts in all buckets will be | |
1331 | * nearly the same (eg. encrypted data). Unlikely to be compressible. | |
1332 | * | |
1333 | * Other possibility is normal (Gaussian) distribution, where the data could | |
1334 | * be potentially compressible, but we have to take a few more steps to decide | |
1335 | * how much. | |
1336 | * | |
1337 | * @BYTE_CORE_SET_LOW - main part of byte values repeated frequently, | |
1338 | * a compression algorithm can easily handle that |
1339 | * @BYTE_CORE_SET_HIGH - data has a uniform distribution and with high |
1340 | * probability is not compressible | |
1341 | */ | |
1342 | #define BYTE_CORE_SET_LOW (64) | |
1343 | #define BYTE_CORE_SET_HIGH (200) | |
1344 | ||
1345 | static int byte_core_set_size(struct heuristic_ws *ws) | |
1346 | { | |
1347 | u32 i; | |
1348 | u32 coreset_sum = 0; | |
1349 | const u32 core_set_threshold = ws->sample_size * 90 / 100; | |
1350 | struct bucket_item *bucket = ws->bucket; | |
1351 | ||
1352 | /* Sort in reverse order */ | |
36243c91 | 1353 | radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE); |
858177d3 TT |
1354 | |
1355 | for (i = 0; i < BYTE_CORE_SET_LOW; i++) | |
1356 | coreset_sum += bucket[i].count; | |
1357 | ||
1358 | if (coreset_sum > core_set_threshold) | |
1359 | return i; | |
1360 | ||
1361 | for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { | |
1362 | coreset_sum += bucket[i].count; | |
1363 | if (coreset_sum > core_set_threshold) | |
1364 | break; | |
1365 | } | |
1366 | ||
1367 | return i; | |
1368 | } | |
1369 | ||
a288e92c TT |
1370 | /* |
1371 | * Count byte values in buckets. | |
1372 | * This heuristic can detect textual data (configs, xml, json, html, etc). | |
1373 | * Because in most text-like data byte set is restricted to limited number of | |
1374 | * possible characters, and that restriction in most cases makes data easy to | |
1375 | * compress. | |
1376 | * | |
1377 | * @BYTE_SET_THRESHOLD - consider all data within this byte set size: | |
1378 | * less - compressible | |
1379 | * more - need additional analysis | |
1380 | */ | |
1381 | #define BYTE_SET_THRESHOLD (64) | |
1382 | ||
1383 | static u32 byte_set_size(const struct heuristic_ws *ws) | |
1384 | { | |
1385 | u32 i; | |
1386 | u32 byte_set_size = 0; | |
1387 | ||
1388 | for (i = 0; i < BYTE_SET_THRESHOLD; i++) { | |
1389 | if (ws->bucket[i].count > 0) | |
1390 | byte_set_size++; | |
1391 | } | |
1392 | ||
1393 | /* | |
1394 | * Continue collecting count of byte values in buckets. If the byte | |
1395 | * set size is bigger than the threshold, it's pointless to continue, | |
1396 | * the detection technique would fail for this type of data. | |
1397 | */ | |
1398 | for (; i < BUCKET_SIZE; i++) { | |
1399 | if (ws->bucket[i].count > 0) { | |
1400 | byte_set_size++; | |
1401 | if (byte_set_size > BYTE_SET_THRESHOLD) | |
1402 | return byte_set_size; | |
1403 | } | |
1404 | } | |
1405 | ||
1406 | return byte_set_size; | |
1407 | } | |
1408 | ||
1fe4f6fa TT |
1409 | static bool sample_repeated_patterns(struct heuristic_ws *ws) |
1410 | { | |
1411 | const u32 half_of_sample = ws->sample_size / 2; | |
1412 | const u8 *data = ws->sample; | |
1413 | ||
1414 | return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0; | |
1415 | } | |
1416 | ||
a440d48c TT |
1417 | static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, |
1418 | struct heuristic_ws *ws) | |
1419 | { | |
1420 | struct page *page; | |
1421 | u64 index, index_end; | |
1422 | u32 i, curr_sample_pos; | |
1423 | u8 *in_data; | |
1424 | ||
1425 | /* | |
1426 | * Compression handles the input data by chunks of 128KiB | |
1427 | * (defined by BTRFS_MAX_UNCOMPRESSED) | |
1428 | * | |
1429 | * We do the same for the heuristic and loop over the whole range. | |
1430 | * | |
1431 | * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will | |
1432 | * process no more than BTRFS_MAX_UNCOMPRESSED at a time. | |
1433 | */ | |
1434 | if (end - start > BTRFS_MAX_UNCOMPRESSED) | |
1435 | end = start + BTRFS_MAX_UNCOMPRESSED; | |
1436 | ||
1437 | index = start >> PAGE_SHIFT; | |
1438 | index_end = end >> PAGE_SHIFT; | |
1439 | ||
1440 | /* Don't miss unaligned end */ | |
ce394a7f | 1441 | if (!PAGE_ALIGNED(end)) |
a440d48c TT |
1442 | index_end++; |
1443 | ||
1444 | curr_sample_pos = 0; | |
1445 | while (index < index_end) { | |
1446 | page = find_get_page(inode->i_mapping, index); | |
58c1a35c | 1447 | in_data = kmap_local_page(page); |
a440d48c TT |
1448 | /* Handle case where the start is not aligned to PAGE_SIZE */ |
1449 | i = start % PAGE_SIZE; | |
1450 | while (i < PAGE_SIZE - SAMPLING_READ_SIZE) { | |
1451 | /* Don't sample any garbage from the last page */ | |
1452 | if (start > end - SAMPLING_READ_SIZE) | |
1453 | break; | |
1454 | memcpy(&ws->sample[curr_sample_pos], &in_data[i], | |
1455 | SAMPLING_READ_SIZE); | |
1456 | i += SAMPLING_INTERVAL; | |
1457 | start += SAMPLING_INTERVAL; | |
1458 | curr_sample_pos += SAMPLING_READ_SIZE; | |
1459 | } | |
58c1a35c | 1460 | kunmap_local(in_data); |
a440d48c TT |
1461 | put_page(page); |
1462 | ||
1463 | index++; | |
1464 | } | |
1465 | ||
1466 | ws->sample_size = curr_sample_pos; | |
1467 | } | |
1468 | ||
c2fcdcdf TT |
1469 | /* |
1470 | * Compression heuristic. | |
1471 | * | |
1472 | * For now it's a naive and optimistic 'return true'; we'll extend the logic to | |
1473 | * quickly (compared to direct compression) detect data characteristics | |
67da05b3 | 1474 | * (compressible/incompressible) to avoid wasting CPU time on incompressible |
c2fcdcdf TT |
1475 | * data. |
1476 | * | |
1477 | * The following types of analysis can be performed: | |
1478 | * - detect mostly zero data | |
1479 | * - detect data with low "byte set" size (text, etc) | |
1480 | * - detect data with low/high "core byte" set | |
1481 | * | |
1482 | * Return non-zero if the compression should be done, 0 otherwise. | |
1483 | */ | |
1484 | int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) | |
1485 | { | |
7bf49943 | 1486 | struct list_head *ws_list = get_workspace(0, 0); |
4e439a0b | 1487 | struct heuristic_ws *ws; |
a440d48c TT |
1488 | u32 i; |
1489 | u8 byte; | |
19562430 | 1490 | int ret = 0; |
c2fcdcdf | 1491 | |
4e439a0b TT |
1492 | ws = list_entry(ws_list, struct heuristic_ws, list); |
1493 | ||
a440d48c TT |
1494 | heuristic_collect_sample(inode, start, end, ws); |
1495 | ||
1fe4f6fa TT |
1496 | if (sample_repeated_patterns(ws)) { |
1497 | ret = 1; | |
1498 | goto out; | |
1499 | } | |
1500 | ||
a440d48c TT |
1501 | memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE); |
1502 | ||
1503 | for (i = 0; i < ws->sample_size; i++) { | |
1504 | byte = ws->sample[i]; | |
1505 | ws->bucket[byte].count++; | |
c2fcdcdf TT |
1506 | } |
1507 | ||
a288e92c TT |
1508 | i = byte_set_size(ws); |
1509 | if (i < BYTE_SET_THRESHOLD) { | |
1510 | ret = 2; | |
1511 | goto out; | |
1512 | } | |
1513 | ||
858177d3 TT |
1514 | i = byte_core_set_size(ws); |
1515 | if (i <= BYTE_CORE_SET_LOW) { | |
1516 | ret = 3; | |
1517 | goto out; | |
1518 | } | |
1519 | ||
1520 | if (i >= BYTE_CORE_SET_HIGH) { | |
1521 | ret = 0; | |
1522 | goto out; | |
1523 | } | |
1524 | ||
19562430 TT |
1525 | i = shannon_entropy(ws); |
1526 | if (i <= ENTROPY_LVL_ACEPTABLE) { | |
1527 | ret = 4; | |
1528 | goto out; | |
1529 | } | |
1530 | ||
1531 | /* | |
1532 | * For the levels below ENTROPY_LVL_HIGH, additional analysis would be | |
1533 | * needed to give green light to compression. | |
1534 | * | |
1535 | * For now just assume that compression at that level is not worth the | |
1536 | * resources because: | |
1537 | * | |
1538 | * 1. it is possible to defrag the data later | |
1539 | * | |
1540 | * 2. the data would turn out to be hardly compressible, eg. 150 byte | |
1541 | * values, with every bucket count at around ~54. The heuristic would | |
1542 | * be confused. This can happen when data have some internal repeated | |
1543 | * patterns like "abbacbbc...". This can be detected by analyzing | |
1544 | * pairs of bytes, which is too costly. | |
1545 | */ | |
1546 | if (i < ENTROPY_LVL_HIGH) { | |
1547 | ret = 5; | |
1548 | goto out; | |
1549 | } else { | |
1550 | ret = 0; | |
1551 | goto out; | |
1552 | } | |
1553 | ||
1fe4f6fa | 1554 | out: |
929f4baf | 1555 | put_workspace(0, ws_list); |
c2fcdcdf TT |
1556 | return ret; |
1557 | } | |
f51d2b59 | 1558 | |
d0ab62ce DZ |
1559 | /* |
1560 | * Convert the compression suffix (eg. after "zlib" starting with ":") to | |
1561 | * level, unrecognized string will set the default level | |
1562 | */ | |
1563 | unsigned int btrfs_compress_str2level(unsigned int type, const char *str) | |
f51d2b59 | 1564 | { |
d0ab62ce DZ |
1565 | unsigned int level = 0; |
1566 | int ret; | |
1567 | ||
1568 | if (!type) | |
f51d2b59 DS |
1569 | return 0; |
1570 | ||
d0ab62ce DZ |
1571 | if (str[0] == ':') { |
1572 | ret = kstrtouint(str + 1, 10, &level); | |
1573 | if (ret) | |
1574 | level = 0; | |
1575 | } | |
1576 | ||
b0c1fe1e DS |
1577 | level = btrfs_compress_set_level(type, level); |
1578 | ||
1579 | return level; | |
1580 | } |
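For example, the mount option compress=zlib:9 hands the ":9" suffix to this helper. A userspace sketch of the same parse-and-clamp flow (strtoul() stands in for kstrtouint(), and the default/max values here are made up for illustration):

```c
#include <stdio.h>
#include <stdlib.h>

/* Parse a ":level" suffix; fall back to the default on 0 or garbage,
 * clamp to the algorithm's maximum, mirroring the logic above. */
static unsigned int str2level(const char *str, unsigned int def,
                              unsigned int max)
{
        unsigned int level = 0;

        if (str[0] == ':')
                level = strtoul(str + 1, NULL, 10);
        if (level == 0)
                return def;
        return level < max ? level : max;
}

int main(void)
{
        printf("%u\n", str2level(":3", 3, 9));  /* 3 */
        printf("%u\n", str2level(":99", 3, 9)); /* clamped to 9 */
        printf("%u\n", str2level(":x", 3, 9));  /* default: 3 */
        return 0;
}
```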