What: /sys/fs/erofs/<disk>/sync_decompress
Date: November 2021
Contact: "Huang Jianan" <huangjianan@oppo.com>
-Description: Control strategy of sync decompression:
+Description: Control strategy of synchronous decompression, which
+ decompresses data directly in the reader thread for synchronous
+ reads and for small asynchronous reads (<= 12 KiB):
- - 0 (default, auto): enable for readpage, and enable for
- readahead on atomic contexts only.
- - 1 (force on): enable for readpage and readahead.
- - 2 (force off): disable for all situations.
+ - 0 (auto, default): apply to synchronous reads only, but it will
+ switch to 1 (force on) once any decompression
+ request is detected in an atomic context;
+ - 1 (force on): apply to synchronous reads and small
+ asynchronous reads;
+ - 2 (force off): disable synchronous decompression completely.
What: /sys/fs/erofs/<disk>/drop_caches
Date: November 2024
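As a minimal sketch of the strategy table above (illustrative only, not
kernel code: the function name is ad hoc, and 12288 mirrors the
Z_EROFS_MAX_SYNC_DECOMPRESS_BYTES definition added below):

	/* Returns true if decompression should run in the reading thread;
	 * readahead_bytes is 0 for a synchronous read. */
	static bool sync_decompress_decision(unsigned int strategy,
					     unsigned int readahead_bytes)
	{
		if (strategy == 0)		/* auto */
			return readahead_bytes == 0;
		if (strategy == 1)		/* force on */
			return readahead_bytes <= 12288;	/* <= 12 KiB */
		return false;			/* 2: force off */
	}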
struct erofs_mount_opts {
/* current strategy of how to use managed cache */
unsigned char cache_strategy;
- /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
- unsigned int sync_decompress;
- /* threshold for decompression synchronously */
- unsigned int max_sync_decompress_pages;
unsigned int mount_opt;
};
/* managed XArray arranged in physical block number */
struct xarray managed_pslots;
+ unsigned int sync_decompress; /* sync decompression strategy (0 auto, 1 force on, 2 force off) */
unsigned int shrinker_run_no;
u16 available_compr_algs;
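For reference, the three strategy values used by the new field map to
constants already defined in fs/erofs/internal.h (shown for context,
unchanged by this diff):

	enum {
		EROFS_SYNC_DECOMPRESS_AUTO,
		EROFS_SYNC_DECOMPRESS_FORCE_ON,
		EROFS_SYNC_DECOMPRESS_FORCE_OFF
	};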
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>
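+/* readahead requests up to this many bytes may still be decompressed synchronously ("force on") */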
+#define Z_EROFS_MAX_SYNC_DECOMPRESS_BYTES 12288
#define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS 2
return err;
}
-static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
- unsigned int readahead_pages)
-{
- /* auto: enable for read_folio, disable for readahead */
- if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
- !readahead_pages)
- return true;
-
- if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
- (readahead_pages <= sbi->opt.max_sync_decompress_pages))
- return true;
-
- return false;
-}
-
static bool z_erofs_page_is_invalidated(struct page *page)
{
return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
#else
queue_work(z_erofs_workqueue, &io->u.work);
#endif
- /* enable sync decompression for readahead */
- if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
- sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+ /* See `sync_decompress` in sysfs-fs-erofs for more details */
+ if (sbi->sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+ sbi->sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
return;
}
z_erofs_decompressqueue_work(&io->u.work);
z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}
-static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
+static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rabytes)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
- bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
+ int syncmode = sbi->sync_decompress;
+ bool force_fg;
int err;
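+ /* decompress inline for sync reads (auto) or for readahead up to 12 KiB (force on) */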
+ force_fg = (syncmode == EROFS_SYNC_DECOMPRESS_AUTO && !rabytes) ||
+ (syncmode == EROFS_SYNC_DECOMPRESS_FORCE_ON &&
+ (rabytes <= Z_EROFS_MAX_SYNC_DECOMPRESS_BYTES));
+
if (f->head == Z_EROFS_PCLUSTER_TAIL)
return 0;
- z_erofs_submit_queue(f, io, &force_fg, !!rapages);
+ z_erofs_submit_queue(f, io, &force_fg, !!rabytes);
/* handle bypass queue (no i/o pclusters) immediately */
err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
z_erofs_pcluster_readmore(&f, rac, false);
z_erofs_pcluster_end(&f);
- (void)z_erofs_runqueue(&f, nrpages);
+ (void)z_erofs_runqueue(&f, nrpages << PAGE_SHIFT);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
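As a usage sketch, the strategy can be selected per device through the
sysfs node documented above (hypothetical userspace code; the device
name "sda1" is an assumption):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/erofs/sda1/sync_decompress", "w");

		if (!f)
			return 1;
		fputs("1", f);	/* 0: auto, 1: force on, 2: force off */
		return fclose(f) ? 1 : 0;
	}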