struct list_head list;
};
-static struct workspace_manager heuristic_wsm;
-
static void free_heuristic_ws(struct list_head *ws)
{
struct heuristic_ws *workspace;
return ERR_PTR(-ENOMEM);
}
-const struct btrfs_compress_op btrfs_heuristic_compress = {
- .workspace_manager = &heuristic_wsm,
-};
+const struct btrfs_compress_op btrfs_heuristic_compress = { 0 };
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
/* The heuristic is represented as compression type 0 */
return 0;
}
-static void btrfs_init_workspace_manager(struct btrfs_fs_info *fs_info, int type)
-{
- struct workspace_manager *wsm;
- struct list_head *workspace;
-
- wsm = btrfs_compress_op[type]->workspace_manager;
- INIT_LIST_HEAD(&wsm->idle_ws);
- spin_lock_init(&wsm->ws_lock);
- atomic_set(&wsm->total_ws, 0);
- init_waitqueue_head(&wsm->ws_wait);
-
- /*
- * Preallocate one workspace for each compression type so we can
- * guarantee forward progress in the worst case
- */
- workspace = alloc_workspace(fs_info, type, 0);
- if (IS_ERR(workspace)) {
- btrfs_warn(fs_info,
- "cannot preallocate compression workspace, will try later");
- } else {
- atomic_set(&wsm->total_ws, 1);
- wsm->free_ws = 1;
- list_add(workspace, &wsm->idle_ws);
- }
-}
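None of the removed initialization above is lost; this series moves it into an allocating, per-filesystem counterpart. A minimal sketch of that replacement, assuming the fs_info->compr_wsm[] slot visible in the zstd hunks below and the existing alloc_workspace() helper (the exact new body is not part of this excerpt):

static int alloc_workspace_manager(struct btrfs_fs_info *fs_info,
				   enum btrfs_compression_type type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
	if (!wsm)
		return -ENOMEM;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);
	fs_info->compr_wsm[type] = wsm;

	/* Keep the one-workspace preallocation that guarantees progress. */
	workspace = alloc_workspace(fs_info, type, 0);
	if (IS_ERR(workspace)) {
		btrfs_warn(fs_info,
			"cannot preallocate compression workspace, will try later");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
	return 0;
}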
-
static void free_workspace_manager(struct btrfs_fs_info *fs_info,
enum btrfs_compression_type type)
{
kfree(gwsm);
}
-static void btrfs_cleanup_workspace_manager(int type)
-{
- struct workspace_manager *wsman;
- struct list_head *ws;
-
- wsman = btrfs_compress_op[type]->workspace_manager;
- while (!list_empty(&wsman->idle_ws)) {
- ws = wsman->idle_ws.next;
- list_del(ws);
- free_workspace(type, ws);
- atomic_dec(&wsman->total_ws);
- }
-}
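Only the closing kfree() of the new free_workspace_manager() survived the excerpt above. Assuming it is the per-fs_info successor of the removed btrfs_cleanup_workspace_manager(), a plausible reconstruction of the missing body (the gwsm lookup and the NULL reset are assumptions):

static void free_workspace_manager(struct btrfs_fs_info *fs_info,
				   enum btrfs_compression_type type)
{
	struct workspace_manager *gwsm = fs_info->compr_wsm[type];
	struct list_head *ws;

	if (!gwsm)
		return;
	fs_info->compr_wsm[type] = NULL;
	while (!list_empty(&gwsm->idle_ws)) {
		ws = gwsm->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&gwsm->total_ws);
	}
	kfree(gwsm);
}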
-
 /*
  * This finds an available workspace or allocates a new one.
  * If it's not possible to allocate a new one, it waits until one is
  * released. Preallocation guarantees forward progress and we do not
  * return errors.
  */
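As a sketch of the loop that comment describes, assuming the per-fs_info manager lookup and leaving out the CPU-count throttling the real function applies:

struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info,
				      int type, int level)
{
	struct workspace_manager *wsm = fs_info->compr_wsm[type];
	struct list_head *workspace;

	while (1) {
		spin_lock(&wsm->ws_lock);
		if (!list_empty(&wsm->idle_ws)) {
			workspace = wsm->idle_ws.next;
			list_del(workspace);
			wsm->free_ws--;
			spin_unlock(&wsm->ws_lock);
			return workspace;
		}
		spin_unlock(&wsm->ws_lock);

		workspace = alloc_workspace(fs_info, type, level);
		if (!IS_ERR(workspace)) {
			atomic_inc(&wsm->total_ws);
			return workspace;
		}
		/* The preallocated workspace guarantees this wait ends. */
		wait_event(wsm->ws_wait, !list_empty(&wsm->idle_ws));
	}
}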
if (!compr_pool.shrinker)
return -ENOMEM;
- btrfs_init_workspace_manager(NULL, BTRFS_COMPRESS_NONE);
- btrfs_init_workspace_manager(NULL, BTRFS_COMPRESS_ZLIB);
- btrfs_init_workspace_manager(NULL, BTRFS_COMPRESS_LZO);
- zstd_init_workspace_manager(NULL);
-
spin_lock_init(&compr_pool.lock);
INIT_LIST_HEAD(&compr_pool.list);
compr_pool.count = 0;
btrfs_compr_pool_scan(NULL, NULL);
shrinker_free(compr_pool.shrinker);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
- btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
- zstd_cleanup_workspace_manager();
bioset_exit(&btrfs_compressed_bioset);
}
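With the global init and cleanup calls dropped from module init/exit, the managers are presumably set up once a fs_info exists (mount) and torn down with it (unmount). A hedged sketch of that call pattern; the wrapper name and unwinding policy are assumptions, and only zstd_alloc_workspace_manager() is actually declared in this excerpt:

/* Sketch: mount-path setup replacing the removed module-init calls. */
static int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info)
{
	static const enum btrfs_compression_type types[] = {
		BTRFS_COMPRESS_NONE, BTRFS_COMPRESS_ZLIB, BTRFS_COMPRESS_LZO,
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		ret = alloc_workspace_manager(fs_info, types[i]);
		if (ret)
			return ret;
	}
	return zstd_alloc_workspace_manager(fs_info);
}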
size_t destlen);
int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info);
void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info);
-void zstd_init_workspace_manager(struct btrfs_fs_info *fs_info);
-void zstd_cleanup_workspace_manager(void);
struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level);
void zstd_free_workspace(struct list_head *ws);
struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level);
struct list_head list;
};
-static struct workspace_manager wsm;
-
void lzo_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
}
const struct btrfs_compress_op btrfs_lzo_compress = {
- .workspace_manager = &wsm,
.max_level = 1,
.default_level = 1,
};
int level;
};
-static struct workspace_manager wsm;
-
struct list_head *zlib_get_workspace(struct btrfs_fs_info *fs_info, unsigned int level)
{
struct list_head *ws = btrfs_get_workspace(fs_info, BTRFS_COMPRESS_ZLIB, level);
}
const struct btrfs_compress_op btrfs_zlib_compress = {
- .workspace_manager = &wsm,
.min_level = 1,
.max_level = 9,
.default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
struct timer_list timer;
};
-static struct zstd_workspace_manager wsm;
-
static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
static inline struct workspace *list_to_workspace(struct list_head *list)
return 0;
}
-void zstd_init_workspace_manager(struct btrfs_fs_info *fs_info)
-{
- struct list_head *ws;
- int i;
-
- zstd_calc_ws_mem_sizes();
-
- wsm.ops = &btrfs_zstd_compress;
- spin_lock_init(&wsm.lock);
- init_waitqueue_head(&wsm.wait);
- timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
-
- INIT_LIST_HEAD(&wsm.lru_list);
- for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
- INIT_LIST_HEAD(&wsm.idle_ws[i]);
-
- ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
- if (IS_ERR(ws)) {
- btrfs_warn(NULL, "cannot preallocate zstd compression workspace");
- } else {
- set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
- list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
- }
-}
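The zstd_alloc_workspace_manager() declaration in compression.h, together with the initializer removed here, suggests the replacement allocates the manager and runs the same setup against it. A sketch under that assumption (storing into fs_info->compr_wsm mirrors the lookup in zstd_free_workspace_manager() below):

int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info)
{
	struct zstd_workspace_manager *zwsm;
	struct list_head *ws;
	int i;

	zwsm = kzalloc(sizeof(*zwsm), GFP_KERNEL);
	if (!zwsm)
		return -ENOMEM;

	zstd_calc_ws_mem_sizes();

	zwsm->ops = &btrfs_zstd_compress;
	spin_lock_init(&zwsm->lock);
	init_waitqueue_head(&zwsm->wait);
	timer_setup(&zwsm->timer, zstd_reclaim_timer_fn, 0);

	INIT_LIST_HEAD(&zwsm->lru_list);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&zwsm->idle_ws[i]);
	fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = zwsm;

	ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
	if (IS_ERR(ws)) {
		btrfs_warn(fs_info, "cannot preallocate zstd compression workspace");
	} else {
		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &zwsm->active_map);
		list_add(ws, &zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
	}
	return 0;
}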
-
void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info)
{
struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
kfree(zwsm);
}
-void zstd_cleanup_workspace_manager(void)
-{
- struct workspace *workspace;
- int i;
-
- spin_lock_bh(&wsm.lock);
- for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
- while (!list_empty(&wsm.idle_ws[i])) {
- workspace = container_of(wsm.idle_ws[i].next,
- struct workspace, list);
- list_del(&workspace->list);
- list_del(&workspace->lru_list);
- zstd_free_workspace(&workspace->list);
- }
- }
- spin_unlock_bh(&wsm.lock);
-
- timer_delete_sync(&wsm.timer);
-}
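In the new zstd_free_workspace_manager() above only the lookup and the final kfree() are visible; assuming the body in between mirrors the cleanup removed here, a plausible reconstruction:

void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct workspace *workspace;
	int i;

	if (!zwsm)
		return;
	fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = NULL;

	spin_lock_bh(&zwsm->lock);
	for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&zwsm->idle_ws[i])) {
			workspace = container_of(zwsm->idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&zwsm->lock);

	timer_delete_sync(&zwsm->timer);
	kfree(zwsm);
}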
-
/*
* Find workspace for given level.
*
}
const struct btrfs_compress_op btrfs_zstd_compress = {
- /* ZSTD uses own workspace manager */
- .workspace_manager = NULL,
.min_level = ZSTD_BTRFS_MIN_LEVEL,
.max_level = ZSTD_BTRFS_MAX_LEVEL,
.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,