 * A: Field should be accessed / updated atomically for parallelized code.
*/
struct z_erofs_pcluster {
- struct erofs_workgroup obj;
struct mutex lock;
+ struct lockref lockref;
/* A: point to next chained pcluster or TAILs */
z_erofs_next_pcluster_t next;
+ /* I: start block address of this pcluster */
+ erofs_off_t index;
+
/* L: the maximum decompression size of this round */
unsigned int length;
static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
- return !pcl->obj.index;
+ return !pcl->index;
}
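/*
 * For reference only (not part of this patch): the lockref embedded in
 * z_erofs_pcluster above is the generic spinlock-protected refcount from
 * <linux/lockref.h>.  A simplified view, leaving out the cmpxchg fast-path
 * union member:
 */
struct lockref {
	spinlock_t lock;	/* serializes slow-path count updates */
	int count;		/* reference count of the pcluster */
};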
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
if (READ_ONCE(pcl->compressed_bvecs[i].page))
continue;
- page = find_get_page(mc, pcl->obj.index + i);
+ page = find_get_page(mc, pcl->index + i);
if (!page) {
			/* I/O is needed, not possible to decompress directly */
standalone = false;
continue;
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
}
- spin_lock(&pcl->obj.lockref.lock);
+ spin_lock(&pcl->lockref.lock);
if (!pcl->compressed_bvecs[i].page) {
pcl->compressed_bvecs[i].page = page ? page : newpage;
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
continue;
}
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
if (page)
put_page(page);
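/*
 * Note on the hunk above: the compressed_bvecs[] slot is claimed under
 * pcl->lockref.lock, so exactly one of the managed-cache page and the
 * preallocated newpage ends up attached; if another thread filled the slot
 * in the meantime, the extra page cache reference is dropped via put_page().
 */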
/* (erofs_shrinker) disconnect cached encoded data from pclusters */
static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp)
+ struct z_erofs_pcluster *pcl)
{
- struct z_erofs_pcluster *const pcl =
- container_of(grp, struct z_erofs_pcluster, obj);
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
struct folio *folio;
int i;
return true;
ret = false;
- spin_lock(&pcl->obj.lockref.lock);
- if (pcl->obj.lockref.count <= 0) {
+ spin_lock(&pcl->lockref.lock);
+ if (pcl->lockref.count <= 0) {
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
for (; bvec < end; ++bvec) {
if (bvec->page && page_folio(bvec->page) == folio) {
}
}
}
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
return ret;
}
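/*
 * Note on the folio release hunk above: a cached folio is only detached from
 * its pcluster when lockref.count has dropped to zero, i.e. no reader is
 * actively using the compressed data of that pcluster.
 */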
if (exclusive) {
		/* give priority for in-place I/O to use file pages first */
- spin_lock(&pcl->obj.lockref.lock);
+ spin_lock(&pcl->lockref.lock);
while (fe->icur > 0) {
if (pcl->compressed_bvecs[--fe->icur].page)
continue;
pcl->compressed_bvecs[fe->icur] = *bvec;
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
return 0;
}
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
/* otherwise, check if it can be used as a bvpage */
if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
return ret;
}
-static bool z_erofs_get_pcluster(struct erofs_workgroup *grp)
+static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
{
- if (lockref_get_not_zero(&grp->lockref))
+ if (lockref_get_not_zero(&pcl->lockref))
return true;
- spin_lock(&grp->lockref.lock);
- if (__lockref_is_dead(&grp->lockref)) {
- spin_unlock(&grp->lockref.lock);
+ spin_lock(&pcl->lockref.lock);
+ if (__lockref_is_dead(&pcl->lockref)) {
+ spin_unlock(&pcl->lockref.lock);
return false;
}
- if (!grp->lockref.count++)
+ if (!pcl->lockref.count++)
atomic_long_dec(&erofs_global_shrink_cnt);
- spin_unlock(&grp->lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
return true;
}
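/*
 * Reference convention implied by the helper above (and z_erofs_put_pcluster()
 * further down): a pcluster whose lockref.count is zero is reachable only
 * through sbi->managed_pslots and is accounted in erofs_global_shrink_cnt;
 * taking the first reference therefore decrements that counter, and dropping
 * the last one increments it again so the shrinker sees it as reclaimable.
 */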
struct super_block *sb = fe->inode->i_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
bool ztailpacking = map->m_flags & EROFS_MAP_META;
- struct z_erofs_pcluster *pcl;
- struct erofs_workgroup *grp, *pre;
+ struct z_erofs_pcluster *pcl, *pre;
int err;
if (!(map->m_flags & EROFS_MAP_ENCODED) ||
if (IS_ERR(pcl))
return PTR_ERR(pcl);
- spin_lock_init(&pcl->obj.lockref.lock);
- pcl->obj.lockref.count = 1; /* one ref for this request */
+ spin_lock_init(&pcl->lockref.lock);
+ pcl->lockref.count = 1; /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->length = 0;
pcl->partial = true;
DBG_BUGON(!mutex_trylock(&pcl->lock));
if (ztailpacking) {
- pcl->obj.index = 0; /* which indicates ztailpacking */
+ pcl->index = 0; /* which indicates ztailpacking */
} else {
- pcl->obj.index = erofs_blknr(sb, map->m_pa);
+ pcl->index = erofs_blknr(sb, map->m_pa);
while (1) {
xa_lock(&sbi->managed_pslots);
- pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
- NULL, grp, GFP_KERNEL);
+ pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->index,
+ NULL, pcl, GFP_KERNEL);
if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
xa_unlock(&sbi->managed_pslots);
break;
err = xa_err(pre);
goto err_out;
} else if (pre) {
- fe->pcl = container_of(pre,
- struct z_erofs_pcluster, obj);
+ fe->pcl = pre;
err = -EEXIST;
goto err_out;
}
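/*
 * Summary of the registration loop above: __xa_cmpxchg() either installs the
 * new pcluster (pre == NULL), reports an XArray error, or returns an already
 * registered pcluster.  If the existing one is dying (z_erofs_get_pcluster()
 * fails), its slot is about to be erased, so the lock is dropped and the
 * insertion retried; otherwise the existing pcluster is adopted as fe->pcl
 * and -EEXIST is returned through the err_out path.
 */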
struct erofs_map_blocks *map = &fe->map;
struct super_block *sb = fe->inode->i_sb;
erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
- struct erofs_workgroup *grp = NULL;
+ struct z_erofs_pcluster *pcl = NULL;
int ret;
DBG_BUGON(fe->pcl);
if (!(map->m_flags & EROFS_MAP_META)) {
while (1) {
rcu_read_lock();
- grp = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
- if (!grp || z_erofs_get_pcluster(grp)) {
- DBG_BUGON(grp && blknr != grp->index);
+ pcl = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
+ if (!pcl || z_erofs_get_pcluster(pcl)) {
+ DBG_BUGON(pcl && blknr != pcl->index);
rcu_read_unlock();
break;
}
return -EFSCORRUPTED;
}
- if (grp) {
- fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+ if (pcl) {
+ fe->pcl = pcl;
ret = -EEXIST;
} else {
ret = z_erofs_register_pcluster(fe);
struct z_erofs_pcluster, rcu));
}
-static void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
- struct z_erofs_pcluster *const pcl =
- container_of(grp, struct z_erofs_pcluster, obj);
-
- call_rcu(&pcl->rcu, z_erofs_rcu_callback);
-}
-
static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp)
+ struct z_erofs_pcluster *pcl)
{
int free = false;
- spin_lock(&grp->lockref.lock);
- if (grp->lockref.count)
+ spin_lock(&pcl->lockref.lock);
+ if (pcl->lockref.count)
goto out;
/*
	 * the XArray. Otherwise some folios could still be attached to the
* orphan old pcluster when the new one is available in the tree.
*/
- if (erofs_try_to_free_all_cached_folios(sbi, grp))
+ if (erofs_try_to_free_all_cached_folios(sbi, pcl))
goto out;
/*
	 * It's impossible to fail after the pcluster is frozen, but in order
* to avoid some race conditions, add a DBG_BUGON to observe this.
*/
- DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
+ DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->index) != pcl);
- lockref_mark_dead(&grp->lockref);
+ lockref_mark_dead(&pcl->lockref);
free = true;
out:
- spin_unlock(&grp->lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
if (free) {
atomic_long_dec(&erofs_global_shrink_cnt);
- erofs_workgroup_free_rcu(grp);
+ call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
return free;
}
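/*
 * Teardown ordering above: the pcluster is erased from managed_pslots and
 * marked dead while pcl->lockref.lock is held, so a racing xa_load() +
 * z_erofs_get_pcluster() either takes a reference first (and the release is
 * skipped) or observes __lockref_is_dead() and fails; freeing is deferred
 * with call_rcu() so such RCU-protected lookups never touch freed memory.
 */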
unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
unsigned long nr_shrink)
{
- struct erofs_workgroup *grp;
+ struct z_erofs_pcluster *pcl;
unsigned int freed = 0;
unsigned long index;
xa_lock(&sbi->managed_pslots);
- xa_for_each(&sbi->managed_pslots, index, grp) {
+ xa_for_each(&sbi->managed_pslots, index, pcl) {
/* try to shrink each valid pcluster */
- if (!erofs_try_to_release_pcluster(sbi, grp))
+ if (!erofs_try_to_release_pcluster(sbi, pcl))
continue;
xa_unlock(&sbi->managed_pslots);
static void z_erofs_put_pcluster(struct z_erofs_pcluster *pcl)
{
- struct erofs_workgroup *grp = &pcl->obj;
-
- if (lockref_put_or_lock(&grp->lockref))
+ if (lockref_put_or_lock(&pcl->lockref))
return;
- DBG_BUGON(__lockref_is_dead(&grp->lockref));
- if (grp->lockref.count == 1)
+ DBG_BUGON(__lockref_is_dead(&pcl->lockref));
+ if (pcl->lockref.count == 1)
atomic_long_inc(&erofs_global_shrink_cnt);
- --grp->lockref.count;
- spin_unlock(&grp->lockref.lock);
+ --pcl->lockref.count;
+ spin_unlock(&pcl->lockref.lock);
}
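/*
 * Note: lockref_put_or_lock() only drops a reference on the fast path while
 * count > 1; for the final reference it returns with pcl->lockref.lock held,
 * which is why the slow path above can bump erofs_global_shrink_cnt before
 * decrementing count to zero under the lock.
 */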
static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
bvec->bv_offset = 0;
bvec->bv_len = PAGE_SIZE;
repeat:
- spin_lock(&pcl->obj.lockref.lock);
+ spin_lock(&pcl->lockref.lock);
zbv = pcl->compressed_bvecs[nr];
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
if (!zbv.page)
goto out_allocfolio;
folio_put(folio);
out_allocfolio:
page = __erofs_allocpage(&f->pagepool, gfp, true);
- spin_lock(&pcl->obj.lockref.lock);
+ spin_lock(&pcl->lockref.lock);
if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
if (page)
erofs_pagepool_add(&f->pagepool, page);
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
cond_resched();
goto repeat;
}
pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
- spin_unlock(&pcl->obj.lockref.lock);
+ spin_unlock(&pcl->lockref.lock);
bvec->bv_page = page;
if (!page)
return;
folio = page_folio(page);
out_tocache:
if (!tocache || bs != PAGE_SIZE ||
- filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
+ filemap_add_folio(mc, folio, pcl->index + nr, gfp)) {
/* turn into a temporary shortlived folio (1 ref) */
folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
return;
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
- .m_pa = erofs_pos(sb, pcl->obj.index),
+ .m_pa = erofs_pos(sb, pcl->index),
};
(void)erofs_map_dev(sb, &mdev);
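/*
 * pcl->index holds the start block number (set via erofs_blknr() above), so
 * erofs_pos() converts it back to the byte address expected by the device
 * mapping here.
 */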