readahead_gfp_mask(as) & ~__GFP_RECLAIM);
}
-/* Has a disk mapping */
-#define EROFS_MAP_MAPPED 0x0001
+/* Allocated on disk at @m_pa (e.g. NOT a fragment extent) */
+#define EROFS_MAP_MAPPED 0x0001
/* Located in metadata (could be copied from bd_inode) */
-#define EROFS_MAP_META 0x0002
-/* The extent is encoded */
-#define EROFS_MAP_ENCODED 0x0004
-/* The length of extent is full */
-#define EROFS_MAP_FULL_MAPPED 0x0008
+#define EROFS_MAP_META 0x0002
+/* @m_llen may be truncated by the runtime compared to the on-disk record */
+#define EROFS_MAP_PARTIAL_MAPPED 0x0004
+/* The on-disk @m_llen may cover only part of the encoded data */
+#define EROFS_MAP_PARTIAL_REF 0x0008
/* Located in the special packed inode */
-#define __EROFS_MAP_FRAGMENT 0x0010
-/* The extent refers to partial decompressed data */
-#define EROFS_MAP_PARTIAL_REF 0x0020
-
-#define EROFS_MAP_FRAGMENT (EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT)
+#define EROFS_MAP_FRAGMENT 0x0010
+/* Whether the on-disk encoded data will be fully decompressed */
+#define EROFS_MAP_FULL(f) (!((f) & (EROFS_MAP_PARTIAL_MAPPED | \
+ EROFS_MAP_PARTIAL_REF)))
struct erofs_map_blocks {
struct erofs_buf buf;
if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
return false;
- if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
+ if (fe->map.m_flags & EROFS_MAP_PARTIAL_MAPPED)
return true;
if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
/* bump split parts first to avoid several separate cases */
++split;
- if (!(map->m_flags & EROFS_MAP_MAPPED)) {
- folio_zero_segment(folio, cur, end);
- tight = false;
- } else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
+ if (map->m_flags & EROFS_MAP_FRAGMENT) {
erofs_off_t fpos = offset + cur - map->m_la;
err = z_erofs_read_fragment(inode->i_sb, folio, cur,
if (err)
break;
tight = false;
+ } else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ folio_zero_segment(folio, cur, end);
+ tight = false;
} else {
if (!f->pcl) {
err = z_erofs_pcluster_begin(f);
f->pcl->length = offset + end - map->m_la;
f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
}
- if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
- !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
+ if (EROFS_MAP_FULL(map->m_flags) &&
f->pcl->length == map->m_llen)
f->pcl->partial = false;
}
/* shorten the remaining extent to update progress */
map->m_llen = offset + cur - map->m_la;
- map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
+ map->m_flags |= EROFS_MAP_PARTIAL_MAPPED;
if (cur <= pgs) {
split = cur < pgs;
tight = (bs == PAGE_SIZE);
map->m_la = end;
err = z_erofs_map_blocks_iter(inode, map,
EROFS_GET_BLOCKS_READMORE);
- if (err || !(map->m_flags & EROFS_MAP_ENCODED))
+ if (err || !(map->m_flags & EROFS_MAP_MAPPED))
return;
/* expand ra for the trailing edge if readahead */
end = round_up(end, PAGE_SIZE);
} else {
end = round_up(map->m_la, PAGE_SIZE);
- if (!(map->m_flags & EROFS_MAP_ENCODED) || !map->m_llen)
+ if (!(map->m_flags & EROFS_MAP_MAPPED) || !map->m_llen)
return;
}
if ((flags & EROFS_GET_BLOCKS_FINDTAIL) && ztailpacking)
vi->z_fragmentoff = m.nextpackoff;
- map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
+ map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_PARTIAL_MAPPED;
end = (m.lcn + 1ULL) << lclusterbits;
if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && endoff >= m.clusterofs) {
} else {
if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
end = (m.lcn << lclusterbits) | m.clusterofs;
- map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ map->m_flags &= ~EROFS_MAP_PARTIAL_MAPPED;
m.delta[0] = 1;
}
/* get the corresponding first chunk */
map->m_llen >= i_blocksize(inode))) {
err = z_erofs_get_extent_decompressedlen(&m);
if (!err)
- map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ map->m_flags &= ~EROFS_MAP_PARTIAL_MAPPED;
}
unmap_out:
if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
vi->z_fragmentoff |= map->m_pa << 32;
} else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
- map->m_flags |= EROFS_MAP_MAPPED |
- EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
+ map->m_flags |= EROFS_MAP_MAPPED;
fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
map->m_flags |= EROFS_MAP_PARTIAL_REF;
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
u64 pend;
- if (!(map->m_flags & EROFS_MAP_ENCODED))
+ if (!(map->m_flags & EROFS_MAP_MAPPED))
return 0;
if (unlikely(map->m_algorithmformat >= Z_EROFS_COMPRESSION_RUNTIME_MAX)) {
erofs_err(inode->i_sb, "unknown algorithm %d @ pos %llu for nid %llu, please upgrade kernel",
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = map.m_la;
iomap->length = map.m_llen;
- if (map.m_flags & EROFS_MAP_MAPPED) {
+ if (map.m_flags & EROFS_MAP_FRAGMENT) {
iomap->type = IOMAP_MAPPED;
- iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ?
- IOMAP_NULL_ADDR : map.m_pa;
+ iomap->addr = IOMAP_NULL_ADDR;
+ } else if (map.m_flags & EROFS_MAP_MAPPED) {
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = map.m_pa;
} else {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
#define show_mflags(flags) __print_flags(flags, "", \
{ EROFS_MAP_MAPPED, "M" }, \
{ EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ENCODED, "E" }, \
- { EROFS_MAP_FULL_MAPPED, "F" }, \
- { EROFS_MAP_FRAGMENT, "R" }, \
- { EROFS_MAP_PARTIAL_REF, "P" })
+ { EROFS_MAP_PARTIAL_MAPPED, "T" }, \
+ { EROFS_MAP_PARTIAL_REF, "P" }, \
+ { EROFS_MAP_FRAGMENT, "R" })
TRACE_EVENT(erofs_lookup,