*
* Pass 1 is designed to stash away enough information so that the
* other passes should not need to read in the inode information
- * during the normal course of a filesystem check. (Althogh if an
+ * during the normal course of a filesystem check. (Although if an
* inconsistency is detected, other passes may need to read in an
* inode to fix it.)
*
#undef DEBUG
+struct ea_quota {
+ blk64_t blocks;
+ __u64 inodes;
+};
+
static int process_block(ext2_filsys fs, blk64_t *blocknr,
e2_blkcnt_t blockcnt, blk64_t ref_blk,
int ref_offset, void *priv_data);
e2_blkcnt_t blockcnt, blk64_t ref_blk,
int ref_offset, void *priv_data);
static void check_blocks(e2fsck_t ctx, struct problem_context *pctx,
- char *block_buf, blk64_t ea_ibody_quota_blocks);
+ char *block_buf,
+ const struct ea_quota *ea_ibody_quota);
static void mark_table_blocks(e2fsck_t ctx);
static void alloc_bb_map(e2fsck_t ctx);
static void alloc_imagic_map(e2fsck_t ctx);
struct problem_context *pctx;
ext2fs_block_bitmap fs_meta_blocks;
e2fsck_t ctx;
- region_t region;
+ blk64_t next_lblock;
struct extent_tree_info eti;
};
struct process_inode_block {
ext2_ino_t ino;
- blk64_t ea_ibody_quota_blocks;
+ struct ea_quota ea_ibody_quota;
struct ext2_inode_large inode;
};
int i;
/*
- * If the index flag is set, then this is a bogus
+ * If the index or extents flag is set, then this is a bogus
* device/fifo/socket
*/
- if (inode->i_flags & EXT2_INDEX_FL)
+ if (inode->i_flags & (EXT2_INDEX_FL | EXT4_EXTENTS_FL))
return 0;
/*
int e2fsck_pass1_check_symlink(ext2_filsys fs, ext2_ino_t ino,
struct ext2_inode *inode, char *buf)
{
+ unsigned int buflen;
unsigned int len;
- int i;
- ext2_extent_handle_t handle;
- struct ext2_extent_info info;
- struct ext2fs_extent extent;
if ((inode->i_size_high || inode->i_size == 0) ||
(inode->i_flags & EXT2_INDEX_FL))
return 0;
- if (inode->i_flags & EXT4_EXTENTS_FL) {
- if (inode->i_flags & EXT4_INLINE_DATA_FL)
- return 0;
- if (inode->i_size > fs->blocksize)
- return 0;
- if (ext2fs_extent_open2(fs, ino, inode, &handle))
- return 0;
- i = 0;
- if (ext2fs_extent_get_info(handle, &info) ||
- (info.num_entries != 1) ||
- (info.max_depth != 0))
- goto exit_extent;
- if (ext2fs_extent_get(handle, EXT2_EXTENT_ROOT, &extent) ||
- (extent.e_lblk != 0) ||
- (extent.e_len != 1) ||
- (extent.e_pblk < fs->super->s_first_data_block) ||
- (extent.e_pblk >= ext2fs_blocks_count(fs->super)))
- goto exit_extent;
- i = 1;
- exit_extent:
- ext2fs_extent_free(handle);
- return i;
- }
-
if (inode->i_flags & EXT4_INLINE_DATA_FL) {
size_t inline_size;
+ if (inode->i_flags & EXT4_EXTENTS_FL)
+ return 0;
if (ext2fs_inline_data_size(fs, ino, &inline_size))
return 0;
if (inode->i_size != inline_size)
}
if (ext2fs_is_fast_symlink(inode)) {
- if (inode->i_size >= sizeof(inode->i_block))
- return 0;
-
- len = strnlen((char *)inode->i_block, sizeof(inode->i_block));
- if (len == sizeof(inode->i_block))
+ if (inode->i_flags & EXT4_EXTENTS_FL)
return 0;
+ buf = (char *)inode->i_block;
+ buflen = sizeof(inode->i_block);
} else {
- if ((inode->i_size >= fs->blocksize) ||
- (inode->i_block[0] < fs->super->s_first_data_block) ||
- (inode->i_block[0] >= ext2fs_blocks_count(fs->super)))
- return 0;
+ ext2_extent_handle_t handle;
+ struct ext2_extent_info info;
+ struct ext2fs_extent extent;
+ blk64_t blk;
+ int i;
- for (i = 1; i < EXT2_N_BLOCKS; i++)
- if (inode->i_block[i])
+ if (inode->i_flags & EXT4_EXTENTS_FL) {
+ if (ext2fs_extent_open2(fs, ino, inode, &handle))
return 0;
+ if (ext2fs_extent_get_info(handle, &info) ||
+ (info.num_entries != 1) ||
+ (info.max_depth != 0)) {
+ ext2fs_extent_free(handle);
+ return 0;
+ }
+ if (ext2fs_extent_get(handle, EXT2_EXTENT_ROOT,
+ &extent) ||
+ (extent.e_lblk != 0) ||
+ (extent.e_len != 1)) {
+ ext2fs_extent_free(handle);
+ return 0;
+ }
+ blk = extent.e_pblk;
+ ext2fs_extent_free(handle);
+ } else {
+ blk = inode->i_block[0];
+
+ for (i = 1; i < EXT2_N_BLOCKS; i++)
+ if (inode->i_block[i])
+ return 0;
+ }
- if (io_channel_read_blk64(fs->io, inode->i_block[0], 1, buf))
+ if (blk < fs->super->s_first_data_block ||
+ blk >= ext2fs_blocks_count(fs->super))
return 0;
- if (inode->i_flags & EXT4_ENCRYPT_FL) {
- len = ext2fs_le32_to_cpu(*((__u32 *)buf)) + 4;
- } else {
- len = strnlen(buf, fs->blocksize);
- }
- if (len == fs->blocksize)
+ if (io_channel_read_blk64(fs->io, blk, 1, buf))
return 0;
+
+ buflen = fs->blocksize;
}
+
+ if (inode->i_flags & EXT4_ENCRYPT_FL)
+ len = ext2fs_le16_to_cpu(*(__u16 *)buf) + 2;
+ else
+ len = strnlen(buf, buflen);
+
+ if (len >= buflen)
+ return 0;
+
if (len != inode->i_size)
- if ((inode->i_flags & EXT4_ENCRYPT_FL) == 0)
- return 0;
+ return 0;
return 1;
}
}
static void check_ea_in_inode(e2fsck_t ctx, struct problem_context *pctx,
- blk64_t *ea_ibody_quota_blocks)
+ struct ea_quota *ea_ibody_quota)
{
struct ext2_super_block *sb = ctx->fs->super;
struct ext2_inode_large *inode;
unsigned int storage_size, remain;
problem_t problem = 0;
region_t region = 0;
- blk64_t quota_blocks = 0;
- *ea_ibody_quota_blocks = 0;
+ ea_ibody_quota->blocks = 0;
+ ea_ibody_quota->inodes = 0;
inode = (struct ext2_inode_large *) pctx->inode;
storage_size = EXT2_INODE_SIZE(ctx->fs->super) - EXT2_GOOD_OLD_INODE_SIZE -
goto fix;
}
} else {
- blk64_t entry_quota_blocks;
+ blk64_t quota_blocks;
problem = check_large_ea_inode(ctx, entry, pctx,
- &entry_quota_blocks);
+ "a_blocks);
if (problem != 0)
goto fix;
- quota_blocks += entry_quota_blocks;
+ ea_ibody_quota->blocks += quota_blocks;
+ ea_ibody_quota->inodes++;
}
/* If EA value is stored in external inode then it does not
if (problem == 0 || !fix_problem(ctx, problem, pctx)) {
inc_ea_inode_refs(ctx, pctx,
(struct ext2_ext_attr_entry *)start, end);
- *ea_ibody_quota_blocks = quota_blocks;
return;
}
*((__u32 *)header) = 0UL;
e2fsck_write_inode_full(ctx, pctx->ino, pctx->inode,
EXT2_INODE_SIZE(sb), "pass1");
+ ea_ibody_quota->blocks = 0;
+ ea_ibody_quota->inodes = 0;
}
static int check_inode_extra_negative_epoch(__u32 xtime, __u32 extra) {
#define EXT4_EXTRA_NEGATIVE_DATE_CUTOFF 2 * (1LL << 32)
static void check_inode_extra_space(e2fsck_t ctx, struct problem_context *pctx,
- blk64_t *ea_ibody_quota_blocks)
+ struct ea_quota *ea_ibody_quota)
{
struct ext2_super_block *sb = ctx->fs->super;
struct ext2_inode_large *inode;
__u32 *eamagic;
int min, max;
- *ea_ibody_quota_blocks = 0;
+ ea_ibody_quota->blocks = 0;
+ ea_ibody_quota->inodes = 0;
inode = (struct ext2_inode_large *) pctx->inode;
if (EXT2_INODE_SIZE(sb) == EXT2_GOOD_OLD_INODE_SIZE) {
inode->i_extra_isize);
if (*eamagic == EXT2_EXT_ATTR_MAGIC) {
/* it seems inode has an extended attribute(s) in body */
- check_ea_in_inode(ctx, pctx, ea_ibody_quota_blocks);
+ check_ea_in_inode(ctx, pctx, ea_ibody_quota);
}
/*
errcode_t retval;
char *tdb_dir;
int enable;
+ int full_map;
*ret = 0;
}
e2fsck_set_bitmap_type(ctx->fs, EXT2FS_BMAP64_RBTREE, icount_name,
&save_type);
+ if (ctx->options & E2F_OPT_ICOUNT_FULLMAP)
+ flags |= EXT2_ICOUNT_OPT_FULLMAP;
retval = ext2fs_create_icount2(ctx->fs, flags, 0, hint, ret);
ctx->fs->default_bitmap_type = save_type;
return retval;
int failed_csum = 0;
ext2_ino_t ino_threshold = 0;
dgrp_t ra_group = 0;
- blk64_t ea_ibody_quota_blocks;
+ struct ea_quota ea_ibody_quota;
init_resource_track(&rtrack, ctx->fs->io);
clear_problem_context(&pctx);
case EXT2_ET_NO_INLINE_DATA:
case EXT2_ET_EXT_ATTR_CSUM_INVALID:
case EXT2_ET_EA_BAD_VALUE_OFFSET:
+ case EXT2_ET_EA_INODE_CORRUPTED:
/* broken EA or no system.data EA; truncate */
if (fix_problem(ctx, PR_1_INLINE_DATA_NO_ATTR,
&pctx)) {
/*
* Make sure the root inode is a directory; if
* not, offer to clear it. It will be
- * regnerated in pass #3.
+ * regenerated in pass #3.
*/
if (!LINUX_S_ISDIR(inode->i_mode)) {
if (fix_problem(ctx, PR_1_ROOT_NO_DIR, &pctx))
"pass1");
failed_csum = 0;
}
- check_blocks(ctx, &pctx, block_buf, 0);
+ check_blocks(ctx, &pctx, block_buf, NULL);
FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
continue;
}
"pass1");
failed_csum = 0;
}
- check_blocks(ctx, &pctx, block_buf, 0);
+ check_blocks(ctx, &pctx, block_buf, NULL);
FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
continue;
}
failed_csum = 0;
}
}
- check_blocks(ctx, &pctx, block_buf, 0);
+ check_blocks(ctx, &pctx, block_buf, NULL);
FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
continue;
}
}
}
- check_inode_extra_space(ctx, &pctx, &ea_ibody_quota_blocks);
+ check_inode_extra_space(ctx, &pctx, &ea_ibody_quota);
check_is_really_dir(ctx, &pctx, block_buf);
/*
} else if (ext2fs_is_fast_symlink(inode)) {
ctx->fs_fast_symlinks_count++;
check_blocks(ctx, &pctx, block_buf,
- ea_ibody_quota_blocks);
+ &ea_ibody_quota);
FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
continue;
}
itp = &inodes_to_process[process_inode_count];
itp->ino = ino;
- itp->ea_ibody_quota_blocks = ea_ibody_quota_blocks;
+ itp->ea_ibody_quota = ea_ibody_quota;
if (inode_size < sizeof(struct ext2_inode_large))
memcpy(&itp->inode, inode, inode_size);
else
memcpy(&itp->inode, inode, sizeof(itp->inode));
process_inode_count++;
} else
- check_blocks(ctx, &pctx, block_buf,
- ea_ibody_quota_blocks);
+ check_blocks(ctx, &pctx, block_buf, &ea_ibody_quota);
FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
ctx->refcount_extra = 0;
}
- if (ctx->ea_block_quota) {
- ea_refcount_free(ctx->ea_block_quota);
- ctx->ea_block_quota = 0;
+ if (ctx->ea_block_quota_blocks) {
+ ea_refcount_free(ctx->ea_block_quota_blocks);
+ ctx->ea_block_quota_blocks = 0;
+ }
+
+ if (ctx->ea_block_quota_inodes) {
+ ea_refcount_free(ctx->ea_block_quota_inodes);
+ ctx->ea_block_quota_inodes = 0;
}
if (ctx->invalid_bitmaps)
pctx.ino);
ehandler_operation(buf);
check_blocks(ctx, &pctx, block_buf,
- inodes_to_process[i].ea_ibody_quota_blocks);
+ &inodes_to_process[i].ea_ibody_quota);
if (ctx->flags & E2F_FLAG_SIGNAL_MASK)
break;
}
clear_problem_context(&pctx);
if (ext2fs_fast_test_block_bitmap2(ctx->block_found_map, block)) {
+ if (ext2fs_has_feature_shared_blocks(ctx->fs->super) &&
+ !(ctx->options & E2F_OPT_UNSHARE_BLOCKS)) {
+ return;
+ }
if (!ctx->block_dup_map) {
pctx.errcode = e2fsck_allocate_block_bitmap(ctx->fs,
_("multiply claimed block map"),
* Handle processing the extended attribute blocks
*/
static int check_ext_attr(e2fsck_t ctx, struct problem_context *pctx,
- char *block_buf, blk64_t *ea_block_quota_blocks)
+ char *block_buf, struct ea_quota *ea_block_quota)
{
ext2_filsys fs = ctx->fs;
ext2_ino_t ino = pctx->ino;
struct ext2_ext_attr_header *header;
struct ext2_ext_attr_entry *first, *entry;
blk64_t quota_blocks = EXT2FS_C2B(fs, 1);
+ __u64 quota_inodes = 0;
region_t region = 0;
int failed_csum = 0;
+ ea_block_quota->blocks = 0;
+ ea_block_quota->inodes = 0;
+
blk = ext2fs_file_acl_block(fs, inode);
if (blk == 0)
return 0;
/* Have we seen this EA block before? */
if (ext2fs_fast_test_block_bitmap2(ctx->block_ea_map, blk)) {
- if (ctx->ea_block_quota)
- ea_refcount_fetch(ctx->ea_block_quota, blk,
- ea_block_quota_blocks);
- else
- *ea_block_quota_blocks = 0;
+ ea_block_quota->blocks = EXT2FS_C2B(fs, 1);
+ ea_block_quota->inodes = 0;
+
+ if (ctx->ea_block_quota_blocks) {
+ ea_refcount_fetch(ctx->ea_block_quota_blocks, blk,
+ "a_blocks);
+ if (quota_blocks)
+ ea_block_quota->blocks = quota_blocks;
+ }
+
+ if (ctx->ea_block_quota_inodes)
+ ea_refcount_fetch(ctx->ea_block_quota_inodes, blk,
+ &ea_block_quota->inodes);
if (ea_refcount_decrement(ctx->refcount, blk, 0) == 0)
return 1;
goto clear_extattr;
quota_blocks += entry_quota_blocks;
+ quota_inodes++;
}
entry = EXT2_EXT_ATTR_NEXT(entry);
return 0;
}
- *ea_block_quota_blocks = quota_blocks;
- if (quota_blocks) {
- if (!ctx->ea_block_quota) {
+ if (quota_blocks != EXT2FS_C2B(fs, 1)) {
+ if (!ctx->ea_block_quota_blocks) {
pctx->errcode = ea_refcount_create(0,
- &ctx->ea_block_quota);
+ &ctx->ea_block_quota_blocks);
if (pctx->errcode) {
pctx->num = 3;
+ goto refcount_fail;
+ }
+ }
+ ea_refcount_store(ctx->ea_block_quota_blocks, blk,
+ quota_blocks);
+ }
+
+ if (quota_inodes) {
+ if (!ctx->ea_block_quota_inodes) {
+ pctx->errcode = ea_refcount_create(0,
+ &ctx->ea_block_quota_inodes);
+ if (pctx->errcode) {
+ pctx->num = 4;
+refcount_fail:
fix_problem(ctx, PR_1_ALLOCATE_REFCOUNT, pctx);
ctx->flags |= E2F_FLAG_ABORT;
return 0;
}
}
- ea_refcount_store(ctx->ea_block_quota, blk, quota_blocks);
+
+ ea_refcount_store(ctx->ea_block_quota_inodes, blk,
+ quota_inodes);
}
+ ea_block_quota->blocks = quota_blocks;
+ ea_block_quota->inodes = quota_inodes;
+
inc_ea_inode_refs(ctx, pctx, first, end);
ea_refcount_store(ctx->refcount, blk, header->h_refcount - 1);
mark_block_used(ctx, blk);
(1U << (21 - ctx->fs->super->s_log_block_size))))
problem = PR_1_TOOBIG_DIR;
- if (is_leaf && problem == 0 && extent.e_len > 0 &&
- region_allocate(pb->region, extent.e_lblk, extent.e_len))
- problem = PR_1_EXTENT_COLLISION;
+ if (is_leaf && problem == 0 && extent.e_len > 0) {
+#if 0
+ printf("extent_region(ino=%u, expect=%llu, "
+ "lblk=%llu, len=%u)\n",
+ pb->ino, pb->next_lblock,
+ extent.e_lblk, extent.e_len);
+#endif
+ if (extent.e_lblk < pb->next_lblock)
+ problem = PR_1_EXTENT_COLLISION;
+ else if (extent.e_lblk + extent.e_len > pb->next_lblock)
+ pb->next_lblock = extent.e_lblk + extent.e_len;
+ }
/*
* Uninitialized blocks in a directory? Clear the flag and
* will reallocate the block; then we can try again.
*/
if (pb->ino != EXT2_RESIZE_INO &&
+ extent.e_pblk < ctx->fs->super->s_blocks_count &&
ext2fs_test_block_bitmap2(ctx->block_metadata_map,
extent.e_pblk)) {
next_try_repairs = 0;
fix_problem(ctx,
PR_1_CRITICAL_METADATA_COLLISION,
pctx);
- ctx->flags |= E2F_FLAG_RESTART_LATER;
+ if ((ctx->options & E2F_OPT_NO) == 0)
+ ctx->flags |= E2F_FLAG_RESTART_LATER;
}
pctx->errcode = ext2fs_extent_get(ehandle,
EXT2_EXTENT_DOWN, &extent);
memset(pb->eti.ext_info, 0, sizeof(pb->eti.ext_info));
pb->eti.ino = pb->ino;
- pb->region = region_create(0, info.max_lblk);
- if (!pb->region) {
- ext2fs_extent_free(ehandle);
- fix_problem(ctx, PR_1_EXTENT_ALLOC_REGION_ABORT, pctx);
- ctx->flags |= E2F_FLAG_ABORT;
- return;
- }
+ pb->next_lblock = 0;
eof_lblk = ((EXT2_I_SIZE(inode) + fs->blocksize - 1) >>
EXT2_BLOCK_SIZE_BITS(fs->super)) - 1;
"check_blocks_extents");
pctx->errcode = 0;
}
- region_free(pb->region);
- pb->region = NULL;
ext2fs_extent_free(ehandle);
/* Rebuild unless it's a dir and we're rehashing it */
* blocks used by that inode.
*/
static void check_blocks(e2fsck_t ctx, struct problem_context *pctx,
- char *block_buf, blk64_t ea_ibody_quota_blocks)
+ char *block_buf, const struct ea_quota *ea_ibody_quota)
{
ext2_filsys fs = ctx->fs;
struct process_block_struct pb;
int extent_fs;
int inlinedata_fs;
__u64 size;
- blk64_t ea_block_quota_blocks = 0;
+ struct ea_quota ea_block_quota;
pb.ino = ino;
- pb.num_blocks = EXT2FS_B2C(ctx->fs, ea_ibody_quota_blocks);
+ pb.num_blocks = EXT2FS_B2C(ctx->fs,
+ ea_ibody_quota ? ea_ibody_quota->blocks : 0);
pb.last_block = ~0;
pb.last_init_lblock = -1;
pb.last_db_block = -1;
extent_fs = ext2fs_has_feature_extents(ctx->fs->super);
inlinedata_fs = ext2fs_has_feature_inline_data(ctx->fs->super);
- if (check_ext_attr(ctx, pctx, block_buf, &ea_block_quota_blocks)) {
+ if (check_ext_attr(ctx, pctx, block_buf, &ea_block_quota)) {
if (ctx->flags & E2F_FLAG_SIGNAL_MASK)
goto out;
- pb.num_blocks += EXT2FS_B2C(ctx->fs, ea_block_quota_blocks);
+ pb.num_blocks += EXT2FS_B2C(ctx->fs, ea_block_quota.blocks);
}
if (inlinedata_fs && (inode->i_flags & EXT4_INLINE_DATA_FL))
}
if (ino != quota_type2inum(PRJQUOTA, fs->super) &&
- (ino == EXT2_ROOT_INO || ino >= EXT2_FIRST_INODE(ctx->fs->super))) {
+ (ino == EXT2_ROOT_INO || ino >= EXT2_FIRST_INODE(ctx->fs->super)) &&
+ !(inode->i_flags & EXT4_EA_INODE_FL)) {
quota_data_add(ctx->qctx, (struct ext2_inode_large *) inode,
ino,
pb.num_blocks * EXT2_CLUSTER_SIZE(fs->super));
quota_data_inodes(ctx->qctx, (struct ext2_inode_large *) inode,
- ino, +1);
+ ino, (ea_ibody_quota ?
+ ea_ibody_quota->inodes : 0) +
+ ea_block_quota.inodes + 1);
}
if (!ext2fs_has_feature_huge_file(fs->super) ||
size = EXT2_I_SIZE(inode);
if ((pb.last_init_lblock >= 0) &&
- /* allow allocated blocks to end of PAGE_SIZE */
+ /* if size is smaller than expected by the block count,
+ * allow allocated blocks to end of PAGE_SIZE.
+ * last_init_lblock is the last in-use block, so it is
+ * the minimum expected file size, but +1 because it is
+ * the base-zero block number and not the block count. */
(size < (__u64)pb.last_init_lblock * fs->blocksize) &&
- (pb.last_init_lblock / blkpg * blkpg != pb.last_init_lblock ||
- size < (__u64)(pb.last_init_lblock & ~(blkpg-1)) *
+ ((pb.last_init_lblock + 1) / blkpg * blkpg !=
+ (pb.last_init_lblock + 1) ||
+ size < (__u64)(pb.last_init_lblock & ~(blkpg - 1)) *
fs->blocksize))
bad_size = 3;
else if (!(extent_fs && (inode->i_flags & EXT4_EXTENTS_FL)) &&
*/
if (blockcnt < 0 &&
p->ino != EXT2_RESIZE_INO &&
+ blk < ctx->fs->super->s_blocks_count &&
ext2fs_test_block_bitmap2(ctx->block_metadata_map, blk)) {
pctx->blk = blk;
fix_problem(ctx, PR_1_CRITICAL_METADATA_COLLISION, pctx);
- ctx->flags |= E2F_FLAG_RESTART_LATER;
+ if ((ctx->options & E2F_OPT_NO) == 0)
+ ctx->flags |= E2F_FLAG_RESTART_LATER;
}
if (problem) {
}
/*
- * Thes subroutines short circuits ext2fs_get_blocks and
+ * These subroutines short-circuit ext2fs_get_blocks and
* ext2fs_check_directory; we use them since we already have the inode
* structure, so there's no point in letting the ext2fs library read
* the inode again.