va_end(args);
}
+/*
+ * Check that a chunk item's stripe counts are consistent with its RAID
+ * profile.
+ *
+ * @profile:	 the profile bits of the chunk type, i.e.
+ *		 (type & BTRFS_BLOCK_GROUP_PROFILE_MASK); 0 means SINGLE
+ * @num_stripes: num_stripes field read from the chunk item
+ * @sub_stripes: sub_stripes field read from the chunk item
+ *
+ * Return: true if the counts are valid for @profile, false otherwise.
+ */
+static bool valid_stripe_count(u64 profile, u16 num_stripes, u16 sub_stripes)
+{
+	switch (profile) {
+	case BTRFS_BLOCK_GROUP_RAID0:
+		/* RAID0 accepts any non-zero stripe count (checked by caller). */
+		return true;
+	case BTRFS_BLOCK_GROUP_RAID10:
+		return sub_stripes == btrfs_raid_array[BTRFS_RAID_RAID10].sub_stripes;
+	case BTRFS_BLOCK_GROUP_RAID1:
+		return num_stripes == btrfs_raid_array[BTRFS_RAID_RAID1].devs_min;
+	case BTRFS_BLOCK_GROUP_RAID1C3:
+		return num_stripes == btrfs_raid_array[BTRFS_RAID_RAID1C3].devs_min;
+	case BTRFS_BLOCK_GROUP_RAID1C4:
+		return num_stripes == btrfs_raid_array[BTRFS_RAID_RAID1C4].devs_min;
+	case BTRFS_BLOCK_GROUP_RAID5:
+		return num_stripes >= btrfs_raid_array[BTRFS_RAID_RAID5].devs_min;
+	case BTRFS_BLOCK_GROUP_RAID6:
+		return num_stripes >= btrfs_raid_array[BTRFS_RAID_RAID6].devs_min;
+	case BTRFS_BLOCK_GROUP_DUP:
+		return num_stripes == btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes;
+	case 0: /* SINGLE */
+		return num_stripes == btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes;
+	default:
+		/*
+		 * Unknown or multi-bit profile.  The caller is expected to
+		 * have rejected invalid profile bits already, but the tree
+		 * checker operates on untrusted on-disk data, so fail the
+		 * validation instead of crashing the kernel with BUG().
+		 */
+		return false;
+	}
+}
+
/*
* The common chunk check which could also work on super block sys chunk array.
*
u64 features;
u32 chunk_sector_size;
bool mixed = false;
+ bool remapped;
int raid_index;
int nparity;
int ncopies;
raid_index = btrfs_bg_flags_to_raid_index(type);
ncopies = btrfs_raid_array[raid_index].ncopies;
nparity = btrfs_raid_array[raid_index].nparity;
+ remapped = (type & BTRFS_BLOCK_GROUP_REMAPPED);
- if (unlikely(!num_stripes)) {
+ if (unlikely(!remapped && !num_stripes)) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes, have %u", num_stripes);
return -EUCLEAN;
}
- if (unlikely(num_stripes < ncopies)) {
+ if (unlikely(num_stripes != 0 && num_stripes < ncopies)) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid chunk num_stripes < ncopies, have %u < %d",
num_stripes, ncopies);
}
}
- if (unlikely((type & BTRFS_BLOCK_GROUP_RAID10 &&
- sub_stripes != btrfs_raid_array[BTRFS_RAID_RAID10].sub_stripes) ||
- (type & BTRFS_BLOCK_GROUP_RAID1 &&
- num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1].devs_min) ||
- (type & BTRFS_BLOCK_GROUP_RAID1C3 &&
- num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C3].devs_min) ||
- (type & BTRFS_BLOCK_GROUP_RAID1C4 &&
- num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C4].devs_min) ||
- (type & BTRFS_BLOCK_GROUP_RAID5 &&
- num_stripes < btrfs_raid_array[BTRFS_RAID_RAID5].devs_min) ||
- (type & BTRFS_BLOCK_GROUP_RAID6 &&
- num_stripes < btrfs_raid_array[BTRFS_RAID_RAID6].devs_min) ||
- (type & BTRFS_BLOCK_GROUP_DUP &&
- num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
- ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
- num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
+ if (!remapped &&
+ !valid_stripe_count(type & BTRFS_BLOCK_GROUP_PROFILE_MASK,
+ num_stripes, sub_stripes)) {
chunk_err(fs_info, leaf, chunk, logical,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
struct btrfs_fs_info *fs_info = leaf->fs_info;
int num_stripes;
- if (unlikely(btrfs_item_size(leaf, slot) < sizeof(struct btrfs_chunk))) {
+ if (unlikely(btrfs_item_size(leaf, slot) < offsetof(struct btrfs_chunk, stripe))) {
chunk_err(fs_info, leaf, chunk, key->offset,
"invalid chunk item size: have %u expect [%zu, %u)",
btrfs_item_size(leaf, slot),
- sizeof(struct btrfs_chunk),
+ offsetof(struct btrfs_chunk, stripe),
BTRFS_LEAF_DATA_SIZE(fs_info));
return -EUCLEAN;
}