return;
bitmap_wait_behind_writes(mddev);
- if (!mddev->serialize_policy)
+ if (!test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
mddev_destroy_serial_pool(mddev, NULL);
mutex_lock(&mddev->bitmap_info.mutex);
mddev->bitmap_info.max_write_behind = backlog;
if (!backlog && mddev->serial_info_pool) {
/* serial_info_pool is not needed if backlog is zero */
- if (!mddev->serialize_policy)
+ if (!test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
mddev_destroy_serial_pool(mddev, NULL);
} else if (backlog && !mddev->serial_info_pool) {
/* serial_info_pool is needed since backlog is not zero */
rdev_for_each(temp, mddev) {
if (!rdev) {
- if (!mddev->serialize_policy ||
+ if (!test_bit(MD_SERIALIZE_POLICY,
+ &mddev->flags) ||
!rdev_need_serial(temp))
rdev_uninit_serial(temp);
else
if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1))
return sprintf(page, "n/a\n");
else
- return sprintf(page, "%d\n", mddev->serialize_policy);
+ return sprintf(page, "%d\n",
+ test_bit(MD_SERIALIZE_POLICY, &mddev->flags));
}
/*
- * Setting serialize_policy to true to enforce write IO is not reordered
+ * Setting MD_SERIALIZE_POLICY enforces that write IO is not reordered
* for raid1.
*/
static ssize_t
if (err)
return err;
- if (value == mddev->serialize_policy)
+ if (value == test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
return len;
err = mddev_suspend_and_lock(mddev);
goto unlock;
}
- if (value)
+ if (value) {
mddev_create_serial_pool(mddev, NULL);
- else
+ set_bit(MD_SERIALIZE_POLICY, &mddev->flags);
+ } else {
mddev_destroy_serial_pool(mddev, NULL);
- mddev->serialize_policy = value;
+ clear_bit(MD_SERIALIZE_POLICY, &mddev->flags);
+ }
unlock:
mddev_unlock_and_resume(mddev);
return err ?: len;
md_update_sb(mddev, 1);
}
/* disable policy to guarantee rdevs free resources for serialization */
- mddev->serialize_policy = 0;
+ clear_bit(MD_SERIALIZE_POLICY, &mddev->flags);
mddev_destroy_serial_pool(mddev, NULL);
}
* @MD_DELETED: This device is being deleted
* @MD_HAS_SUPERBLOCK: There is persistence sb in member disks.
* @MD_FAILLAST_DEV: Allow last rdev to be removed.
+ * @MD_SERIALIZE_POLICY: Enforce that write IO is not reordered; used only by raid1.
*
* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
*/
MD_DELETED,
MD_HAS_SUPERBLOCK,
MD_FAILLAST_DEV,
+ MD_SERIALIZE_POLICY,
};
enum mddev_sb_flags {
/* The sequence number for sync thread */
atomic_t sync_seq;
-
- bool serialize_policy:1;
};
enum recovery_flags {
(1L << MD_FAILFAST_SUPPORTED) |\
(1L << MD_HAS_PPL) | \
(1L << MD_HAS_MULTIPLE_PPLS) | \
- (1L << MD_FAILLAST_DEV))
+ (1L << MD_FAILLAST_DEV) | \
+ (1L << MD_SERIALIZE_POLICY))
/*
* inform the user of the raid configuration
call_bio_endio(r1_bio);
}
}
- } else if (rdev->mddev->serialize_policy)
+ } else if (test_bit(MD_SERIALIZE_POLICY, &rdev->mddev->flags))
remove_serial(rdev, lo, hi);
if (r1_bio->bios[mirror] == NULL)
rdev_dec_pending(rdev, conf->mddev);
mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
&mddev->bio_set);
- if (mddev->serialize_policy)
+ if (test_bit(MD_SERIALIZE_POLICY, &mddev->flags))
wait_for_serialization(rdev, r1_bio);
}
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_FAILFAST_SUPPORTED) | \
- (1L << MD_FAILLAST_DEV))
+ (1L << MD_FAILLAST_DEV) | \
+ (1L << MD_SERIALIZE_POLICY))
#define cpu_to_group(cpu) cpu_to_node(cpu)