/* last sector in the request */
sector_t last_sector;
+ /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
+ bool do_flush;
+
/*
* bitmap to track stripe sectors that have been added to stripes
* add one to account for unaligned requests
*/
- DECLARE_BITMAP(sectors_to_do, RAID5_MAX_REQ_STRIPES + 1);
-
- /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
- bool do_flush;
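+ /* length in longs is set up by raid5_create_ctx_pool() */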
+ unsigned long sectors_to_do[];
};
/*
bi->bi_next = NULL;
ctx = mempool_alloc(conf->ctx_pool, GFP_NOIO);
- memset(ctx, 0, sizeof(*ctx));
+ memset(ctx, 0, conf->ctx_size);
ctx->first_sector = logical_sector;
ctx->last_sector = bio_end_sector(bi);
/*
return 0;
}
+static int raid5_create_ctx_pool(struct r5conf *conf)
+{
+ struct stripe_request_ctx *ctx;
+ int size;
+
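+ /*
+ * Size the bitmap for the largest request the queue can issue; add one
+ * stripe to account for unaligned requests. dm-raid has no md
+ * gendisk/queue to read limits from, so fall back to the static cap.
+ */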
+ if (mddev_is_dm(conf->mddev))
+ size = BITS_TO_LONGS(RAID5_MAX_REQ_STRIPES + 1);
+ else
+ size = BITS_TO_LONGS(
+ (queue_max_hw_sectors(conf->mddev->gendisk->queue) >>
+ RAID5_STRIPE_SHIFT(conf)) + 1);
+
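+ /* ctx_size is also used to zero each ctx allocated from the pool */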
+ conf->ctx_size = struct_size(ctx, sectors_to_do, size);
+ conf->ctx_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS,
+ conf->ctx_size);
+
+ return conf->ctx_pool ? 0 : -ENOMEM;
+}
+
static int raid5_set_limits(struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
* Limit the max sectors based on this.
*/
lim.max_hw_sectors = RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf);
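+ /* but not below io_opt, so a full-stripe write need not be split */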
+ if ((lim.max_hw_sectors << SECTOR_SHIFT) < lim.io_opt)
+ lim.max_hw_sectors = lim.io_opt >> SECTOR_SHIFT;
/* No restrictions on the number of segments in the request */
lim.max_segments = USHRT_MAX;
goto abort;
}
- conf->ctx_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS,
- sizeof(struct stripe_request_ctx));
- if (!conf->ctx_pool) {
- ret = -ENOMEM;
+ ret = raid5_create_ctx_pool(conf);
+ if (ret)
goto abort;
- }
ret = log_init(conf, journal_dev, raid5_has_ppl(conf));
if (ret)