bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
spinlock_t cancel_lock;
struct ublk_device *dev;
- struct ublk_io ios[];
+ struct ublk_io ios[] __counted_by(q_depth);
};
struct ublk_device {
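For context: __counted_by() ties the flexible array to the q_depth member, so CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can bounds-check ios[] accesses at run time. A minimal standalone sketch of the pattern (hypothetical struct and field names, not the driver code):

#include <stdio.h>
#include <stdlib.h>

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* Fall back to a no-op on toolchains without counted_by (pre-GCC 15 / Clang 18). */
#if __has_attribute(counted_by)
#define __counted_by(m) __attribute__((counted_by(m)))
#else
#define __counted_by(m)
#endif

struct io {
	int res;
};

struct queue {
	unsigned short q_depth;
	struct io ios[] __counted_by(q_depth); /* bounds tied to q_depth */
};

int main(void)
{
	unsigned short depth = 4;
	/* Open-coded equivalent of struct_size(q, ios, depth). */
	struct queue *q = calloc(1, sizeof(*q) + depth * sizeof(q->ios[0]));

	if (!q)
		return 1;
	q->q_depth = depth; /* set the count before indexing ios[] */
	for (int i = 0; i < depth; i++)
		q->ios[i].res = i;
	printf("depth %u, last res %d\n", q->q_depth, q->ios[depth - 1].res);
	free(q);
	return 0;
}

With the attribute active, an out-of-bounds index such as q->ios[depth] trips the sanitizer instead of silently corrupting memory.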
static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
int depth = ub->dev_info.queue_depth;
- int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
struct ublk_queue *ubq;
struct page *page;
int numa_node;
numa_node = ublk_get_queue_numa_node(ub, q_id);
/* Allocate the queue structure on the queue's local NUMA node */
- ubq = kvzalloc_node(ubq_size, GFP_KERNEL, numa_node);
+ ubq = kvzalloc_node(struct_size(ubq, ios, depth), GFP_KERNEL,
+ numa_node);
if (!ubq)
return -ENOMEM;
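struct_size() from <linux/overflow.h> computes sizeof(*ubq) + depth * sizeof(ubq->ios[0]) but saturates to SIZE_MAX on overflow, so a corrupted queue_depth makes the allocation fail cleanly instead of under-allocating. A rough userspace analogue using the GCC/Clang overflow builtins (hypothetical helper, not the kernel macro):

#include <stdint.h>
#include <stdio.h>

struct io {
	int res;
};

struct queue {
	unsigned short q_depth;
	struct io ios[];
};

/*
 * Rough analogue of struct_size(): header plus count * element size,
 * saturating to SIZE_MAX if either step overflows.  The kernel macro
 * saturates the same way, so the allocator fails instead of returning
 * a too-small buffer.
 */
static size_t queue_size(size_t count)
{
	size_t bytes;

	if (__builtin_mul_overflow(count, sizeof(struct io), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct queue), &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	printf("depth 4    -> %zu bytes\n", queue_size(4));
	printf("depth huge -> %zu bytes\n", queue_size(SIZE_MAX / 2));
	return 0;
}

kvzalloc_node() then places the (potentially large) zeroed allocation on the queue's NUMA node, falling back to vmalloc when contiguous pages are unavailable.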