ublk_cancel_dev(ub);
}
+/*
+ * Clear stale cancel state on a single io.
+ *
+ * Drops UBLK_IO_FLAG_CANCELED from @io->flags under @ubq->cancel_lock,
+ * matching the locking previously used by the per-queue loop in
+ * ublk_queue_reset_io_flags().
+ */
+static void ublk_reset_io_flags(struct ublk_queue *ubq, struct ublk_io *io)
+{
+ /* UBLK_IO_FLAG_CANCELED can be cleared now */
+ spin_lock(&ubq->cancel_lock);
+ io->flags &= ~UBLK_IO_FLAG_CANCELED;
+ spin_unlock(&ubq->cancel_lock);
+}
+
/* reset per-queue io flags */
static void ublk_queue_reset_io_flags(struct ublk_queue *ubq)
{
- int j;
-
- /* UBLK_IO_FLAG_CANCELED can be cleared now */
spin_lock(&ubq->cancel_lock);
- for (j = 0; j < ubq->q_depth; j++)
- ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+ /*
+ * Per-io UBLK_IO_FLAG_CANCELED clearing is no longer done here;
+ * ublk_reset_io_flags() now clears it per io instead. Only the
+ * queue-level state is reset below.
+ */
ubq->canceling = false;
spin_unlock(&ubq->cancel_lock);
ubq->fail_io = false;
}
/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, u16 q_id)
+static void ublk_mark_io_ready(struct ublk_device *ub, u16 q_id,
+ struct ublk_io *io)
__must_hold(&ub->mutex)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
ub->unprivileged_daemons = true;
ubq->nr_io_ready++;
+ ublk_reset_io_flags(ubq, io);
/* Check if this specific queue is now fully ready */
if (ublk_queue_ready(ubq)) {
if (!ret)
ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
if (!ret)
- ublk_mark_io_ready(ub, q_id);
+ ublk_mark_io_ready(ub, q_id, io);
mutex_unlock(&ub->mutex);
return ret;
}
ublk_io_unlock(io);
if (!ret)
- ublk_mark_io_ready(data->ub, ubq->q_id);
+ ublk_mark_io_ready(data->ub, ubq->q_id, io);
return ret;
}