static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
+ u16 q_id, u16 tag, struct ublk_io *io);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *
if (!ublk_dev_support_zero_copy(ub))
return -EINVAL;
- req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
+ req = __ublk_check_and_get_req(ub, q_id, tag, io);
if (!req)
return -EINVAL;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io)
{
struct request *req;
if (!ublk_rq_has_data(req))
goto fail_put;
- if (offset > blk_rq_bytes(req))
- goto fail_put;
-
return req;
fail_put:
ublk_put_req_ref(io, req);
return NULL;
io = &ubq->ios[tag];
- req = __ublk_check_and_get_req(ub, q_id, tag, io, buf_off);
+ req = __ublk_check_and_get_req(ub, q_id, tag, io);
if (!req)
return -EINVAL;
+ if (buf_off > blk_rq_bytes(req)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (!ublk_check_ubuf_dir(req, dir)) {
ret = -EACCES;
goto out;