rbd_assert(!need_exclusive_lock(img_req) ||
__rbd_is_lock_owner(rbd_dev));
- if (rbd_img_is_write(img_req)) {
- rbd_assert(!img_req->snapc);
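+ /*
+ * Child requests are issued internally (e.g. reads from the parent
+ * image) and are never writes, so they need neither a snapshot
+ * context nor the EOD check below.
+ */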
+ if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
+ rbd_assert(!rbd_img_is_write(img_req));
+ } else {
+ struct request *rq = blk_mq_rq_from_pdu(img_req);
+ u64 off = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
+ u64 len = blk_rq_bytes(rq);
+ u64 mapping_size;
+
down_read(&rbd_dev->header_rwsem);
- img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ mapping_size = rbd_dev->mapping.size;
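+ /* Tag writes with the current snapshot context. */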
+ if (rbd_img_is_write(img_req)) {
+ rbd_assert(!img_req->snapc);
+ img_req->snapc =
+ ceph_get_snap_context(rbd_dev->header.snapc);
+ }
up_read(&rbd_dev->header_rwsem);
+
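+ /*
+ * Fail requests that extend beyond the mapping size sampled
+ * above (i.e. beyond EOD) before any object requests are
+ * submitted.
+ */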
+ if (unlikely(off + len > mapping_size)) {
+ rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)",
+ off, len, mapping_size);
+ img_req->pending.result = -EIO;
+ return;
+ }
}
for_each_obj_request(img_req, obj_req) {

@@ ... @@ static void rbd_queue_workfn(struct work_struct *work)
struct request *rq = blk_mq_rq_from_pdu(img_request);
u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
u64 length = blk_rq_bytes(rq);
- u64 mapping_size;
int result;
/* Ignore/skip any zero-length requests */
if (!length) {
dout("%s: zero-length request\n", __func__);
result = 0;
goto err_img_request;
}

blk_mq_start_request(rq);
down_read(&rbd_dev->header_rwsem);
- mapping_size = rbd_dev->mapping.size;
rbd_img_capture_header(img_request);
up_read(&rbd_dev->header_rwsem);
- if (offset + length > mapping_size) {
- rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
- length, mapping_size);
- result = -EIO;
- goto err_img_request;
- }
-
dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
img_request, obj_op_name(op_type), offset, length);
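
Net effect: the mapping size is now sampled, and the beyond-EOD check
performed, in rbd_img_object_requests(), where the asserts guarantee
that any required exclusive lock is already held, instead of in
rbd_queue_workfn() before lock acquisition. Writes pick up their
snapshot context at the same point, under the same header_rwsem
critical section, so snapc and the mapping size are mutually
consistent.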