* if user copy or zero copy are enabled:
 * - UBLK_REFCOUNT_INIT references held from when the request is
 * dispatched to the server until UBLK_IO_COMMIT_AND_FETCH_REQ
- * - 1 for each inflight ublk_ch_{read,write}_iter() call
+ * - 1 for each inflight ublk_ch_{read,write}_iter() call not on task
* - 1 for each io_uring registered buffer not registered on task
* The I/O can only be completed once all references are dropped.
* User copy and buffer registration operations are only permitted
struct ublk_io *io;
unsigned data_len;
bool is_integrity;
+ bool on_daemon;
size_t buf_off;
u16 tag, q_id;
ssize_t ret;
return -EINVAL;
io = &ubq->ios[tag];
- req = __ublk_check_and_get_req(ub, q_id, tag, io);
- if (!req)
- return -EINVAL;
+ on_daemon = current == READ_ONCE(io->task);
+ if (on_daemon) {
+ /* On daemon, io can't be completed concurrently, so skip ref */
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ return -EINVAL;
+
+ req = io->req;
+ if (!ublk_rq_has_data(req))
+ return -EINVAL;
+ } else {
+ req = __ublk_check_and_get_req(ub, q_id, tag, io);
+ if (!req)
+ return -EINVAL;
+ }
if (is_integrity) {
struct blk_integrity *bi = &req->q->limits.integrity;
ret = ublk_copy_user_pages(req, buf_off, iter, dir);
out:
- ublk_put_req_ref(io, req);
+ if (!on_daemon)
+ ublk_put_req_ref(io, req);
return ret;
}