static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- struct io_uring_cmd *ioucmd, unsigned int flags)
+ struct io_uring_cmd *ioucmd, unsigned int flags,
+ unsigned int iou_issue_flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
return -EINVAL;
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
- rq_data_dir(req), &iter, ioucmd);
+ rq_data_dir(req), &iter, ioucmd,
+ iou_issue_flags);
if (ret < 0)
goto out;
ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
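One piece of context the trimmed hunk leaves out: this import only runs for io_uring passthrough commands that use a registered buffer; ordinary user pointers still go through blk_rq_map_user(). As a sketch reconstructed from the nvme driver rather than taken from this diff, the surrounding branch looks like:

	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		/* Registered (fixed) buffer: resolve it through io_uring */
		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
				rq_data_dir(req), &iter, ioucmd,
				iou_issue_flags);
		if (ret < 0)
			goto out;
		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
	} else {
		/* Plain user pointer: pin the pages the usual way */
		ret = blk_rq_map_user(q, req, NULL,
				nvme_to_user_ptr(ubuffer), bufflen,
				GFP_KERNEL);
	}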
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, NULL, flags);
+ meta_len, NULL, flags, 0);
if (ret)
return ret;
}
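In nvme_submit_user_cmd() there is no io_uring submission context at all, which is why the new argument is a literal 0 here. Since ioucmd is NULL on this path, the fixed-buffer branch sketched above is never taken and the value is never read:

	/* Synchronous ioctl path: ioucmd == NULL, so the import helper is
	 * never reached and 0 is a safe placeholder for the issue flags. */
	ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
			meta_len, NULL, flags, 0);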
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, ioucmd, vec);
+ d.metadata_len, ioucmd, vec, issue_flags);
if (ret)
return ret;
}
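The issue_flags forwarded here are the ones io_uring hands to the driver's ->uring_cmd hook; the driver must pass them through verbatim rather than hard-code a value, because they describe the submission context (for instance, which locks the core already holds). A minimal, hedged sketch of that wiring, with made-up my_* names rather than the actual nvme code:

struct my_cmd {			/* illustrative SQE payload layout */
	__u64 addr;
	__u32 len;
};

static int my_drv_uring_cmd(struct io_uring_cmd *ioucmd,
			    unsigned int issue_flags)
{
	const struct my_cmd *c = io_uring_sqe_cmd(ioucmd->sqe);
	struct iov_iter iter;
	int ret;

	/* Forward issue_flags untouched so the core can resolve the
	 * registered buffer under the correct locking assumptions. */
	ret = io_uring_cmd_import_fixed(c->addr, c->len, WRITE, &iter,
					ioucmd, issue_flags);
	if (ret < 0)
		return ret;
	/* ... hand &iter to the hardware-specific mapping code ... */
	return -EIOCBQUEUED;
}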
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd);
+ struct iov_iter *iter, void *ioucmd,
+ unsigned int issue_flags);
/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd,
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
			unsigned issue_flags);
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
+ struct iov_iter *iter, void *ioucmd,
+ unsigned int issue_flags)
{
return -EOPNOTSUPP;
}
#endif
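The !CONFIG_IO_URING stub returns -EOPNOTSUPP so that callers need no #ifdef of their own: with io_uring compiled out, a driver can still call the helper unconditionally and simply propagate the error. An illustrative call site, not taken from this patch:

	/* Builds with and without CONFIG_IO_URING; the stub above turns
	 * this into a plain -EOPNOTSUPP failure when io_uring is absent. */
	ret = io_uring_cmd_import_fixed(ubuf, len, rw, &iter, ioucmd,
					issue_flags);
	if (ret < 0)
		return ret;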
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
+ struct iov_iter *iter, void *ioucmd,
+ unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_rsrc_node *node = req->buf_node;

	/* Must have had rsrc_node assigned at prep time */
	if (node)
		return io_import_fixed(rw, iter, node->buf, ubuf, len);

	return -EFAULT;
}
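For completeness, this is what exercises the whole path from userspace: an NVMe uring_cmd with IORING_URING_CMD_FIXED set, targeting a buffer previously registered with the ring. A hedged sketch using liburing; the function name is ours, error handling and the NVMe addressing fields are pared down, and passthrough SQEs need a ring created with IORING_SETUP_SQE128:

#include <liburing.h>
#include <linux/nvme_ioctl.h>
#include <stdint.h>
#include <string.h>

/* Queue one passthrough read that uses registered buffer 0. Assumes the
 * ring was set up with IORING_SETUP_SQE128, buf was registered via
 * io_uring_register_buffers(), and fd is an NVMe char dev (e.g. /dev/ng0n1).
 */
static int queue_fixed_passthru_read(struct io_uring *ring, int fd,
				     void *buf, __u32 len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct nvme_uring_cmd *cmd;

	if (!sqe)
		return -1;
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
	sqe->cmd_op = NVME_URING_CMD_IO;
	/* This flag is what routes the kernel through
	 * io_uring_cmd_import_fixed() instead of pinning user pages. */
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
	sqe->buf_index = 0;		/* registered buffer table index */

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = 0x02;		/* NVMe read */
	cmd->addr = (__u64)(uintptr_t)buf;
	cmd->data_len = len;
	/* nsid and LBA/count (cdw10..12) omitted for brevity */
	return io_uring_submit(ring);
}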