blk_mq_end_request(rq, BLK_STS_IOERR);
}
+static void ublk_auto_buf_reg_fallback(struct request *req, struct ublk_io *io)
+{
+ const struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+ refcount_set(&data->ref, 1);
+}
+
static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
unsigned int issue_flags)
{
ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
pdu->buf.index, issue_flags);
if (ret) {
+ if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(req, io);
+ return true;
+ }
blk_mq_end_request(req, BLK_STS_IOERR);
return false;
}
if (pdu->buf.reserved0 || pdu->buf.reserved1)
return -EINVAL;
+ if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
return 0;
}
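
On the userspace side, this validation means every field of the registration data has to be initialized deliberately. A minimal sketch of a setup that passes the check, assuming a server that opts into the fallback (the index value is illustrative):

	struct ublk_auto_buf_reg reg = {
		.index = 0,	/* fixed-buffer slot for the request buffer */
		/* only bits within UBLK_AUTO_BUF_REG_F_MASK are accepted */
		.flags = UBLK_AUTO_BUF_REG_FALLBACK,
		/* .reserved0 and .reserved1 must stay zero, or -EINVAL */
	};
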
*
* - all reserved fields in `ublk_auto_buf_reg` need to be zeroed
*
+ * - pass flags from `ublk_auto_buf_reg.flags` if needed
+ *
* This way avoids the extra cost of two uring_cmds, and also simplifies the
* backend implementation; for example, the dependency on IO_REGISTER_IO_BUF
* and IO_UNREGISTER_IO_BUF becomes unnecessary.
+ *
+ * If wrong data or flags are provided, both IO_FETCH_REQ and
+ * IO_COMMIT_AND_FETCH_REQ fail; for the latter, the ublk IO request
+ * won't be completed until a new IO_COMMIT_AND_FETCH_REQ command is
+ * issued successfully.
*/
#define UBLK_F_AUTO_BUF_REG (1ULL << 11)
#define UBLK_IO_F_FUA (1U << 13)
#define UBLK_IO_F_NOUNMAP (1U << 15)
#define UBLK_IO_F_SWAP (1U << 16)
+/*
+ * For UBLK_F_AUTO_BUF_REG & UBLK_AUTO_BUF_REG_FALLBACK only.
+ *
+ * This flag is set if auto buffer registration fails and the ublk server
+ * has passed UBLK_AUTO_BUF_REG_FALLBACK; when this flag is observed, the
+ * ublk server needs to register the buffer manually in order to handle the
+ * delivered IO command.
+ *
+ * The ublk server has to check this flag if UBLK_AUTO_BUF_REG_FALLBACK is
+ * passed in.
+ */
+#define UBLK_IO_F_NEED_REG_BUF (1U << 17)
/*
* io cmd is described by this structure, and stored in share memory, indexed
return iod->op_flags >> 8;
}
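
With the fallback enabled, the server has to inspect op_flags on every delivered IO before touching the buffer. A rough sketch of that check; the handler and the manual-registration helper are illustrative names, not part of the uapi:

	static void handle_io(const struct ublksrv_io_desc *iod, int tag)
	{
		if (iod->op_flags & UBLK_IO_F_NEED_REG_BUF) {
			/*
			 * Automatic registration failed in the kernel, so the
			 * buffer has to be registered manually (e.g. via
			 * IO_REGISTER_IO_BUF) before handling this IO.
			 */
			register_io_buf(tag);
		}
		/* ... handle the IO command as usual ... */
	}
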
+/*
+ * If this flag is set, fall back by completing the uring_cmd and setting
+ * `UBLK_IO_F_NEED_REG_BUF` in case of auto buffer registration failure;
+ * otherwise the client ublk request fails silently.
+ *
+ * If the ublk server passes this flag, it has to check whether
+ * UBLK_IO_F_NEED_REG_BUF is set in `ublksrv_io_desc.op_flags`. If it is set,
+ * the ublk server needs to register the io buffer manually for handling the
+ * IO command.
+ */
+#define UBLK_AUTO_BUF_REG_FALLBACK (1 << 0)
+#define UBLK_AUTO_BUF_REG_F_MASK UBLK_AUTO_BUF_REG_FALLBACK
+
struct ublk_auto_buf_reg {
/* index for registering the delivered request buffer */
__u16 index;
- __u16 reserved0;
+ __u8 flags;
+ __u8 reserved0;
/*
* io_ring FD can be passed via the reserve field in future for
* uring_cmd's sqe->addr:
*
* - bit0 ~ bit15: buffer index
+ * - bit16 ~ bit23: flags
* - bit24 ~ bit31: reserved0
* - bit32 ~ bit63: reserved1
*/
{
struct ublk_auto_buf_reg reg = {
.index = sqe_addr & 0xffff,
- .reserved0 = (sqe_addr >> 16) & 0xffff,
+ .flags = (sqe_addr >> 16) & 0xff,
+ .reserved0 = (sqe_addr >> 24) & 0xff,
.reserved1 = sqe_addr >> 32,
};
static inline __u64
ublk_auto_buf_reg_to_sqe_addr(const struct ublk_auto_buf_reg *buf)
{
- __u64 addr = buf->index | (__u64)buf->reserved0 << 16 |
+ __u64 addr = buf->index | (__u64)buf->flags << 16 |
+ (__u64)buf->reserved0 << 24 |
(__u64)buf->reserved1 << 32;
return addr;
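
Putting it together, a server enabling the fallback encodes the flag into the uring_cmd's sqe->addr via the helper above; a sketch, where the sqe setup and buf_idx are assumed to come from the server's existing submission path:

	struct ublk_auto_buf_reg reg = {
		.index = buf_idx,
		.flags = UBLK_AUTO_BUF_REG_FALLBACK,
	};

	sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&reg);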