free(q->ios[i].buf_addr);
}
-static int ublk_queue_init(struct ublk_queue *q)
+static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
{
struct ublk_dev *dev = q->dev;
int depth = dev->dev_info.queue_depth;
if (dev->dev_info.flags & UBLK_F_AUTO_BUF_REG)
q->state |= UBLKSRV_AUTO_BUF_REG;
}
+ q->state |= extra_flags;
cmd_buf_size = ublk_queue_cmd_buf_sz(q);
off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
close(dev->fds[0]);
}
-static void ublk_set_auto_buf_reg(struct io_uring_sqe *sqe,
- unsigned short buf_idx,
- unsigned char flags)
+static void ublk_set_auto_buf_reg(const struct ublk_queue *q,
+ struct io_uring_sqe *sqe,
+ unsigned short tag)
{
- struct ublk_auto_buf_reg buf = {
- .index = buf_idx,
- .flags = flags,
- };
+ struct ublk_auto_buf_reg buf = {};
+
+ if (q->tgt_ops->buf_index)
+ buf.index = q->tgt_ops->buf_index(q, tag);
+ else
+ buf.index = tag;
+
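+ /* with fallback enabled, a failed auto registration delivers the io with UBLK_IO_F_NEED_REG_BUF set instead of failing it */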
+ if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK)
+ buf.flags = UBLK_AUTO_BUF_REG_FALLBACK;
sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf);
}
cmd->addr = 0;
if (q->state & UBLKSRV_AUTO_BUF_REG)
- ublk_set_auto_buf_reg(sqe[0], tag, 0);
+ ublk_set_auto_buf_reg(q, sqe[0], tag);
user_data = build_user_data(tag, _IOC_NR(cmd_op), 0, 0);
io_uring_sqe_set_data64(sqe[0], user_data);
struct ublk_queue *q;
sem_t *queue_sem;
cpu_set_t *affinity;
+ unsigned char auto_zc_fallback;
};
static void *ublk_io_handler_fn(void *data)
struct ublk_queue_info *info = data;
struct ublk_queue *q = info->q;
int dev_id = q->dev->dev_info.dev_id;
+ unsigned extra_flags = 0;
int ret;
- ret = ublk_queue_init(q);
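+ /* per-queue state flag derived from the --auto_zc_fallback command line option */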
+ if (info->auto_zc_fallback)
+ extra_flags = UBLKSRV_AUTO_BUF_REG_FALLBACK;
+
+ ret = ublk_queue_init(q, extra_flags);
if (ret) {
ublk_err("ublk dev %d queue %d init queue failed\n",
dev_id, q->q_id);
qinfo[i].q = &dev->q[i];
qinfo[i].queue_sem = &queue_sem;
qinfo[i].affinity = &affinity_buf[i];
+ qinfo[i].auto_zc_fallback = ctx->auto_zc_fallback;
pthread_create(&dev->q[i].thread, NULL,
ublk_io_handler_fn,
&qinfo[i]);
printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n",
exe, recovery ? "recover" : "add");
- printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--debug_mask mask] [-r 0|1 ] [-g]\n");
+ printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n");
printf("\t[-e 0|1 ] [-i 0|1]\n");
printf("\t[target options] [backfile1] [backfile2] ...\n");
printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");
{ "recovery_fail_io", 1, NULL, 'e'},
{ "recovery_reissue", 1, NULL, 'i'},
{ "get_data", 1, NULL, 'g'},
- { "auto_zc", 0, NULL, 0},
+ { "auto_zc", 0, NULL, 0 },
+ { "auto_zc_fallback", 0, NULL, 0 },
{ 0, 0, 0, 0 }
};
const struct ublk_tgt_ops *ops = NULL;
ctx.fg = 1;
if (!strcmp(longopts[option_idx].name, "auto_zc"))
ctx.flags |= UBLK_F_AUTO_BUF_REG;
+ if (!strcmp(longopts[option_idx].name, "auto_zc_fallback"))
+ ctx.auto_zc_fallback = 1;
break;
case '?':
/*
}
}
+ /* auto_zc_fallback depends on F_AUTO_BUF_REG & F_SUPPORT_ZERO_COPY */
+ if (ctx.auto_zc_fallback &&
+ !((ctx.flags & UBLK_F_AUTO_BUF_REG) &&
+ (ctx.flags & UBLK_F_SUPPORT_ZERO_COPY))) {
+ ublk_err("%s: auto_zc_fallback is set but neither "
+ "F_AUTO_BUF_REG nor F_SUPPORT_ZERO_COPY is enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
i = optind;
while (i < argc && ctx.nr_files < MAX_BACK_FILES) {
ctx.files[ctx.nr_files++] = argv[i++];
unsigned int all:1;
unsigned int fg:1;
unsigned int recovery:1;
+ unsigned int auto_zc_fallback:1;
int _evtfd;
int _shmid;
*/
void (*parse_cmd_line)(struct dev_ctx *ctx, int argc, char *argv[]);
void (*usage)(const struct ublk_tgt_ops *ops);
+
+ /* return buffer index for UBLK_F_AUTO_BUF_REG */
+ unsigned short (*buf_index)(const struct ublk_queue *, int tag);
};
struct ublk_tgt {
#define UBLKSRV_NO_BUF (1U << 2)
#define UBLKSRV_ZC (1U << 3)
#define UBLKSRV_AUTO_BUF_REG (1U << 4)
+#define UBLKSRV_AUTO_BUF_REG_FALLBACK (1U << 5)
unsigned state;
pid_t tid;
pthread_t thread;
extern unsigned int ublk_dbg_mask;
extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag);
+
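+/* true when auto buffer registration failed and the server must fall back to handling the buffer itself */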
+static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod)
+{
+ return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF);
+}
+
static inline int is_target_io(__u64 user_data)
{
return (user_data & (1ULL << 63)) != 0;
unsigned zc = ublk_queue_use_zc(q);
int queued;
- if (auto_zc)
+ if (auto_zc && !ublk_io_auto_zc_fallback(iod))
queued = null_queue_auto_zc_io(q, tag);
else if (zc)
queued = null_queue_zc_io(q, tag);
return 0;
}
+/*
+ * Return an invalid buffer index to trigger auto buffer register failure,
+ * so that the UBLK_IO_RES_NEED_REG_BUF handling path is covered.
+ */
+static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag)
+{
+ if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK)
+ return (unsigned short)-1;
+ return tag;
+}
+
const struct ublk_tgt_ops null_tgt_ops = {
.name = "null",
.init_tgt = ublk_null_tgt_init,
.queue_io = ublk_null_queue_io,
.tgt_io_done = ublk_null_io_done,
+ .buf_index = ublk_null_buf_index,
};