if (q->io_cmd_buf)
munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
- for (i = 0; i < nr_ios; i++)
+ for (i = 0; i < nr_ios; i++) {
free(q->ios[i].buf_addr);
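+ /* integrity_buf is NULL unless integrity was enabled, so free() is safe */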
+ free(q->ios[i].integrity_buf);
+ }
}
static void ublk_thread_deinit(struct ublk_thread *t)
}
}
-static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags)
+static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags,
+ __u8 metadata_size)
{
struct ublk_dev *dev = q->dev;
int depth = dev->dev_info.queue_depth;
int i;
- int cmd_buf_size, io_buf_size;
+ int cmd_buf_size, io_buf_size, integrity_size;
unsigned long off;
q->tgt_ops = dev->tgt.ops;
q->q_depth = depth;
q->flags = dev->dev_info.flags;
q->flags |= extra_flags;
+ q->metadata_size = metadata_size;
/* Cache fd in queue for fast path access */
q->ublk_fd = dev->fds[0];
}
io_buf_size = dev->dev_info.max_io_buf_bytes;
+ integrity_size = ublk_integrity_len(q, io_buf_size);
for (i = 0; i < q->q_depth; i++) {
q->ios[i].buf_addr = NULL;
q->ios[i].flags = UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_FREE;
q->ios[i].tag = i;
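+ /* stage per-io integrity metadata; sized for a max_io_buf_bytes transfer */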
+ if (integrity_size) {
+ q->ios[i].integrity_buf = malloc(integrity_size);
+ if (!q->ios[i].integrity_buf) {
+ ublk_err("ublk dev %d queue %d io %d malloc(%d) failed: %m\n",
+ dev->dev_info.dev_id, q->q_id, i,
+ integrity_size);
+ goto fail;
+ }
+ }
+
if (ublk_queue_no_buf(q))
continue;
__u8 ublk_op = ublksrv_get_op(iod);
__u32 len = iod->nr_sectors << 9;
void *addr = io->buf_addr;
+ ssize_t copied; /* shared by the data copy loop and the integrity copy */
if (ublk_op != match_ublk_op)
return;
while (len) {
__u32 copy_len = min(len, UBLK_USER_COPY_LEN);
- ssize_t copied;
if (ublk_op == UBLK_IO_OP_WRITE)
copied = pread(q->ublk_fd, addr, copy_len, off);
off += copy_len;
len -= copy_len;
}
+
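+ /*
+  * Integrity metadata travels through the same user-copy interface,
+  * at the io's offset tagged with UBLKSRV_IO_INTEGRITY_FLAG: read it
+  * from the char device for WRITEs, write it back for READs.
+  */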
+ if (!(iod->op_flags & UBLK_IO_F_INTEGRITY))
+ return;
+
+ len = ublk_integrity_len(q, iod->nr_sectors << 9);
+ off = ublk_user_copy_offset(q->q_id, io->tag);
+ off |= UBLKSRV_IO_INTEGRITY_FLAG;
+ if (ublk_op == UBLK_IO_OP_WRITE)
+ copied = pread(q->ublk_fd, io->integrity_buf, len, off);
+ else if (ublk_op == UBLK_IO_OP_READ)
+ copied = pwrite(q->ublk_fd, io->integrity_buf, len, off);
+ else
+ assert(0);
+ assert(copied == (ssize_t)len);
}
int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
dev->q[i].dev = dev;
dev->q[i].q_id = i;
- ret = ublk_queue_init(&dev->q[i], extra_flags);
+ ret = ublk_queue_init(&dev->q[i], extra_flags,
+ ctx->metadata_size);
if (ret) {
ublk_err("ublk dev %d queue %d init queue failed\n",
dinfo->dev_id, i);
struct ublk_io {
char *buf_addr;
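+ /* staging buffer for integrity metadata; NULL unless integrity is enabled */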
+ void *integrity_buf;
#define UBLKS_IO_NEED_FETCH_RQ (1UL << 0)
#define UBLKS_IO_NEED_COMMIT_RQ_COMP (1UL << 1)
#define UBLKS_Q_NO_UBLK_FIXED_FD (1ULL << 62)
__u64 flags;
int ublk_fd; /* cached ublk char device fd */
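+ /* integrity metadata bytes per 512-byte interval */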
+ __u8 metadata_size;
struct ublk_io ios[UBLK_QUEUE_DEPTH];
};
};
}
+static inline size_t ublk_integrity_len(const struct ublk_queue *q, size_t len)
+{
+ /* All targets currently use interval_exp = logical_bs_shift = 9 */
+ return (len >> 9) * q->metadata_size;
+}
+
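+/* maps an integrity length back to the data length it covers */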
+static inline size_t
+ublk_integrity_data_len(const struct ublk_queue *q, size_t integrity_len)
+{
+ return (integrity_len / q->metadata_size) << 9;
+}
+
static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod)
{
return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF);