git.ipfire.org Git - thirdparty/linux.git/blobdiff - drivers/nvme/target/io-cmd-bdev.c
Merge tag 'for-5.8/drivers-2020-06-01' of git://git.kernel.dk/linux-block
[thirdparty/linux.git] / drivers / nvme / target / io-cmd-bdev.c
index ea0e596be15dc5a9c72380cf39dcc8678b88bfe6..3dd6f566a240f147a4e12561175d927de564641c 100644 (file)
@@ -47,6 +47,22 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
        id->nows = to0based(ql->io_opt / ql->logical_block_size);
 }
 
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+       struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+       if (bi) {
+               ns->metadata_size = bi->tuple_size;
+               if (bi->profile == &t10_pi_type1_crc)
+                       ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+               else if (bi->profile == &t10_pi_type3_crc)
+                       ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+               else
+                       /* Unsupported metadata type */
+                       ns->metadata_size = 0;
+       }
+}
+
 int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
 {
        int ret;
@@ -64,6 +80,12 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
        }
        ns->size = i_size_read(ns->bdev->bd_inode);
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+       ns->pi_type = 0;
+       ns->metadata_size = 0;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+               nvmet_bdev_ns_enable_integrity(ns);
+
        return 0;
 }
 
@@ -75,6 +97,11 @@ void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
        }
 }
 
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
+{
+       ns->size = i_size_read(ns->bdev->bd_inode);
+}
+
 static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 {
        u16 status = NVME_SC_SUCCESS;
@@ -142,6 +169,61 @@ static void nvmet_bio_done(struct bio *bio)
                bio_put(bio);
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+                               struct sg_mapping_iter *miter)
+{
+       struct blk_integrity *bi;
+       struct bio_integrity_payload *bip;
+       struct block_device *bdev = req->ns->bdev;
+       int rc;
+       size_t resid, len;
+
+       bi = bdev_get_integrity(bdev);
+       if (unlikely(!bi)) {
+               pr_err("Unable to locate bio_integrity\n");
+               return -ENODEV;
+       }
+
+       bip = bio_integrity_alloc(bio, GFP_NOIO,
+               min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+       if (IS_ERR(bip)) {
+               pr_err("Unable to allocate bio_integrity_payload\n");
+               return PTR_ERR(bip);
+       }
+
+       bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+       /* virtual start sector must be in integrity interval units */
+       bip_set_seed(bip, bio->bi_iter.bi_sector >>
+                    (bi->interval_exp - SECTOR_SHIFT));
+
+       resid = bip->bip_iter.bi_size;
+       while (resid > 0 && sg_miter_next(miter)) {
+               len = min_t(size_t, miter->length, resid);
+               rc = bio_integrity_add_page(bio, miter->page, len,
+                                           offset_in_page(miter->addr));
+               if (unlikely(rc != len)) {
+                       pr_err("bio_integrity_add_page() failed; %d\n", rc);
+                       sg_miter_stop(miter);
+                       return -ENOMEM;
+               }
+
+               resid -= len;
+               if (len < miter->length)
+                       miter->consumed -= miter->length - len;
+       }
+       sg_miter_stop(miter);
+
+       return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+                               struct sg_mapping_iter *miter)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
        int sg_cnt = req->sg_cnt;
@@ -149,9 +231,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
        struct scatterlist *sg;
        struct blk_plug plug;
        sector_t sector;
-       int op, i;
+       int op, i, rc;
+       struct sg_mapping_iter prot_miter;
+       unsigned int iter_flags;
+       unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
 
-       if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+       if (!nvmet_check_transfer_len(req, total_len))
                return;
 
        if (!req->sg_cnt) {
@@ -163,8 +248,10 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
                op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op |= REQ_FUA;
+               iter_flags = SG_MITER_TO_SG;
        } else {
                op = REQ_OP_READ;
+               iter_flags = SG_MITER_FROM_SG;
        }
 
        if (is_pci_p2pdma_page(sg_page(req->sg)))
@@ -186,11 +273,24 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
        bio->bi_opf = op;
 
        blk_start_plug(&plug);
+       if (req->metadata_len)
+               sg_miter_start(&prot_miter, req->metadata_sg,
+                              req->metadata_sg_cnt, iter_flags);
+
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;
 
+                       if (req->metadata_len) {
+                               rc = nvmet_bdev_alloc_bip(req, bio,
+                                                         &prot_miter);
+                               if (unlikely(rc)) {
+                                       bio_io_error(bio);
+                                       return;
+                               }
+                       }
+
                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
@@ -204,6 +304,14 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
                sg_cnt--;
        }
 
+       if (req->metadata_len) {
+               rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+               if (unlikely(rc)) {
+                       bio_io_error(bio);
+                       return;
+               }
+       }
+
        submit_bio(bio);
        blk_finish_plug(&plug);
 }
@@ -212,7 +320,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 {
        struct bio *bio = &req->b.inline_bio;
 
-       if (!nvmet_check_data_len(req, 0))
+       if (!nvmet_check_transfer_len(req, 0))
                return;
 
        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
@@ -226,7 +334,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 
 u16 nvmet_bdev_flush(struct nvmet_req *req)
 {
-       if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+       if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
 }
@@ -304,7 +412,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
        sector_t nr_sector;
        int ret;
 
-       if (!nvmet_check_data_len(req, 0))
+       if (!nvmet_check_transfer_len(req, 0))
                return;
 
        sector = le64_to_cpu(write_zeroes->slba) <<
@@ -331,6 +439,8 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_bdev_execute_rw;
+               if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+                       req->metadata_len = nvmet_rw_metadata_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_bdev_execute_flush;