git.ipfire.org Git - thirdparty/linux.git/commitdiff
nvme-pci: refactor nvme_pci_use_sgls
author Christoph Hellwig <hch@lst.de>
Wed, 25 Jun 2025 11:35:00 +0000 (13:35 +0200)
committer Jens Axboe <axboe@kernel.dk>
Mon, 30 Jun 2025 21:50:32 +0000 (15:50 -0600)
Move the average segment size calculation into a separate helper, and
return a tristate to distinguish the case where we can use SGLs from
the case where we have to use SGLs.  This will allow us to simplify
the code and make more efficient decisions in follow-on changes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20250625113531.522027-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/pci.c

index 8ff12e415cb5d1529d760b33f3e0cf3b8d1555f1..16ff87fe3dd99e2f5d32ca99cad173e3a8798df4 100644 (file)
@@ -578,6 +578,12 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
        spin_unlock(&nvmeq->sq_lock);
 }
 
+enum nvme_use_sgl {
+       SGL_UNSUPPORTED,
+       SGL_SUPPORTED,
+       SGL_FORCED,
+};
+
 static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
                                              struct request *req)
 {
@@ -587,23 +593,27 @@ static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
                nvme_req(req)->flags & NVME_REQ_USERCMD;
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
-                                    int nseg)
+static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
+               struct request *req)
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-       unsigned int avg_seg_size;
 
-       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+       if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
+               if (nvme_req(req)->flags & NVME_REQ_USERCMD)
+                       return SGL_FORCED;
+               if (req->nr_integrity_segments > 1)
+                       return SGL_FORCED;
+               return SGL_SUPPORTED;
+       }
 
-       if (!nvme_ctrl_sgl_supported(&dev->ctrl))
-               return false;
-       if (!nvmeq->qid)
-               return false;
-       if (nvme_pci_metadata_use_sgls(dev, req))
-               return true;
-       if (!sgl_threshold || avg_seg_size < sgl_threshold)
-               return nvme_req(req)->flags & NVME_REQ_USERCMD;
-       return true;
+       return SGL_UNSUPPORTED;
+}
+
+static unsigned int nvme_pci_avg_seg_size(struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+       return DIV_ROUND_UP(blk_rq_payload_bytes(req), iod->sgt.nents);
 }
 
 static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
@@ -851,6 +861,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
        blk_status_t ret = BLK_STS_RESOURCE;
        int rc;
 
@@ -888,7 +899,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                goto out_free_sg;
        }
 
-       if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
+       if (use_sgl == SGL_FORCED ||
+           (use_sgl == SGL_SUPPORTED &&
+            (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
                ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw);
        else
                ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw);