nvmet: simplify the nvmet_req_init() interface
Author:     Wilfred Mallawa <wilfred.mallawa@wdc.com>
AuthorDate: Thu, 24 Apr 2025 05:13:53 +0000 (15:13 +1000)
Commit:     Christoph Hellwig <hch@lst.de>
CommitDate: Tue, 20 May 2025 03:34:26 +0000 (05:34 +0200)
Now that a submission queue holds a reference to its completion queue,
there is no need to pass the cq argument to nvmet_req_init(), so remove
it.

Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
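
For readers skimming the diff below, the shape of the change can be summarised with a small, self-contained mock: nvmet_req_init() drops its cq parameter and derives the completion queue from the submission queue it is given. The struct layouts and the example caller here are simplified illustrations for this commit only, not the real kernel definitions:

/*
 * Simplified mock of the new interface. Struct layouts are reduced to
 * the fields this patch touches and are NOT the kernel definitions.
 */
#include <stdbool.h>
#include <stddef.h>

struct nvmet_cq { int qid; };

struct nvmet_sq {
	struct nvmet_cq *cq;	/* paired cq, stored when the sq is set up */
	int qid;
};

struct nvmet_fabrics_ops { int unused; };

struct nvmet_req {
	struct nvmet_cq *cq;
	struct nvmet_sq *sq;
	const struct nvmet_fabrics_ops *ops;
};

/*
 * After this patch the cq no longer has to be passed in: it is taken
 * from the sq the request is queued on.
 */
static bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
		const struct nvmet_fabrics_ops *ops)
{
	req->cq = sq->cq;	/* was: req->cq = cq (explicit argument) */
	req->sq = sq;
	req->ops = ops;
	return true;
}

/* A transport-driver call site now only names the sq, e.g.: */
static void example_caller(struct nvmet_req *req, struct nvmet_sq *sq,
		const struct nvmet_fabrics_ops *ops)
{
	if (!nvmet_req_init(req, sq, ops))
		return;	/* bad SQE content or invalid ctrl state */
}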
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/pci-epf.c
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2b02b2f939a57e2072629502eab5f1d061298605..db7b17d1094e8b4ceb19d81c3ef380f343fb6f0f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1156,13 +1156,13 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
        return ret;
 }
 
-bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
-               struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
+               const struct nvmet_fabrics_ops *ops)
 {
        u8 flags = req->cmd->common.flags;
        u16 status;
 
-       req->cq = cq;
+       req->cq = sq->cq;
        req->sq = sq;
        req->ops = ops;
        req->sg = NULL;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 2e813e51549c8d7c306e88b0bd875d53f485a218..a82cff9a80643956921e61c80439afad41639923 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2534,10 +2534,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
        fod->data_sg = NULL;
        fod->data_sg_cnt = 0;
 
-       ret = nvmet_req_init(&fod->req,
-                               &fod->queue->nvme_cq,
-                               &fod->queue->nvme_sq,
-                               &nvmet_fc_tgt_fcp_ops);
+       ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq,
+                       &nvmet_fc_tgt_fcp_ops);
        if (!ret) {
                /* bad SQE content or invalid ctrl state */
                /* nvmet layer has already called op done to send rsp. */
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index c9ec13eff879df8a2c99eefd111dc393e1f6e4eb..f85a8441bcc6ecdcfb17e9cf1e883d2817cce9fc 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -150,8 +150,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        nvme_start_request(req);
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = queue->ctrl->port;
-       if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
-                       &queue->nvme_sq, &nvme_loop_ops))
+       if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops))
                return BLK_STS_OK;
 
        if (blk_rq_nr_phys_segments(req)) {
@@ -183,8 +182,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
        iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 
-       if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
-                       &nvme_loop_ops)) {
+       if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index d3795b09fcc43c3803988159e0dc29ceb5e2dfa3..df69a9dee71cb31c1f61c415de19a8ccb0cabd68 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -561,8 +561,8 @@ u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
 u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
 u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);
 
-bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
-               struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
+               const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 size_t nvmet_req_transfer_len(struct nvmet_req *req);
 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index ebe2fc355fbd7b31bd6a59cadf5cfd8bdfdd63cf..ec529305bc75064721f9a13cf46d2a6ed6e81c2b 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1597,8 +1597,7 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
                goto complete;
        }
 
-       if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq,
-                           &nvmet_pci_epf_fabrics_ops))
+       if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
                goto complete;
 
        iod->data_len = nvmet_req_transfer_len(req);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 2e5c32298818463c3d03a858071a550c1c2b171f..432bdf7cd49e78c616908132606f3f46b57dbcd4 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -976,8 +976,7 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
                cmd->send_sge.addr, cmd->send_sge.length,
                DMA_TO_DEVICE);
 
-       if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
-                       &queue->nvme_sq, &nvmet_rdma_ops))
+       if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops))
                return;
 
        status = nvmet_rdma_map_sgl(cmd);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 5811246dbe426ac50e2d1d5e847b98de4b33afd2..c6603bd9c95edfee953cd0aa3a961e79940a24d5 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1039,8 +1039,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
        req = &queue->cmd->req;
        memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
 
-       if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
-                       &queue->nvme_sq, &nvmet_tcp_ops))) {
+       if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) {
                pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
                        req->cmd, req->cmd->common.command_id,
                        req->cmd->common.opcode,