git.ipfire.org Git - thirdparty/linux.git/commitdiff
nvmet: fabrics: add CQ init and destroy
author: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Thu, 24 Apr 2025 05:13:51 +0000 (15:13 +1000)
committer: Christoph Hellwig <hch@lst.de>
Tue, 20 May 2025 03:34:25 +0000 (05:34 +0200)
With struct nvmet_cq now having a reference count, this patch amends the
target fabrics call chain to initialize and destroy/put a completion
queue.

Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c

index 14f55192367e1ea3bfc5170353090c9f28cdc684..7b8d8b397802c33d350d0c60824aa33fdbdc23e9 100644 (file)
@@ -208,6 +208,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
                return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
        }
 
+       kref_get(&ctrl->ref);
+       old = cmpxchg(&req->cq->ctrl, NULL, ctrl);
+       if (old) {
+               pr_warn("queue already connected!\n");
+               req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+               return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+       }
+
        /* note: convert queue size from 0's-based value to 1's-based value */
        nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
        nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
index 7b50130f10f6578e6e49fe8ea661de34dfbb3683..7c2a4e2eb315158d5395674613af9c9daeafc589 100644 (file)
@@ -816,6 +816,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 
        nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
 
+       nvmet_cq_init(&queue->nvme_cq);
        ret = nvmet_sq_init(&queue->nvme_sq);
        if (ret)
                goto out_fail_iodlist;
@@ -826,6 +827,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
        return queue;
 
 out_fail_iodlist:
+       nvmet_cq_put(&queue->nvme_cq);
        nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
        destroy_workqueue(queue->work_q);
 out_free_queue:
@@ -934,6 +936,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
        flush_workqueue(queue->work_q);
 
        nvmet_sq_destroy(&queue->nvme_sq);
+       nvmet_cq_put(&queue->nvme_cq);
 
        nvmet_fc_tgt_q_put(queue);
 }
index d02b80803278c3fb644a25352e43095bc5d21280..bbb3699c8686fb3d6f7db047544f93531624a125 100644 (file)
@@ -275,6 +275,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
        nvme_unquiesce_admin_queue(&ctrl->ctrl);
 
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+       nvmet_cq_put(&ctrl->queues[0].nvme_cq);
        nvme_remove_admin_tag_set(&ctrl->ctrl);
 }
 
@@ -304,6 +305,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+               nvmet_cq_put(&ctrl->queues[i].nvme_cq);
        }
        ctrl->ctrl.queue_count = 1;
        /*
@@ -329,9 +331,12 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
 
        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
+               nvmet_cq_init(&ctrl->queues[i].nvme_cq);
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-               if (ret)
+               if (ret) {
+                       nvmet_cq_put(&ctrl->queues[i].nvme_cq);
                        goto out_destroy_queues;
+               }
 
                ctrl->ctrl.queue_count++;
        }
@@ -362,9 +367,12 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        int error;
 
        ctrl->queues[0].ctrl = ctrl;
+       nvmet_cq_init(&ctrl->queues[0].nvme_cq);
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
-       if (error)
+       if (error) {
+               nvmet_cq_put(&ctrl->queues[0].nvme_cq);
                return error;
+       }
        ctrl->ctrl.queue_count = 1;
 
        error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
@@ -403,6 +411,7 @@ out_cleanup_tagset:
        nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+       nvmet_cq_put(&ctrl->queues[0].nvme_cq);
        return error;
 }
 
index 2a4536ef618487455ada997d0285d9dce657c2ef..3ad9b4d1fad215c9d8a3057f7b90ea00cd1e6844 100644 (file)
@@ -1353,6 +1353,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
        pr_debug("freeing queue %d\n", queue->idx);
 
        nvmet_sq_destroy(&queue->nvme_sq);
+       nvmet_cq_put(&queue->nvme_cq);
 
        nvmet_rdma_destroy_queue_ib(queue);
        if (!queue->nsrq) {
@@ -1436,6 +1437,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
                goto out_reject;
        }
 
+       nvmet_cq_init(&queue->nvme_cq);
        ret = nvmet_sq_init(&queue->nvme_sq);
        if (ret) {
                ret = NVME_RDMA_CM_NO_RSC;
@@ -1517,6 +1519,7 @@ out_ida_remove:
 out_destroy_sq:
        nvmet_sq_destroy(&queue->nvme_sq);
 out_free_queue:
+       nvmet_cq_put(&queue->nvme_cq);
        kfree(queue);
 out_reject:
        nvmet_rdma_cm_reject(cm_id, ret);
index e6997ce6102794335da02fa2f3ab66dfb9e824fc..4dacb6b40fd120e1657737af135aea3a661000ce 100644 (file)
@@ -1577,6 +1577,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
        nvmet_sq_put_tls_key(&queue->nvme_sq);
        nvmet_tcp_uninit_data_in_cmds(queue);
        nvmet_sq_destroy(&queue->nvme_sq);
+       nvmet_cq_put(&queue->nvme_cq);
        cancel_work_sync(&queue->io_work);
        nvmet_tcp_free_cmd_data_in_buffers(queue);
        /* ->sock will be released by fput() */
@@ -1910,6 +1911,7 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
        if (ret)
                goto out_ida_remove;
 
+       nvmet_cq_init(&queue->nvme_cq);
        ret = nvmet_sq_init(&queue->nvme_sq);
        if (ret)
                goto out_free_connect;
@@ -1953,6 +1955,7 @@ out_destroy_sq:
        mutex_unlock(&nvmet_tcp_queue_mutex);
        nvmet_sq_destroy(&queue->nvme_sq);
 out_free_connect:
+       nvmet_cq_put(&queue->nvme_cq);
        nvmet_tcp_free_cmd(&queue->connect);
 out_ida_remove:
        ida_free(&nvmet_tcp_queue_ida, queue->idx);