/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE
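
/*
 * An admin RECV carries only the command capsule in sge[0]; I/O queue
 * RECVs add a second SGE pointing at a per-command inline data page, so
 * host writes of up to NVMET_RDMA_INLINE_DATA_SIZE arrive with the
 * capsule and need no separate RDMA READ.
 */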

struct nvmet_rdma_cmd {
	struct ib_sge		sge[2];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg;
	struct page		*inline_page;
	struct nvme_command	*nvme_cmd;
	struct nvmet_rdma_queue	*queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
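
/*
 * Responses are taken from a per-queue free list under rsps_lock.  The
 * pool is sized at twice the receive queue depth (see
 * nvmet_rdma_alloc_rsps()), which is expected to keep the list non-empty
 * for every incoming RECV completion while the queue is live.
 */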

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl || !nents)
		return;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));
	kfree(sgl);
}
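
/*
 * Allocate a scatterlist for a keyed SGL transfer one page at a time;
 * only the last entry may be shorter than a page when the transfer
 * length is not page aligned.
 */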

static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
out:
	return NVME_SC_INTERNAL;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}
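
/*
 * Repost a RECV either on the device-wide SRQ or on the queue's own QP,
 * matching how the command buffers were allocated (ndev->srq_cmds vs.
 * queue->cmds).
 */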

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}
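
/*
 * Releasing a response returns its send queue credits, which makes this
 * the natural point to retry commands that previously failed to reserve
 * enough credits and were parked on rsp_wr_wait_list.
 */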

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an
		 * admin connect error, so just disconnect and clean up
		 * the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}
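
/*
 * Completions are sent with IB_WR_SEND_WITH_INV when the host used a
 * keyed SGL that asked for remote invalidation; any RDMA WRITE work
 * requests for Data-Out are chained in front of the SEND by
 * rdma_rw_ctx_wrs(), so a single ib_post_send() covers both.
 */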

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;
	u16 status;

	/* no data command? */
	if (!len)
		return 0;

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);
	if (status)
		return status;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}
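
/*
 * Dispatch on the SGL descriptor: the high nibble of 'type' selects the
 * descriptor format (plain vs. keyed data block) and the low nibble the
 * subtype (offset-based inline vs. remote address, with or without
 * invalidation).
 */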

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}
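
/*
 * Each command needs one send queue slot for the completion SEND plus
 * n_rdma slots for its RDMA READ/WRITE work requests.  If the atomic
 * reservation would go negative, the caller queues the command on
 * rsp_wr_wait_list until credits are returned.
 */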

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	cmd->queue = queue;
	cmd->n_rdma = 0;
	cmd->req.port = queue->port;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}
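
/*
 * Devices are cached on device_list keyed by node GUID, so all queues
 * and ports on the same HCA share a single PD and, when use_srq is set,
 * a single SRQ.
 */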

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}
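
/*
 * A single CQ serves both sides of the queue pair: nr_cqe reserves one
 * slot per RECV and two per send queue entry (the RDMA READ/WRITE and
 * the completion SEND), with one extra slot for draining the QP.
 */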

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);

	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}
static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param  param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret)
		goto release_queue;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

release_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		/* fall through */
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id (queue cm_id, or listener cm_id with the
 *		nvmet port as its context)
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug, so we should take care of destroying our RDMA resources.
 * This event will be generated for each allocated cm_id.
 *
 * Note that this event can be generated on a normal queue cm_id
 * and/or a device bound listener cm_id (where in this case
 * queue will be null).
 *
 * We claim ownership on destroying the cm_id. For queues we move
 * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for the port
 * we nullify the priv to prevent a double cm_id destruction and destroy
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	if (!queue) {
		struct nvmet_port *port = cm_id->context;

		/*
		 * This is a listener cm_id. Make sure that
		 * future remove_port won't invoke a double
		 * cm_id destroy. use atomic xchg to make sure
		 * we don't compete with remove_port.
		 */
		if (xchg(&port->priv, NULL) != cm_id)
			return 0;
	} else {
		/*
		 * This is a queue cm_id. Make sure that
		 * release queue will not destroy the cm_id
		 * and schedule all ctrl queues removal (only
		 * if the queue is not disconnecting already).
		 */
		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
			queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_queue_disconnect(queue);
		flush_scheduled_work();
	}

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_in addr_in;
	u16 port_in;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);
	if (ret)
		return ret;

	addr_in.sin_family = AF_INET;
	addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
	addr_in.sin_port = htons(port_in);

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
	if (ret) {
		pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(port->disc_addr.portid), &addr_in);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
};

static int __init nvmet_rdma_init(void)
{
	return nvmet_register_transport(&nvmet_rdma_ops);
}

static void __exit nvmet_rdma_exit(void)
{
	struct nvmet_rdma_queue *queue;

	nvmet_unregister_transport(&nvmet_rdma_ops);

	flush_scheduled_work();

	mutex_lock(&nvmet_rdma_queue_mutex);
	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
			struct nvmet_rdma_queue, queue_list))) {
		list_del_init(&queue->queue_list);

		mutex_unlock(&nvmet_rdma_queue_mutex);
		__nvmet_rdma_queue_disconnect(queue);
		mutex_lock(&nvmet_rdma_queue_mutex);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */