// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure occurs.
						 */
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};
struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2
struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);
/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
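
/*
 * kref release callback for a local port: removes the lport from the
 * transport's global list, releases its port number, and frees it.
 */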
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
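
/*
 * Search the transport's lport list for a previously deleted localport with
 * the same WWNN/WWPN. If its references have not yet expired, take a new
 * reference and transition it back to ONLINE rather than allocating a new one.
 */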
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being announced:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
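
/*
 * Raise an FC_EVENT=nvmediscovery udev change event on the well-known
 * fc_udev device, carrying the host and target traddr strings, so that
 * userspace (typically udev rules / nvme-cli) can connect to the discovery
 * controller on the remote port.
 */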
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_free(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
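
/*
 * Called when connectivity to a remote port has been re-established:
 * schedule a reconnect for a controller that was waiting on it.
 */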
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnects will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
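
/*
 * Unlink a completed LS request from the rport's ls_req_list, unmap its
 * DMA buffer, and drop the rport reference taken when it was issued.
 */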
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}
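
/*
 * Synchronous LS send: issue the request and wait (uninterruptibly) for the
 * LLDD completion callback; an LS_RJT response is reported as -ENXIO.
 */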
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
	if (ret)
		return ret;

	/*
	 * No timeout/not interruptible as we need the struct
	 * to exist until the lldd calls us back. Thus mandate
	 * wait until driver calls back. lldd responsible for
	 * the timeout action
	 */
	wait_for_completion(&lsop->ls_done);

	__nvme_fc_finish_ls_req(lsop);

	ret = lsop->ls_error;
	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}
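
/*
 * Build and send a Create Association LS for the admin queue, then validate
 * the accept payload and record the association and connection IDs returned
 * by the target.
 */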
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}
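
/*
 * Completion handler for a transmitted LS response: unlink the received-LS
 * op from the rport, unmap and free its buffers, and drop the rport
 * reference.
 */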
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop->rspbuf);
	kfree(lsop->rqstbuf);
	kfree(lsop);

	nvme_fc_rport_put(rport);
}
static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
	}
}
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}
/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}
/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return ret;
}
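
/*
 * Work item that walks the rport's list of received LS requests and handles
 * each unhandled entry, rejecting the LS if the remoteport is no longer
 * online.
 */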
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static void
nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport,
			struct fcnvme_ls_rqst_w0 *w0)
{
	dev_info(lport->dev, "RCV %s LS failed: No memory\n",
		(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
			nvmefc_ls_names[w0->ls_cmd] : "");
}
/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received from. The remoteport is associated with
 *              a specific localport.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
 *              used to reference the exchange corresponding to the LS
 *              when issuing an ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	lsop = kzalloc(sizeof(*lsop), GFP_KERNEL);
	if (!lsop) {
		nvme_fc_rcv_ls_req_err_msg(lport, w0);
		ret = -ENOMEM;
		goto out_put;
	}

	lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL);
	lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL);
	if (!lsop->rqstbuf || !lsop->rspbuf) {
		nvme_fc_rcv_ls_req_err_msg(lport, w0);
		ret = -ENOMEM;
		goto out_free;
	}

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop->rspbuf);
	kfree(lsop->rqstbuf);
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
/* *********************** NVME Ctrl Routines **************************** */
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}
static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
}
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}
static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}
static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}
/*
 * nvme_fc_io_getuuid - Routine called to get the appid field
 * associated with request by the lldd
 * @req: IO request from nvme fc to driver
 * Returns: UUID if there is an appid associated with VM or
 * NULL if the user/libvirt has not set the appid to VM
 */
char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;

	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
		return NULL;
	return blkcg_get_fc_appid(rq->bio);
}
EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
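
/*
 * FCP I/O completion callback invoked by the LLDD: translate the transport
 * and ERSP status into an NVMe CQE and complete the block request (or the
 * async event).
 */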
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid, cqe.sqhd, cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
	else if (freq->status) {
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to lldd error %d\n",
			ctrl->cnum, freq->status);
	}

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad transfer "
				"length: %d vs expected %d\n",
				ctrl->cnum, freq->transferred_length,
				be32_to_cpu(op->cmd_iu.data_len));
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.ersp_result ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
				"iu len %d, xfr len %d vs %d, status code "
				"%d, cmdid %d vs %d\n",
				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
				be32_to_cpu(op->rsp_iu.xfrd_len),
				freq->transferred_length,
				op->rsp_iu.ersp_result,
				sqe->common.command_id,
				cqe->command_id);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
			"len %d\n",
			ctrl->cnum, freq->rcv_rsplen);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	if (!nvme_try_complete_req(rq, status, result))
		nvme_fc_complete_rq(rq);

check_error:
	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
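
/*
 * One-time initialization of a per-request FCP op: point the LLDD request at
 * the embedded command/response IUs and DMA-map them.
 */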
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->format_id = NVME_CMD_FORMAT_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	if (queue->qnum)
		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
	else
		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = op->sgl;
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
	return res;
}
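/*
 * AEN (Asynchronous Event Notification) commands do not come through the
 * block layer, so their fcp ops live in a small private array on the
 * controller. Each AEN op is initialized like a normal request (via
 * __nvme_fc_init_request()) but with a NULL struct request and a command id
 * placed beyond the admin queue depth (NVME_AQ_BLK_MQ_DEPTH + i) so it can
 * never collide with a blk-mq tag.
 */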
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private = NULL;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (ctrl->lport->ops->fcprqst_priv_sz) {
			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
			if (!private)
				return -ENOMEM;
		}

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	cancel_work_sync(&ctrl->ctrl.async_event_work);
	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline int
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
	return 0;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
{
	return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	return __nvme_fc_init_hctx(hctx, data, hctx_idx);
}
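/*
 * Queue indexing convention used throughout this driver: entry 0 of
 * ctrl->queues is the admin queue, entries 1..queue_count-1 are the io
 * queues. That is why nvme_fc_init_hctx() above maps blk-mq hctx_idx N to
 * transport queue N + 1, while the admin variant uses the index as-is.
 */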
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}
/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 0);
}
static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}
static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i > 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}
static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}
static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset)
		nvme_remove_io_tag_set(&ctrl->ctrl);

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	nvme_unquiesce_admin_queue(&ctrl->ctrl);
	nvme_remove_admin_tag_set(&ctrl->ctrl);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static bool nvme_fc_terminate_exchange(struct request *req, void *data)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	op->nreq.flags |= NVME_REQ_CANCELLED;
	__nvme_fc_abort_op(ctrl, op);
	return true;
}
/*
 * This routine runs through all outstanding commands on the association
 * and aborts them. This routine is typically called by the
 * delete_association routine. It is also called due to an error during
 * reconnect. In that scenario, it is most likely a command that initializes
 * the controller, including fabric Connect commands on io queues, that
 * may have timed out or failed thus the io must be killed for the connect
 * thread to see the error.
 */
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
	int q;

	/*
	 * if aborting io, the queues are no longer good, mark them
	 * all as not live.
	 */
	if (ctrl->ctrl.queue_count > 1) {
		for (q = 1; q < ctrl->ctrl.queue_count; q++)
			clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
	}
	clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_quiesce_io_queues(&ctrl->ctrl);
		nvme_sync_io_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		if (start_queues)
			nvme_unquiesce_io_queues(&ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_disable_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 */
	nvme_quiesce_admin_queue(&ctrl->ctrl);

	/*
	 * Open-coding nvme_cancel_admin_tagset() as fc
	 * is not using nvme_cancel_request().
	 */
	nvme_stop_keep_alive(&ctrl->ctrl);
	blk_sync_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	if (start_queues)
		nvme_unquiesce_admin_queue(&ctrl->ctrl);
}
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/*
	 * if an error (io timeout, etc) while (re)connecting, the remote
	 * port requested terminating of the association (disconnect_ls)
	 * or an error (timeout or abort) occurred on an io while creating
	 * the controller. Abort any ios on the association and let the
	 * create_association error path resolve things.
	 */
	enum nvme_ctrl_state state;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	state = ctrl->ctrl.state;
	if (state == NVME_CTRL_CONNECTING) {
		set_bit(ASSOC_FAILED, &ctrl->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
		__nvme_fc_abort_outstanding_ios(ctrl, true);
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: transport error during (re)connect\n",
			ctrl->cnum);
		return;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
	if (state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association event: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}
static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;

	/*
	 * Attempt to abort the offending command. Command completion
	 * will detect the aborted io and will fail the connection.
	 */
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
		"x%08x/x%08x\n",
		ctrl->cnum, op->queue->qnum, sqe->common.opcode,
		sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
	if (__nvme_fc_abort_op(ctrl, op))
		nvme_fc_error_recovery(ctrl, "io timeout abort failed");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}
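/*
 * Data buffers for an io are handed to the LLDD as a scatter/gather list.
 * The first NVME_INLINE_SG_CNT entries live inline in the
 * nvme_fcp_op_w_sgl pdu (freq->first_sgl); longer lists are chained via
 * sg_alloc_table_chained(). The list is then DMA-mapped against the local
 * port's physical device before the command is issued.
 */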
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_nr_phys_segments(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, rq_dma_dir(rq));
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq) for DIF / PI handling
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
			rq_dma_dir(rq));

	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);

	freq->sg_cnt = 0;
}
/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ. When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange. The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		nvme_start_request(op->rq);

	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		/*
		 * If the lld fails to send the command is there an issue with
		 * the csn value? If the command that fails is the Connect,
		 * no - as the connection won't be live. If it is a command
		 * post-connect, it's possible a gap in csn may be created.
		 * Does this matter? As Linux initiators don't send fused
		 * commands, no. The gap would exist, but as there's nothing
		 * that depends on csn order to be delivered on the target
		 * side, it shouldn't hurt. It would be difficult for a
		 * target to even detect the csn gap as it has no idea when the
		 * cmd with the csn was supposed to arrive.
		 */
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN)) {
			nvme_fc_unmap_data(ctrl, op->rq, op);
			nvme_cleanup_cmd(op->rq);
		}

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	enum nvmefc_fcp_datadir io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	/*
	 * nvme core doesn't quite treat the rq opaquely. Commands such
	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
	 * there is no actual payload to be transferred.
	 * To get it right, key data transmission on there being 1 or
	 * more physical segments in the sg list. If there is no
	 * physical segments, there is no payload.
	 */
	if (blk_rq_nr_phys_segments(rq)) {
		data_len = blk_rq_payload_bytes(rq);
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	} else {
		data_len = 0;
		io_dir = NVMEFC_FCP_NODATA;
	}

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	blk_status_t ret;

	if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);
	op->flags &= ~FCOP_FLAGS_TERMIO;

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}
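/*
 * blk-mq queue mapping: if the LLDD provides a map_queues callback the
 * transport defers to it (e.g. so an adapter can spread its hw queues
 * across its own interrupt vectors); otherwise the generic
 * blk_mq_map_queues() spread is used for every populated map.
 */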
static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
	int i;

	for (i = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		if (!map->nr_queues) {
			WARN_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/* Call LLDD map queue functionality if defined */
		if (ctrl->lport->ops->map_queues)
			ctrl->lport->ops->map_queues(&ctrl->lport->localport,
						     map);
		else
			blk_mq_map_queues(map);
	}
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.timeout	= nvme_fc_timeout,
	.map_queues	= nvme_fc_map_queues,
};
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&nvme_fc_mq_ops, 1,
			struct_size_t(struct nvme_fcp_op_w_sgl, priv,
				      ctrl->lport->ops->fcprqst_priv_sz));
	if (ret)
		return ret;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_tagset;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_tagset:
	nvme_remove_io_tag_set(&ctrl->ctrl);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}
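/*
 * nvme_fc_create_io_queues() above runs on the first association; on every
 * subsequent reconnect nvme_fc_recreate_io_queues() below is used instead,
 * reusing the existing tag set and only revising the hw queue count if the
 * controller now reports a different number of io queues.
 */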
static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	if (!nr_io_queues && prior_ioq_cnt) {
		dev_info(ctrl->ctrl.device,
			"Fail Reconnect: At least 1 io queue "
			"required (was %d)\n", prior_ioq_cnt);
		return -ENOSPC;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	if (prior_ioq_cnt != nr_io_queues) {
		dev_info(ctrl->ctrl.device,
			"reconnect: revising io queue count from %d to %d\n",
			prior_ioq_cnt, nr_io_queues);
		blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
		return 1;

	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}
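/*
 * The act_rport_cnt / act_ctrl_cnt counters above track how many controllers
 * are still using a remote port and how many remote ports are still using a
 * local port. The *_delete() LLDD callbacks are only invoked once the count
 * drops to zero and the port has already been marked FC_OBJSTATE_DELETED, so
 * unregistration is deferred until the last user goes away.
 */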
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	struct nvmefc_ls_rcv_op *disls = NULL;
	unsigned long flags;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: create association : host wwpn 0x%016llx "
		" rport wwpn 0x%016llx: NQN \"%s\"\n",
		ctrl->cnum, ctrl->lport->localport.port_name,
		ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);

	clear_bit(ASSOC_FAILED, &ctrl->flags);

	/*
	 * Create the admin queue
	 */
	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvme_enable_ctrl(&ctrl->ctrl);
	if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
		ret = -EIO;
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
	ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
						(ilog2(SZ_4K) - 9);

	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
	if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
		ret = -EIO;
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */
	if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
		dev_err(ctrl->ctrl.device,
			"Mandatory sgls are not supported!\n");
		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out_disconnect_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
		ctrl->ctrl.sqsize = opts->queue_size - 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
	}

	spin_lock_irqsave(&ctrl->lock, flags);
	if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
		ret = -EIO;
	if (ret) {
		spin_unlock_irqrestore(&ctrl->lock, flags);
		goto out_term_aen_ops;
	}
	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	spin_unlock_irqrestore(&ctrl->lock, flags);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
		ctrl->cnum, ctrl->association_id, ret);
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->association_id = 0;
	disls = ctrl->rcv_disconn;
	ctrl->rcv_disconn = NULL;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (disls)
		nvme_fc_xmt_ls_rsp(disls);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	clear_bit(ASSOC_ACTIVE, &ctrl->flags);
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}
/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmefc_ls_rcv_op *disls = NULL;
	unsigned long flags;

	if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
		return;

	spin_lock_irqsave(&ctrl->lock, flags);
	set_bit(FCCTRL_TERMIO, &ctrl->flags);
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	__nvme_fc_abort_outstanding_ios(ctrl, false);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	clear_bit(FCCTRL_TERMIO, &ctrl->flags);
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->association_id = 0;
	disls = ctrl->rcv_disconn;
	ctrl->rcv_disconn = NULL;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * if a Disconnect Request was waiting for a response, send
	 * now that all ABTS's have been issued (and are complete).
	 */
	if (disls)
		nvme_fc_xmt_ls_rsp(disls);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	/* resume the io queues so that things will fast fail */
	nvme_unquiesce_io_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_work_sync(&ctrl->ioerr_work);
	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}
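/*
 * Called when an association attempt fails or is torn down while the
 * controller is in CONNECTING state: either schedule another connect
 * attempt (bounded by reconnect_delay / max_reconnects and the rport's
 * dev_loss_end deadline) or give up and delete the controller.
 */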
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
		if (status > 0 && (status & NVME_SC_DNR))
			recon = false;
	} else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE) {
			if (status > 0 && (status & NVME_SC_DNR))
				dev_warn(ctrl->ctrl.device,
					"NVME-FC{%d}: reconnect failure\n",
					ctrl->cnum);
			else
				dev_warn(ctrl->ctrl.device,
					"NVME-FC{%d}: Max reconnect attempts "
					"(%d) reached.\n",
					ctrl->cnum, ctrl->ctrl.nr_reconnects);
		} else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
					(ctrl->ctrl.opts->max_reconnects *
					 ctrl->ctrl.opts->reconnect_delay)));
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
		if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
			dev_err(ctrl->ctrl.device,
				"NVME-FC{%d}: failed to schedule connect "
				"after reset\n", ctrl->cnum);
		} else {
			flush_delayed_work(&ctrl->connect_work);
		}
	} else {
		nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
	}
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};
static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx, ctrl_loss_tmo;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	/*
	 * if ctrl_loss_tmo is being enforced and the default reconnect delay
	 * is being used, change to a shorter reconnect delay for FC.
	 */
	if (opts->max_reconnects != -1 &&
	    opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
	    opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
		ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
		opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	if (lport->dev)
		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
	else
		ctrl->ctrl.numa_node = NUMA_NO_NODE;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_free_queues;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&nvme_fc_admin_mq_ops,
			struct_size_t(struct nvme_fcp_op_w_sgl, priv,
				      ctrl->lport->ops->fcprqst_priv_sz));
	if (ret)
		goto fail_ctrl;

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ioerr_work);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/* as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does a rport put, we do an extra get here to
	 * so proper order/teardown happens.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}
struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
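/*
 * Example of the two accepted traddr formats parsed above (the WWN values
 * here are illustrative only):
 *   traddr=nn-0x200000109b123456:pn-0x100000109b123456
 *   traddr=nn-200000109b123456:pn-100000109b123456
 * The same syntax applies to host_traddr for the local port.
 */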
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn ||
		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn ||
			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}
static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};
/* Arbitrary successive failures max. With lots of subsystems could be high */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry.  Anything
				 * added to the list already will be skipped (as
				 * they are no longer list_empty).  Loops should
				 * resume at rports that were not yet seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}

static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
#ifdef CONFIG_BLK_CGROUP_FC_APPID
/* Parse the cgroup id from a buf and return the length of cgrpid */
static int fc_parse_cgrpid(const char *buf, u64 *id)
{
	char cgrp_id[16 + 1];
	int cgrpid_len, j;

	memset(cgrp_id, 0x0, sizeof(cgrp_id));
	for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
		if (buf[cgrpid_len] != ':')
			cgrp_id[cgrpid_len] = buf[cgrpid_len];
		else {
			j = 1;
			break;
		}
	}
	if (!j)
		return -EINVAL;
	if (kstrtou64(cgrp_id, 16, id) < 0)
		return -EINVAL;
	return cgrpid_len;
}

/*
 * Parse and update the appid in the blkcg associated with the cgroupid.
 */
static ssize_t fc_appid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	size_t orig_count = count;
	u64 cgrp_id;
	int appid_len = 0;
	int cgrpid_len = 0;
	char app_id[FC_APPID_LEN];
	int ret = 0;

	if (buf[count-1] == '\n')
		count--;

	if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
		return -EINVAL;

	cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
	if (cgrpid_len < 0)
		return -EINVAL;
	appid_len = count - cgrpid_len - 1;
	if (appid_len > FC_APPID_LEN)
		return -EINVAL;

	memset(app_id, 0x0, sizeof(app_id));
	memcpy(app_id, &buf[cgrpid_len+1], appid_len);
	ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
	if (ret < 0)
		return ret;
	return orig_count;
}
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
#endif /* CONFIG_BLK_CGROUP_FC_APPID */
static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	&dev_attr_appid_store.attr,
#endif
	NULL
};

static const struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
};
static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be under this
	 * new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}
static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;

	spin_lock(&rport->lock);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: transport unloading: deleting ctrl\n",
			ctrl->cnum);
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	spin_unlock(&rport->lock);
}

static void
nvme_fc_cleanup_for_unload(void)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			nvme_fc_delete_controllers(rport);
		}
	}
}
static void __exit nvme_fc_exit_module(void)
{
	unsigned long flags;
	bool need_cleanup = false;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	nvme_fc_waiting_to_unload = true;
	if (!list_empty(&nvme_fc_lport_list)) {
		need_cleanup = true;
		nvme_fc_cleanup_for_unload();
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);
	if (need_cleanup) {
		pr_info("%s: waiting for ctlr deletes\n", __func__);
		wait_for_completion(&nvme_fc_unload_proceed);
		pr_info("%s: ctrl deletes complete\n", __func__);
	}

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");