// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>
#include <trace/events/sock.h>

#include "nvme.h"
#include "fabrics.h"
struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
/*
 * TLS handshake timeout
 */
static int tls_handshake_timeout = 10;
#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
		"nvme TLS handshake timeout in seconds (default 10)");
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					      &nvme_tcp_slock_key[0],
					      "sk_lock-AF_INET-NVME",
					      &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					      &nvme_tcp_slock_key[1],
					      "sk_lock-AF_INET6-NVME",
					      &nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif
enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};
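
/*
 * Per-request send state machine, driven by nvme_tcp_try_send(): every
 * request starts in SEND_CMD_PDU; writes then either send inline data
 * (SEND_DATA) or wait for an R2T and go through SEND_H2C_PDU before
 * SEND_DATA; when data digest is enabled the final step is SEND_DDGST,
 * which emits the CRC32C trailer.
 */
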
struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u32			h2cdata_left;
	u32			h2cdata_offset;
	u16			ttag;
	__le16			status;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};
enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};
struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;

	/* send state */
	struct nvme_tcp_request *request;

	u32			maxh2cdata;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;
	struct completion	tls_complete;
	int			tls_err;
	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};
struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
{
	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
		return false;

	return ctrl->opts->tls;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
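
/*
 * Queue 0 is the admin queue and uses the admin tag set; I/O queues are
 * numbered from 1, hence the queue_idx - 1 indexing into the I/O tag set.
 */
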
static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
{
	return req->pdu;
}

static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
{
	/* use the pdu space in the back for the data pdu */
	return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
		sizeof(struct nvme_tcp_data_pdu);
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
	if (nvme_is_fabrics(req->req.cmd))
		return NVME_TCP_ADMIN_CCSZ;
	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
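
/*
 * In-capsule (inline) write data: fabrics commands always allow
 * NVME_TCP_ADMIN_CCSZ bytes, while I/O queues advertise
 * cmnd_capsule_len = ioccsz * 16 (see nvme_tcp_alloc_queue()), of which
 * the first sizeof(struct nvme_command) bytes are the SQE itself.
 */
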
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}
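
/*
 * The iterator wraps either the request's single special_vec (payloads
 * flagged RQF_SPECIAL_PAYLOAD, e.g. discard) or the bvecs of the current
 * bio, starting at bi_bvec_done so a partially consumed bio resumes at the
 * right offset.
 */
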
static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, ITER_SOURCE);
	}
}
static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list);
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list and we can grab the send
	 * lock, try to send directly; otherwise queue io_work. Only do the
	 * direct send when we are on the queue's io_cpu, so we don't
	 * introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_send_all(queue);
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
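
/*
 * Submission path in short: the blk-mq queue_rq path (and the AEN path)
 * adds requests to the lockless req_list; io_work, or the direct-send
 * shortcut above, later splices req_list onto send_list under send_mutex
 * via nvme_tcp_process_req_list()/nvme_tcp_fetch_request() below.
 */
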
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}
static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}
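
/*
 * Both digests are CRC32C (the "crc32c" ahash allocated in
 * nvme_tcp_alloc_crypto()): the header digest is computed over the PDU
 * header and stored directly behind it (pdu + len above), while the data
 * digest is accumulated page by page as payload is sent or received and
 * finalized into a 4-byte trailer.
 */
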
static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}
static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}
490 static void nvme_tcp_exit_request(struct blk_mq_tag_set
*set
,
491 struct request
*rq
, unsigned int hctx_idx
)
493 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
495 page_frag_free(req
->pdu
);
498 static int nvme_tcp_init_request(struct blk_mq_tag_set
*set
,
499 struct request
*rq
, unsigned int hctx_idx
,
500 unsigned int numa_node
)
502 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(set
->driver_data
);
503 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
504 struct nvme_tcp_cmd_pdu
*pdu
;
505 int queue_idx
= (set
== &ctrl
->tag_set
) ? hctx_idx
+ 1 : 0;
506 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[queue_idx
];
507 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
509 req
->pdu
= page_frag_alloc(&queue
->pf_cache
,
510 sizeof(struct nvme_tcp_cmd_pdu
) + hdgst
,
511 GFP_KERNEL
| __GFP_ZERO
);
517 nvme_req(rq
)->ctrl
= &ctrl
->ctrl
;
518 nvme_req(rq
)->cmd
= &pdu
->cmd
;
523 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx
*hctx
, void *data
,
524 unsigned int hctx_idx
)
526 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(data
);
527 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[hctx_idx
+ 1];
529 hctx
->driver_data
= queue
;
533 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx
*hctx
, void *data
,
534 unsigned int hctx_idx
)
536 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(data
);
537 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[0];
539 hctx
->driver_data
= queue
;
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
			NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}
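
/*
 * Receive side state machine, driven by nvme_tcp_recv_skb(): PDU header
 * bytes are consumed first (pdu_remaining), then any C2H payload
 * (data_remaining), then the data digest trailer (ddgst_remaining).
 * nvme_tcp_init_recv_ctx() re-arms the context for the next response PDU.
 */
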
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
569 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue
*queue
,
570 struct nvme_completion
*cqe
)
572 struct nvme_tcp_request
*req
;
575 rq
= nvme_find_rq(nvme_tcp_tagset(queue
), cqe
->command_id
);
577 dev_err(queue
->ctrl
->ctrl
.device
,
578 "got bad cqe.command_id %#x on queue %d\n",
579 cqe
->command_id
, nvme_tcp_queue_id(queue
));
580 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
584 req
= blk_mq_rq_to_pdu(rq
);
585 if (req
->status
== cpu_to_le16(NVME_SC_SUCCESS
))
586 req
->status
= cqe
->status
;
588 if (!nvme_try_complete_req(rq
, req
->status
, cqe
->result
))
589 nvme_complete_rq(rq
);
595 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue
*queue
,
596 struct nvme_tcp_data_pdu
*pdu
)
600 rq
= nvme_find_rq(nvme_tcp_tagset(queue
), pdu
->command_id
);
602 dev_err(queue
->ctrl
->ctrl
.device
,
603 "got bad c2hdata.command_id %#x on queue %d\n",
604 pdu
->command_id
, nvme_tcp_queue_id(queue
));
608 if (!blk_rq_payload_bytes(rq
)) {
609 dev_err(queue
->ctrl
->ctrl
.device
,
610 "queue %d tag %#x unexpected data\n",
611 nvme_tcp_queue_id(queue
), rq
->tag
);
615 queue
->data_remaining
= le32_to_cpu(pdu
->data_length
);
617 if (pdu
->hdr
.flags
& NVME_TCP_F_DATA_SUCCESS
&&
618 unlikely(!(pdu
->hdr
.flags
& NVME_TCP_F_DATA_LAST
))) {
619 dev_err(queue
->ctrl
->ctrl
.device
,
620 "queue %d tag %#x SUCCESS set but not last PDU\n",
621 nvme_tcp_queue_id(queue
), rq
->tag
);
622 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
629 static int nvme_tcp_handle_comp(struct nvme_tcp_queue
*queue
,
630 struct nvme_tcp_rsp_pdu
*pdu
)
632 struct nvme_completion
*cqe
= &pdu
->cqe
;
636 * AEN requests are special as they don't time out and can
637 * survive any kind of queue freeze and often don't respond to
638 * aborts. We don't even bother to allocate a struct request
639 * for them but rather special case them here.
641 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue
),
643 nvme_complete_async_event(&queue
->ctrl
->ctrl
, cqe
->status
,
646 ret
= nvme_tcp_process_nvme_cqe(queue
, cqe
);
651 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request
*req
)
653 struct nvme_tcp_data_pdu
*data
= nvme_tcp_req_data_pdu(req
);
654 struct nvme_tcp_queue
*queue
= req
->queue
;
655 struct request
*rq
= blk_mq_rq_from_pdu(req
);
656 u32 h2cdata_sent
= req
->pdu_len
;
657 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
658 u8 ddgst
= nvme_tcp_ddgst_len(queue
);
660 req
->state
= NVME_TCP_SEND_H2C_PDU
;
662 req
->pdu_len
= min(req
->h2cdata_left
, queue
->maxh2cdata
);
664 req
->h2cdata_left
-= req
->pdu_len
;
665 req
->h2cdata_offset
+= h2cdata_sent
;
667 memset(data
, 0, sizeof(*data
));
668 data
->hdr
.type
= nvme_tcp_h2c_data
;
669 if (!req
->h2cdata_left
)
670 data
->hdr
.flags
= NVME_TCP_F_DATA_LAST
;
671 if (queue
->hdr_digest
)
672 data
->hdr
.flags
|= NVME_TCP_F_HDGST
;
673 if (queue
->data_digest
)
674 data
->hdr
.flags
|= NVME_TCP_F_DDGST
;
675 data
->hdr
.hlen
= sizeof(*data
);
676 data
->hdr
.pdo
= data
->hdr
.hlen
+ hdgst
;
678 cpu_to_le32(data
->hdr
.hlen
+ hdgst
+ req
->pdu_len
+ ddgst
);
679 data
->ttag
= req
->ttag
;
680 data
->command_id
= nvme_cid(rq
);
681 data
->data_offset
= cpu_to_le32(req
->h2cdata_offset
);
682 data
->data_length
= cpu_to_le32(req
->pdu_len
);
685 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue
*queue
,
686 struct nvme_tcp_r2t_pdu
*pdu
)
688 struct nvme_tcp_request
*req
;
690 u32 r2t_length
= le32_to_cpu(pdu
->r2t_length
);
691 u32 r2t_offset
= le32_to_cpu(pdu
->r2t_offset
);
693 rq
= nvme_find_rq(nvme_tcp_tagset(queue
), pdu
->command_id
);
695 dev_err(queue
->ctrl
->ctrl
.device
,
696 "got bad r2t.command_id %#x on queue %d\n",
697 pdu
->command_id
, nvme_tcp_queue_id(queue
));
700 req
= blk_mq_rq_to_pdu(rq
);
702 if (unlikely(!r2t_length
)) {
703 dev_err(queue
->ctrl
->ctrl
.device
,
704 "req %d r2t len is %u, probably a bug...\n",
705 rq
->tag
, r2t_length
);
709 if (unlikely(req
->data_sent
+ r2t_length
> req
->data_len
)) {
710 dev_err(queue
->ctrl
->ctrl
.device
,
711 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
712 rq
->tag
, r2t_length
, req
->data_len
, req
->data_sent
);
716 if (unlikely(r2t_offset
< req
->data_sent
)) {
717 dev_err(queue
->ctrl
->ctrl
.device
,
718 "req %d unexpected r2t offset %u (expected %zu)\n",
719 rq
->tag
, r2t_offset
, req
->data_sent
);
724 req
->h2cdata_left
= r2t_length
;
725 req
->h2cdata_offset
= r2t_offset
;
726 req
->ttag
= pdu
->ttag
;
728 nvme_tcp_setup_h2c_data_pdu(req
);
729 nvme_tcp_queue_request(req
, false, true);
734 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue
*queue
, struct sk_buff
*skb
,
735 unsigned int *offset
, size_t *len
)
737 struct nvme_tcp_hdr
*hdr
;
738 char *pdu
= queue
->pdu
;
739 size_t rcv_len
= min_t(size_t, *len
, queue
->pdu_remaining
);
742 ret
= skb_copy_bits(skb
, *offset
,
743 &pdu
[queue
->pdu_offset
], rcv_len
);
747 queue
->pdu_remaining
-= rcv_len
;
748 queue
->pdu_offset
+= rcv_len
;
751 if (queue
->pdu_remaining
)
755 if (queue
->hdr_digest
) {
756 ret
= nvme_tcp_verify_hdgst(queue
, queue
->pdu
, hdr
->hlen
);
762 if (queue
->data_digest
) {
763 ret
= nvme_tcp_check_ddgst(queue
, queue
->pdu
);
769 case nvme_tcp_c2h_data
:
770 return nvme_tcp_handle_c2h_data(queue
, (void *)queue
->pdu
);
772 nvme_tcp_init_recv_ctx(queue
);
773 return nvme_tcp_handle_comp(queue
, (void *)queue
->pdu
);
775 nvme_tcp_init_recv_ctx(queue
);
776 return nvme_tcp_handle_r2t(queue
, (void *)queue
->pdu
);
778 dev_err(queue
->ctrl
->ctrl
.device
,
779 "unsupported pdu type (%d)\n", hdr
->type
);
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}
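
/*
 * NVMe status codes occupy bits 15:1 of the CQE status field (bit 0 is the
 * phase tag), hence the "status << 1" when synthesizing a completion here.
 */
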
792 static int nvme_tcp_recv_data(struct nvme_tcp_queue
*queue
, struct sk_buff
*skb
,
793 unsigned int *offset
, size_t *len
)
795 struct nvme_tcp_data_pdu
*pdu
= (void *)queue
->pdu
;
797 nvme_cid_to_rq(nvme_tcp_tagset(queue
), pdu
->command_id
);
798 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
803 recv_len
= min_t(size_t, *len
, queue
->data_remaining
);
807 if (!iov_iter_count(&req
->iter
)) {
808 req
->curr_bio
= req
->curr_bio
->bi_next
;
811 * If we don`t have any bios it means that controller
812 * sent more data than we requested, hence error
814 if (!req
->curr_bio
) {
815 dev_err(queue
->ctrl
->ctrl
.device
,
816 "queue %d no space in request %#x",
817 nvme_tcp_queue_id(queue
), rq
->tag
);
818 nvme_tcp_init_recv_ctx(queue
);
821 nvme_tcp_init_iter(req
, ITER_DEST
);
824 /* we can read only from what is left in this bio */
825 recv_len
= min_t(size_t, recv_len
,
826 iov_iter_count(&req
->iter
));
828 if (queue
->data_digest
)
829 ret
= skb_copy_and_hash_datagram_iter(skb
, *offset
,
830 &req
->iter
, recv_len
, queue
->rcv_hash
);
832 ret
= skb_copy_datagram_iter(skb
, *offset
,
833 &req
->iter
, recv_len
);
835 dev_err(queue
->ctrl
->ctrl
.device
,
836 "queue %d failed to copy request %#x data",
837 nvme_tcp_queue_id(queue
), rq
->tag
);
843 queue
->data_remaining
-= recv_len
;
846 if (!queue
->data_remaining
) {
847 if (queue
->data_digest
) {
848 nvme_tcp_ddgst_final(queue
->rcv_hash
, &queue
->exp_ddgst
);
849 queue
->ddgst_remaining
= NVME_TCP_DIGEST_LENGTH
;
851 if (pdu
->hdr
.flags
& NVME_TCP_F_DATA_SUCCESS
) {
852 nvme_tcp_end_request(rq
,
853 le16_to_cpu(req
->status
));
856 nvme_tcp_init_recv_ctx(queue
);
863 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue
*queue
,
864 struct sk_buff
*skb
, unsigned int *offset
, size_t *len
)
866 struct nvme_tcp_data_pdu
*pdu
= (void *)queue
->pdu
;
867 char *ddgst
= (char *)&queue
->recv_ddgst
;
868 size_t recv_len
= min_t(size_t, *len
, queue
->ddgst_remaining
);
869 off_t off
= NVME_TCP_DIGEST_LENGTH
- queue
->ddgst_remaining
;
872 ret
= skb_copy_bits(skb
, *offset
, &ddgst
[off
], recv_len
);
876 queue
->ddgst_remaining
-= recv_len
;
879 if (queue
->ddgst_remaining
)
882 if (queue
->recv_ddgst
!= queue
->exp_ddgst
) {
883 struct request
*rq
= nvme_cid_to_rq(nvme_tcp_tagset(queue
),
885 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
887 req
->status
= cpu_to_le16(NVME_SC_DATA_XFER_ERROR
);
889 dev_err(queue
->ctrl
->ctrl
.device
,
890 "data digest error: recv %#x expected %#x\n",
891 le32_to_cpu(queue
->recv_ddgst
),
892 le32_to_cpu(queue
->exp_ddgst
));
895 if (pdu
->hdr
.flags
& NVME_TCP_F_DATA_SUCCESS
) {
896 struct request
*rq
= nvme_cid_to_rq(nvme_tcp_tagset(queue
),
898 struct nvme_tcp_request
*req
= blk_mq_rq_to_pdu(rq
);
900 nvme_tcp_end_request(rq
, le16_to_cpu(req
->status
));
904 nvme_tcp_init_recv_ctx(queue
);
908 static int nvme_tcp_recv_skb(read_descriptor_t
*desc
, struct sk_buff
*skb
,
909 unsigned int offset
, size_t len
)
911 struct nvme_tcp_queue
*queue
= desc
->arg
.data
;
912 size_t consumed
= len
;
915 if (unlikely(!queue
->rd_enabled
))
919 switch (nvme_tcp_recv_state(queue
)) {
920 case NVME_TCP_RECV_PDU
:
921 result
= nvme_tcp_recv_pdu(queue
, skb
, &offset
, &len
);
923 case NVME_TCP_RECV_DATA
:
924 result
= nvme_tcp_recv_data(queue
, skb
, &offset
, &len
);
926 case NVME_TCP_RECV_DDGST
:
927 result
= nvme_tcp_recv_ddgst(queue
, skb
, &offset
, &len
);
933 dev_err(queue
->ctrl
->ctrl
.device
,
934 "receive failed: %d\n", result
);
935 queue
->rd_enabled
= false;
936 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
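
/*
 * nvme_tcp_recv_skb() is the ->read_sock() callback used by
 * nvme_tcp_try_recv(): it dispatches on the receive state machine
 * (PDU header -> data -> data digest) and returns the number of bytes it
 * consumed so the caller can keep feeding it the rest of the skb.
 */
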
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}
static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}
971 static void nvme_tcp_state_change(struct sock
*sk
)
973 struct nvme_tcp_queue
*queue
;
975 read_lock_bh(&sk
->sk_callback_lock
);
976 queue
= sk
->sk_user_data
;
980 switch (sk
->sk_state
) {
986 nvme_tcp_error_recovery(&queue
->ctrl
->ctrl
);
989 dev_info(queue
->ctrl
->ctrl
.device
,
990 "queue %d socket state %d\n",
991 nvme_tcp_queue_id(queue
), sk
->sk_state
);
994 queue
->state_change(sk
);
996 read_unlock_bh(&sk
->sk_callback_lock
);
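
/*
 * The three socket callbacks above replace the TCP defaults while a queue
 * is live; the originals are saved by nvme_tcp_setup_sock_ops() and put
 * back by nvme_tcp_restore_sock_ops() when the queue is stopped.
 */
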
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	if (nvme_tcp_async_req(req)) {
		union nvme_result res = {};

		nvme_complete_async_event(&req->queue->ctrl->ctrl,
				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
	} else {
		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
				NVME_SC_HOST_PATH_ERROR);
	}
}
1017 static int nvme_tcp_try_send_data(struct nvme_tcp_request
*req
)
1019 struct nvme_tcp_queue
*queue
= req
->queue
;
1020 int req_data_len
= req
->data_len
;
1021 u32 h2cdata_left
= req
->h2cdata_left
;
1024 struct bio_vec bvec
;
1025 struct msghdr msg
= {
1026 .msg_flags
= MSG_DONTWAIT
| MSG_SPLICE_PAGES
,
1028 struct page
*page
= nvme_tcp_req_cur_page(req
);
1029 size_t offset
= nvme_tcp_req_cur_offset(req
);
1030 size_t len
= nvme_tcp_req_cur_length(req
);
1031 bool last
= nvme_tcp_pdu_last_send(req
, len
);
1032 int req_data_sent
= req
->data_sent
;
1035 if (last
&& !queue
->data_digest
&& !nvme_tcp_queue_more(queue
))
1036 msg
.msg_flags
|= MSG_EOR
;
1038 msg
.msg_flags
|= MSG_MORE
;
1040 if (!sendpage_ok(page
))
1041 msg
.msg_flags
&= ~MSG_SPLICE_PAGES
;
1043 bvec_set_page(&bvec
, page
, len
, offset
);
1044 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1, len
);
1045 ret
= sock_sendmsg(queue
->sock
, &msg
);
1049 if (queue
->data_digest
)
1050 nvme_tcp_ddgst_update(queue
->snd_hash
, page
,
1054 * update the request iterator except for the last payload send
1055 * in the request where we don't want to modify it as we may
1056 * compete with the RX path completing the request.
1058 if (req_data_sent
+ ret
< req_data_len
)
1059 nvme_tcp_advance_req(req
, ret
);
1061 /* fully successful last send in current PDU */
1062 if (last
&& ret
== len
) {
1063 if (queue
->data_digest
) {
1064 nvme_tcp_ddgst_final(queue
->snd_hash
,
1066 req
->state
= NVME_TCP_SEND_DDGST
;
1070 nvme_tcp_setup_h2c_data_pdu(req
);
1072 nvme_tcp_done_send_req(queue
);
1080 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request
*req
)
1082 struct nvme_tcp_queue
*queue
= req
->queue
;
1083 struct nvme_tcp_cmd_pdu
*pdu
= nvme_tcp_req_cmd_pdu(req
);
1084 struct bio_vec bvec
;
1085 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
| MSG_SPLICE_PAGES
, };
1086 bool inline_data
= nvme_tcp_has_inline_data(req
);
1087 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
1088 int len
= sizeof(*pdu
) + hdgst
- req
->offset
;
1091 if (inline_data
|| nvme_tcp_queue_more(queue
))
1092 msg
.msg_flags
|= MSG_MORE
;
1094 msg
.msg_flags
|= MSG_EOR
;
1096 if (queue
->hdr_digest
&& !req
->offset
)
1097 nvme_tcp_hdgst(queue
->snd_hash
, pdu
, sizeof(*pdu
));
1099 bvec_set_virt(&bvec
, (void *)pdu
+ req
->offset
, len
);
1100 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1, len
);
1101 ret
= sock_sendmsg(queue
->sock
, &msg
);
1102 if (unlikely(ret
<= 0))
1108 req
->state
= NVME_TCP_SEND_DATA
;
1109 if (queue
->data_digest
)
1110 crypto_ahash_init(queue
->snd_hash
);
1112 nvme_tcp_done_send_req(queue
);
1121 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request
*req
)
1123 struct nvme_tcp_queue
*queue
= req
->queue
;
1124 struct nvme_tcp_data_pdu
*pdu
= nvme_tcp_req_data_pdu(req
);
1125 struct bio_vec bvec
;
1126 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
| MSG_MORE
, };
1127 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
1128 int len
= sizeof(*pdu
) - req
->offset
+ hdgst
;
1131 if (queue
->hdr_digest
&& !req
->offset
)
1132 nvme_tcp_hdgst(queue
->snd_hash
, pdu
, sizeof(*pdu
));
1134 if (!req
->h2cdata_left
)
1135 msg
.msg_flags
|= MSG_SPLICE_PAGES
;
1137 bvec_set_virt(&bvec
, (void *)pdu
+ req
->offset
, len
);
1138 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1, len
);
1139 ret
= sock_sendmsg(queue
->sock
, &msg
);
1140 if (unlikely(ret
<= 0))
1145 req
->state
= NVME_TCP_SEND_DATA
;
1146 if (queue
->data_digest
)
1147 crypto_ahash_init(queue
->snd_hash
);
1155 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request
*req
)
1157 struct nvme_tcp_queue
*queue
= req
->queue
;
1158 size_t offset
= req
->offset
;
1159 u32 h2cdata_left
= req
->h2cdata_left
;
1161 struct msghdr msg
= { .msg_flags
= MSG_DONTWAIT
};
1163 .iov_base
= (u8
*)&req
->ddgst
+ req
->offset
,
1164 .iov_len
= NVME_TCP_DIGEST_LENGTH
- req
->offset
1167 if (nvme_tcp_queue_more(queue
))
1168 msg
.msg_flags
|= MSG_MORE
;
1170 msg
.msg_flags
|= MSG_EOR
;
1172 ret
= kernel_sendmsg(queue
->sock
, &msg
, &iov
, 1, iov
.iov_len
);
1173 if (unlikely(ret
<= 0))
1176 if (offset
+ ret
== NVME_TCP_DIGEST_LENGTH
) {
1178 nvme_tcp_setup_h2c_data_pdu(req
);
1180 nvme_tcp_done_send_req(queue
);
1188 static int nvme_tcp_try_send(struct nvme_tcp_queue
*queue
)
1190 struct nvme_tcp_request
*req
;
1191 unsigned int noreclaim_flag
;
1194 if (!queue
->request
) {
1195 queue
->request
= nvme_tcp_fetch_request(queue
);
1196 if (!queue
->request
)
1199 req
= queue
->request
;
1201 noreclaim_flag
= memalloc_noreclaim_save();
1202 if (req
->state
== NVME_TCP_SEND_CMD_PDU
) {
1203 ret
= nvme_tcp_try_send_cmd_pdu(req
);
1206 if (!nvme_tcp_has_inline_data(req
))
1210 if (req
->state
== NVME_TCP_SEND_H2C_PDU
) {
1211 ret
= nvme_tcp_try_send_data_pdu(req
);
1216 if (req
->state
== NVME_TCP_SEND_DATA
) {
1217 ret
= nvme_tcp_try_send_data(req
);
1222 if (req
->state
== NVME_TCP_SEND_DDGST
)
1223 ret
= nvme_tcp_try_send_ddgst(req
);
1225 if (ret
== -EAGAIN
) {
1227 } else if (ret
< 0) {
1228 dev_err(queue
->ctrl
->ctrl
.device
,
1229 "failed to send request %d\n", ret
);
1230 nvme_tcp_fail_request(queue
->request
);
1231 nvme_tcp_done_send_req(queue
);
1234 memalloc_noreclaim_restore(noreclaim_flag
);
1238 static int nvme_tcp_try_recv(struct nvme_tcp_queue
*queue
)
1240 struct socket
*sock
= queue
->sock
;
1241 struct sock
*sk
= sock
->sk
;
1242 read_descriptor_t rd_desc
;
1245 rd_desc
.arg
.data
= queue
;
1249 consumed
= sock
->ops
->read_sock(sk
, &rd_desc
, nvme_tcp_recv_skb
);
1254 static void nvme_tcp_io_work(struct work_struct
*w
)
1256 struct nvme_tcp_queue
*queue
=
1257 container_of(w
, struct nvme_tcp_queue
, io_work
);
1258 unsigned long deadline
= jiffies
+ msecs_to_jiffies(1);
1261 bool pending
= false;
1264 if (mutex_trylock(&queue
->send_mutex
)) {
1265 result
= nvme_tcp_try_send(queue
);
1266 mutex_unlock(&queue
->send_mutex
);
1269 else if (unlikely(result
< 0))
1273 result
= nvme_tcp_try_recv(queue
);
1276 else if (unlikely(result
< 0))
1279 if (!pending
|| !queue
->rd_enabled
)
1282 } while (!time_after(jiffies
, deadline
)); /* quota is exhausted */
1284 queue_work_on(queue
->io_cpu
, nvme_tcp_wq
, &queue
->io_work
);
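
/*
 * nvme_tcp_io_work() above runs per-queue on queue->io_cpu: it alternates
 * non-blocking send and receive passes under a ~1ms deadline and requeues
 * itself whenever either direction still has work pending, so a busy queue
 * keeps exactly one worker running without monopolizing the CPU.
 */
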
1287 static void nvme_tcp_free_crypto(struct nvme_tcp_queue
*queue
)
1289 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(queue
->rcv_hash
);
1291 ahash_request_free(queue
->rcv_hash
);
1292 ahash_request_free(queue
->snd_hash
);
1293 crypto_free_ahash(tfm
);
1296 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue
*queue
)
1298 struct crypto_ahash
*tfm
;
1300 tfm
= crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC
);
1302 return PTR_ERR(tfm
);
1304 queue
->snd_hash
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1305 if (!queue
->snd_hash
)
1307 ahash_request_set_callback(queue
->snd_hash
, 0, NULL
, NULL
);
1309 queue
->rcv_hash
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1310 if (!queue
->rcv_hash
)
1312 ahash_request_set_callback(queue
->rcv_hash
, 0, NULL
, NULL
);
1316 ahash_request_free(queue
->snd_hash
);
1318 crypto_free_ahash(tfm
);
1322 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl
*ctrl
)
1324 struct nvme_tcp_request
*async
= &ctrl
->async_req
;
1326 page_frag_free(async
->pdu
);
1329 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl
*ctrl
)
1331 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[0];
1332 struct nvme_tcp_request
*async
= &ctrl
->async_req
;
1333 u8 hdgst
= nvme_tcp_hdgst_len(queue
);
1335 async
->pdu
= page_frag_alloc(&queue
->pf_cache
,
1336 sizeof(struct nvme_tcp_cmd_pdu
) + hdgst
,
1337 GFP_KERNEL
| __GFP_ZERO
);
1341 async
->queue
= &ctrl
->queues
[0];
1345 static void nvme_tcp_free_queue(struct nvme_ctrl
*nctrl
, int qid
)
1348 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1349 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[qid
];
1350 unsigned int noreclaim_flag
;
1352 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
))
1355 if (queue
->hdr_digest
|| queue
->data_digest
)
1356 nvme_tcp_free_crypto(queue
);
1358 if (queue
->pf_cache
.va
) {
1359 page
= virt_to_head_page(queue
->pf_cache
.va
);
1360 __page_frag_cache_drain(page
, queue
->pf_cache
.pagecnt_bias
);
1361 queue
->pf_cache
.va
= NULL
;
1364 noreclaim_flag
= memalloc_noreclaim_save();
1365 /* ->sock will be released by fput() */
1366 fput(queue
->sock
->file
);
1368 memalloc_noreclaim_restore(noreclaim_flag
);
1371 mutex_destroy(&queue
->send_mutex
);
1372 mutex_destroy(&queue
->queue_lock
);
1375 static int nvme_tcp_init_connection(struct nvme_tcp_queue
*queue
)
1377 struct nvme_tcp_icreq_pdu
*icreq
;
1378 struct nvme_tcp_icresp_pdu
*icresp
;
1379 char cbuf
[CMSG_LEN(sizeof(char))] = {};
1381 struct msghdr msg
= {};
1383 bool ctrl_hdgst
, ctrl_ddgst
;
1387 icreq
= kzalloc(sizeof(*icreq
), GFP_KERNEL
);
1391 icresp
= kzalloc(sizeof(*icresp
), GFP_KERNEL
);
1397 icreq
->hdr
.type
= nvme_tcp_icreq
;
1398 icreq
->hdr
.hlen
= sizeof(*icreq
);
1400 icreq
->hdr
.plen
= cpu_to_le32(icreq
->hdr
.hlen
);
1401 icreq
->pfv
= cpu_to_le16(NVME_TCP_PFV_1_0
);
1402 icreq
->maxr2t
= 0; /* single inflight r2t supported */
1403 icreq
->hpda
= 0; /* no alignment constraint */
1404 if (queue
->hdr_digest
)
1405 icreq
->digest
|= NVME_TCP_HDR_DIGEST_ENABLE
;
1406 if (queue
->data_digest
)
1407 icreq
->digest
|= NVME_TCP_DATA_DIGEST_ENABLE
;
1409 iov
.iov_base
= icreq
;
1410 iov
.iov_len
= sizeof(*icreq
);
1411 ret
= kernel_sendmsg(queue
->sock
, &msg
, &iov
, 1, iov
.iov_len
);
1413 pr_warn("queue %d: failed to send icreq, error %d\n",
1414 nvme_tcp_queue_id(queue
), ret
);
1418 memset(&msg
, 0, sizeof(msg
));
1419 iov
.iov_base
= icresp
;
1420 iov
.iov_len
= sizeof(*icresp
);
1421 if (nvme_tcp_tls(&queue
->ctrl
->ctrl
)) {
1422 msg
.msg_control
= cbuf
;
1423 msg
.msg_controllen
= sizeof(cbuf
);
1425 ret
= kernel_recvmsg(queue
->sock
, &msg
, &iov
, 1,
1426 iov
.iov_len
, msg
.msg_flags
);
1428 pr_warn("queue %d: failed to receive icresp, error %d\n",
1429 nvme_tcp_queue_id(queue
), ret
);
1433 if (nvme_tcp_tls(&queue
->ctrl
->ctrl
)) {
1434 ctype
= tls_get_record_type(queue
->sock
->sk
,
1435 (struct cmsghdr
*)cbuf
);
1436 if (ctype
!= TLS_RECORD_TYPE_DATA
) {
1437 pr_err("queue %d: unhandled TLS record %d\n",
1438 nvme_tcp_queue_id(queue
), ctype
);
1443 if (icresp
->hdr
.type
!= nvme_tcp_icresp
) {
1444 pr_err("queue %d: bad type returned %d\n",
1445 nvme_tcp_queue_id(queue
), icresp
->hdr
.type
);
1449 if (le32_to_cpu(icresp
->hdr
.plen
) != sizeof(*icresp
)) {
1450 pr_err("queue %d: bad pdu length returned %d\n",
1451 nvme_tcp_queue_id(queue
), icresp
->hdr
.plen
);
1455 if (icresp
->pfv
!= NVME_TCP_PFV_1_0
) {
1456 pr_err("queue %d: bad pfv returned %d\n",
1457 nvme_tcp_queue_id(queue
), icresp
->pfv
);
1461 ctrl_ddgst
= !!(icresp
->digest
& NVME_TCP_DATA_DIGEST_ENABLE
);
1462 if ((queue
->data_digest
&& !ctrl_ddgst
) ||
1463 (!queue
->data_digest
&& ctrl_ddgst
)) {
1464 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1465 nvme_tcp_queue_id(queue
),
1466 queue
->data_digest
? "enabled" : "disabled",
1467 ctrl_ddgst
? "enabled" : "disabled");
1471 ctrl_hdgst
= !!(icresp
->digest
& NVME_TCP_HDR_DIGEST_ENABLE
);
1472 if ((queue
->hdr_digest
&& !ctrl_hdgst
) ||
1473 (!queue
->hdr_digest
&& ctrl_hdgst
)) {
1474 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1475 nvme_tcp_queue_id(queue
),
1476 queue
->hdr_digest
? "enabled" : "disabled",
1477 ctrl_hdgst
? "enabled" : "disabled");
1481 if (icresp
->cpda
!= 0) {
1482 pr_err("queue %d: unsupported cpda returned %d\n",
1483 nvme_tcp_queue_id(queue
), icresp
->cpda
);
1487 maxh2cdata
= le32_to_cpu(icresp
->maxdata
);
1488 if ((maxh2cdata
% 4) || (maxh2cdata
< NVME_TCP_MIN_MAXH2CDATA
)) {
1489 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1490 nvme_tcp_queue_id(queue
), maxh2cdata
);
1493 queue
->maxh2cdata
= maxh2cdata
;
1503 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue
*queue
)
1505 return nvme_tcp_queue_id(queue
) == 0;
1508 static bool nvme_tcp_default_queue(struct nvme_tcp_queue
*queue
)
1510 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1511 int qid
= nvme_tcp_queue_id(queue
);
1513 return !nvme_tcp_admin_queue(queue
) &&
1514 qid
< 1 + ctrl
->io_queues
[HCTX_TYPE_DEFAULT
];
1517 static bool nvme_tcp_read_queue(struct nvme_tcp_queue
*queue
)
1519 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1520 int qid
= nvme_tcp_queue_id(queue
);
1522 return !nvme_tcp_admin_queue(queue
) &&
1523 !nvme_tcp_default_queue(queue
) &&
1524 qid
< 1 + ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] +
1525 ctrl
->io_queues
[HCTX_TYPE_READ
];
1528 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue
*queue
)
1530 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1531 int qid
= nvme_tcp_queue_id(queue
);
1533 return !nvme_tcp_admin_queue(queue
) &&
1534 !nvme_tcp_default_queue(queue
) &&
1535 !nvme_tcp_read_queue(queue
) &&
1536 qid
< 1 + ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] +
1537 ctrl
->io_queues
[HCTX_TYPE_READ
] +
1538 ctrl
->io_queues
[HCTX_TYPE_POLL
];
1541 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue
*queue
)
1543 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1544 int qid
= nvme_tcp_queue_id(queue
);
1547 if (nvme_tcp_default_queue(queue
))
1549 else if (nvme_tcp_read_queue(queue
))
1550 n
= qid
- ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] - 1;
1551 else if (nvme_tcp_poll_queue(queue
))
1552 n
= qid
- ctrl
->io_queues
[HCTX_TYPE_DEFAULT
] -
1553 ctrl
->io_queues
[HCTX_TYPE_READ
] - 1;
1554 queue
->io_cpu
= cpumask_next_wrap(n
- 1, cpu_online_mask
, -1, false);
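
/*
 * I/O queues are grouped by type: default (write) queues first, then read
 * queues, then poll queues. Each queue picks an online CPU based on its
 * index within its group, spreading the per-queue io_work contexts across
 * cores.
 */
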
1557 static void nvme_tcp_tls_done(void *data
, int status
, key_serial_t pskid
)
1559 struct nvme_tcp_queue
*queue
= data
;
1560 struct nvme_tcp_ctrl
*ctrl
= queue
->ctrl
;
1561 int qid
= nvme_tcp_queue_id(queue
);
1562 struct key
*tls_key
;
1564 dev_dbg(ctrl
->ctrl
.device
, "queue %d: TLS handshake done, key %x, status %d\n",
1565 qid
, pskid
, status
);
1568 queue
->tls_err
= -status
;
1572 tls_key
= key_lookup(pskid
);
1573 if (IS_ERR(tls_key
)) {
1574 dev_warn(ctrl
->ctrl
.device
, "queue %d: Invalid key %x\n",
1576 queue
->tls_err
= -ENOKEY
;
1578 ctrl
->ctrl
.tls_key
= tls_key
;
1583 complete(&queue
->tls_complete
);
1586 static int nvme_tcp_start_tls(struct nvme_ctrl
*nctrl
,
1587 struct nvme_tcp_queue
*queue
,
1590 int qid
= nvme_tcp_queue_id(queue
);
1592 struct tls_handshake_args args
;
1593 unsigned long tmo
= tls_handshake_timeout
* HZ
;
1594 key_serial_t keyring
= nvme_keyring_id();
1596 dev_dbg(nctrl
->device
, "queue %d: start TLS with key %x\n",
1598 memset(&args
, 0, sizeof(args
));
1599 args
.ta_sock
= queue
->sock
;
1600 args
.ta_done
= nvme_tcp_tls_done
;
1601 args
.ta_data
= queue
;
1602 args
.ta_my_peerids
[0] = pskid
;
1603 args
.ta_num_peerids
= 1;
1604 if (nctrl
->opts
->keyring
)
1605 keyring
= key_serial(nctrl
->opts
->keyring
);
1606 args
.ta_keyring
= keyring
;
1607 args
.ta_timeout_ms
= tls_handshake_timeout
* 1000;
1608 queue
->tls_err
= -EOPNOTSUPP
;
1609 init_completion(&queue
->tls_complete
);
1610 ret
= tls_client_hello_psk(&args
, GFP_KERNEL
);
1612 dev_err(nctrl
->device
, "queue %d: failed to start TLS: %d\n",
1616 ret
= wait_for_completion_interruptible_timeout(&queue
->tls_complete
, tmo
);
1621 dev_err(nctrl
->device
,
1622 "queue %d: TLS handshake failed, error %d\n",
1624 tls_handshake_cancel(queue
->sock
->sk
);
1626 dev_dbg(nctrl
->device
,
1627 "queue %d: TLS handshake complete, error %d\n",
1628 qid
, queue
->tls_err
);
1629 ret
= queue
->tls_err
;
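
/*
 * TLS is not negotiated by this driver itself: nvme_tcp_start_tls() hands
 * the connected socket to the net/handshake upcall via
 * tls_client_hello_psk(), a userspace agent (typically tlshd) performs the
 * PSK handshake, and nvme_tcp_tls_done() completes tls_complete with the
 * negotiated key or an error, bounded by tls_handshake_timeout seconds.
 */
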
1634 static int nvme_tcp_alloc_queue(struct nvme_ctrl
*nctrl
, int qid
,
1637 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1638 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[qid
];
1639 int ret
, rcv_pdu_size
;
1640 struct file
*sock_file
;
1642 mutex_init(&queue
->queue_lock
);
1644 init_llist_head(&queue
->req_list
);
1645 INIT_LIST_HEAD(&queue
->send_list
);
1646 mutex_init(&queue
->send_mutex
);
1647 INIT_WORK(&queue
->io_work
, nvme_tcp_io_work
);
1650 queue
->cmnd_capsule_len
= nctrl
->ioccsz
* 16;
1652 queue
->cmnd_capsule_len
= sizeof(struct nvme_command
) +
1653 NVME_TCP_ADMIN_CCSZ
;
1655 ret
= sock_create(ctrl
->addr
.ss_family
, SOCK_STREAM
,
1656 IPPROTO_TCP
, &queue
->sock
);
1658 dev_err(nctrl
->device
,
1659 "failed to create socket: %d\n", ret
);
1660 goto err_destroy_mutex
;
1663 sock_file
= sock_alloc_file(queue
->sock
, O_CLOEXEC
, NULL
);
1664 if (IS_ERR(sock_file
)) {
1665 ret
= PTR_ERR(sock_file
);
1666 goto err_destroy_mutex
;
1668 nvme_tcp_reclassify_socket(queue
->sock
);
1670 /* Single syn retry */
1671 tcp_sock_set_syncnt(queue
->sock
->sk
, 1);
1673 /* Set TCP no delay */
1674 tcp_sock_set_nodelay(queue
->sock
->sk
);
1677 * Cleanup whatever is sitting in the TCP transmit queue on socket
1678 * close. This is done to prevent stale data from being sent should
1679 * the network connection be restored before TCP times out.
1681 sock_no_linger(queue
->sock
->sk
);
1683 if (so_priority
> 0)
1684 sock_set_priority(queue
->sock
->sk
, so_priority
);
1686 /* Set socket type of service */
1687 if (nctrl
->opts
->tos
>= 0)
1688 ip_sock_set_tos(queue
->sock
->sk
, nctrl
->opts
->tos
);
1690 /* Set 10 seconds timeout for icresp recvmsg */
1691 queue
->sock
->sk
->sk_rcvtimeo
= 10 * HZ
;
1693 queue
->sock
->sk
->sk_allocation
= GFP_ATOMIC
;
1694 queue
->sock
->sk
->sk_use_task_frag
= false;
1695 nvme_tcp_set_queue_io_cpu(queue
);
1696 queue
->request
= NULL
;
1697 queue
->data_remaining
= 0;
1698 queue
->ddgst_remaining
= 0;
1699 queue
->pdu_remaining
= 0;
1700 queue
->pdu_offset
= 0;
1701 sk_set_memalloc(queue
->sock
->sk
);
1703 if (nctrl
->opts
->mask
& NVMF_OPT_HOST_TRADDR
) {
1704 ret
= kernel_bind(queue
->sock
, (struct sockaddr
*)&ctrl
->src_addr
,
1705 sizeof(ctrl
->src_addr
));
1707 dev_err(nctrl
->device
,
1708 "failed to bind queue %d socket %d\n",
1714 if (nctrl
->opts
->mask
& NVMF_OPT_HOST_IFACE
) {
1715 char *iface
= nctrl
->opts
->host_iface
;
1716 sockptr_t optval
= KERNEL_SOCKPTR(iface
);
1718 ret
= sock_setsockopt(queue
->sock
, SOL_SOCKET
, SO_BINDTODEVICE
,
1719 optval
, strlen(iface
));
1721 dev_err(nctrl
->device
,
1722 "failed to bind to interface %s queue %d err %d\n",
1728 queue
->hdr_digest
= nctrl
->opts
->hdr_digest
;
1729 queue
->data_digest
= nctrl
->opts
->data_digest
;
1730 if (queue
->hdr_digest
|| queue
->data_digest
) {
1731 ret
= nvme_tcp_alloc_crypto(queue
);
1733 dev_err(nctrl
->device
,
1734 "failed to allocate queue %d crypto\n", qid
);
1739 rcv_pdu_size
= sizeof(struct nvme_tcp_rsp_pdu
) +
1740 nvme_tcp_hdgst_len(queue
);
1741 queue
->pdu
= kmalloc(rcv_pdu_size
, GFP_KERNEL
);
1747 dev_dbg(nctrl
->device
, "connecting queue %d\n",
1748 nvme_tcp_queue_id(queue
));
1750 ret
= kernel_connect(queue
->sock
, (struct sockaddr
*)&ctrl
->addr
,
1751 sizeof(ctrl
->addr
), 0);
1753 dev_err(nctrl
->device
,
1754 "failed to connect socket: %d\n", ret
);
1758 /* If PSKs are configured try to start TLS */
1759 if (IS_ENABLED(CONFIG_NVME_TCP_TLS
) && pskid
) {
1760 ret
= nvme_tcp_start_tls(nctrl
, queue
, pskid
);
1762 goto err_init_connect
;
1765 ret
= nvme_tcp_init_connection(queue
);
1767 goto err_init_connect
;
1769 set_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
);
1774 kernel_sock_shutdown(queue
->sock
, SHUT_RDWR
);
1778 if (queue
->hdr_digest
|| queue
->data_digest
)
1779 nvme_tcp_free_crypto(queue
);
1781 /* ->sock will be released by fput() */
1782 fput(queue
->sock
->file
);
1785 mutex_destroy(&queue
->send_mutex
);
1786 mutex_destroy(&queue
->queue_lock
);
1790 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue
*queue
)
1792 struct socket
*sock
= queue
->sock
;
1794 write_lock_bh(&sock
->sk
->sk_callback_lock
);
1795 sock
->sk
->sk_user_data
= NULL
;
1796 sock
->sk
->sk_data_ready
= queue
->data_ready
;
1797 sock
->sk
->sk_state_change
= queue
->state_change
;
1798 sock
->sk
->sk_write_space
= queue
->write_space
;
1799 write_unlock_bh(&sock
->sk
->sk_callback_lock
);
1802 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue
*queue
)
1804 kernel_sock_shutdown(queue
->sock
, SHUT_RDWR
);
1805 nvme_tcp_restore_sock_ops(queue
);
1806 cancel_work_sync(&queue
->io_work
);
1809 static void nvme_tcp_stop_queue(struct nvme_ctrl
*nctrl
, int qid
)
1811 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1812 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[qid
];
1814 if (!test_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
))
1817 mutex_lock(&queue
->queue_lock
);
1818 if (test_and_clear_bit(NVME_TCP_Q_LIVE
, &queue
->flags
))
1819 __nvme_tcp_stop_queue(queue
);
1820 mutex_unlock(&queue
->queue_lock
);
1823 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue
*queue
)
1825 write_lock_bh(&queue
->sock
->sk
->sk_callback_lock
);
1826 queue
->sock
->sk
->sk_user_data
= queue
;
1827 queue
->state_change
= queue
->sock
->sk
->sk_state_change
;
1828 queue
->data_ready
= queue
->sock
->sk
->sk_data_ready
;
1829 queue
->write_space
= queue
->sock
->sk
->sk_write_space
;
1830 queue
->sock
->sk
->sk_data_ready
= nvme_tcp_data_ready
;
1831 queue
->sock
->sk
->sk_state_change
= nvme_tcp_state_change
;
1832 queue
->sock
->sk
->sk_write_space
= nvme_tcp_write_space
;
1833 #ifdef CONFIG_NET_RX_BUSY_POLL
1834 queue
->sock
->sk
->sk_ll_usec
= 1;
1836 write_unlock_bh(&queue
->sock
->sk
->sk_callback_lock
);
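
/*
 * With CONFIG_NET_RX_BUSY_POLL the socket is marked busy-poll capable
 * (sk_ll_usec = 1) so that polling queues can reap received data directly
 * from the socket instead of waiting for the data_ready callback.
 */
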
1839 static int nvme_tcp_start_queue(struct nvme_ctrl
*nctrl
, int idx
)
1841 struct nvme_tcp_ctrl
*ctrl
= to_tcp_ctrl(nctrl
);
1842 struct nvme_tcp_queue
*queue
= &ctrl
->queues
[idx
];
1845 queue
->rd_enabled
= true;
1846 nvme_tcp_init_recv_ctx(queue
);
1847 nvme_tcp_setup_sock_ops(queue
);
1850 ret
= nvmf_connect_io_queue(nctrl
, idx
);
1852 ret
= nvmf_connect_admin_queue(nctrl
);
1855 set_bit(NVME_TCP_Q_LIVE
, &queue
->flags
);
1857 if (test_bit(NVME_TCP_Q_ALLOCATED
, &queue
->flags
))
1858 __nvme_tcp_stop_queue(queue
);
1859 dev_err(nctrl
->device
,
1860 "failed to connect queue: %d ret=%d\n", idx
, ret
);
1865 static void nvme_tcp_free_admin_queue(struct nvme_ctrl
*ctrl
)
1867 if (to_tcp_ctrl(ctrl
)->async_req
.pdu
) {
1868 cancel_work_sync(&ctrl
->async_event_work
);
1869 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl
));
1870 to_tcp_ctrl(ctrl
)->async_req
.pdu
= NULL
;
1873 nvme_tcp_free_queue(ctrl
, 0);
1876 static void nvme_tcp_free_io_queues(struct nvme_ctrl
*ctrl
)
1880 for (i
= 1; i
< ctrl
->queue_count
; i
++)
1881 nvme_tcp_free_queue(ctrl
, i
);
1884 static void nvme_tcp_stop_io_queues(struct nvme_ctrl
*ctrl
)
1888 for (i
= 1; i
< ctrl
->queue_count
; i
++)
1889 nvme_tcp_stop_queue(ctrl
, i
);
1892 static int nvme_tcp_start_io_queues(struct nvme_ctrl
*ctrl
,
1893 int first
, int last
)
1897 for (i
= first
; i
< last
; i
++) {
1898 ret
= nvme_tcp_start_queue(ctrl
, i
);
1900 goto out_stop_queues
;
1906 for (i
--; i
>= first
; i
--)
1907 nvme_tcp_stop_queue(ctrl
, i
);
1911 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl
*ctrl
)
1914 key_serial_t pskid
= 0;
1916 if (nvme_tcp_tls(ctrl
)) {
1917 if (ctrl
->opts
->tls_key
)
1918 pskid
= key_serial(ctrl
->opts
->tls_key
);
1920 pskid
= nvme_tls_psk_default(ctrl
->opts
->keyring
,
1921 ctrl
->opts
->host
->nqn
,
1922 ctrl
->opts
->subsysnqn
);
1924 dev_err(ctrl
->device
, "no valid PSK found\n");
1926 goto out_free_queue
;
1930 ret
= nvme_tcp_alloc_queue(ctrl
, 0, pskid
);
1932 goto out_free_queue
;
1934 ret
= nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl
));
1936 goto out_free_queue
;
1941 nvme_tcp_free_queue(ctrl
, 0);
1945 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl
*ctrl
)
1949 if (nvme_tcp_tls(ctrl
) && !ctrl
->tls_key
) {
1950 dev_err(ctrl
->device
, "no PSK negotiated\n");
1953 for (i
= 1; i
< ctrl
->queue_count
; i
++) {
1954 ret
= nvme_tcp_alloc_queue(ctrl
, i
,
1955 key_serial(ctrl
->tls_key
));
1957 goto out_free_queues
;
1963 for (i
--; i
>= 1; i
--)
1964 nvme_tcp_free_queue(ctrl
, i
);
1969 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl
*ctrl
)
1971 unsigned int nr_io_queues
;
1974 nr_io_queues
= nvmf_nr_io_queues(ctrl
->opts
);
1975 ret
= nvme_set_queue_count(ctrl
, &nr_io_queues
);
1979 if (nr_io_queues
== 0) {
1980 dev_err(ctrl
->device
,
1981 "unable to set any I/O queues\n");
1985 ctrl
->queue_count
= nr_io_queues
+ 1;
1986 dev_info(ctrl
->device
,
1987 "creating %d I/O queues.\n", nr_io_queues
);
1989 nvmf_set_io_queues(ctrl
->opts
, nr_io_queues
,
1990 to_tcp_ctrl(ctrl
)->io_queues
);
1991 return __nvme_tcp_alloc_io_queues(ctrl
);
1994 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl
*ctrl
, bool remove
)
1996 nvme_tcp_stop_io_queues(ctrl
);
1998 nvme_remove_io_tag_set(ctrl
);
1999 nvme_tcp_free_io_queues(ctrl
);
2002 static int nvme_tcp_configure_io_queues(struct nvme_ctrl
*ctrl
, bool new)
2006 ret
= nvme_tcp_alloc_io_queues(ctrl
);
2011 ret
= nvme_alloc_io_tag_set(ctrl
, &to_tcp_ctrl(ctrl
)->tag_set
,
2013 ctrl
->opts
->nr_poll_queues
? HCTX_MAX_TYPES
: 2,
2014 sizeof(struct nvme_tcp_request
));
2016 goto out_free_io_queues
;
2020 * Only start IO queues for which we have allocated the tagset
2021 * and limitted it to the available queues. On reconnects, the
2022 * queue number might have changed.
2024 nr_queues
= min(ctrl
->tagset
->nr_hw_queues
+ 1, ctrl
->queue_count
);
2025 ret
= nvme_tcp_start_io_queues(ctrl
, 1, nr_queues
);
2027 goto out_cleanup_connect_q
;
2030 nvme_start_freeze(ctrl
);
2031 nvme_unquiesce_io_queues(ctrl
);
2032 if (!nvme_wait_freeze_timeout(ctrl
, NVME_IO_TIMEOUT
)) {
2034 * If we timed out waiting for freeze we are likely to
2035 * be stuck. Fail the controller initialization just
2039 nvme_unfreeze(ctrl
);
2040 goto out_wait_freeze_timed_out
;
2042 blk_mq_update_nr_hw_queues(ctrl
->tagset
,
2043 ctrl
->queue_count
- 1);
2044 nvme_unfreeze(ctrl
);
2048 * If the number of queues has increased (reconnect case)
2049 * start all new queues now.
2051 ret
= nvme_tcp_start_io_queues(ctrl
, nr_queues
,
2052 ctrl
->tagset
->nr_hw_queues
+ 1);
2054 goto out_wait_freeze_timed_out
;
2058 out_wait_freeze_timed_out
:
2059 nvme_quiesce_io_queues(ctrl
);
2060 nvme_sync_io_queues(ctrl
);
2061 nvme_tcp_stop_io_queues(ctrl
);
2062 out_cleanup_connect_q
:
2063 nvme_cancel_tagset(ctrl
);
2065 nvme_remove_io_tag_set(ctrl
);
2067 nvme_tcp_free_io_queues(ctrl
);
2071 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl
*ctrl
, bool remove
)
2073 nvme_tcp_stop_queue(ctrl
, 0);
2075 nvme_remove_admin_tag_set(ctrl
);
2076 nvme_tcp_free_admin_queue(ctrl
);
2079 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl
*ctrl
, bool new)
2083 error
= nvme_tcp_alloc_admin_queue(ctrl
);
2088 error
= nvme_alloc_admin_tag_set(ctrl
,
2089 &to_tcp_ctrl(ctrl
)->admin_tag_set
,
2090 &nvme_tcp_admin_mq_ops
,
2091 sizeof(struct nvme_tcp_request
));
2093 goto out_free_queue
;
2096 error
= nvme_tcp_start_queue(ctrl
, 0);
2098 goto out_cleanup_tagset
;
2100 error
= nvme_enable_ctrl(ctrl
);
2102 goto out_stop_queue
;
2104 nvme_unquiesce_admin_queue(ctrl
);
2106 error
= nvme_init_ctrl_finish(ctrl
, false);
2108 goto out_quiesce_queue
;
2113 nvme_quiesce_admin_queue(ctrl
);
2114 blk_sync_queue(ctrl
->admin_q
);
2116 nvme_tcp_stop_queue(ctrl
, 0);
2117 nvme_cancel_admin_tagset(ctrl
);
2120 nvme_remove_admin_tag_set(ctrl
);
2122 nvme_tcp_free_admin_queue(ctrl
);
2126 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl
*ctrl
,
2129 nvme_quiesce_admin_queue(ctrl
);
2130 blk_sync_queue(ctrl
->admin_q
);
2131 nvme_tcp_stop_queue(ctrl
, 0);
2132 nvme_cancel_admin_tagset(ctrl
);
2134 nvme_unquiesce_admin_queue(ctrl
);
2135 nvme_tcp_destroy_admin_queue(ctrl
, remove
);
2138 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl
*ctrl
,
2141 if (ctrl
->queue_count
<= 1)
2143 nvme_quiesce_admin_queue(ctrl
);
2144 nvme_quiesce_io_queues(ctrl
);
2145 nvme_sync_io_queues(ctrl
);
2146 nvme_tcp_stop_io_queues(ctrl
);
2147 nvme_cancel_tagset(ctrl
);
2149 nvme_unquiesce_io_queues(ctrl
);
2150 nvme_tcp_destroy_io_queues(ctrl
, remove
);
2153 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl
*ctrl
)
2155 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2157 /* If we are resetting/deleting then do nothing */
2158 if (state
!= NVME_CTRL_CONNECTING
) {
2159 WARN_ON_ONCE(state
== NVME_CTRL_NEW
|| state
== NVME_CTRL_LIVE
);
2163 if (nvmf_should_reconnect(ctrl
)) {
2164 dev_info(ctrl
->device
, "Reconnecting in %d seconds...\n",
2165 ctrl
->opts
->reconnect_delay
);
2166 queue_delayed_work(nvme_wq
, &to_tcp_ctrl(ctrl
)->connect_work
,
2167 ctrl
->opts
->reconnect_delay
* HZ
);
2169 dev_info(ctrl
->device
, "Removing controller...\n");
2170 nvme_delete_ctrl(ctrl
);
2174 static int nvme_tcp_setup_ctrl(struct nvme_ctrl
*ctrl
, bool new)
2176 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
2179 ret
= nvme_tcp_configure_admin_queue(ctrl
, new);
2185 dev_err(ctrl
->device
, "icdoff is not supported!\n");
2189 if (!nvme_ctrl_sgl_supported(ctrl
)) {
2191 dev_err(ctrl
->device
, "Mandatory sgls are not supported!\n");
2195 if (opts
->queue_size
> ctrl
->sqsize
+ 1)
2196 dev_warn(ctrl
->device
,
2197 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2198 opts
->queue_size
, ctrl
->sqsize
+ 1);
2200 if (ctrl
->sqsize
+ 1 > ctrl
->maxcmd
) {
2201 dev_warn(ctrl
->device
,
2202 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2203 ctrl
->sqsize
+ 1, ctrl
->maxcmd
);
2204 ctrl
->sqsize
= ctrl
->maxcmd
- 1;
2207 if (ctrl
->queue_count
> 1) {
2208 ret
= nvme_tcp_configure_io_queues(ctrl
, new);
2213 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_LIVE
)) {
2215 * state change failure is ok if we started ctrl delete,
2216 * unless we're during creation of a new controller to
2217 * avoid races with teardown flow.
2219 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2221 WARN_ON_ONCE(state
!= NVME_CTRL_DELETING
&&
2222 state
!= NVME_CTRL_DELETING_NOIO
);
2228 nvme_start_ctrl(ctrl
);
2232 if (ctrl
->queue_count
> 1) {
2233 nvme_quiesce_io_queues(ctrl
);
2234 nvme_sync_io_queues(ctrl
);
2235 nvme_tcp_stop_io_queues(ctrl
);
2236 nvme_cancel_tagset(ctrl
);
2237 nvme_tcp_destroy_io_queues(ctrl
, new);
2240 nvme_stop_keep_alive(ctrl
);
2241 nvme_tcp_teardown_admin_queue(ctrl
, false);
2245 static void nvme_tcp_reconnect_ctrl_work(struct work_struct
*work
)
2247 struct nvme_tcp_ctrl
*tcp_ctrl
= container_of(to_delayed_work(work
),
2248 struct nvme_tcp_ctrl
, connect_work
);
2249 struct nvme_ctrl
*ctrl
= &tcp_ctrl
->ctrl
;
2251 ++ctrl
->nr_reconnects
;
2253 if (nvme_tcp_setup_ctrl(ctrl
, false))
2256 dev_info(ctrl
->device
, "Successfully reconnected (%d attempt)\n",
2257 ctrl
->nr_reconnects
);
2259 ctrl
->nr_reconnects
= 0;
2264 dev_info(ctrl
->device
, "Failed reconnect attempt %d\n",
2265 ctrl
->nr_reconnects
);
2266 nvme_tcp_reconnect_or_remove(ctrl
);
2269 static void nvme_tcp_error_recovery_work(struct work_struct
*work
)
2271 struct nvme_tcp_ctrl
*tcp_ctrl
= container_of(work
,
2272 struct nvme_tcp_ctrl
, err_work
);
2273 struct nvme_ctrl
*ctrl
= &tcp_ctrl
->ctrl
;
2275 nvme_stop_keep_alive(ctrl
);
2276 flush_work(&ctrl
->async_event_work
);
2277 nvme_tcp_teardown_io_queues(ctrl
, false);
2278 /* unquiesce to fail fast pending requests */
2279 nvme_unquiesce_io_queues(ctrl
);
2280 nvme_tcp_teardown_admin_queue(ctrl
, false);
2281 nvme_unquiesce_admin_queue(ctrl
);
2282 nvme_auth_stop(ctrl
);
2284 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_CONNECTING
)) {
2285 /* state change failure is ok if we started ctrl delete */
2286 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2288 WARN_ON_ONCE(state
!= NVME_CTRL_DELETING
&&
2289 state
!= NVME_CTRL_DELETING_NOIO
);
2293 nvme_tcp_reconnect_or_remove(ctrl
);
2296 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl
*ctrl
, bool shutdown
)
2298 nvme_tcp_teardown_io_queues(ctrl
, shutdown
);
2299 nvme_quiesce_admin_queue(ctrl
);
2300 nvme_disable_ctrl(ctrl
, shutdown
);
2301 nvme_tcp_teardown_admin_queue(ctrl
, shutdown
);
2304 static void nvme_tcp_delete_ctrl(struct nvme_ctrl
*ctrl
)
2306 nvme_tcp_teardown_ctrl(ctrl
, true);
2309 static void nvme_reset_ctrl_work(struct work_struct
*work
)
2311 struct nvme_ctrl
*ctrl
=
2312 container_of(work
, struct nvme_ctrl
, reset_work
);
2314 nvme_stop_ctrl(ctrl
);
2315 nvme_tcp_teardown_ctrl(ctrl
, false);
2317 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_CONNECTING
)) {
2318 /* state change failure is ok if we started ctrl delete */
2319 enum nvme_ctrl_state state
= nvme_ctrl_state(ctrl
);
2321 WARN_ON_ONCE(state
!= NVME_CTRL_DELETING
&&
2322 state
!= NVME_CTRL_DELETING_NOIO
);
2326 if (nvme_tcp_setup_ctrl(ctrl
, false))
2332 ++ctrl
->nr_reconnects
;
2333 nvme_tcp_reconnect_or_remove(ctrl
);
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}
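
/*
 * Three SGL layouts are used in the command capsule:
 *  - a zero-length descriptor for commands without a data payload,
 *  - an offset-based data block descriptor for write data sent in-capsule
 *    (inline) right after the command PDU,
 *  - a transport SGL data descriptor for data carried in separate
 *    H2CData/C2HData PDUs.
 */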
static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}
static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}
static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}
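
/*
 * Async event (AER) commands are not backed by a struct request; the
 * pre-allocated ctrl->async_req is built by hand here and queued on the
 * admin queue (queue 0).
 */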
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}
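
/*
 * Forced completion of a timed-out request: stop the owning queue first so
 * the regular receive path can no longer complete the same request while it
 * is being failed here.
 */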
static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	nvmf_complete_timed_out_request(rq);
}
static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
	int qid = nvme_tcp_queue_id(req->queue);

	dev_warn(ctrl->device,
		 "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
		 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
		 opc, nvme_opcode_str(qid, opc, fctype));

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}
static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}
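
/*
 * PDU sizing: plen covers the 72-byte command PDU header (hlen), an optional
 * header digest, any in-capsule data (pdu_len) and an optional data digest.
 * Illustrative example, assuming CRC32C digests of 4 bytes each and 4096
 * bytes of inline write data:
 *   pdo  = 72 + 4 = 76 (offset of the data within the PDU)
 *   plen = 72 + 4 + 4096 + 4 = 4176
 */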
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->h2cdata_left = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}
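
/*
 * ->commit_rqs() is invoked by blk-mq when it dispatched a batch of requests
 * without marking the final one as last (e.g. the batch was cut short); kick
 * io_work so anything parked on req_list still gets sent.
 */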
static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	nvme_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
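
/*
 * Spread the default/read/poll hctx maps over the per-type I/O queue counts
 * (ctrl->io_queues[]) that were sized when the queues were allocated.
 */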
static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);

	nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
}
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}
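
/*
 * Backs the controller's "address" sysfs attribute: append the local source
 * address of the admin queue's socket to the nvmf_get_address() output while
 * that queue is live.
 */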
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
	struct sockaddr_storage src_addr;
	int ret, len;

	len = nvmf_get_address(ctrl, buf, size);

	mutex_lock(&queue->queue_lock);

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		goto done;
	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
	if (ret > 0) {
		if (len > 0)
			len--; /* strip trailing newline */
		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
				(len) ? "," : "", &src_addr);
	}
done:
	mutex_unlock(&queue->queue_lock);

	return len;
}
static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};
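
/* The admin tag set does no request batching, queue mapping or polling. */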
static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};
static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvme_tcp_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};
static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}
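
/*
 * The transport is driven from user space through the fabrics control
 * device; an illustrative nvme-cli invocation that ends up in
 * nvme_tcp_create_ctrl() with the parsed options:
 *   nvme connect -t tcp -a 192.168.0.10 -s 4420 -n <subsystem NQN>
 */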
static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
			  NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
	.create_ctrl	= nvme_tcp_create_ctrl,
};
static int __init nvme_tcp_init_module(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}
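
/*
 * Module unload: unregister the transport first so no new controllers can be
 * created, delete any remaining controllers, wait for the delete work to
 * drain, and only then destroy the workqueue.
 */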
static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");