// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					      &nvme_tcp_slock_key[0],
					      "sk_lock-AF_INET-NVME",
					      &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					      &nvme_tcp_slock_key[1],
					      "sk_lock-AF_INET6-NVME",
					      &nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif

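/*
 * Send-side state machine for a request: the command PDU goes first,
 * then (for a write answered by R2T) an H2C data PDU, the data pages
 * themselves, and finally the data digest when enabled.
 */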
enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request req;
	void *pdu;
	struct nvme_tcp_queue *queue;
	u32 data_len;
	u32 pdu_len;
	u32 pdu_sent;
	u32 h2cdata_left;
	u32 h2cdata_offset;
	u16 ttag;
	__le16 status;
	struct list_head entry;
	struct llist_node lentry;
	__le32 ddgst;

	struct bio *curr_bio;
	struct iov_iter iter;

	/* send state */
	size_t offset;
	size_t data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED = 0,
	NVME_TCP_Q_LIVE = 1,
	NVME_TCP_Q_POLLING = 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

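/*
 * One nvme_tcp_queue per NVMe queue pair, each bound to its own TCP
 * socket. The recv fields track progress through the PDU currently
 * being received; "request" points at the request being sent.
 */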
struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket *sock;
	struct work_struct io_work;
	int io_cpu;

	struct mutex queue_lock;
	struct mutex send_mutex;
	struct llist_head req_list;
	struct list_head send_list;

	/* recv state */
	void *pdu;
	int pdu_remaining;
	int pdu_offset;
	size_t data_remaining;
	size_t ddgst_remaining;
	unsigned int nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int queue_size;
	u32 maxh2cdata;
	size_t cmnd_capsule_len;
	struct nvme_tcp_ctrl *ctrl;
	unsigned long flags;
	bool rd_enabled;

	bool hdr_digest;
	bool data_digest;
	struct ahash_request *rcv_hash;
	struct ahash_request *snd_hash;
	__le32 exp_ddgst;
	__le32 recv_ddgst;

	struct page_frag_cache pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue *queues;
	struct blk_mq_tag_set tag_set;

	/* other member variables */
	struct list_head list;
	struct blk_mq_tag_set admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl ctrl;

	struct work_struct err_work;
	struct delayed_work connect_work;
	struct nvme_tcp_request async_req;
	u32 io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

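/*
 * The admin queue (qid 0) has its own tag set; I/O queue qid N maps to
 * entry N - 1 of the I/O tag set.
 */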
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

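/*
 * In-capsule (inline) data limit: fabrics connect commands always get
 * NVME_TCP_ADMIN_CCSZ; other commands get the negotiated capsule size
 * minus the command SQE itself.
 */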
static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
	if (nvme_is_fabrics(req->req.cmd))
		return NVME_TCP_ADMIN_CCSZ;
	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

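/*
 * (Re)build the request's bvec iterator for the current bio (or the
 * special payload), resuming at the bio's already-completed offset.
 */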
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list);
}

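/*
 * Submission path: requests are pushed onto the lockless req_list from
 * any context; the single consumer (io_work, or a submitter that won
 * send_mutex) splices them onto send_list and transmits in order.
 */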
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we're the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_send_all(queue);
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

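/*
 * Digest helpers: NVMe/TCP uses CRC32C for both the header digest
 * (HDGST, appended to the PDU header) and the data digest (DDGST,
 * appended to the PDU data).
 */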
static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

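/*
 * Receive state machine: the PDU header is always read first; any PDU
 * data follows, and a trailing data digest (when enabled) is consumed
 * last before rearming for the next PDU header.
 */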
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	req = blk_mq_rq_to_pdu(rq);
	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
		req->status = cqe->status;

	if (!nvme_try_complete_req(rq, req->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

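/*
 * Write path flow control: the controller grants transfer windows via
 * R2T PDUs; each grant is answered with H2C data PDUs no larger than
 * the MAXH2CDATA value negotiated at connection setup, so a single R2T
 * may require several H2C PDUs. Only the last one sets DATA_LAST.
 */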
static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u32 h2cdata_sent = req->pdu_len;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;
	req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
	req->pdu_sent = 0;
	req->h2cdata_left -= req->pdu_len;
	req->h2cdata_offset += h2cdata_sent;

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	if (!req->h2cdata_left)
		data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = req->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = cpu_to_le32(req->h2cdata_offset);
	data->data_length = cpu_to_le32(req->pdu_len);
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	u32 r2t_length = le32_to_cpu(pdu->r2t_length);
	u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	if (unlikely(!r2t_length)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, r2t_length);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, r2t_length, req->data_len, req->data_sent);
		return -EPROTO;
	}

	if (unlikely(r2t_offset < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, r2t_offset, req->data_sent);
		return -EPROTO;
	}

	req->pdu_len = 0;
	req->h2cdata_left = r2t_length;
	req->h2cdata_offset = r2t_offset;
	req->ttag = pdu->ttag;

	nvme_tcp_setup_h2c_data_pdu(req);
	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

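/*
 * Socket callbacks, installed over the TCP defaults at queue setup:
 * data_ready and write_space kick io_work on the queue's CPU instead of
 * doing work in softirq context; state_change starts error recovery on
 * unexpected socket states.
 */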
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	if (nvme_tcp_async_req(req)) {
		union nvme_result res = {};

		nvme_complete_async_event(&req->queue->ctrl->ctrl,
				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
	} else {
		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
				NVME_SC_HOST_PATH_ERROR);
	}
}

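/*
 * Data pages go out via kernel_sendpage() (zero-copy) when sendpage_ok()
 * allows it, falling back to sock_no_sendpage() (which copies) for pages
 * the network stack must not take references on, e.g. slab pages.
 */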
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;
	u32 h2cdata_left = req->h2cdata_left;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				if (h2cdata_left)
					nvme_tcp_setup_h2c_data_pdu(req);
				else
					nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	if (!req->h2cdata_left)
		ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
				offset_in_page(pdu) + req->offset, len,
				MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	else
		ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
				offset_in_page(pdu) + req->offset, len,
				MSG_DONTWAIT | MSG_MORE);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	u32 h2cdata_left = req->h2cdata_left;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		if (h2cdata_left)
			nvme_tcp_setup_h2c_data_pdu(req);
		else
			nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

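/*
 * Advance the current request through its send states (command PDU,
 * H2C PDU, data, data digest). Returns >0 when progress was made, 0 if
 * there is nothing to send or the socket is full (-EAGAIN), and a
 * negative errno on a fatal send error.
 */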
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending || !queue->rd_enabled)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct page *page;
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	if (queue->pf_cache.va) {
		page = virt_to_head_page(queue->pf_cache.va);
		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
		queue->pf_cache.va = NULL;
	}
	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
}

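/*
 * Connection setup: exchange ICReq/ICResp PDUs and verify that the
 * controller's digest settings, PDU data alignment (CPDA) and
 * advertised MAXH2CDATA are acceptable before the queue goes live.
 */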
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	u32 maxh2cdata;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	maxh2cdata = le32_to_cpu(icresp->maxdata);
	if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
		pr_err("queue %d: invalid maxh2cdata returned %u\n",
			nvme_tcp_queue_id(queue), maxh2cdata);
		goto free_icresp;
	}
	queue->maxh2cdata = maxh2cdata;

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ] +
				ctrl->io_queues[HCTX_TYPE_POLL];
}

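/*
 * Map each queue to a CPU: default, read and poll queues each count
 * from the start of their own range, so the three queue types are
 * spread independently across the online CPUs.
 */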
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	mutex_init(&queue->queue_lock);
	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
	mutex_init(&queue->send_mutex);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		goto err_destroy_mutex;
	}

	nvme_tcp_reclassify_socket(queue->sock);

	/* Single syn retry */
	tcp_sock_set_syncnt(queue->sock->sk, 1);

	/* Set TCP no delay */
	tcp_sock_set_nodelay(queue->sock->sk);

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(queue->sock->sk);

	if (so_priority > 0)
		sock_set_priority(queue->sock->sk, so_priority);

	/* Set socket type of service */
	if (nctrl->opts->tos >= 0)
		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);

	/* Set 10 seconds timeout for icresp recvmsg */
	queue->sock->sk->sk_rcvtimeo = 10 * HZ;

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	nvme_tcp_set_queue_io_cpu(queue);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
		char *iface = nctrl->opts->host_iface;
		sockptr_t optval = KERNEL_SOCKPTR(iface);

		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
				      optval, strlen(iface));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind to interface %s queue %d err %d\n",
				iface, qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(nctrl->device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(nctrl->device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(nctrl->device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
	queue->sock->sk->sk_ll_usec = 1;
#endif
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
err_destroy_mutex:
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	mutex_lock(&queue->queue_lock);
	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		__nvme_tcp_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = &nvme_tcp_admin_mq_ops;
	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = nctrl->numa_node;
	set->flags = BLK_MQ_F_BLOCKING;
	set->cmd_size = sizeof(struct nvme_tcp_request);
	set->driver_data = ctrl;
	set->nr_hw_queues = 1;
	set->timeout = NVME_ADMIN_TIMEOUT;
	ret = blk_mq_alloc_tag_set(set);
	if (!ret)
		nctrl->admin_tagset = set;
	return ret;
}

static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set = &ctrl->tag_set;
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = &nvme_tcp_mq_ops;
	set->queue_depth = nctrl->sqsize + 1;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = nctrl->numa_node;
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	set->cmd_size = sizeof(struct nvme_tcp_request);
	set->driver_data = ctrl;
	set->nr_hw_queues = nctrl->queue_count - 1;
	set->timeout = NVME_IO_TIMEOUT;
	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	ret = blk_mq_alloc_tag_set(set);
	if (!ret)
		nctrl->tagset = set;
	return ret;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
		cancel_work_sync(&ctrl->async_event_work);
		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
	}

	nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

1780static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1781{
1782 int ret;
1783
1784 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1785 if (ret)
1786 return ret;
1787
1788 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1789 if (ret)
1790 goto out_free_queue;
1791
1792 return 0;
1793
1794out_free_queue:
1795 nvme_tcp_free_queue(ctrl, 0);
1796 return ret;
1797}
1798
efb973b1 1799static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
3f2304f8
SG
1800{
1801 int i, ret;
1802
1803 for (i = 1; i < ctrl->queue_count; i++) {
a387935c 1804 ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
3f2304f8
SG
1805 if (ret)
1806 goto out_free_queues;
1807 }
1808
1809 return 0;
1810
1811out_free_queues:
1812 for (i--; i >= 1; i--)
1813 nvme_tcp_free_queue(ctrl, i);
1814
1815 return ret;
1816}
1817
1818static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1819{
873946f4
SG
1820 unsigned int nr_io_queues;
1821
1822 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1823 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1a9460ce 1824 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
873946f4
SG
1825
1826 return nr_io_queues;
3f2304f8
SG
1827}
1828
64861993
SG
1829static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1830 unsigned int nr_io_queues)
1831{
1832 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1833 struct nvmf_ctrl_options *opts = nctrl->opts;
1834
1835 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1836 /*
1837 * separate read/write queues
1838 * hand out dedicated default queues only after we have
1839 * sufficient read queues.
1840 */
1841 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1842 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1843 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1844 min(opts->nr_write_queues, nr_io_queues);
1845 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1846 } else {
1847 /*
1848 * shared read/write queues
1849 * either no write queues were requested, or we don't have
1850 * sufficient queue count to have dedicated default queues.
1851 */
1852 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1853 min(opts->nr_io_queues, nr_io_queues);
1854 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1855 }
1856
1857 if (opts->nr_poll_queues && nr_io_queues) {
1858 /* map dedicated poll queues only if we have queues left */
1859 ctrl->io_queues[HCTX_TYPE_POLL] =
1860 min(opts->nr_poll_queues, nr_io_queues);
1861 }
1862}
1863
1864static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1865{
1866 unsigned int nr_io_queues;
1867 int ret;
1868
1869 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1870 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1871 if (ret)
1872 return ret;
1873
1874 if (nr_io_queues == 0) {
1875 dev_err(ctrl->device,
1876 "unable to set any I/O queues\n");
1877 return -ENOMEM;
1878 }
1879
1880 ctrl->queue_count = nr_io_queues + 1;
1881 dev_info(ctrl->device,
1882 "creating %d I/O queues.\n", nr_io_queues);
1883
1884 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1885
1886 return __nvme_tcp_alloc_io_queues(ctrl);
1887}
1888
1889static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1890{
1891 nvme_tcp_stop_io_queues(ctrl);
1892 if (remove) {
1893 blk_mq_destroy_queue(ctrl->connect_q);
1894 blk_mq_free_tag_set(ctrl->tagset);
1895 }
1896 nvme_tcp_free_io_queues(ctrl);
1897}
1898
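/*
 * On initial setup (new == true) the tag set and connect queue are
 * allocated here; on reset/reconnect they are reused, and
 * nr_hw_queues is updated in case the controller granted a
 * different queue count this time around.
 */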
1899static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1900{
1901 int ret;
1902
1903 ret = nvme_tcp_alloc_io_queues(ctrl);
1904 if (ret)
1905 return ret;
1906
1907 if (new) {
1908 ret = nvme_tcp_alloc_tag_set(ctrl);
1909 if (ret)
1910 goto out_free_io_queues;
1911
1912 ret = nvme_ctrl_init_connect_q(ctrl);
1913 if (ret)
1914 goto out_free_tag_set;
1915 }
1916
1917 ret = nvme_tcp_start_io_queues(ctrl);
1918 if (ret)
1919 goto out_cleanup_connect_q;
1920
1921 if (!new) {
1922 nvme_start_queues(ctrl);
1923 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1924 /*
1925 * If we timed out waiting for freeze we are likely to
1926 * be stuck. Fail the controller initialization just
1927 * to be safe.
1928 */
1929 ret = -ENODEV;
1930 goto out_wait_freeze_timed_out;
1931 }
1932 blk_mq_update_nr_hw_queues(ctrl->tagset,
1933 ctrl->queue_count - 1);
1934 nvme_unfreeze(ctrl);
1935 }
1936
1937 return 0;
1938
1939out_wait_freeze_timed_out:
1940 nvme_stop_queues(ctrl);
1941 nvme_sync_io_queues(ctrl);
1942 nvme_tcp_stop_io_queues(ctrl);
1943out_cleanup_connect_q:
1944 nvme_cancel_tagset(ctrl);
1945 if (new)
1946 blk_mq_destroy_queue(ctrl->connect_q);
1947out_free_tag_set:
1948 if (new)
1949 blk_mq_free_tag_set(ctrl->tagset);
1950out_free_io_queues:
1951 nvme_tcp_free_io_queues(ctrl);
1952 return ret;
1953}
1954
1955static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1956{
1957 nvme_tcp_stop_queue(ctrl, 0);
1958 if (remove) {
1959 blk_mq_destroy_queue(ctrl->admin_q);
1960 blk_mq_destroy_queue(ctrl->fabrics_q);
1961 blk_mq_free_tag_set(ctrl->admin_tagset);
1962 }
1963 nvme_tcp_free_admin_queue(ctrl);
1964}
1965
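/*
 * Bring up the admin queue and enable the controller; the separate
 * fabrics queue carries the connect and property (register access)
 * commands.
 */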
1966static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1967{
1968 int error;
1969
1970 error = nvme_tcp_alloc_admin_queue(ctrl);
1971 if (error)
1972 return error;
1973
1974 if (new) {
1975 error = nvme_tcp_alloc_admin_tag_set(ctrl);
1976 if (error)
1977 goto out_free_queue;
1978
1979 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1980 if (IS_ERR(ctrl->fabrics_q)) {
1981 error = PTR_ERR(ctrl->fabrics_q);
1982 goto out_free_tagset;
1983 }
1984
1985 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1986 if (IS_ERR(ctrl->admin_q)) {
1987 error = PTR_ERR(ctrl->admin_q);
1988 goto out_cleanup_fabrics_q;
1989 }
1990 }
1991
1992 error = nvme_tcp_start_queue(ctrl, 0);
1993 if (error)
1994 goto out_cleanup_queue;
1995
1996 error = nvme_enable_ctrl(ctrl);
1997 if (error)
1998 goto out_stop_queue;
1999
2000 nvme_start_admin_queue(ctrl);
2001
2002 error = nvme_init_ctrl_finish(ctrl);
2003 if (error)
2004 goto out_quiesce_queue;
2005
2006 return 0;
2007
2008out_quiesce_queue:
2009 nvme_stop_admin_queue(ctrl);
2010 blk_sync_queue(ctrl->admin_q);
2011out_stop_queue:
2012 nvme_tcp_stop_queue(ctrl, 0);
2013 nvme_cancel_admin_tagset(ctrl);
2014out_cleanup_queue:
2015 if (new)
2016 blk_mq_destroy_queue(ctrl->admin_q);
2017out_cleanup_fabrics_q:
2018 if (new)
2019 blk_mq_destroy_queue(ctrl->fabrics_q);
2020out_free_tagset:
2021 if (new)
2022 blk_mq_free_tag_set(ctrl->admin_tagset);
2023out_free_queue:
2024 nvme_tcp_free_admin_queue(ctrl);
2025 return error;
2026}
2027
2028static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2029 bool remove)
2030{
2031 nvme_stop_admin_queue(ctrl);
2032 blk_sync_queue(ctrl->admin_q);
2033 nvme_tcp_stop_queue(ctrl, 0);
2034 nvme_cancel_admin_tagset(ctrl);
2035 if (remove)
2036 nvme_start_admin_queue(ctrl);
2037 nvme_tcp_destroy_admin_queue(ctrl, remove);
2038}
2039
2040static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2041 bool remove)
2042{
2043 if (ctrl->queue_count <= 1)
2044 return;
2045 nvme_stop_admin_queue(ctrl);
2046 nvme_start_freeze(ctrl);
2047 nvme_stop_queues(ctrl);
2048 nvme_sync_io_queues(ctrl);
2049 nvme_tcp_stop_io_queues(ctrl);
2050 nvme_cancel_tagset(ctrl);
2051 if (remove)
2052 nvme_start_queues(ctrl);
2053 nvme_tcp_destroy_io_queues(ctrl, remove);
2054}
2055
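/*
 * Once in CONNECTING state, either schedule another reconnect
 * attempt after reconnect_delay or, when the retry budget is
 * exhausted (nvmf_should_reconnect()), delete the controller.
 */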
2056static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2057{
2058 /* If we are resetting/deleting then do nothing */
2059 if (ctrl->state != NVME_CTRL_CONNECTING) {
2060 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2061 ctrl->state == NVME_CTRL_LIVE);
2062 return;
2063 }
2064
2065 if (nvmf_should_reconnect(ctrl)) {
2066 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2067 ctrl->opts->reconnect_delay);
2068 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2069 ctrl->opts->reconnect_delay * HZ);
2070 } else {
2071 dev_info(ctrl->device, "Removing controller...\n");
2072 nvme_delete_ctrl(ctrl);
2073 }
2074}
2075
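/*
 * Common bring-up path shared by create, reset and reconnect: admin
 * queue first, then capability validation (icdoff, SGL support,
 * queue sizing), then the I/O queues, then the LIVE transition.
 */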
2076static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2077{
2078 struct nvmf_ctrl_options *opts = ctrl->opts;
2079 int ret;
2080
2081 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2082 if (ret)
2083 return ret;
2084
2085 if (ctrl->icdoff) {
2086 ret = -EOPNOTSUPP;
2087 dev_err(ctrl->device, "icdoff is not supported!\n");
2088 goto destroy_admin;
2089 }
2090
2091 if (!nvme_ctrl_sgl_supported(ctrl)) {
2092 ret = -EOPNOTSUPP;
2093 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2094 goto destroy_admin;
2095 }
2096
2097 if (opts->queue_size > ctrl->sqsize + 1)
2098 dev_warn(ctrl->device,
2099 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2100 opts->queue_size, ctrl->sqsize + 1);
2101
2102 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2103 dev_warn(ctrl->device,
2104 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2105 ctrl->sqsize + 1, ctrl->maxcmd);
2106 ctrl->sqsize = ctrl->maxcmd - 1;
2107 }
2108
2109 if (ctrl->queue_count > 1) {
2110 ret = nvme_tcp_configure_io_queues(ctrl, new);
2111 if (ret)
2112 goto destroy_admin;
2113 }
2114
2115 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2116 /*
2117 * A state change failure is ok if we started ctrl delete,
2118 * unless it happens during creation of a new controller,
2119 * where it could race with the teardown flow.
2120 */
2121 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2122 ctrl->state != NVME_CTRL_DELETING_NOIO);
2123 WARN_ON_ONCE(new);
2124 ret = -EINVAL;
2125 goto destroy_io;
2126 }
2127
2128 nvme_start_ctrl(ctrl);
2129 return 0;
2130
2131destroy_io:
2132 if (ctrl->queue_count > 1) {
2133 nvme_stop_queues(ctrl);
2134 nvme_sync_io_queues(ctrl);
2135 nvme_tcp_stop_io_queues(ctrl);
2136 nvme_cancel_tagset(ctrl);
2137 nvme_tcp_destroy_io_queues(ctrl, new);
2138 }
2139destroy_admin:
2140 nvme_stop_admin_queue(ctrl);
2141 blk_sync_queue(ctrl->admin_q);
2142 nvme_tcp_stop_queue(ctrl, 0);
2143 nvme_cancel_admin_tagset(ctrl);
2144 nvme_tcp_destroy_admin_queue(ctrl, new);
2145 return ret;
2146}
2147
2148static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2149{
2150 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2151 struct nvme_tcp_ctrl, connect_work);
2152 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2153
2154 ++ctrl->nr_reconnects;
2155
2156 if (nvme_tcp_setup_ctrl(ctrl, false))
2157 goto requeue;
2158
2159 dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
2160 ctrl->nr_reconnects);
2161
2162 ctrl->nr_reconnects = 0;
2163
2164 return;
2165
2166requeue:
2167 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2168 ctrl->nr_reconnects);
2169 nvme_tcp_reconnect_or_remove(ctrl);
2170}
2171
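/*
 * Error recovery tears everything down with the queues unquiesced
 * so that pending requests fail fast, then moves the controller to
 * CONNECTING and re-runs the reconnect logic.
 */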
2172static void nvme_tcp_error_recovery_work(struct work_struct *work)
2173{
2174 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2175 struct nvme_tcp_ctrl, err_work);
2176 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2177
2178 nvme_auth_stop(ctrl);
2179 nvme_stop_keep_alive(ctrl);
2180 flush_work(&ctrl->async_event_work);
2181 nvme_tcp_teardown_io_queues(ctrl, false);
2182 /* unquiesce so that pending requests fail fast */
2183 nvme_start_queues(ctrl);
2184 nvme_tcp_teardown_admin_queue(ctrl, false);
2185 nvme_start_admin_queue(ctrl);
2186
2187 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2188 /* state change failure is ok if we started ctrl delete */
2189 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2190 ctrl->state != NVME_CTRL_DELETING_NOIO);
2191 return;
2192 }
2193
2194 nvme_tcp_reconnect_or_remove(ctrl);
2195}
2196
2197static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2198{
2199 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2200 nvme_stop_admin_queue(ctrl);
2201 if (shutdown)
2202 nvme_shutdown_ctrl(ctrl);
2203 else
2204 nvme_disable_ctrl(ctrl);
2205 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2206}
2207
2208static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2209{
2210 nvme_tcp_teardown_ctrl(ctrl, true);
2211}
2212
2213static void nvme_reset_ctrl_work(struct work_struct *work)
2214{
2215 struct nvme_ctrl *ctrl =
2216 container_of(work, struct nvme_ctrl, reset_work);
2217
2218 nvme_stop_ctrl(ctrl);
2219 nvme_tcp_teardown_ctrl(ctrl, false);
2220
2221 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2222 /* state change failure is ok if we started ctrl delete */
2223 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2224 ctrl->state != NVME_CTRL_DELETING_NOIO);
2225 return;
2226 }
2227
2228 if (nvme_tcp_setup_ctrl(ctrl, false))
2229 goto out_fail;
2230
2231 return;
2232
2233out_fail:
2234 ++ctrl->nr_reconnects;
2235 nvme_tcp_reconnect_or_remove(ctrl);
2236}
2237
2238static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2239{
2240 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2241 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2242}
2243
2244static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2245{
2246 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2247
2248 if (list_empty(&ctrl->list))
2249 goto free_ctrl;
2250
2251 mutex_lock(&nvme_tcp_ctrl_mutex);
2252 list_del(&ctrl->list);
2253 mutex_unlock(&nvme_tcp_ctrl_mutex);
2254
2255 nvmf_free_options(nctrl->opts);
2256free_ctrl:
2257 kfree(ctrl->queues);
2258 kfree(ctrl);
2259}
2260
2261static void nvme_tcp_set_sg_null(struct nvme_command *c)
2262{
2263 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2264
2265 sg->addr = 0;
2266 sg->length = 0;
2267 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2268 NVME_SGL_FMT_TRANSPORT_A;
2269}
2270
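/*
 * Inline (in-capsule) data uses an offset SGL: addr holds the
 * offset into the in-capsule data area (icdoff), not a host memory
 * address.
 */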
2271static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2272 struct nvme_command *c, u32 data_len)
2273{
2274 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2275
2276 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2277 sg->length = cpu_to_le32(data_len);
2278 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2279}
2280
2281static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2282 u32 data_len)
2283{
2284 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2285
2286 sg->addr = 0;
2287 sg->length = cpu_to_le32(data_len);
2288 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2289 NVME_SGL_FMT_TRANSPORT_A;
2290}
2291
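/*
 * The AER command is built by hand since no struct request backs
 * it; it uses the reserved command_id NVME_AQ_BLK_MQ_DEPTH and is
 * queued like any other request.
 */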
2292static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2293{
2294 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2295 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2296 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2297 struct nvme_command *cmd = &pdu->cmd;
2298 u8 hdgst = nvme_tcp_hdgst_len(queue);
2299
2300 memset(pdu, 0, sizeof(*pdu));
2301 pdu->hdr.type = nvme_tcp_cmd;
2302 if (queue->hdr_digest)
2303 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2304 pdu->hdr.hlen = sizeof(*pdu);
2305 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2306
2307 cmd->common.opcode = nvme_admin_async_event;
2308 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2309 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2310 nvme_tcp_set_sg_null(cmd);
2311
2312 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2313 ctrl->async_req.offset = 0;
2314 ctrl->async_req.curr_bio = NULL;
2315 ctrl->async_req.data_len = 0;
2316
2317 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2318}
2319
2320static void nvme_tcp_complete_timed_out(struct request *rq)
2321{
2322 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2323 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2324
2325 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2326 nvmf_complete_timed_out_request(rq);
2327}
2328
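/*
 * blk-mq timeout handler: outside of LIVE state the request is
 * completed here so it cannot stall teardown or bring-up; in LIVE
 * state we trigger error recovery and let it cancel the request.
 */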
2329static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2330{
2331 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2332 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2333 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2334
2335 dev_warn(ctrl->device,
2336 "queue %d: timeout request %#x type %d\n",
2337 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2338
2339 if (ctrl->state != NVME_CTRL_LIVE) {
2340 /*
2341 * If we are resetting, connecting or deleting we should
2342 * complete the request immediately, as it may block the
2343 * controller teardown or setup sequence:
2344 * - ctrl disable/shutdown fabrics requests
2345 * - connect requests
2346 * - initialization admin requests
2347 * - I/O requests that entered after unquiescing and
2348 * the controller stopped responding
2349 *
2350 * All other requests should be cancelled by the error
2351 * recovery work, so it's fine that we fail it here.
2352 */
2353 nvme_tcp_complete_timed_out(rq);
2354 return BLK_EH_DONE;
2355 }
2356
2357 /*
2358 * LIVE state should trigger the normal error recovery which will
2359 * handle completing this request.
2360 */
2361 nvme_tcp_error_recovery(ctrl);
3f2304f8
SG
2362 return BLK_EH_RESET_TIMER;
2363}
2364
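/*
 * Pick the SGL variant: a null SGL when the request carries no
 * data, an inline SGL for writes small enough to go in-capsule,
 * and a host-data transport SGL otherwise.
 */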
2365static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2366 struct request *rq)
2367{
2368 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2369 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2370 struct nvme_command *c = &pdu->cmd;
2371
2372 c->common.flags |= NVME_CMD_SGL_METABUF;
2373
2374 if (!blk_rq_nr_phys_segments(rq))
2375 nvme_tcp_set_sg_null(c);
2376 else if (rq_data_dir(rq) == WRITE &&
2377 req->data_len <= nvme_tcp_inline_data_size(req))
2378 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2379 else
2380 nvme_tcp_set_sg_host_data(c, req->data_len);
2381
2382 return 0;
2383}
2384
2385static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2386 struct request *rq)
2387{
2388 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2389 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2390 struct nvme_tcp_queue *queue = req->queue;
2391 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2392 blk_status_t ret;
2393
2394 ret = nvme_setup_cmd(ns, rq);
2395 if (ret)
2396 return ret;
2397
2398 req->state = NVME_TCP_SEND_CMD_PDU;
2399 req->status = cpu_to_le16(NVME_SC_SUCCESS);
2400 req->offset = 0;
2401 req->data_sent = 0;
2402 req->pdu_len = 0;
2403 req->pdu_sent = 0;
2404 req->h2cdata_left = 0;
2405 req->data_len = blk_rq_nr_phys_segments(rq) ?
2406 blk_rq_payload_bytes(rq) : 0;
2407 req->curr_bio = rq->bio;
2408 if (req->curr_bio && req->data_len)
2409 nvme_tcp_init_iter(req, rq_data_dir(rq));
2410
2411 if (rq_data_dir(rq) == WRITE &&
2412 req->data_len <= nvme_tcp_inline_data_size(req))
2413 req->pdu_len = req->data_len;
2414
2415 pdu->hdr.type = nvme_tcp_cmd;
2416 pdu->hdr.flags = 0;
2417 if (queue->hdr_digest)
2418 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2419 if (queue->data_digest && req->pdu_len) {
2420 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2421 ddgst = nvme_tcp_ddgst_len(queue);
2422 }
2423 pdu->hdr.hlen = sizeof(*pdu);
2424 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2425 pdu->hdr.plen =
2426 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2427
2428 ret = nvme_tcp_map_data(queue, rq);
2429 if (unlikely(ret)) {
2430 nvme_cleanup_cmd(rq);
2431 dev_err(queue->ctrl->ctrl.device,
2432 "Failed to map data (%d)\n", ret);
2433 return ret;
2434 }
2435
2436 return 0;
2437}
2438
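/*
 * blk-mq calls commit_rqs() to flush a batch of queued requests;
 * kick the queue's io_work once if the request list is non-empty.
 */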
2439static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2440{
2441 struct nvme_tcp_queue *queue = hctx->driver_data;
2442
2443 if (!llist_empty(&queue->req_list))
2444 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2445}
2446
2447static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2448 const struct blk_mq_queue_data *bd)
2449{
2450 struct nvme_ns *ns = hctx->queue->queuedata;
2451 struct nvme_tcp_queue *queue = hctx->driver_data;
2452 struct request *rq = bd->rq;
2453 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2454 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2455 blk_status_t ret;
2456
2457 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2458 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2459
2460 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2461 if (unlikely(ret))
2462 return ret;
2463
2464 blk_mq_start_request(rq);
2465
2466 nvme_tcp_queue_request(req, true, bd->last);
2467
2468 return BLK_STS_OK;
2469}
2470
2471static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2472{
2473 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2474 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2475
2476 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2477 /* separate read/write queues */
2478 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2479 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2480 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2481 set->map[HCTX_TYPE_READ].nr_queues =
2482 ctrl->io_queues[HCTX_TYPE_READ];
2483 set->map[HCTX_TYPE_READ].queue_offset =
2484 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2485 } else {
2486 /* shared read/write queues */
2487 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2488 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2489 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2490 set->map[HCTX_TYPE_READ].nr_queues =
2491 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2492 set->map[HCTX_TYPE_READ].queue_offset = 0;
2493 }
2494 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2495 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2496
2497 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2498 /* map dedicated poll queues only if we have queues left */
2499 set->map[HCTX_TYPE_POLL].nr_queues =
2500 ctrl->io_queues[HCTX_TYPE_POLL];
2501 set->map[HCTX_TYPE_POLL].queue_offset =
2502 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2503 ctrl->io_queues[HCTX_TYPE_READ];
2504 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2505 }
2506
2507 dev_info(ctrl->ctrl.device,
2508 "mapped %d/%d/%d default/read/poll queues.\n",
2509 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2510 ctrl->io_queues[HCTX_TYPE_READ],
2511 ctrl->io_queues[HCTX_TYPE_POLL]);
2512
2513 return 0;
2514}
2515
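/*
 * Polling path: busy-loop on the socket when possible, then reap
 * receive completions directly; the POLLING flag keeps the socket
 * callbacks from scheduling io_work in the meantime.
 */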
2516static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2517{
2518 struct nvme_tcp_queue *queue = hctx->driver_data;
2519 struct sock *sk = queue->sock->sk;
2520
2521 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2522 return 0;
2523
2524 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2525 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2526 sk_busy_loop(sk, true);
2527 nvme_tcp_try_recv(queue);
2528 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2529 return queue->nr_cqe;
2530}
2531
2532static const struct blk_mq_ops nvme_tcp_mq_ops = {
2533 .queue_rq = nvme_tcp_queue_rq,
2534 .commit_rqs = nvme_tcp_commit_rqs,
2535 .complete = nvme_complete_rq,
2536 .init_request = nvme_tcp_init_request,
2537 .exit_request = nvme_tcp_exit_request,
2538 .init_hctx = nvme_tcp_init_hctx,
2539 .timeout = nvme_tcp_timeout,
2540 .map_queues = nvme_tcp_map_queues,
2541 .poll = nvme_tcp_poll,
2542};
2543
2544static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2545 .queue_rq = nvme_tcp_queue_rq,
2546 .complete = nvme_complete_rq,
2547 .init_request = nvme_tcp_init_request,
2548 .exit_request = nvme_tcp_exit_request,
2549 .init_hctx = nvme_tcp_init_admin_hctx,
2550 .timeout = nvme_tcp_timeout,
2551};
2552
2553static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2554 .name = "tcp",
2555 .module = THIS_MODULE,
2556 .flags = NVME_F_FABRICS,
2557 .reg_read32 = nvmf_reg_read32,
2558 .reg_read64 = nvmf_reg_read64,
2559 .reg_write32 = nvmf_reg_write32,
2560 .free_ctrl = nvme_tcp_free_ctrl,
2561 .submit_async_event = nvme_tcp_submit_async_event,
2562 .delete_ctrl = nvme_tcp_delete_ctrl,
2563 .get_address = nvmf_get_address,
2564 .stop_ctrl = nvme_tcp_stop_ctrl,
2565};
2566
2567static bool
2568nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2569{
2570 struct nvme_tcp_ctrl *ctrl;
2571 bool found = false;
2572
2573 mutex_lock(&nvme_tcp_ctrl_mutex);
2574 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2575 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2576 if (found)
2577 break;
2578 }
2579 mutex_unlock(&nvme_tcp_ctrl_mutex);
2580
2581 return found;
2582}
2583
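/*
 * Transport entry point for "nvme connect": validates the address
 * options, rejects duplicate controllers, allocates the queue array
 * and runs the full setup before adding the controller to the list.
 */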
2584static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2585 struct nvmf_ctrl_options *opts)
2586{
2587 struct nvme_tcp_ctrl *ctrl;
2588 int ret;
2589
2590 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2591 if (!ctrl)
2592 return ERR_PTR(-ENOMEM);
2593
2594 INIT_LIST_HEAD(&ctrl->list);
2595 ctrl->ctrl.opts = opts;
2596 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2597 opts->nr_poll_queues + 1;
2598 ctrl->ctrl.sqsize = opts->queue_size - 1;
2599 ctrl->ctrl.kato = opts->kato;
2600
2601 INIT_DELAYED_WORK(&ctrl->connect_work,
2602 nvme_tcp_reconnect_ctrl_work);
2603 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2604 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2605
2606 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2607 opts->trsvcid =
2608 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2609 if (!opts->trsvcid) {
2610 ret = -ENOMEM;
2611 goto out_free_ctrl;
2612 }
2613 opts->mask |= NVMF_OPT_TRSVCID;
2614 }
2615
2616 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2617 opts->traddr, opts->trsvcid, &ctrl->addr);
2618 if (ret) {
2619 pr_err("malformed address passed: %s:%s\n",
2620 opts->traddr, opts->trsvcid);
2621 goto out_free_ctrl;
2622 }
2623
2624 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2625 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2626 opts->host_traddr, NULL, &ctrl->src_addr);
2627 if (ret) {
2628 pr_err("malformed src address passed: %s\n",
2629 opts->host_traddr);
2630 goto out_free_ctrl;
2631 }
2632 }
2633
2634 if (opts->mask & NVMF_OPT_HOST_IFACE) {
2635 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2636 pr_err("invalid interface passed: %s\n",
2637 opts->host_iface);
2638 ret = -ENODEV;
2639 goto out_free_ctrl;
2640 }
2641 }
2642
2643 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2644 ret = -EALREADY;
2645 goto out_free_ctrl;
2646 }
2647
2648 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2649 GFP_KERNEL);
2650 if (!ctrl->queues) {
2651 ret = -ENOMEM;
2652 goto out_free_ctrl;
2653 }
2654
2655 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2656 if (ret)
2657 goto out_kfree_queues;
2658
2659 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2660 WARN_ON_ONCE(1);
2661 ret = -EINTR;
2662 goto out_uninit_ctrl;
2663 }
2664
2665 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2666 if (ret)
2667 goto out_uninit_ctrl;
2668
2669 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2670 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
2671
2672 mutex_lock(&nvme_tcp_ctrl_mutex);
2673 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2674 mutex_unlock(&nvme_tcp_ctrl_mutex);
2675
2676 return &ctrl->ctrl;
2677
2678out_uninit_ctrl:
2679 nvme_uninit_ctrl(&ctrl->ctrl);
2680 nvme_put_ctrl(&ctrl->ctrl);
2681 if (ret > 0)
2682 ret = -EIO;
2683 return ERR_PTR(ret);
2684out_kfree_queues:
2685 kfree(ctrl->queues);
2686out_free_ctrl:
2687 kfree(ctrl);
2688 return ERR_PTR(ret);
2689}
2690
2691static struct nvmf_transport_ops nvme_tcp_transport = {
2692 .name = "tcp",
2693 .module = THIS_MODULE,
2694 .required_opts = NVMF_OPT_TRADDR,
2695 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2696 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2697 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2698 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2699 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
2700 .create_ctrl = nvme_tcp_create_ctrl,
2701};
2702
2703static int __init nvme_tcp_init_module(void)
2704{
2705 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2706 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2707 if (!nvme_tcp_wq)
2708 return -ENOMEM;
2709
2710 nvmf_register_transport(&nvme_tcp_transport);
2711 return 0;
2712}
2713
2714static void __exit nvme_tcp_cleanup_module(void)
2715{
2716 struct nvme_tcp_ctrl *ctrl;
2717
2718 nvmf_unregister_transport(&nvme_tcp_transport);
2719
2720 mutex_lock(&nvme_tcp_ctrl_mutex);
2721 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2722 nvme_delete_ctrl(&ctrl->ctrl);
2723 mutex_unlock(&nvme_tcp_ctrl_mutex);
2724 flush_workqueue(nvme_delete_wq);
2725
2726 destroy_workqueue(nvme_tcp_wq);
2727}
2728
2729module_init(nvme_tcp_init_module);
2730module_exit(nvme_tcp_cleanup_module);
2731
2732MODULE_LICENSE("GPL v2");