IB/srp: Fix indirect data buffer rkey endianness
drivers/infiniband/ulp/srp/ib_srp.c
aef9ec39
RD
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
aef9ec39
RD
31 */
32
d236cd0e 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
e0bda7d8 34
aef9ec39
RD
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
de25968c 42#include <linux/jiffies.h>
56b5390c 43#include <rdma/ib_cache.h>
aef9ec39 44
60063497 45#include <linux/atomic.h>
aef9ec39
RD
46
47#include <scsi/scsi.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_dbg.h>
71444b97 50#include <scsi/scsi_tcq.h>
aef9ec39 51#include <scsi/srp.h>
3236822b 52#include <scsi/scsi_transport_srp.h>
aef9ec39 53
aef9ec39
RD
54#include "ib_srp.h"
55
56#define DRV_NAME "ib_srp"
57#define PFX DRV_NAME ": "
713ef24e
BVA
58#define DRV_VERSION "2.0"
59#define DRV_RELDATE "July 26, 2015"
aef9ec39
RD
60
61MODULE_AUTHOR("Roland Dreier");
33ab3e5b 62MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
aef9ec39 63MODULE_LICENSE("Dual BSD/GPL");
33ab3e5b
BVA
64MODULE_VERSION(DRV_VERSION);
65MODULE_INFO(release_date, DRV_RELDATE);
aef9ec39 66
49248644
DD
67static unsigned int srp_sg_tablesize;
68static unsigned int cmd_sg_entries;
c07d424d
DD
69static unsigned int indirect_sg_entries;
70static bool allow_ext_sg;
03f6fb93
BVA
71static bool prefer_fr = true;
72static bool register_always = true;
49248644 73static int topspin_workarounds = 1;
74b0a15b 74
49248644
DD
75module_param(srp_sg_tablesize, uint, 0444);
76MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
74b0a15b 77
49248644
DD
78module_param(cmd_sg_entries, uint, 0444);
79MODULE_PARM_DESC(cmd_sg_entries,
80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
aef9ec39 81
c07d424d
DD
82module_param(indirect_sg_entries, uint, 0444);
83MODULE_PARM_DESC(indirect_sg_entries,
84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85
86module_param(allow_ext_sg, bool, 0444);
87MODULE_PARM_DESC(allow_ext_sg,
88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89
aef9ec39
RD
90module_param(topspin_workarounds, int, 0444);
91MODULE_PARM_DESC(topspin_workarounds,
92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93
5cfb1782
BVA
94module_param(prefer_fr, bool, 0444);
95MODULE_PARM_DESC(prefer_fr,
96"Whether to use fast registration if both FMR and fast registration are supported");
97
b1b8854d
BVA
98module_param(register_always, bool, 0444);
99MODULE_PARM_DESC(register_always,
100 "Use memory registration even for contiguous memory regions");
101
9c27847d 102static const struct kernel_param_ops srp_tmo_ops;
ed9b2264 103
a95cadb9
BVA
104static int srp_reconnect_delay = 10;
105module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106 S_IRUGO | S_IWUSR);
107MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108
ed9b2264
BVA
109static int srp_fast_io_fail_tmo = 15;
110module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111 S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(fast_io_fail_tmo,
113 "Number of seconds between the observation of a transport"
114 " layer error and failing all I/O. \"off\" means that this"
115 " functionality is disabled.");
116
a95cadb9 117static int srp_dev_loss_tmo = 600;
ed9b2264
BVA
118module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119 S_IRUGO | S_IWUSR);
120MODULE_PARM_DESC(dev_loss_tmo,
121 "Maximum number of seconds that the SRP transport should"
122 " insulate transport layer errors. After this time has been"
123 " exceeded the SCSI host is removed. Should be"
124 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 " this functionality is disabled.");
127
d92c0da7
BVA
128static unsigned ch_count;
129module_param(ch_count, uint, 0444);
130MODULE_PARM_DESC(ch_count,
131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132
aef9ec39 133static void srp_add_one(struct ib_device *device);
7c1eb45a 134static void srp_remove_one(struct ib_device *device, void *client_data);
509c07bc
BVA
135static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
136static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
aef9ec39
RD
137static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138
3236822b 139static struct scsi_transport_template *ib_srp_transport_template;
bcc05910 140static struct workqueue_struct *srp_remove_wq;
3236822b 141
aef9ec39
RD
142static struct ib_client srp_client = {
143 .name = "srp",
144 .add = srp_add_one,
145 .remove = srp_remove_one
146};
147
c1a0b23b
MT
148static struct ib_sa_client srp_sa_client;
149
ed9b2264
BVA
150static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151{
152 int tmo = *(int *)kp->arg;
153
154 if (tmo >= 0)
155 return sprintf(buffer, "%d", tmo);
156 else
157 return sprintf(buffer, "off");
158}
159
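/*
 * Parse a new value for one of the SRP timeout module parameters and check
 * it against the other two timeouts (reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo) before storing it in *kp->arg.
 */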
160static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161{
162 int tmo, res;
163
3fdf70ac
SG
164 res = srp_parse_tmo(&tmo, val);
165 if (res)
166 goto out;
167
a95cadb9
BVA
168 if (kp->arg == &srp_reconnect_delay)
169 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
170 srp_dev_loss_tmo);
171 else if (kp->arg == &srp_fast_io_fail_tmo)
172 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
ed9b2264 173 else
a95cadb9
BVA
174 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
175 tmo);
ed9b2264
BVA
176 if (res)
177 goto out;
178 *(int *)kp->arg = tmo;
179
180out:
181 return res;
182}
183
9c27847d 184static const struct kernel_param_ops srp_tmo_ops = {
ed9b2264
BVA
185 .get = srp_tmo_get,
186 .set = srp_tmo_set,
187};
188
aef9ec39
RD
189static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
190{
191 return (struct srp_target_port *) host->hostdata;
192}
193
194static const char *srp_target_info(struct Scsi_Host *host)
195{
196 return host_to_target(host)->target_name;
197}
198
5d7cbfd6
RD
199static int srp_target_is_topspin(struct srp_target_port *target)
200{
201 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
3d1ff48d 202 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
5d7cbfd6
RD
203
204 return topspin_workarounds &&
3d1ff48d
RK
205 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
206 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
5d7cbfd6
RD
207}
208
aef9ec39
RD
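/*
 * Allocate an information unit of @size bytes, map its buffer for DMA in
 * the given direction and return it, or return NULL on failure.
 */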
209static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
210 gfp_t gfp_mask,
211 enum dma_data_direction direction)
212{
213 struct srp_iu *iu;
214
215 iu = kmalloc(sizeof *iu, gfp_mask);
216 if (!iu)
217 goto out;
218
219 iu->buf = kzalloc(size, gfp_mask);
220 if (!iu->buf)
221 goto out_free_iu;
222
05321937
GKH
223 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
224 direction);
225 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
aef9ec39
RD
226 goto out_free_buf;
227
228 iu->size = size;
229 iu->direction = direction;
230
231 return iu;
232
233out_free_buf:
234 kfree(iu->buf);
235out_free_iu:
236 kfree(iu);
237out:
238 return NULL;
239}
240
241static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
242{
243 if (!iu)
244 return;
245
05321937
GKH
246 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
247 iu->direction);
aef9ec39
RD
248 kfree(iu->buf);
249 kfree(iu);
250}
251
252static void srp_qp_event(struct ib_event *event, void *context)
253{
57363d98
SG
254 pr_debug("QP event %s (%d)\n",
255 ib_event_msg(event->event), event->event);
aef9ec39
RD
256}
257
258static int srp_init_qp(struct srp_target_port *target,
259 struct ib_qp *qp)
260{
261 struct ib_qp_attr *attr;
262 int ret;
263
264 attr = kmalloc(sizeof *attr, GFP_KERNEL);
265 if (!attr)
266 return -ENOMEM;
267
56b5390c
BVA
268 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
269 target->srp_host->port,
270 be16_to_cpu(target->pkey),
271 &attr->pkey_index);
aef9ec39
RD
272 if (ret)
273 goto out;
274
275 attr->qp_state = IB_QPS_INIT;
276 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
277 IB_ACCESS_REMOTE_WRITE);
278 attr->port_num = target->srp_host->port;
279
280 ret = ib_modify_qp(qp, attr,
281 IB_QP_STATE |
282 IB_QP_PKEY_INDEX |
283 IB_QP_ACCESS_FLAGS |
284 IB_QP_PORT);
285
286out:
287 kfree(attr);
288 return ret;
289}
290
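/*
 * Allocate a new CM ID for a channel, replace any existing one and
 * reinitialize the path information (SGID, DGID, P_Key and service ID)
 * from the target port.
 */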
509c07bc 291static int srp_new_cm_id(struct srp_rdma_ch *ch)
9fe4bcf4 292{
509c07bc 293 struct srp_target_port *target = ch->target;
9fe4bcf4
DD
294 struct ib_cm_id *new_cm_id;
295
05321937 296 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
509c07bc 297 srp_cm_handler, ch);
9fe4bcf4
DD
298 if (IS_ERR(new_cm_id))
299 return PTR_ERR(new_cm_id);
300
509c07bc
BVA
301 if (ch->cm_id)
302 ib_destroy_cm_id(ch->cm_id);
303 ch->cm_id = new_cm_id;
304 ch->path.sgid = target->sgid;
305 ch->path.dgid = target->orig_dgid;
306 ch->path.pkey = target->pkey;
307 ch->path.service_id = target->service_id;
9fe4bcf4
DD
308
309 return 0;
310}
311
d1b4289e
BVA
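/*
 * Create an FMR pool sized to the SCSI host can_queue depth, with the page
 * size taken from dev->mr_page_size.
 */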
312static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
313{
314 struct srp_device *dev = target->srp_host->srp_dev;
315 struct ib_fmr_pool_param fmr_param;
316
317 memset(&fmr_param, 0, sizeof(fmr_param));
318 fmr_param.pool_size = target->scsi_host->can_queue;
319 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
320 fmr_param.cache = 1;
52ede08f
BVA
321 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
322 fmr_param.page_shift = ilog2(dev->mr_page_size);
d1b4289e
BVA
323 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
324 IB_ACCESS_REMOTE_WRITE |
325 IB_ACCESS_REMOTE_READ);
326
327 return ib_create_fmr_pool(dev->pd, &fmr_param);
328}
329
5cfb1782
BVA
330/**
331 * srp_destroy_fr_pool() - free the resources owned by a pool
332 * @pool: Fast registration pool to be destroyed.
333 */
334static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
335{
336 int i;
337 struct srp_fr_desc *d;
338
339 if (!pool)
340 return;
341
342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
5cfb1782
BVA
343 if (d->mr)
344 ib_dereg_mr(d->mr);
345 }
346 kfree(pool);
347}
348
349/**
350 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
351 * @device: IB device to allocate fast registration descriptors for.
352 * @pd: Protection domain associated with the FR descriptors.
353 * @pool_size: Number of descriptors to allocate.
354 * @max_page_list_len: Maximum fast registration work request page list length.
355 */
356static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
357 struct ib_pd *pd, int pool_size,
358 int max_page_list_len)
359{
360 struct srp_fr_pool *pool;
361 struct srp_fr_desc *d;
362 struct ib_mr *mr;
5cfb1782
BVA
363 int i, ret = -EINVAL;
364
365 if (pool_size <= 0)
366 goto err;
367 ret = -ENOMEM;
368 pool = kzalloc(sizeof(struct srp_fr_pool) +
369 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
370 if (!pool)
371 goto err;
372 pool->size = pool_size;
373 pool->max_page_list_len = max_page_list_len;
374 spin_lock_init(&pool->lock);
375 INIT_LIST_HEAD(&pool->free_list);
376
377 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
563b67c5
SG
378 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
379 max_page_list_len);
5cfb1782
BVA
380 if (IS_ERR(mr)) {
381 ret = PTR_ERR(mr);
382 goto destroy_pool;
383 }
384 d->mr = mr;
5cfb1782
BVA
385 list_add_tail(&d->entry, &pool->free_list);
386 }
387
388out:
389 return pool;
390
391destroy_pool:
392 srp_destroy_fr_pool(pool);
393
394err:
395 pool = ERR_PTR(ret);
396 goto out;
397}
398
399/**
400 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
401 * @pool: Pool to obtain descriptor from.
402 */
403static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
404{
405 struct srp_fr_desc *d = NULL;
406 unsigned long flags;
407
408 spin_lock_irqsave(&pool->lock, flags);
409 if (!list_empty(&pool->free_list)) {
410 d = list_first_entry(&pool->free_list, typeof(*d), entry);
411 list_del(&d->entry);
412 }
413 spin_unlock_irqrestore(&pool->lock, flags);
414
415 return d;
416}
417
418/**
419 * srp_fr_pool_put() - put an FR descriptor back in the free list
420 * @pool: Pool the descriptor was allocated from.
421 * @desc: Pointer to an array of fast registration descriptor pointers.
422 * @n: Number of descriptors to put back.
423 *
424 * Note: The caller must already have queued an invalidation request for
425 * desc->mr->rkey before calling this function.
426 */
427static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
428 int n)
429{
430 unsigned long flags;
431 int i;
432
433 spin_lock_irqsave(&pool->lock, flags);
434 for (i = 0; i < n; i++)
435 list_add(&desc[i]->entry, &pool->free_list);
436 spin_unlock_irqrestore(&pool->lock, flags);
437}
438
439static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
440{
441 struct srp_device *dev = target->srp_host->srp_dev;
442
443 return srp_create_fr_pool(dev->dev, dev->pd,
444 target->scsi_host->can_queue,
445 dev->max_pages_per_mr);
446}
447
7dad6b2e
BVA
448/**
449 * srp_destroy_qp() - destroy an RDMA queue pair
450 * @ch: SRP RDMA channel.
451 *
452 * Change a queue pair into the error state and wait until all receive
453 * completions have been processed before destroying it. This prevents
454 * the receive completion handler from accessing the queue pair while it
455 * is being destroyed.
456 */
457static void srp_destroy_qp(struct srp_rdma_ch *ch)
458{
7dad6b2e
BVA
459 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
460 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
461 struct ib_recv_wr *bad_wr;
462 int ret;
463
464 /* Destroying a QP and reusing ch->done is only safe if not connected */
c014c8cd 465 WARN_ON_ONCE(ch->connected);
7dad6b2e
BVA
466
467 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
468 WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
469 if (ret)
470 goto out;
471
472 init_completion(&ch->done);
473 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
474 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
475 if (ret == 0)
476 wait_for_completion(&ch->done);
477
478out:
479 ib_destroy_qp(ch->qp);
480}
481
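/**
 * srp_create_ch_ib() - create the IB resources for an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Allocates the receive and send completion queues and the queue pair and,
 * depending on the device capabilities, a fast registration or FMR pool.
 * Resources previously owned by the channel are destroyed afterwards.
 */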
509c07bc 482static int srp_create_ch_ib(struct srp_rdma_ch *ch)
aef9ec39 483{
509c07bc 484 struct srp_target_port *target = ch->target;
62154b2e 485 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39 486 struct ib_qp_init_attr *init_attr;
73aa89ed
IR
487 struct ib_cq *recv_cq, *send_cq;
488 struct ib_qp *qp;
d1b4289e 489 struct ib_fmr_pool *fmr_pool = NULL;
5cfb1782 490 struct srp_fr_pool *fr_pool = NULL;
09c0c0be 491 const int m = dev->use_fast_reg ? 3 : 1;
8e37210b 492 struct ib_cq_init_attr cq_attr = {};
aef9ec39
RD
493 int ret;
494
495 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
496 if (!init_attr)
497 return -ENOMEM;
498
7dad6b2e 499 /* + 1 for SRP_LAST_WR_ID */
8e37210b
MB
500 cq_attr.cqe = target->queue_size + 1;
501 cq_attr.comp_vector = ch->comp_vector;
509c07bc 502 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
8e37210b 503 &cq_attr);
73aa89ed
IR
504 if (IS_ERR(recv_cq)) {
505 ret = PTR_ERR(recv_cq);
da9d2f07 506 goto err;
aef9ec39
RD
507 }
508
8e37210b
MB
509 cq_attr.cqe = m * target->queue_size;
510 cq_attr.comp_vector = ch->comp_vector;
509c07bc 511 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
8e37210b 512 &cq_attr);
73aa89ed
IR
513 if (IS_ERR(send_cq)) {
514 ret = PTR_ERR(send_cq);
da9d2f07 515 goto err_recv_cq;
9c03dc9f
BVA
516 }
517
73aa89ed 518 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
aef9ec39
RD
519
520 init_attr->event_handler = srp_qp_event;
5cfb1782 521 init_attr->cap.max_send_wr = m * target->queue_size;
7dad6b2e 522 init_attr->cap.max_recv_wr = target->queue_size + 1;
aef9ec39
RD
523 init_attr->cap.max_recv_sge = 1;
524 init_attr->cap.max_send_sge = 1;
5cfb1782 525 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
aef9ec39 526 init_attr->qp_type = IB_QPT_RC;
73aa89ed
IR
527 init_attr->send_cq = send_cq;
528 init_attr->recv_cq = recv_cq;
aef9ec39 529
62154b2e 530 qp = ib_create_qp(dev->pd, init_attr);
73aa89ed
IR
531 if (IS_ERR(qp)) {
532 ret = PTR_ERR(qp);
da9d2f07 533 goto err_send_cq;
aef9ec39
RD
534 }
535
73aa89ed 536 ret = srp_init_qp(target, qp);
da9d2f07
RD
537 if (ret)
538 goto err_qp;
aef9ec39 539
002f1567 540 if (dev->use_fast_reg) {
5cfb1782
BVA
541 fr_pool = srp_alloc_fr_pool(target);
542 if (IS_ERR(fr_pool)) {
543 ret = PTR_ERR(fr_pool);
544 shost_printk(KERN_WARNING, target->scsi_host, PFX
545 "FR pool allocation failed (%d)\n", ret);
546 goto err_qp;
547 }
002f1567 548 } else if (dev->use_fmr) {
d1b4289e
BVA
549 fmr_pool = srp_alloc_fmr_pool(target);
550 if (IS_ERR(fmr_pool)) {
551 ret = PTR_ERR(fmr_pool);
552 shost_printk(KERN_WARNING, target->scsi_host, PFX
553 "FMR pool allocation failed (%d)\n", ret);
554 goto err_qp;
555 }
d1b4289e
BVA
556 }
557
509c07bc 558 if (ch->qp)
7dad6b2e 559 srp_destroy_qp(ch);
509c07bc
BVA
560 if (ch->recv_cq)
561 ib_destroy_cq(ch->recv_cq);
562 if (ch->send_cq)
563 ib_destroy_cq(ch->send_cq);
73aa89ed 564
509c07bc
BVA
565 ch->qp = qp;
566 ch->recv_cq = recv_cq;
567 ch->send_cq = send_cq;
73aa89ed 568
7fbc67df
SG
569 if (dev->use_fast_reg) {
570 if (ch->fr_pool)
571 srp_destroy_fr_pool(ch->fr_pool);
572 ch->fr_pool = fr_pool;
573 } else if (dev->use_fmr) {
574 if (ch->fmr_pool)
575 ib_destroy_fmr_pool(ch->fmr_pool);
576 ch->fmr_pool = fmr_pool;
577 }
578
da9d2f07
RD
579 kfree(init_attr);
580 return 0;
581
582err_qp:
73aa89ed 583 ib_destroy_qp(qp);
da9d2f07
RD
584
585err_send_cq:
73aa89ed 586 ib_destroy_cq(send_cq);
da9d2f07
RD
587
588err_recv_cq:
73aa89ed 589 ib_destroy_cq(recv_cq);
da9d2f07
RD
590
591err:
aef9ec39
RD
592 kfree(init_attr);
593 return ret;
594}
595
4d73f95f
BVA
596/*
597 * Note: this function may be called without srp_alloc_iu_bufs() having been
509c07bc 598 * invoked. Hence the ch->[rt]x_ring checks.
4d73f95f 599 */
509c07bc
BVA
600static void srp_free_ch_ib(struct srp_target_port *target,
601 struct srp_rdma_ch *ch)
aef9ec39 602{
5cfb1782 603 struct srp_device *dev = target->srp_host->srp_dev;
aef9ec39
RD
604 int i;
605
d92c0da7
BVA
606 if (!ch->target)
607 return;
608
509c07bc
BVA
609 if (ch->cm_id) {
610 ib_destroy_cm_id(ch->cm_id);
611 ch->cm_id = NULL;
394c595e
BVA
612 }
613
d92c0da7
BVA
614 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
615 if (!ch->qp)
616 return;
617
5cfb1782 618 if (dev->use_fast_reg) {
509c07bc
BVA
619 if (ch->fr_pool)
620 srp_destroy_fr_pool(ch->fr_pool);
002f1567 621 } else if (dev->use_fmr) {
509c07bc
BVA
622 if (ch->fmr_pool)
623 ib_destroy_fmr_pool(ch->fmr_pool);
5cfb1782 624 }
7dad6b2e 625 srp_destroy_qp(ch);
509c07bc
BVA
626 ib_destroy_cq(ch->send_cq);
627 ib_destroy_cq(ch->recv_cq);
aef9ec39 628
d92c0da7
BVA
629 /*
630 * Prevent the SCSI error handler from using this channel after it
631 * has been freed. The SCSI error handler may continue trying to
632 * perform recovery actions even after scsi_remove_host() has
633 * returned.
634 */
635 ch->target = NULL;
636
509c07bc
BVA
637 ch->qp = NULL;
638 ch->send_cq = ch->recv_cq = NULL;
73aa89ed 639
509c07bc 640 if (ch->rx_ring) {
4d73f95f 641 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
642 srp_free_iu(target->srp_host, ch->rx_ring[i]);
643 kfree(ch->rx_ring);
644 ch->rx_ring = NULL;
4d73f95f 645 }
509c07bc 646 if (ch->tx_ring) {
4d73f95f 647 for (i = 0; i < target->queue_size; ++i)
509c07bc
BVA
648 srp_free_iu(target->srp_host, ch->tx_ring[i]);
649 kfree(ch->tx_ring);
650 ch->tx_ring = NULL;
4d73f95f 651 }
aef9ec39
RD
652}
653
654static void srp_path_rec_completion(int status,
655 struct ib_sa_path_rec *pathrec,
509c07bc 656 void *ch_ptr)
aef9ec39 657{
509c07bc
BVA
658 struct srp_rdma_ch *ch = ch_ptr;
659 struct srp_target_port *target = ch->target;
aef9ec39 660
509c07bc 661 ch->status = status;
aef9ec39 662 if (status)
7aa54bd7
DD
663 shost_printk(KERN_ERR, target->scsi_host,
664 PFX "Got failed path rec status %d\n", status);
aef9ec39 665 else
509c07bc
BVA
666 ch->path = *pathrec;
667 complete(&ch->done);
aef9ec39
RD
668}
669
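/**
 * srp_lookup_path() - look up an IB path record for a channel
 * @ch: SRP RDMA channel.
 *
 * Sends a path record query to the subnet administrator and waits for
 * srp_path_rec_completion() to fill in ch->path.
 */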
509c07bc 670static int srp_lookup_path(struct srp_rdma_ch *ch)
aef9ec39 671{
509c07bc 672 struct srp_target_port *target = ch->target;
a702adce
BVA
673 int ret;
674
509c07bc
BVA
675 ch->path.numb_path = 1;
676
677 init_completion(&ch->done);
678
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
680 target->srp_host->srp_dev->dev,
681 target->srp_host->port,
682 &ch->path,
683 IB_SA_PATH_REC_SERVICE_ID |
684 IB_SA_PATH_REC_DGID |
685 IB_SA_PATH_REC_SGID |
686 IB_SA_PATH_REC_NUMB_PATH |
687 IB_SA_PATH_REC_PKEY,
688 SRP_PATH_REC_TIMEOUT_MS,
689 GFP_KERNEL,
690 srp_path_rec_completion,
691 ch, &ch->path_query);
692 if (ch->path_query_id < 0)
693 return ch->path_query_id;
694
695 ret = wait_for_completion_interruptible(&ch->done);
a702adce
BVA
696 if (ret < 0)
697 return ret;
aef9ec39 698
509c07bc 699 if (ch->status < 0)
7aa54bd7
DD
700 shost_printk(KERN_WARNING, target->scsi_host,
701 PFX "Path record query failed\n");
aef9ec39 702
509c07bc 703 return ch->status;
aef9ec39
RD
704}
705
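/*
 * Build an SRP_LOGIN_REQ and send it via the CM. @multich selects
 * SRP_MULTICHAN_MULTI instead of SRP_MULTICHAN_SINGLE in the request flags.
 */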
d92c0da7 706static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
aef9ec39 707{
509c07bc 708 struct srp_target_port *target = ch->target;
aef9ec39
RD
709 struct {
710 struct ib_cm_req_param param;
711 struct srp_login_req priv;
712 } *req = NULL;
713 int status;
714
715 req = kzalloc(sizeof *req, GFP_KERNEL);
716 if (!req)
717 return -ENOMEM;
718
509c07bc 719 req->param.primary_path = &ch->path;
aef9ec39
RD
720 req->param.alternate_path = NULL;
721 req->param.service_id = target->service_id;
509c07bc
BVA
722 req->param.qp_num = ch->qp->qp_num;
723 req->param.qp_type = ch->qp->qp_type;
aef9ec39
RD
724 req->param.private_data = &req->priv;
725 req->param.private_data_len = sizeof req->priv;
726 req->param.flow_control = 1;
727
728 get_random_bytes(&req->param.starting_psn, 4);
729 req->param.starting_psn &= 0xffffff;
730
731 /*
732 * Pick some arbitrary defaults here; we could make these
733 * module parameters if anyone cared about setting them.
734 */
735 req->param.responder_resources = 4;
736 req->param.remote_cm_response_timeout = 20;
737 req->param.local_cm_response_timeout = 20;
7bb312e4 738 req->param.retry_count = target->tl_retry_count;
aef9ec39
RD
739 req->param.rnr_retry_count = 7;
740 req->param.max_cm_retries = 15;
741
742 req->priv.opcode = SRP_LOGIN_REQ;
743 req->priv.tag = 0;
49248644 744 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
aef9ec39
RD
745 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
746 SRP_BUF_FORMAT_INDIRECT);
d92c0da7
BVA
747 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
748 SRP_MULTICHAN_SINGLE);
0c0450db 749 /*
3cd96564 750 * In the published SRP specification (draft rev. 16a), the
0c0450db
R
751 * port identifier format is 8 bytes of ID extension followed
752 * by 8 bytes of GUID. Older drafts put the two halves in the
753 * opposite order, so that the GUID comes first.
754 *
755 * Targets conforming to these obsolete drafts can be
756 * recognized by the I/O Class they report.
757 */
758 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
759 memcpy(req->priv.initiator_port_id,
747fe000 760 &target->sgid.global.interface_id, 8);
0c0450db 761 memcpy(req->priv.initiator_port_id + 8,
01cb9bcb 762 &target->initiator_ext, 8);
0c0450db
R
763 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
764 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
765 } else {
766 memcpy(req->priv.initiator_port_id,
01cb9bcb
IR
767 &target->initiator_ext, 8);
768 memcpy(req->priv.initiator_port_id + 8,
747fe000 769 &target->sgid.global.interface_id, 8);
0c0450db
R
770 memcpy(req->priv.target_port_id, &target->id_ext, 8);
771 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
772 }
773
aef9ec39
RD
774 /*
775 * Topspin/Cisco SRP targets will reject our login unless we
01cb9bcb
IR
776 * zero out the first 8 bytes of our initiator port ID and set
777 * the second 8 bytes to the local node GUID.
aef9ec39 778 */
5d7cbfd6 779 if (srp_target_is_topspin(target)) {
7aa54bd7
DD
780 shost_printk(KERN_DEBUG, target->scsi_host,
781 PFX "Topspin/Cisco initiator port ID workaround "
782 "activated for target GUID %016llx\n",
45c37cad 783 be64_to_cpu(target->ioc_guid));
aef9ec39 784 memset(req->priv.initiator_port_id, 0, 8);
01cb9bcb 785 memcpy(req->priv.initiator_port_id + 8,
05321937 786 &target->srp_host->srp_dev->dev->node_guid, 8);
aef9ec39 787 }
aef9ec39 788
509c07bc 789 status = ib_send_cm_req(ch->cm_id, &req->param);
aef9ec39
RD
790
791 kfree(req);
792
793 return status;
794}
795
ef6c49d8
BVA
796static bool srp_queue_remove_work(struct srp_target_port *target)
797{
798 bool changed = false;
799
800 spin_lock_irq(&target->lock);
801 if (target->state != SRP_TARGET_REMOVED) {
802 target->state = SRP_TARGET_REMOVED;
803 changed = true;
804 }
805 spin_unlock_irq(&target->lock);
806
807 if (changed)
bcc05910 808 queue_work(srp_remove_wq, &target->remove_work);
ef6c49d8
BVA
809
810 return changed;
811}
812
aef9ec39
RD
813static void srp_disconnect_target(struct srp_target_port *target)
814{
d92c0da7
BVA
815 struct srp_rdma_ch *ch;
816 int i;
509c07bc 817
c014c8cd 818 /* XXX should send SRP_I_LOGOUT request */
aef9ec39 819
c014c8cd
BVA
820 for (i = 0; i < target->ch_count; i++) {
821 ch = &target->ch[i];
822 ch->connected = false;
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
824 shost_printk(KERN_DEBUG, target->scsi_host,
825 PFX "Sending CM DREQ failed\n");
294c875a 826 }
e6581056 827 }
aef9ec39
RD
828}
829
509c07bc
BVA
830static void srp_free_req_data(struct srp_target_port *target,
831 struct srp_rdma_ch *ch)
8f26c9ff 832{
5cfb1782
BVA
833 struct srp_device *dev = target->srp_host->srp_dev;
834 struct ib_device *ibdev = dev->dev;
8f26c9ff
DD
835 struct srp_request *req;
836 int i;
837
47513cf4 838 if (!ch->req_ring)
4d73f95f
BVA
839 return;
840
841 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 842 req = &ch->req_ring[i];
9a21be53 843 if (dev->use_fast_reg) {
5cfb1782 844 kfree(req->fr_list);
9a21be53 845 } else {
5cfb1782 846 kfree(req->fmr_list);
9a21be53
SG
847 kfree(req->map_page);
848 }
c07d424d
DD
849 if (req->indirect_dma_addr) {
850 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
851 target->indirect_size,
852 DMA_TO_DEVICE);
853 }
854 kfree(req->indirect_desc);
8f26c9ff 855 }
4d73f95f 856
509c07bc
BVA
857 kfree(ch->req_ring);
858 ch->req_ring = NULL;
8f26c9ff
DD
859}
860
509c07bc 861static int srp_alloc_req_data(struct srp_rdma_ch *ch)
b81d00bd 862{
509c07bc 863 struct srp_target_port *target = ch->target;
b81d00bd
BVA
864 struct srp_device *srp_dev = target->srp_host->srp_dev;
865 struct ib_device *ibdev = srp_dev->dev;
866 struct srp_request *req;
5cfb1782 867 void *mr_list;
b81d00bd
BVA
868 dma_addr_t dma_addr;
869 int i, ret = -ENOMEM;
870
509c07bc
BVA
871 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
872 GFP_KERNEL);
873 if (!ch->req_ring)
4d73f95f
BVA
874 goto out;
875
876 for (i = 0; i < target->req_ring_size; ++i) {
509c07bc 877 req = &ch->req_ring[i];
5cfb1782
BVA
878 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
879 GFP_KERNEL);
880 if (!mr_list)
881 goto out;
9a21be53 882 if (srp_dev->use_fast_reg) {
5cfb1782 883 req->fr_list = mr_list;
9a21be53 884 } else {
5cfb1782 885 req->fmr_list = mr_list;
9a21be53
SG
886 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
887 sizeof(void *), GFP_KERNEL);
888 if (!req->map_page)
889 goto out;
890 }
b81d00bd 891 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
5cfb1782 892 if (!req->indirect_desc)
b81d00bd
BVA
893 goto out;
894
895 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
896 target->indirect_size,
897 DMA_TO_DEVICE);
898 if (ib_dma_mapping_error(ibdev, dma_addr))
899 goto out;
900
901 req->indirect_dma_addr = dma_addr;
b81d00bd
BVA
902 }
903 ret = 0;
904
905out:
906 return ret;
907}
908
683b159a
BVA
909/**
910 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
911 * @shost: SCSI host whose attributes to remove from sysfs.
912 *
913 * Note: Any attributes defined in the host template and that did not exist
914 * before invocation of this function will be ignored.
915 */
916static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
917{
918 struct device_attribute **attr;
919
920 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
921 device_remove_file(&shost->shost_dev, *attr);
922}
923
ee12d6a8
BVA
924static void srp_remove_target(struct srp_target_port *target)
925{
d92c0da7
BVA
926 struct srp_rdma_ch *ch;
927 int i;
509c07bc 928
ef6c49d8
BVA
929 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
930
ee12d6a8 931 srp_del_scsi_host_attr(target->scsi_host);
9dd69a60 932 srp_rport_get(target->rport);
ee12d6a8
BVA
933 srp_remove_host(target->scsi_host);
934 scsi_remove_host(target->scsi_host);
93079162 935 srp_stop_rport_timers(target->rport);
ef6c49d8 936 srp_disconnect_target(target);
d92c0da7
BVA
937 for (i = 0; i < target->ch_count; i++) {
938 ch = &target->ch[i];
939 srp_free_ch_ib(target, ch);
940 }
c1120f89 941 cancel_work_sync(&target->tl_err_work);
9dd69a60 942 srp_rport_put(target->rport);
d92c0da7
BVA
943 for (i = 0; i < target->ch_count; i++) {
944 ch = &target->ch[i];
945 srp_free_req_data(target, ch);
946 }
947 kfree(target->ch);
948 target->ch = NULL;
65d7dd2f
VP
949
950 spin_lock(&target->srp_host->target_lock);
951 list_del(&target->list);
952 spin_unlock(&target->srp_host->target_lock);
953
ee12d6a8
BVA
954 scsi_host_put(target->scsi_host);
955}
956
c4028958 957static void srp_remove_work(struct work_struct *work)
aef9ec39 958{
c4028958 959 struct srp_target_port *target =
ef6c49d8 960 container_of(work, struct srp_target_port, remove_work);
aef9ec39 961
ef6c49d8 962 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
aef9ec39 963
96fc248a 964 srp_remove_target(target);
aef9ec39
RD
965}
966
dc1bdbd9
BVA
967static void srp_rport_delete(struct srp_rport *rport)
968{
969 struct srp_target_port *target = rport->lld_data;
970
971 srp_queue_remove_work(target);
972}
973
c014c8cd
BVA
974/**
975 * srp_connected_ch() - number of connected channels
976 * @target: SRP target port.
977 */
978static int srp_connected_ch(struct srp_target_port *target)
979{
980 int i, c = 0;
981
982 for (i = 0; i < target->ch_count; i++)
983 c += target->ch[i].connected;
984
985 return c;
986}
987
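/**
 * srp_connect_ch() - establish a connection for one RDMA channel
 * @ch:      SRP RDMA channel.
 * @multich: SRP multichannel flag passed on to srp_send_req().
 *
 * Retries the login as long as the target answers with a port or LID/QP
 * redirect and gives up on any other rejection.
 */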
d92c0da7 988static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
aef9ec39 989{
509c07bc 990 struct srp_target_port *target = ch->target;
aef9ec39
RD
991 int ret;
992
c014c8cd 993 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
294c875a 994
509c07bc 995 ret = srp_lookup_path(ch);
aef9ec39 996 if (ret)
4d59ad29 997 goto out;
aef9ec39
RD
998
999 while (1) {
509c07bc 1000 init_completion(&ch->done);
d92c0da7 1001 ret = srp_send_req(ch, multich);
aef9ec39 1002 if (ret)
4d59ad29 1003 goto out;
509c07bc 1004 ret = wait_for_completion_interruptible(&ch->done);
a702adce 1005 if (ret < 0)
4d59ad29 1006 goto out;
aef9ec39
RD
1007
1008 /*
1009 * The CM event handling code will set status to
1010 * SRP_PORT_REDIRECT if we get a port redirect REJ
1011 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1012 * redirect REJ back.
1013 */
4d59ad29
BVA
1014 ret = ch->status;
1015 switch (ret) {
aef9ec39 1016 case 0:
c014c8cd 1017 ch->connected = true;
4d59ad29 1018 goto out;
aef9ec39
RD
1019
1020 case SRP_PORT_REDIRECT:
509c07bc 1021 ret = srp_lookup_path(ch);
aef9ec39 1022 if (ret)
4d59ad29 1023 goto out;
aef9ec39
RD
1024 break;
1025
1026 case SRP_DLID_REDIRECT:
1027 break;
1028
9fe4bcf4 1029 case SRP_STALE_CONN:
9fe4bcf4 1030 shost_printk(KERN_ERR, target->scsi_host, PFX
205619f2 1031 "giving up on stale connection\n");
4d59ad29
BVA
1032 ret = -ECONNRESET;
1033 goto out;
9fe4bcf4 1034
aef9ec39 1035 default:
4d59ad29 1036 goto out;
aef9ec39
RD
1037 }
1038 }
4d59ad29
BVA
1039
1040out:
1041 return ret <= 0 ? ret : -ENODEV;
aef9ec39
RD
1042}
1043
509c07bc 1044static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
5cfb1782
BVA
1045{
1046 struct ib_send_wr *bad_wr;
1047 struct ib_send_wr wr = {
1048 .opcode = IB_WR_LOCAL_INV,
1049 .wr_id = LOCAL_INV_WR_ID_MASK,
1050 .next = NULL,
1051 .num_sge = 0,
1052 .send_flags = 0,
1053 .ex.invalidate_rkey = rkey,
1054 };
1055
509c07bc 1056 return ib_post_send(ch->qp, &wr, &bad_wr);
5cfb1782
BVA
1057}
1058
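/*
 * Invalidate or unmap the memory registrations set up by srp_map_data()
 * and unmap the scatterlist of a SCSI command.
 */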
d945e1df 1059static void srp_unmap_data(struct scsi_cmnd *scmnd,
509c07bc 1060 struct srp_rdma_ch *ch,
d945e1df
RD
1061 struct srp_request *req)
1062{
509c07bc 1063 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1064 struct srp_device *dev = target->srp_host->srp_dev;
1065 struct ib_device *ibdev = dev->dev;
1066 int i, res;
8f26c9ff 1067
bb350d1d 1068 if (!scsi_sglist(scmnd) ||
d945e1df
RD
1069 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1070 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1071 return;
1072
5cfb1782
BVA
1073 if (dev->use_fast_reg) {
1074 struct srp_fr_desc **pfr;
1075
1076 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
509c07bc 1077 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
5cfb1782
BVA
1078 if (res < 0) {
1079 shost_printk(KERN_ERR, target->scsi_host, PFX
1080 "Queueing INV WR for rkey %#x failed (%d)\n",
1081 (*pfr)->mr->rkey, res);
1082 queue_work(system_long_wq,
1083 &target->tl_err_work);
1084 }
1085 }
1086 if (req->nmdesc)
509c07bc 1087 srp_fr_pool_put(ch->fr_pool, req->fr_list,
5cfb1782 1088 req->nmdesc);
002f1567 1089 } else if (dev->use_fmr) {
5cfb1782
BVA
1090 struct ib_pool_fmr **pfmr;
1091
1092 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1093 ib_fmr_pool_unmap(*pfmr);
1094 }
f5358a17 1095
8f26c9ff
DD
1096 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1097 scmnd->sc_data_direction);
d945e1df
RD
1098}
1099
22032991
BVA
1100/**
1101 * srp_claim_req - Take ownership of the scmnd associated with a request.
509c07bc 1102 * @ch: SRP RDMA channel.
22032991 1103 * @req: SRP request.
b3fe628d 1104 * @sdev: If not NULL, only take ownership for this SCSI device.
22032991
BVA
1105 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1106 * ownership of @req->scmnd if it equals @scmnd.
1107 *
1108 * Return value:
1109 * Either NULL or a pointer to the SCSI command the caller became owner of.
1110 */
509c07bc 1111static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
22032991 1112 struct srp_request *req,
b3fe628d 1113 struct scsi_device *sdev,
22032991
BVA
1114 struct scsi_cmnd *scmnd)
1115{
1116 unsigned long flags;
1117
509c07bc 1118 spin_lock_irqsave(&ch->lock, flags);
b3fe628d
BVA
1119 if (req->scmnd &&
1120 (!sdev || req->scmnd->device == sdev) &&
1121 (!scmnd || req->scmnd == scmnd)) {
22032991
BVA
1122 scmnd = req->scmnd;
1123 req->scmnd = NULL;
22032991
BVA
1124 } else {
1125 scmnd = NULL;
1126 }
509c07bc 1127 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1128
1129 return scmnd;
1130}
1131
1132/**
1133 * srp_free_req() - Unmap data and add request to the free request list.
509c07bc 1134 * @ch: SRP RDMA channel.
af24663b
BVA
1135 * @req: Request to be freed.
1136 * @scmnd: SCSI command associated with @req.
1137 * @req_lim_delta: Amount to be added to @target->req_lim.
22032991 1138 */
509c07bc
BVA
1139static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1140 struct scsi_cmnd *scmnd, s32 req_lim_delta)
526b4caa 1141{
94a9174c
BVA
1142 unsigned long flags;
1143
509c07bc 1144 srp_unmap_data(scmnd, ch, req);
22032991 1145
509c07bc
BVA
1146 spin_lock_irqsave(&ch->lock, flags);
1147 ch->req_lim += req_lim_delta;
509c07bc 1148 spin_unlock_irqrestore(&ch->lock, flags);
526b4caa
IR
1149}
1150
509c07bc
BVA
1151static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1152 struct scsi_device *sdev, int result)
526b4caa 1153{
509c07bc 1154 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
22032991
BVA
1155
1156 if (scmnd) {
509c07bc 1157 srp_free_req(ch, req, scmnd, 0);
ed9b2264 1158 scmnd->result = result;
22032991 1159 scmnd->scsi_done(scmnd);
22032991 1160 }
526b4caa
IR
1161}
1162
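/*
 * Finish all outstanding requests on all channels of a target with status
 * DID_TRANSPORT_FAILFAST.
 */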
ed9b2264 1163static void srp_terminate_io(struct srp_rport *rport)
aef9ec39 1164{
ed9b2264 1165 struct srp_target_port *target = rport->lld_data;
d92c0da7 1166 struct srp_rdma_ch *ch;
b3fe628d
BVA
1167 struct Scsi_Host *shost = target->scsi_host;
1168 struct scsi_device *sdev;
d92c0da7 1169 int i, j;
ed9b2264 1170
b3fe628d
BVA
1171 /*
1172 * Invoking srp_terminate_io() while srp_queuecommand() is running
1173 * is not safe. Hence the warning statement below.
1174 */
1175 shost_for_each_device(sdev, shost)
1176 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1177
d92c0da7
BVA
1178 for (i = 0; i < target->ch_count; i++) {
1179 ch = &target->ch[i];
509c07bc 1180
d92c0da7
BVA
1181 for (j = 0; j < target->req_ring_size; ++j) {
1182 struct srp_request *req = &ch->req_ring[j];
1183
1184 srp_finish_req(ch, req, NULL,
1185 DID_TRANSPORT_FAILFAST << 16);
1186 }
ed9b2264
BVA
1187 }
1188}
aef9ec39 1189
ed9b2264
BVA
1190/*
1191 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1192 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1193 * srp_reset_device() or srp_reset_host() calls will occur while this function
1194 * is in progress. One way to achieve this is not to call this function
1195 * directly but to call srp_reconnect_rport() instead, since that function
1196 * serializes calls of this function via rport->mutex and also blocks
1197 * srp_queuecommand() calls before invoking this function.
1198 */
1199static int srp_rport_reconnect(struct srp_rport *rport)
1200{
1201 struct srp_target_port *target = rport->lld_data;
d92c0da7
BVA
1202 struct srp_rdma_ch *ch;
1203 int i, j, ret = 0;
1204 bool multich = false;
09be70a2 1205
aef9ec39 1206 srp_disconnect_target(target);
34aa654e
BVA
1207
1208 if (target->state == SRP_TARGET_SCANNING)
1209 return -ENODEV;
1210
aef9ec39 1211 /*
c7c4e7ff
BVA
1212 * Now get a new local CM ID so that we avoid confusing the target in
1213 * case things are really fouled up. Doing so also ensures that all CM
1214 * callbacks will have finished before a new QP is allocated.
aef9ec39 1215 */
d92c0da7
BVA
1216 for (i = 0; i < target->ch_count; i++) {
1217 ch = &target->ch[i];
d92c0da7 1218 ret += srp_new_cm_id(ch);
536ae14e 1219 }
d92c0da7
BVA
1220 for (i = 0; i < target->ch_count; i++) {
1221 ch = &target->ch[i];
d92c0da7
BVA
1222 for (j = 0; j < target->req_ring_size; ++j) {
1223 struct srp_request *req = &ch->req_ring[j];
aef9ec39 1224
d92c0da7
BVA
1225 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1226 }
1227 }
1228 for (i = 0; i < target->ch_count; i++) {
1229 ch = &target->ch[i];
d92c0da7
BVA
1230 /*
1231 * Whether or not creating a new CM ID succeeded, create a new
1232 * QP. This guarantees that all completion callback function
1233 * invocations have finished before request resetting starts.
1234 */
1235 ret += srp_create_ch_ib(ch);
aef9ec39 1236
d92c0da7
BVA
1237 INIT_LIST_HEAD(&ch->free_tx);
1238 for (j = 0; j < target->queue_size; ++j)
1239 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1240 }
8de9fe3a
BVA
1241
1242 target->qp_in_error = false;
1243
d92c0da7
BVA
1244 for (i = 0; i < target->ch_count; i++) {
1245 ch = &target->ch[i];
bbac5ccf 1246 if (ret)
d92c0da7 1247 break;
d92c0da7
BVA
1248 ret = srp_connect_ch(ch, multich);
1249 multich = true;
1250 }
09be70a2 1251
ed9b2264
BVA
1252 if (ret == 0)
1253 shost_printk(KERN_INFO, target->scsi_host,
1254 PFX "reconnect succeeded\n");
aef9ec39
RD
1255
1256 return ret;
1257}
1258
8f26c9ff
DD
1259static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1260 unsigned int dma_len, u32 rkey)
f5358a17 1261{
8f26c9ff 1262 struct srp_direct_buf *desc = state->desc;
f5358a17 1263
3ae95da8
BVA
1264 WARN_ON_ONCE(!dma_len);
1265
8f26c9ff
DD
1266 desc->va = cpu_to_be64(dma_addr);
1267 desc->key = cpu_to_be32(rkey);
1268 desc->len = cpu_to_be32(dma_len);
f5358a17 1269
8f26c9ff
DD
1270 state->total_len += dma_len;
1271 state->desc++;
1272 state->ndesc++;
1273}
559ce8f1 1274
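/*
 * Map the pages accumulated in @state through the channel FMR pool and add
 * a memory descriptor for them. When the data fits in a single page and a
 * global rkey is available, a direct descriptor is used instead of an FMR.
 */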
8f26c9ff 1275static int srp_map_finish_fmr(struct srp_map_state *state,
509c07bc 1276 struct srp_rdma_ch *ch)
8f26c9ff 1277{
186fbc66
BVA
1278 struct srp_target_port *target = ch->target;
1279 struct srp_device *dev = target->srp_host->srp_dev;
8f26c9ff
DD
1280 struct ib_pool_fmr *fmr;
1281 u64 io_addr = 0;
85507bcc 1282
f731ed62
BVA
1283 if (state->fmr.next >= state->fmr.end)
1284 return -ENOMEM;
1285
26630e8a
SG
1286 WARN_ON_ONCE(!dev->use_fmr);
1287
1288 if (state->npages == 0)
1289 return 0;
1290
1291 if (state->npages == 1 && target->global_mr) {
1292 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1293 target->global_mr->rkey);
1294 goto reset_state;
1295 }
1296
509c07bc 1297 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
8f26c9ff
DD
1298 state->npages, io_addr);
1299 if (IS_ERR(fmr))
1300 return PTR_ERR(fmr);
f5358a17 1301
f731ed62 1302 *state->fmr.next++ = fmr;
52ede08f 1303 state->nmdesc++;
f5358a17 1304
186fbc66
BVA
1305 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1306 state->dma_len, fmr->fmr->rkey);
539dde6f 1307
26630e8a
SG
1308reset_state:
1309 state->npages = 0;
1310 state->dma_len = 0;
1311
8f26c9ff
DD
1312 return 0;
1313}
1314
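/*
 * Register the current scatterlist segment with a fast registration work
 * request and add a memory descriptor for it. A single scatterlist entry
 * that is covered by the global rkey is described directly without
 * registration.
 */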
5cfb1782 1315static int srp_map_finish_fr(struct srp_map_state *state,
509c07bc 1316 struct srp_rdma_ch *ch)
5cfb1782 1317{
509c07bc 1318 struct srp_target_port *target = ch->target;
5cfb1782
BVA
1319 struct srp_device *dev = target->srp_host->srp_dev;
1320 struct ib_send_wr *bad_wr;
f7f7aab1 1321 struct ib_reg_wr wr;
5cfb1782
BVA
1322 struct srp_fr_desc *desc;
1323 u32 rkey;
f7f7aab1 1324 int n, err;
5cfb1782 1325
f731ed62
BVA
1326 if (state->fr.next >= state->fr.end)
1327 return -ENOMEM;
1328
26630e8a
SG
1329 WARN_ON_ONCE(!dev->use_fast_reg);
1330
f7f7aab1 1331 if (state->sg_nents == 0)
26630e8a
SG
1332 return 0;
1333
f7f7aab1
SG
1334 if (state->sg_nents == 1 && target->global_mr) {
1335 srp_map_desc(state, sg_dma_address(state->sg),
1336 sg_dma_len(state->sg),
26630e8a 1337 target->global_mr->rkey);
f7f7aab1 1338 return 1;
26630e8a
SG
1339 }
1340
509c07bc 1341 desc = srp_fr_pool_get(ch->fr_pool);
5cfb1782
BVA
1342 if (!desc)
1343 return -ENOMEM;
1344
1345 rkey = ib_inc_rkey(desc->mr->rkey);
1346 ib_update_fast_reg_key(desc->mr, rkey);
1347
f7f7aab1
SG
1348 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
1349 dev->mr_page_size);
1350 if (unlikely(n < 0))
1351 return n;
5cfb1782 1352
f7f7aab1
SG
1353 wr.wr.next = NULL;
1354 wr.wr.opcode = IB_WR_REG_MR;
e622f2f4 1355 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
f7f7aab1
SG
1356 wr.wr.num_sge = 0;
1357 wr.wr.send_flags = 0;
1358 wr.mr = desc->mr;
1359 wr.key = desc->mr->rkey;
1360 wr.access = (IB_ACCESS_LOCAL_WRITE |
1361 IB_ACCESS_REMOTE_READ |
1362 IB_ACCESS_REMOTE_WRITE);
5cfb1782 1363
f731ed62 1364 *state->fr.next++ = desc;
5cfb1782
BVA
1365 state->nmdesc++;
1366
f7f7aab1
SG
1367 srp_map_desc(state, desc->mr->iova,
1368 desc->mr->length, desc->mr->rkey);
5cfb1782 1369
26630e8a 1370 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
f7f7aab1 1371 if (unlikely(err))
26630e8a
SG
1372 return err;
1373
f7f7aab1 1374 return n;
5cfb1782
BVA
1375}
1376
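/*
 * Split one scatterlist entry into mr_page_size chunks and add them to
 * @state, closing out the current FMR mapping whenever the per-MR page
 * limit is reached or the entry does not start on a page boundary.
 */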
8f26c9ff 1377static int srp_map_sg_entry(struct srp_map_state *state,
509c07bc 1378 struct srp_rdma_ch *ch,
3ae95da8 1379 struct scatterlist *sg, int sg_index)
8f26c9ff 1380{
509c07bc 1381 struct srp_target_port *target = ch->target;
8f26c9ff
DD
1382 struct srp_device *dev = target->srp_host->srp_dev;
1383 struct ib_device *ibdev = dev->dev;
1384 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1385 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3ae95da8 1386 unsigned int len = 0;
8f26c9ff
DD
1387 int ret;
1388
3ae95da8 1389 WARN_ON_ONCE(!dma_len);
f5358a17 1390
8f26c9ff 1391 while (dma_len) {
5cfb1782
BVA
1392 unsigned offset = dma_addr & ~dev->mr_page_mask;
1393 if (state->npages == dev->max_pages_per_mr || offset != 0) {
f7f7aab1 1394 ret = srp_map_finish_fmr(state, ch);
8f26c9ff
DD
1395 if (ret)
1396 return ret;
8f26c9ff
DD
1397 }
1398
5cfb1782 1399 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
f5358a17 1400
8f26c9ff
DD
1401 if (!state->npages)
1402 state->base_dma_addr = dma_addr;
5cfb1782 1403 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
52ede08f 1404 state->dma_len += len;
8f26c9ff
DD
1405 dma_addr += len;
1406 dma_len -= len;
1407 }
1408
5cfb1782
BVA
1409 /*
1410 * If the last entry of the MR wasn't a full page, then we need to
8f26c9ff
DD
1411 * close it out and start a new one -- we can only merge at page
1412 * boundaries.
1413 */
1414 ret = 0;
0e0d3a48 1415 if (len != dev->mr_page_size)
f7f7aab1 1416 ret = srp_map_finish_fmr(state, ch);
f5358a17
RD
1417 return ret;
1418}
1419
26630e8a
SG
1420static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1421 struct srp_request *req, struct scatterlist *scat,
1422 int count)
76bc1e1d 1423{
76bc1e1d 1424 struct scatterlist *sg;
0e0d3a48 1425 int i, ret;
76bc1e1d 1426
26630e8a
SG
1427 state->desc = req->indirect_desc;
1428 state->pages = req->map_page;
1429 state->fmr.next = req->fmr_list;
1430 state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
1431
1432 for_each_sg(scat, sg, count, i) {
1433 ret = srp_map_sg_entry(state, ch, sg, i);
1434 if (ret)
1435 return ret;
5cfb1782 1436 }
76bc1e1d 1437
f7f7aab1 1438 ret = srp_map_finish_fmr(state, ch);
26630e8a
SG
1439 if (ret)
1440 return ret;
1441
1442 req->nmdesc = state->nmdesc;
1443
1444 return 0;
1445}
1446
1447static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1448 struct srp_request *req, struct scatterlist *scat,
1449 int count)
1450{
26630e8a 1451 state->desc = req->indirect_desc;
f7f7aab1
SG
1452 state->fr.next = req->fr_list;
1453 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1454 state->sg = scat;
1455 state->sg_nents = scsi_sg_count(req->scmnd);
26630e8a 1456
f7f7aab1
SG
1457 while (state->sg_nents) {
1458 int i, n;
26630e8a 1459
f7f7aab1
SG
1460 n = srp_map_finish_fr(state, ch);
1461 if (unlikely(n < 0))
1462 return n;
1463
1464 state->sg_nents -= n;
1465 for (i = 0; i < n; i++)
1466 state->sg = sg_next(state->sg);
1467 }
26630e8a
SG
1468
1469 req->nmdesc = state->nmdesc;
1470
1471 return 0;
1472}
1473
1474static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1475 struct srp_request *req, struct scatterlist *scat,
1476 int count)
1477{
1478 struct srp_target_port *target = ch->target;
1479 struct srp_device *dev = target->srp_host->srp_dev;
1480 struct scatterlist *sg;
1481 int i;
1482
1483 state->desc = req->indirect_desc;
1484 for_each_sg(scat, sg, count, i) {
1485 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1486 ib_sg_dma_len(dev->dev, sg),
1487 target->global_mr->rkey);
0e0d3a48 1488 }
76bc1e1d 1489
52ede08f 1490 req->nmdesc = state->nmdesc;
5cfb1782 1491
26630e8a 1492 return 0;
76bc1e1d
BVA
1493}
1494
330179f2
BVA
1495/*
1496 * Register the indirect data buffer descriptor with the HCA.
1497 *
1498 * Note: since the indirect data buffer descriptor has been allocated with
1499 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1500 * memory buffer.
1501 */
1502static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1503 void **next_mr, void **end_mr, u32 idb_len,
1504 __be32 *idb_rkey)
1505{
1506 struct srp_target_port *target = ch->target;
1507 struct srp_device *dev = target->srp_host->srp_dev;
1508 struct srp_map_state state;
1509 struct srp_direct_buf idb_desc;
1510 u64 idb_pages[1];
f7f7aab1 1511 struct scatterlist idb_sg[1];
330179f2
BVA
1512 int ret;
1513
1514 memset(&state, 0, sizeof(state));
1515 memset(&idb_desc, 0, sizeof(idb_desc));
1516 state.gen.next = next_mr;
1517 state.gen.end = end_mr;
1518 state.desc = &idb_desc;
330179f2
BVA
1519 state.base_dma_addr = req->indirect_dma_addr;
1520 state.dma_len = idb_len;
f7f7aab1
SG
1521
1522 if (dev->use_fast_reg) {
1523 state.sg = idb_sg;
1524 state.sg_nents = 1;
1525 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1526 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
fc925518
CH
1527#ifdef CONFIG_NEED_SG_DMA_LENGTH
1528 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1529#endif
f7f7aab1
SG
1530 ret = srp_map_finish_fr(&state, ch);
1531 if (ret < 0)
1532 return ret;
1533 } else if (dev->use_fmr) {
1534 state.pages = idb_pages;
1535 state.pages[0] = (req->indirect_dma_addr &
1536 dev->mr_page_mask);
1537 state.npages = 1;
1538 ret = srp_map_finish_fmr(&state, ch);
1539 if (ret < 0)
1540 return ret;
1541 } else {
1542 return -EINVAL;
1543 }
330179f2
BVA
1544
1545 *idb_rkey = idb_desc.key;
1546
f7f7aab1 1547 return 0;
330179f2
BVA
1548}
1549
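/**
 * srp_map_data() - map the data buffer of a SCSI command
 * @scmnd: SCSI command to be mapped.
 * @ch:    SRP RDMA channel the command will be submitted on.
 * @req:   SRP request that owns the memory registration state.
 *
 * Builds either a direct descriptor or an indirect descriptor table in the
 * SRP_CMD information unit and returns the resulting IU length, or a
 * negative error code.
 */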
509c07bc 1550static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
aef9ec39
RD
1551 struct srp_request *req)
1552{
509c07bc 1553 struct srp_target_port *target = ch->target;
76bc1e1d 1554 struct scatterlist *scat;
aef9ec39 1555 struct srp_cmd *cmd = req->cmd->buf;
330179f2 1556 int len, nents, count, ret;
85507bcc
RC
1557 struct srp_device *dev;
1558 struct ib_device *ibdev;
8f26c9ff
DD
1559 struct srp_map_state state;
1560 struct srp_indirect_buf *indirect_hdr;
330179f2
BVA
1561 u32 idb_len, table_len;
1562 __be32 idb_rkey;
8f26c9ff 1563 u8 fmt;
aef9ec39 1564
bb350d1d 1565 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
aef9ec39
RD
1566 return sizeof (struct srp_cmd);
1567
1568 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1569 scmnd->sc_data_direction != DMA_TO_DEVICE) {
7aa54bd7
DD
1570 shost_printk(KERN_WARNING, target->scsi_host,
1571 PFX "Unhandled data direction %d\n",
1572 scmnd->sc_data_direction);
aef9ec39
RD
1573 return -EINVAL;
1574 }
1575
bb350d1d
FT
1576 nents = scsi_sg_count(scmnd);
1577 scat = scsi_sglist(scmnd);
aef9ec39 1578
05321937 1579 dev = target->srp_host->srp_dev;
85507bcc
RC
1580 ibdev = dev->dev;
1581
1582 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
8f26c9ff
DD
1583 if (unlikely(count == 0))
1584 return -EIO;
f5358a17
RD
1585
1586 fmt = SRP_DATA_DESC_DIRECT;
1587 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
aef9ec39 1588
03f6fb93 1589 if (count == 1 && target->global_mr) {
f5358a17
RD
1590 /*
1591 * The midlayer only generated a single gather/scatter
1592 * entry, or DMA mapping coalesced everything to a
1593 * single entry. So a direct descriptor along with
1594 * the DMA MR suffices.
1595 */
cf368713 1596 struct srp_direct_buf *buf = (void *) cmd->add_data;
aef9ec39 1597
85507bcc 1598 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
03f6fb93 1599 buf->key = cpu_to_be32(target->global_mr->rkey);
85507bcc 1600 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
8f26c9ff 1601
52ede08f 1602 req->nmdesc = 0;
8f26c9ff
DD
1603 goto map_complete;
1604 }
1605
5cfb1782
BVA
1606 /*
1607 * We have more than one scatter/gather entry, so build our indirect
1608 * descriptor table, trying to merge as many entries as we can.
8f26c9ff
DD
1609 */
1610 indirect_hdr = (void *) cmd->add_data;
1611
c07d424d
DD
1612 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1613 target->indirect_size, DMA_TO_DEVICE);
1614
8f26c9ff 1615 memset(&state, 0, sizeof(state));
26630e8a
SG
1616 if (dev->use_fast_reg)
1617 srp_map_sg_fr(&state, ch, req, scat, count);
1618 else if (dev->use_fmr)
1619 srp_map_sg_fmr(&state, ch, req, scat, count);
1620 else
1621 srp_map_sg_dma(&state, ch, req, scat, count);
cf368713 1622
c07d424d
DD
1623 /* We've mapped the request, now pull as much of the indirect
1624 * descriptor table as we can into the command buffer. If this
1625 * target is not using an external indirect table, we are
1626 * guaranteed to fit into the command, as the SCSI layer won't
1627 * give us more S/G entries than we allow.
8f26c9ff 1628 */
8f26c9ff 1629 if (state.ndesc == 1) {
5cfb1782
BVA
1630 /*
1631 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1632 * so use a direct descriptor.
1633 */
1634 struct srp_direct_buf *buf = (void *) cmd->add_data;
cf368713 1635
c07d424d 1636 *buf = req->indirect_desc[0];
8f26c9ff 1637 goto map_complete;
aef9ec39
RD
1638 }
1639
c07d424d
DD
1640 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1641 !target->allow_ext_sg)) {
1642 shost_printk(KERN_ERR, target->scsi_host,
1643 "Could not fit S/G list into SRP_CMD\n");
1644 return -EIO;
1645 }
1646
1647 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1648 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1649 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1650
1651 fmt = SRP_DATA_DESC_INDIRECT;
1652 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
c07d424d 1653 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1654
c07d424d
DD
1655 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1656 count * sizeof (struct srp_direct_buf));
8f26c9ff 1657
03f6fb93 1658 if (!target->global_mr) {
330179f2
BVA
1659 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1660 idb_len, &idb_rkey);
1661 if (ret < 0)
1662 return ret;
1663 req->nmdesc++;
1664 } else {
a745f4f4 1665 idb_rkey = cpu_to_be32(target->global_mr->rkey);
330179f2
BVA
1666 }
1667
c07d424d 1668 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1669 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1670 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1671 indirect_hdr->len = cpu_to_be32(state.total_len);
1672
1673 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1674 cmd->data_out_desc_cnt = count;
8f26c9ff 1675 else
c07d424d
DD
1676 cmd->data_in_desc_cnt = count;
1677
1678 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1679 DMA_TO_DEVICE);
8f26c9ff
DD
1680
1681map_complete:
aef9ec39
RD
1682 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1683 cmd->buf_fmt = fmt << 4;
1684 else
1685 cmd->buf_fmt = fmt;
1686
aef9ec39
RD
1687 return len;
1688}
1689
76c75b25
BVA
1690/*
1691 * Return an IU and possible credit to the free pool
1692 */
509c07bc 1693static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1694 enum srp_iu_type iu_type)
1695{
1696 unsigned long flags;
1697
509c07bc
BVA
1698 spin_lock_irqsave(&ch->lock, flags);
1699 list_add(&iu->list, &ch->free_tx);
76c75b25 1700 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1701 ++ch->req_lim;
1702 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1703}
1704
05a1d750 1705/*
509c07bc 1706 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1707 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1708 *
1709 * Note:
1710 * An upper limit for the number of allocated information units for each
1711 * request type is:
1712 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1713 * more than Scsi_Host.can_queue requests.
1714 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1715 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1716 * one unanswered SRP request to an initiator.
1717 */
509c07bc 1718static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1719 enum srp_iu_type iu_type)
1720{
509c07bc 1721 struct srp_target_port *target = ch->target;
05a1d750
DD
1722 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1723 struct srp_iu *iu;
1724
509c07bc 1725 srp_send_completion(ch->send_cq, ch);
05a1d750 1726
509c07bc 1727 if (list_empty(&ch->free_tx))
05a1d750
DD
1728 return NULL;
1729
1730 /* Initiator responses to target requests do not consume credits */
76c75b25 1731 if (iu_type != SRP_IU_RSP) {
509c07bc 1732 if (ch->req_lim <= rsv) {
76c75b25
BVA
1733 ++target->zero_req_lim;
1734 return NULL;
1735 }
1736
509c07bc 1737 --ch->req_lim;
05a1d750
DD
1738 }
1739
509c07bc 1740 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1741 list_del(&iu->list);
05a1d750
DD
1742 return iu;
1743}
1744
509c07bc 1745static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1746{
509c07bc 1747 struct srp_target_port *target = ch->target;
05a1d750
DD
1748 struct ib_sge list;
1749 struct ib_send_wr wr, *bad_wr;
05a1d750
DD
1750
1751 list.addr = iu->dma;
1752 list.length = len;
9af76271 1753 list.lkey = target->lkey;
05a1d750
DD
1754
1755 wr.next = NULL;
dcb4cb85 1756 wr.wr_id = (uintptr_t) iu;
05a1d750
DD
1757 wr.sg_list = &list;
1758 wr.num_sge = 1;
1759 wr.opcode = IB_WR_SEND;
1760 wr.send_flags = IB_SEND_SIGNALED;
1761
509c07bc 1762 return ib_post_send(ch->qp, &wr, &bad_wr);
05a1d750
DD
1763}
1764
509c07bc 1765static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1766{
509c07bc 1767 struct srp_target_port *target = ch->target;
c996bb47 1768 struct ib_recv_wr wr, *bad_wr;
dcb4cb85 1769 struct ib_sge list;
c996bb47
BVA
1770
1771 list.addr = iu->dma;
1772 list.length = iu->size;
9af76271 1773 list.lkey = target->lkey;
c996bb47
BVA
1774
1775 wr.next = NULL;
dcb4cb85 1776 wr.wr_id = (uintptr_t) iu;
c996bb47
BVA
1777 wr.sg_list = &list;
1778 wr.num_sge = 1;
1779
509c07bc 1780 return ib_post_recv(ch->qp, &wr, &bad_wr);
c996bb47
BVA
1781}
1782
509c07bc 1783static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1784{
509c07bc 1785 struct srp_target_port *target = ch->target;
aef9ec39
RD
1786 struct srp_request *req;
1787 struct scsi_cmnd *scmnd;
1788 unsigned long flags;
aef9ec39 1789
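	/*
	 * Descriptive note: srp_send_tsk_mgmt() sets the SRP_TAG_TSK_MGMT
	 * bit in the tag, so this test separates task-management responses
	 * from responses to regular SCSI commands.
	 */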
aef9ec39 1790 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1791 spin_lock_irqsave(&ch->lock, flags);
1792 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1793 spin_unlock_irqrestore(&ch->lock, flags);
94a9174c 1794
509c07bc 1795 ch->tsk_mgmt_status = -1;
f8b6e31e 1796 if (be32_to_cpu(rsp->resp_data_len) >= 4)
509c07bc
BVA
1797 ch->tsk_mgmt_status = rsp->data[3];
1798 complete(&ch->tsk_mgmt_done);
aef9ec39 1799 } else {
77f2c1a4
BVA
1800 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1801 if (scmnd) {
1802 req = (void *)scmnd->host_scribble;
1803 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1804 }
22032991 1805 if (!scmnd) {
7aa54bd7 1806 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1807 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1808 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1809
509c07bc
BVA
1810 spin_lock_irqsave(&ch->lock, flags);
1811 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1812 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1813
1814 return;
1815 }
aef9ec39
RD
1816 scmnd->result = rsp->status;
1817
1818 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1819 memcpy(scmnd->sense_buffer, rsp->data +
1820 be32_to_cpu(rsp->resp_data_len),
1821 min_t(int, be32_to_cpu(rsp->sense_data_len),
1822 SCSI_SENSE_BUFFERSIZE));
1823 }
1824
e714531a 1825 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1826 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1827 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1828 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1829 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1830 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1831 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1832 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1833
509c07bc 1834 srp_free_req(ch, req, scmnd,
22032991
BVA
1835 be32_to_cpu(rsp->req_lim_delta));
1836
f8b6e31e
DD
1837 scmnd->host_scribble = NULL;
1838 scmnd->scsi_done(scmnd);
aef9ec39 1839 }
aef9ec39
RD
1840}
1841
509c07bc 1842static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
1843 void *rsp, int len)
1844{
509c07bc 1845 struct srp_target_port *target = ch->target;
76c75b25 1846 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
1847 unsigned long flags;
1848 struct srp_iu *iu;
76c75b25 1849 int err;
bb12588a 1850
509c07bc
BVA
1851 spin_lock_irqsave(&ch->lock, flags);
1852 ch->req_lim += req_delta;
1853 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1854 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 1855
bb12588a
DD
1856 if (!iu) {
1857 shost_printk(KERN_ERR, target->scsi_host, PFX
1858 "no IU available to send response\n");
76c75b25 1859 return 1;
bb12588a
DD
1860 }
1861
1862 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1863 memcpy(iu->buf, rsp, len);
1864 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1865
509c07bc 1866 err = srp_post_send(ch, iu, len);
76c75b25 1867 if (err) {
bb12588a
DD
1868 shost_printk(KERN_ERR, target->scsi_host, PFX
1869 "unable to post response: %d\n", err);
509c07bc 1870 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 1871 }
bb12588a 1872
bb12588a
DD
1873 return err;
1874}
1875
509c07bc 1876static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
1877 struct srp_cred_req *req)
1878{
1879 struct srp_cred_rsp rsp = {
1880 .opcode = SRP_CRED_RSP,
1881 .tag = req->tag,
1882 };
1883 s32 delta = be32_to_cpu(req->req_lim_delta);
1884
509c07bc
BVA
1885 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1886 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
1887 "problems processing SRP_CRED_REQ\n");
1888}
1889
509c07bc 1890static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
1891 struct srp_aer_req *req)
1892{
509c07bc 1893 struct srp_target_port *target = ch->target;
bb12588a
DD
1894 struct srp_aer_rsp rsp = {
1895 .opcode = SRP_AER_RSP,
1896 .tag = req->tag,
1897 };
1898 s32 delta = be32_to_cpu(req->req_lim_delta);
1899
1900 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 1901 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 1902
509c07bc 1903 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
1904 shost_printk(KERN_ERR, target->scsi_host, PFX
1905 "problems processing SRP_AER_REQ\n");
1906}
1907
509c07bc 1908static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
aef9ec39 1909{
509c07bc 1910 struct srp_target_port *target = ch->target;
dcb4cb85 1911 struct ib_device *dev = target->srp_host->srp_dev->dev;
737b94eb 1912 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
c996bb47 1913 int res;
aef9ec39
RD
1914 u8 opcode;
1915
509c07bc 1916 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1917 DMA_FROM_DEVICE);
aef9ec39
RD
1918
1919 opcode = *(u8 *) iu->buf;
1920
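	/*
	 * Descriptive note: the block below is compiled out ("if (0)");
	 * changing it to "if (1)" hex-dumps every received information
	 * unit, which is occasionally useful while debugging.
	 */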
1921 if (0) {
7aa54bd7
DD
1922 shost_printk(KERN_ERR, target->scsi_host,
1923 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
1924 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1925 iu->buf, wc->byte_len, true);
aef9ec39
RD
1926 }
1927
1928 switch (opcode) {
1929 case SRP_RSP:
509c07bc 1930 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
1931 break;
1932
bb12588a 1933 case SRP_CRED_REQ:
509c07bc 1934 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
1935 break;
1936
1937 case SRP_AER_REQ:
509c07bc 1938 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
1939 break;
1940
aef9ec39
RD
1941 case SRP_T_LOGOUT:
1942 /* XXX Handle target logout */
7aa54bd7
DD
1943 shost_printk(KERN_WARNING, target->scsi_host,
1944 PFX "Got target logout request\n");
aef9ec39
RD
1945 break;
1946
1947 default:
7aa54bd7
DD
1948 shost_printk(KERN_WARNING, target->scsi_host,
1949 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
1950 break;
1951 }
1952
509c07bc 1953 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 1954 DMA_FROM_DEVICE);
c996bb47 1955
509c07bc 1956 res = srp_post_recv(ch, iu);
c996bb47
BVA
1957 if (res != 0)
1958 shost_printk(KERN_ERR, target->scsi_host,
1959 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
1960}
1961
c1120f89
BVA
1962/**
1963 * srp_tl_err_work() - handle a transport layer error
af24663b 1964 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
1965 *
1966 * Note: This function may get invoked before the rport has been created,
1967 * hence the target->rport test.
1968 */
1969static void srp_tl_err_work(struct work_struct *work)
1970{
1971 struct srp_target_port *target;
1972
1973 target = container_of(work, struct srp_target_port, tl_err_work);
1974 if (target->rport)
1975 srp_start_tl_fail_timers(target->rport);
1976}
1977
5cfb1782 1978static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
7dad6b2e 1979 bool send_err, struct srp_rdma_ch *ch)
948d1e88 1980{
7dad6b2e
BVA
1981 struct srp_target_port *target = ch->target;
1982
1983 if (wr_id == SRP_LAST_WR_ID) {
1984 complete(&ch->done);
1985 return;
1986 }
1987
c014c8cd 1988 if (ch->connected && !target->qp_in_error) {
5cfb1782
BVA
1989 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1990 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1991 "LOCAL_INV failed with status %s (%d)\n",
1992 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1993 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1994 shost_printk(KERN_ERR, target->scsi_host, PFX
57363d98
SG
1995 "FAST_REG_MR failed status %s (%d)\n",
1996 ib_wc_status_msg(wc_status), wc_status);
5cfb1782
BVA
1997 } else {
1998 shost_printk(KERN_ERR, target->scsi_host,
57363d98 1999 PFX "failed %s status %s (%d) for iu %p\n",
5cfb1782 2000 send_err ? "send" : "receive",
57363d98
SG
2001 ib_wc_status_msg(wc_status), wc_status,
2002 (void *)(uintptr_t)wr_id);
5cfb1782 2003 }
c1120f89 2004 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2005 }
948d1e88
BVA
2006 target->qp_in_error = true;
2007}
2008
509c07bc 2009static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
aef9ec39 2010{
509c07bc 2011 struct srp_rdma_ch *ch = ch_ptr;
aef9ec39 2012 struct ib_wc wc;
aef9ec39
RD
2013
2014 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2015 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88 2016 if (likely(wc.status == IB_WC_SUCCESS)) {
509c07bc 2017 srp_handle_recv(ch, &wc);
948d1e88 2018 } else {
7dad6b2e 2019 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
aef9ec39 2020 }
9c03dc9f
BVA
2021 }
2022}
2023
509c07bc 2024static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
9c03dc9f 2025{
509c07bc 2026 struct srp_rdma_ch *ch = ch_ptr;
9c03dc9f 2027 struct ib_wc wc;
dcb4cb85 2028 struct srp_iu *iu;
9c03dc9f
BVA
2029
2030 while (ib_poll_cq(cq, 1, &wc) > 0) {
948d1e88
BVA
2031 if (likely(wc.status == IB_WC_SUCCESS)) {
2032 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
509c07bc 2033 list_add(&iu->list, &ch->free_tx);
948d1e88 2034 } else {
7dad6b2e 2035 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
9c03dc9f 2036 }
aef9ec39
RD
2037 }
2038}
2039
76c75b25 2040static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2041{
76c75b25 2042 struct srp_target_port *target = host_to_target(shost);
a95cadb9 2043 struct srp_rport *rport = target->rport;
509c07bc 2044 struct srp_rdma_ch *ch;
aef9ec39
RD
2045 struct srp_request *req;
2046 struct srp_iu *iu;
2047 struct srp_cmd *cmd;
85507bcc 2048 struct ib_device *dev;
76c75b25 2049 unsigned long flags;
77f2c1a4
BVA
2050 u32 tag;
2051 u16 idx;
d1b4289e 2052 int len, ret;
a95cadb9
BVA
2053 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2054
2055 /*
2056 * The SCSI EH thread is the only context from which srp_queuecommand()
2057 * can get invoked for blocked devices (SDEV_BLOCK /
2058 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2059 * locking the rport mutex if invoked from inside the SCSI EH.
2060 */
2061 if (in_scsi_eh)
2062 mutex_lock(&rport->mutex);
aef9ec39 2063
d1b4289e
BVA
2064 scmnd->result = srp_chkready(target->rport);
2065 if (unlikely(scmnd->result))
2066 goto err;
2ce19e72 2067
77f2c1a4
BVA
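	/*
	 * Descriptive note: blk-mq encodes the hardware queue index in the
	 * upper bits of the unique tag; that index selects the RDMA channel
	 * while the low bits index the per-channel request ring.
	 */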
2068 WARN_ON_ONCE(scmnd->request->tag < 0);
2069 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7 2070 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
77f2c1a4
BVA
2071 idx = blk_mq_unique_tag_to_tag(tag);
2072 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2073 dev_name(&shost->shost_gendev), tag, idx,
2074 target->req_ring_size);
509c07bc
BVA
2075
2076 spin_lock_irqsave(&ch->lock, flags);
2077 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2078 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2079
77f2c1a4
BVA
2080 if (!iu)
2081 goto err;
2082
2083 req = &ch->req_ring[idx];
05321937 2084 dev = target->srp_host->srp_dev->dev;
49248644 2085 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
85507bcc 2086 DMA_TO_DEVICE);
aef9ec39 2087
f8b6e31e 2088 scmnd->host_scribble = (void *) req;
aef9ec39
RD
2089
2090 cmd = iu->buf;
2091 memset(cmd, 0, sizeof *cmd);
2092
2093 cmd->opcode = SRP_CMD;
985aa495 2094 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2095 cmd->tag = tag;
aef9ec39
RD
2096 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2097
aef9ec39
RD
2098 req->scmnd = scmnd;
2099 req->cmd = iu;
aef9ec39 2100
509c07bc 2101 len = srp_map_data(scmnd, ch, req);
aef9ec39 2102 if (len < 0) {
7aa54bd7 2103 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2104 PFX "Failed to map data (%d)\n", len);
2105 /*
2106 * If we ran out of memory descriptors (-ENOMEM) because an
2107 * application is queuing many requests with more than
52ede08f 2108 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2109 * to reduce queue depth temporarily.
2110 */
2111 scmnd->result = len == -ENOMEM ?
2112 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
76c75b25 2113 goto err_iu;
aef9ec39
RD
2114 }
2115
49248644 2116 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
85507bcc 2117 DMA_TO_DEVICE);
aef9ec39 2118
509c07bc 2119 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2120 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
aef9ec39
RD
2121 goto err_unmap;
2122 }
2123
d1b4289e
BVA
2124 ret = 0;
2125
a95cadb9
BVA
2126unlock_rport:
2127 if (in_scsi_eh)
2128 mutex_unlock(&rport->mutex);
2129
d1b4289e 2130 return ret;
aef9ec39
RD
2131
2132err_unmap:
509c07bc 2133 srp_unmap_data(scmnd, ch, req);
aef9ec39 2134
76c75b25 2135err_iu:
509c07bc 2136 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2137
024ca901
BVA
2138 /*
 2139 	 * Prevent the loops that iterate over the request ring from
 2140 	 * encountering a dangling SCSI command pointer.
2141 */
2142 req->scmnd = NULL;
2143
d1b4289e
BVA
2144err:
2145 if (scmnd->result) {
2146 scmnd->scsi_done(scmnd);
2147 ret = 0;
2148 } else {
2149 ret = SCSI_MLQUEUE_HOST_BUSY;
2150 }
a95cadb9 2151
d1b4289e 2152 goto unlock_rport;
aef9ec39
RD
2153}
2154
4d73f95f
BVA
2155/*
2156 * Note: the resources allocated in this function are freed in
509c07bc 2157 * srp_free_ch_ib().
4d73f95f 2158 */
509c07bc 2159static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2160{
509c07bc 2161 struct srp_target_port *target = ch->target;
aef9ec39
RD
2162 int i;
2163
509c07bc
BVA
2164 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2165 GFP_KERNEL);
2166 if (!ch->rx_ring)
4d73f95f 2167 goto err_no_ring;
509c07bc
BVA
2168 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2169 GFP_KERNEL);
2170 if (!ch->tx_ring)
4d73f95f
BVA
2171 goto err_no_ring;
2172
2173 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2174 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2175 ch->max_ti_iu_len,
2176 GFP_KERNEL, DMA_FROM_DEVICE);
2177 if (!ch->rx_ring[i])
aef9ec39
RD
2178 goto err;
2179 }
2180
4d73f95f 2181 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2182 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2183 target->max_iu_len,
2184 GFP_KERNEL, DMA_TO_DEVICE);
2185 if (!ch->tx_ring[i])
aef9ec39 2186 goto err;
dcb4cb85 2187
509c07bc 2188 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2189 }
2190
2191 return 0;
2192
2193err:
4d73f95f 2194 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2195 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2196 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2197 }
2198
4d73f95f
BVA
2199
2200err_no_ring:
509c07bc
BVA
2201 kfree(ch->tx_ring);
2202 ch->tx_ring = NULL;
2203 kfree(ch->rx_ring);
2204 ch->rx_ring = NULL;
4d73f95f 2205
aef9ec39
RD
2206 return -ENOMEM;
2207}
2208
c9b03c1a
BVA
2209static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2210{
2211 uint64_t T_tr_ns, max_compl_time_ms;
2212 uint32_t rq_tmo_jiffies;
2213
2214 /*
2215 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2216 * table 91), both the QP timeout and the retry count have to be set
2217 * for RC QP's during the RTR to RTS transition.
2218 */
2219 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2220 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2221
2222 /*
2223 * Set target->rq_tmo_jiffies to one second more than the largest time
2224 * it can take before an error completion is generated. See also
2225 * C9-140..142 in the IBTA spec for more information about how to
2226 * convert the QP Local ACK Timeout value to nanoseconds.
2227 */
2228 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2229 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2230 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2231 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
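	/*
	 * Illustrative worked example (hypothetical values, not measured
	 * ones): with qp_attr->timeout == 19 and qp_attr->retry_cnt == 7,
	 * T_tr = 4096 * 2^19 ns ~= 2.15 s, the worst-case completion time
	 * is 7 * 4 * 2.15 s ~= 60 s, and rq_tmo_jiffies ends up at roughly
	 * 61 seconds worth of jiffies.
	 */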
2232
2233 return rq_tmo_jiffies;
2234}
2235
961e0be8 2236static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2237 const struct srp_login_rsp *lrsp,
509c07bc 2238 struct srp_rdma_ch *ch)
961e0be8 2239{
509c07bc 2240 struct srp_target_port *target = ch->target;
961e0be8
DD
2241 struct ib_qp_attr *qp_attr = NULL;
2242 int attr_mask = 0;
2243 int ret;
2244 int i;
2245
2246 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2247 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2248 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
961e0be8
DD
2249
2250 /*
2251 * Reserve credits for task management so we don't
2252 * bounce requests back to the SCSI mid-layer.
2253 */
2254 target->scsi_host->can_queue
509c07bc 2255 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2256 target->scsi_host->can_queue);
4d73f95f
BVA
2257 target->scsi_host->cmd_per_lun
2258 = min_t(int, target->scsi_host->can_queue,
2259 target->scsi_host->cmd_per_lun);
961e0be8
DD
2260 } else {
2261 shost_printk(KERN_WARNING, target->scsi_host,
2262 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2263 ret = -ECONNRESET;
2264 goto error;
2265 }
2266
509c07bc
BVA
2267 if (!ch->rx_ring) {
2268 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2269 if (ret)
2270 goto error;
2271 }
2272
2273 ret = -ENOMEM;
2274 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2275 if (!qp_attr)
2276 goto error;
2277
2278 qp_attr->qp_state = IB_QPS_RTR;
2279 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2280 if (ret)
2281 goto error_free;
2282
509c07bc 2283 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2284 if (ret)
2285 goto error_free;
2286
4d73f95f 2287 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2288 struct srp_iu *iu = ch->rx_ring[i];
2289
2290 ret = srp_post_recv(ch, iu);
961e0be8
DD
2291 if (ret)
2292 goto error_free;
2293 }
2294
2295 qp_attr->qp_state = IB_QPS_RTS;
2296 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2297 if (ret)
2298 goto error_free;
2299
c9b03c1a
BVA
2300 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2301
509c07bc 2302 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
961e0be8
DD
2303 if (ret)
2304 goto error_free;
2305
2306 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2307
2308error_free:
2309 kfree(qp_attr);
2310
2311error:
509c07bc 2312 ch->status = ret;
961e0be8
DD
2313}
2314
aef9ec39
RD
2315static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2316 struct ib_cm_event *event,
509c07bc 2317 struct srp_rdma_ch *ch)
aef9ec39 2318{
509c07bc 2319 struct srp_target_port *target = ch->target;
7aa54bd7 2320 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2321 struct ib_class_port_info *cpi;
2322 int opcode;
2323
2324 switch (event->param.rej_rcvd.reason) {
2325 case IB_CM_REJ_PORT_CM_REDIRECT:
2326 cpi = event->param.rej_rcvd.ari;
509c07bc
BVA
2327 ch->path.dlid = cpi->redirect_lid;
2328 ch->path.pkey = cpi->redirect_pkey;
aef9ec39 2329 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
509c07bc 2330 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2331
509c07bc 2332 ch->status = ch->path.dlid ?
aef9ec39
RD
2333 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2334 break;
2335
2336 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2337 if (srp_target_is_topspin(target)) {
aef9ec39
RD
2338 /*
2339 * Topspin/Cisco SRP gateways incorrectly send
2340 * reject reason code 25 when they mean 24
2341 * (port redirect).
2342 */
509c07bc 2343 memcpy(ch->path.dgid.raw,
aef9ec39
RD
2344 event->param.rej_rcvd.ari, 16);
2345
7aa54bd7
DD
2346 shost_printk(KERN_DEBUG, shost,
2347 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
509c07bc
BVA
2348 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2349 be64_to_cpu(ch->path.dgid.global.interface_id));
aef9ec39 2350
509c07bc 2351 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2352 } else {
7aa54bd7
DD
2353 shost_printk(KERN_WARNING, shost,
2354 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2355 ch->status = -ECONNRESET;
aef9ec39
RD
2356 }
2357 break;
2358
2359 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2360 shost_printk(KERN_WARNING, shost,
2361 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2362 ch->status = -ECONNRESET;
aef9ec39
RD
2363 break;
2364
2365 case IB_CM_REJ_CONSUMER_DEFINED:
2366 opcode = *(u8 *) event->private_data;
2367 if (opcode == SRP_LOGIN_REJ) {
2368 struct srp_login_rej *rej = event->private_data;
2369 u32 reason = be32_to_cpu(rej->reason);
2370
2371 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2372 shost_printk(KERN_WARNING, shost,
2373 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2374 else
e7ffde01
BVA
2375 shost_printk(KERN_WARNING, shost, PFX
2376 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000
BVA
2377 target->sgid.raw,
2378 target->orig_dgid.raw, reason);
aef9ec39 2379 } else
7aa54bd7
DD
2380 shost_printk(KERN_WARNING, shost,
2381 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2382 " opcode 0x%02x\n", opcode);
509c07bc 2383 ch->status = -ECONNRESET;
aef9ec39
RD
2384 break;
2385
9fe4bcf4
DD
2386 case IB_CM_REJ_STALE_CONN:
2387 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2388 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2389 break;
2390
aef9ec39 2391 default:
7aa54bd7
DD
2392 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2393 event->param.rej_rcvd.reason);
509c07bc 2394 ch->status = -ECONNRESET;
aef9ec39
RD
2395 }
2396}
2397
2398static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2399{
509c07bc
BVA
2400 struct srp_rdma_ch *ch = cm_id->context;
2401 struct srp_target_port *target = ch->target;
aef9ec39 2402 int comp = 0;
aef9ec39
RD
2403
2404 switch (event->event) {
2405 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2406 shost_printk(KERN_DEBUG, target->scsi_host,
2407 PFX "Sending CM REQ failed\n");
aef9ec39 2408 comp = 1;
509c07bc 2409 ch->status = -ECONNRESET;
aef9ec39
RD
2410 break;
2411
2412 case IB_CM_REP_RECEIVED:
2413 comp = 1;
509c07bc 2414 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2415 break;
2416
2417 case IB_CM_REJ_RECEIVED:
7aa54bd7 2418 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2419 comp = 1;
2420
509c07bc 2421 srp_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2422 break;
2423
b7ac4ab4 2424 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2425 shost_printk(KERN_WARNING, target->scsi_host,
2426 PFX "DREQ received - connection closed\n");
c014c8cd 2427 ch->connected = false;
b7ac4ab4 2428 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2429 shost_printk(KERN_ERR, target->scsi_host,
2430 PFX "Sending CM DREP failed\n");
c1120f89 2431 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2432 break;
2433
2434 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2435 shost_printk(KERN_ERR, target->scsi_host,
2436 PFX "connection closed\n");
ac72d766 2437 comp = 1;
aef9ec39 2438
509c07bc 2439 ch->status = 0;
aef9ec39
RD
2440 break;
2441
b7ac4ab4
IR
2442 case IB_CM_MRA_RECEIVED:
2443 case IB_CM_DREQ_ERROR:
2444 case IB_CM_DREP_RECEIVED:
2445 break;
2446
aef9ec39 2447 default:
7aa54bd7
DD
2448 shost_printk(KERN_WARNING, target->scsi_host,
2449 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2450 break;
2451 }
2452
2453 if (comp)
509c07bc 2454 complete(&ch->done);
aef9ec39 2455
aef9ec39
RD
2456 return 0;
2457}
2458
71444b97
JW
2459/**
2460 * srp_change_queue_depth - setting device queue depth
2461 * @sdev: scsi device struct
2462 * @qdepth: requested queue depth
71444b97
JW
2463 *
2464 * Returns queue depth.
2465 */
2466static int
db5ed4df 2467srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2468{
c40ecc12 2469 if (!sdev->tagged_supported)
1e6f2416 2470 qdepth = 1;
db5ed4df 2471 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2472}
2473
985aa495
BVA
2474static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2475 u8 func)
aef9ec39 2476{
509c07bc 2477 struct srp_target_port *target = ch->target;
a95cadb9 2478 struct srp_rport *rport = target->rport;
19081f31 2479 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2480 struct srp_iu *iu;
2481 struct srp_tsk_mgmt *tsk_mgmt;
aef9ec39 2482
c014c8cd 2483 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2484 return -1;
2485
509c07bc 2486 init_completion(&ch->tsk_mgmt_done);
aef9ec39 2487
a95cadb9 2488 /*
509c07bc 2489 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
 2490 	 * invoked while a task management function is being sent.
2491 */
2492 mutex_lock(&rport->mutex);
509c07bc
BVA
2493 spin_lock_irq(&ch->lock);
2494 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2495 spin_unlock_irq(&ch->lock);
76c75b25 2496
a95cadb9
BVA
2497 if (!iu) {
2498 mutex_unlock(&rport->mutex);
2499
76c75b25 2500 return -1;
a95cadb9 2501 }
aef9ec39 2502
19081f31
DD
2503 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2504 DMA_TO_DEVICE);
aef9ec39
RD
2505 tsk_mgmt = iu->buf;
2506 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2507
2508 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2509 int_to_scsilun(lun, &tsk_mgmt->lun);
f8b6e31e 2510 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
aef9ec39 2511 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2512 tsk_mgmt->task_tag = req_tag;
aef9ec39 2513
19081f31
DD
2514 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2515 DMA_TO_DEVICE);
509c07bc
BVA
2516 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2517 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2518 mutex_unlock(&rport->mutex);
2519
76c75b25
BVA
2520 return -1;
2521 }
a95cadb9 2522 mutex_unlock(&rport->mutex);
d945e1df 2523
509c07bc 2524 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
aef9ec39 2525 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
d945e1df 2526 return -1;
aef9ec39 2527
d945e1df 2528 return 0;
d945e1df
RD
2529}
2530
aef9ec39
RD
2531static int srp_abort(struct scsi_cmnd *scmnd)
2532{
d945e1df 2533 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2534 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2535 u32 tag;
d92c0da7 2536 u16 ch_idx;
509c07bc 2537 struct srp_rdma_ch *ch;
086f44f5 2538 int ret;
d945e1df 2539
7aa54bd7 2540 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2541
d92c0da7 2542 if (!req)
99b6697a 2543 return SUCCESS;
77f2c1a4 2544 tag = blk_mq_unique_tag(scmnd->request);
d92c0da7
BVA
2545 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2546 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2547 return SUCCESS;
2548 ch = &target->ch[ch_idx];
2549 if (!srp_claim_req(ch, req, NULL, scmnd))
2550 return SUCCESS;
2551 shost_printk(KERN_ERR, target->scsi_host,
2552 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2553 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
80d5e8a2 2554 SRP_TSK_ABORT_TASK) == 0)
086f44f5 2555 ret = SUCCESS;
ed9b2264 2556 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2557 ret = FAST_IO_FAIL;
086f44f5
BVA
2558 else
2559 ret = FAILED;
509c07bc 2560 srp_free_req(ch, req, scmnd, 0);
22032991 2561 scmnd->result = DID_ABORT << 16;
d8536670 2562 scmnd->scsi_done(scmnd);
d945e1df 2563
086f44f5 2564 return ret;
aef9ec39
RD
2565}
2566
2567static int srp_reset_device(struct scsi_cmnd *scmnd)
2568{
d945e1df 2569 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2570 struct srp_rdma_ch *ch;
536ae14e 2571 	int i, j;
d945e1df 2572
7aa54bd7 2573 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2574
d92c0da7 2575 ch = &target->ch[0];
509c07bc 2576 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
f8b6e31e 2577 SRP_TSK_LUN_RESET))
d945e1df 2578 return FAILED;
509c07bc 2579 if (ch->tsk_mgmt_status)
d945e1df
RD
2580 return FAILED;
2581
d92c0da7
BVA
2582 for (i = 0; i < target->ch_count; i++) {
2583 ch = &target->ch[i];
2584 for (i = 0; i < target->req_ring_size; ++i) {
2585 struct srp_request *req = &ch->req_ring[i];
509c07bc 2586
d92c0da7
BVA
2587 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2588 }
536ae14e 2589 }
d945e1df 2590
d945e1df 2591 return SUCCESS;
aef9ec39
RD
2592}
2593
2594static int srp_reset_host(struct scsi_cmnd *scmnd)
2595{
2596 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2597
7aa54bd7 2598 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2599
ed9b2264 2600 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2601}
2602
c9b03c1a
BVA
2603static int srp_slave_configure(struct scsi_device *sdev)
2604{
2605 struct Scsi_Host *shost = sdev->host;
2606 struct srp_target_port *target = host_to_target(shost);
2607 struct request_queue *q = sdev->request_queue;
2608 unsigned long timeout;
2609
2610 if (sdev->type == TYPE_DISK) {
2611 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2612 blk_queue_rq_timeout(q, timeout);
2613 }
2614
2615 return 0;
2616}
2617
ee959b00
TJ
2618static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2619 char *buf)
6ecb0c84 2620{
ee959b00 2621 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2622
45c37cad 2623 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2624}
2625
ee959b00
TJ
2626static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2627 char *buf)
6ecb0c84 2628{
ee959b00 2629 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2630
45c37cad 2631 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2632}
2633
ee959b00
TJ
2634static ssize_t show_service_id(struct device *dev,
2635 struct device_attribute *attr, char *buf)
6ecb0c84 2636{
ee959b00 2637 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2638
45c37cad 2639 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
6ecb0c84
RD
2640}
2641
ee959b00
TJ
2642static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2643 char *buf)
6ecb0c84 2644{
ee959b00 2645 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2646
747fe000 2647 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
6ecb0c84
RD
2648}
2649
848b3082
BVA
2650static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2651 char *buf)
2652{
2653 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2654
747fe000 2655 return sprintf(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2656}
2657
ee959b00
TJ
2658static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2659 char *buf)
6ecb0c84 2660{
ee959b00 2661 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2662 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2663
509c07bc 2664 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
6ecb0c84
RD
2665}
2666
ee959b00
TJ
2667static ssize_t show_orig_dgid(struct device *dev,
2668 struct device_attribute *attr, char *buf)
3633b3d0 2669{
ee959b00 2670 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2671
747fe000 2672 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
3633b3d0
IR
2673}
2674
89de7486
BVA
2675static ssize_t show_req_lim(struct device *dev,
2676 struct device_attribute *attr, char *buf)
2677{
2678 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2679 struct srp_rdma_ch *ch;
2680 int i, req_lim = INT_MAX;
89de7486 2681
d92c0da7
BVA
2682 for (i = 0; i < target->ch_count; i++) {
2683 ch = &target->ch[i];
2684 req_lim = min(req_lim, ch->req_lim);
2685 }
2686 return sprintf(buf, "%d\n", req_lim);
89de7486
BVA
2687}
2688
ee959b00
TJ
2689static ssize_t show_zero_req_lim(struct device *dev,
2690 struct device_attribute *attr, char *buf)
6bfa24fa 2691{
ee959b00 2692 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2693
6bfa24fa
RD
2694 return sprintf(buf, "%d\n", target->zero_req_lim);
2695}
2696
ee959b00
TJ
2697static ssize_t show_local_ib_port(struct device *dev,
2698 struct device_attribute *attr, char *buf)
ded7f1a1 2699{
ee959b00 2700 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1
IR
2701
2702 return sprintf(buf, "%d\n", target->srp_host->port);
2703}
2704
ee959b00
TJ
2705static ssize_t show_local_ib_device(struct device *dev,
2706 struct device_attribute *attr, char *buf)
ded7f1a1 2707{
ee959b00 2708 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2709
05321937 2710 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
ded7f1a1
IR
2711}
2712
d92c0da7
BVA
2713static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2714 char *buf)
2715{
2716 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2717
2718 return sprintf(buf, "%d\n", target->ch_count);
2719}
2720
4b5e5f41
BVA
2721static ssize_t show_comp_vector(struct device *dev,
2722 struct device_attribute *attr, char *buf)
2723{
2724 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2725
2726 return sprintf(buf, "%d\n", target->comp_vector);
2727}
2728
7bb312e4
VP
2729static ssize_t show_tl_retry_count(struct device *dev,
2730 struct device_attribute *attr, char *buf)
2731{
2732 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2733
2734 return sprintf(buf, "%d\n", target->tl_retry_count);
2735}
2736
49248644
DD
2737static ssize_t show_cmd_sg_entries(struct device *dev,
2738 struct device_attribute *attr, char *buf)
2739{
2740 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2741
2742 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2743}
2744
c07d424d
DD
2745static ssize_t show_allow_ext_sg(struct device *dev,
2746 struct device_attribute *attr, char *buf)
2747{
2748 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2749
2750 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2751}
2752
ee959b00
TJ
2753static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2754static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2755static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2756static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
848b3082 2757static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
ee959b00
TJ
2758static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2759static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
89de7486 2760static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
ee959b00
TJ
2761static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2762static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2763static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
d92c0da7 2764static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
4b5e5f41 2765static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
7bb312e4 2766static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
49248644 2767static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
c07d424d 2768static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
ee959b00
TJ
2769
2770static struct device_attribute *srp_host_attrs[] = {
2771 &dev_attr_id_ext,
2772 &dev_attr_ioc_guid,
2773 &dev_attr_service_id,
2774 &dev_attr_pkey,
848b3082 2775 &dev_attr_sgid,
ee959b00
TJ
2776 &dev_attr_dgid,
2777 &dev_attr_orig_dgid,
89de7486 2778 &dev_attr_req_lim,
ee959b00
TJ
2779 &dev_attr_zero_req_lim,
2780 &dev_attr_local_ib_port,
2781 &dev_attr_local_ib_device,
d92c0da7 2782 &dev_attr_ch_count,
4b5e5f41 2783 &dev_attr_comp_vector,
7bb312e4 2784 &dev_attr_tl_retry_count,
49248644 2785 &dev_attr_cmd_sg_entries,
c07d424d 2786 &dev_attr_allow_ext_sg,
6ecb0c84
RD
2787 NULL
2788};
2789
aef9ec39
RD
2790static struct scsi_host_template srp_template = {
2791 .module = THIS_MODULE,
b7f008fd
RD
2792 .name = "InfiniBand SRP initiator",
2793 .proc_name = DRV_NAME,
c9b03c1a 2794 .slave_configure = srp_slave_configure,
aef9ec39
RD
2795 .info = srp_target_info,
2796 .queuecommand = srp_queuecommand,
71444b97 2797 .change_queue_depth = srp_change_queue_depth,
aef9ec39
RD
2798 .eh_abort_handler = srp_abort,
2799 .eh_device_reset_handler = srp_reset_device,
2800 .eh_host_reset_handler = srp_reset_host,
2742c1da 2801 .skip_settle_delay = true,
49248644 2802 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 2803 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 2804 .this_id = -1,
4d73f95f 2805 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
6ecb0c84 2806 .use_clustering = ENABLE_CLUSTERING,
77f2c1a4 2807 .shost_attrs = srp_host_attrs,
c40ecc12 2808 .track_queue_depth = 1,
aef9ec39
RD
2809};
2810
34aa654e
BVA
2811static int srp_sdev_count(struct Scsi_Host *host)
2812{
2813 struct scsi_device *sdev;
2814 int c = 0;
2815
2816 shost_for_each_device(sdev, host)
2817 c++;
2818
2819 return c;
2820}
2821
bc44bd1d
BVA
2822/*
2823 * Return values:
2824 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2825 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2826 * removal has been scheduled.
2827 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2828 */
aef9ec39
RD
2829static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2830{
3236822b
FT
2831 struct srp_rport_identifiers ids;
2832 struct srp_rport *rport;
2833
34aa654e 2834 target->state = SRP_TARGET_SCANNING;
aef9ec39 2835 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 2836 be64_to_cpu(target->id_ext));
aef9ec39 2837
05321937 2838 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
aef9ec39
RD
2839 return -ENODEV;
2840
3236822b
FT
2841 memcpy(ids.port_id, &target->id_ext, 8);
2842 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 2843 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
2844 rport = srp_rport_add(target->scsi_host, &ids);
2845 if (IS_ERR(rport)) {
2846 scsi_remove_host(target->scsi_host);
2847 return PTR_ERR(rport);
2848 }
2849
dc1bdbd9 2850 rport->lld_data = target;
9dd69a60 2851 target->rport = rport;
dc1bdbd9 2852
b3589fd4 2853 spin_lock(&host->target_lock);
aef9ec39 2854 list_add_tail(&target->list, &host->target_list);
b3589fd4 2855 spin_unlock(&host->target_lock);
aef9ec39 2856
aef9ec39 2857 scsi_scan_target(&target->scsi_host->shost_gendev,
1962a4a1 2858 0, target->scsi_id, SCAN_WILD_CARD, 0);
aef9ec39 2859
c014c8cd
BVA
2860 if (srp_connected_ch(target) < target->ch_count ||
2861 target->qp_in_error) {
34aa654e
BVA
2862 shost_printk(KERN_INFO, target->scsi_host,
2863 PFX "SCSI scan failed - removing SCSI host\n");
2864 srp_queue_remove_work(target);
2865 goto out;
2866 }
2867
2868 pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2869 dev_name(&target->scsi_host->shost_gendev),
2870 srp_sdev_count(target->scsi_host));
2871
2872 spin_lock_irq(&target->lock);
2873 if (target->state == SRP_TARGET_SCANNING)
2874 target->state = SRP_TARGET_LIVE;
2875 spin_unlock_irq(&target->lock);
2876
2877out:
aef9ec39
RD
2878 return 0;
2879}
2880
ee959b00 2881static void srp_release_dev(struct device *dev)
aef9ec39
RD
2882{
2883 struct srp_host *host =
ee959b00 2884 container_of(dev, struct srp_host, dev);
aef9ec39
RD
2885
2886 complete(&host->released);
2887}
2888
2889static struct class srp_class = {
2890 .name = "infiniband_srp",
ee959b00 2891 .dev_release = srp_release_dev
aef9ec39
RD
2892};
2893
96fc248a
BVA
2894/**
2895 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
2896 * @host: SRP host.
2897 * @target: SRP target port.
96fc248a
BVA
2898 */
2899static bool srp_conn_unique(struct srp_host *host,
2900 struct srp_target_port *target)
2901{
2902 struct srp_target_port *t;
2903 bool ret = false;
2904
2905 if (target->state == SRP_TARGET_REMOVED)
2906 goto out;
2907
2908 ret = true;
2909
2910 spin_lock(&host->target_lock);
2911 list_for_each_entry(t, &host->target_list, list) {
2912 if (t != target &&
2913 target->id_ext == t->id_ext &&
2914 target->ioc_guid == t->ioc_guid &&
2915 target->initiator_ext == t->initiator_ext) {
2916 ret = false;
2917 break;
2918 }
2919 }
2920 spin_unlock(&host->target_lock);
2921
2922out:
2923 return ret;
2924}
2925
aef9ec39
RD
2926/*
2927 * Target ports are added by writing
2928 *
2929 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2930 * pkey=<P_Key>,service_id=<service ID>
2931 *
2932 * to the add_target sysfs attribute.
2933 */
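/*
 * Illustrative example only - the HCA name, GID and identifiers below are
 * placeholders rather than values taken from a real fabric:
 *
 *   echo "id_ext=0002c90300a00001,ioc_guid=0002c90300a00001,dgid=fe800000000000000002c90300a00002,pkey=ffff,service_id=0002c90300a00001" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */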
2934enum {
2935 SRP_OPT_ERR = 0,
2936 SRP_OPT_ID_EXT = 1 << 0,
2937 SRP_OPT_IOC_GUID = 1 << 1,
2938 SRP_OPT_DGID = 1 << 2,
2939 SRP_OPT_PKEY = 1 << 3,
2940 SRP_OPT_SERVICE_ID = 1 << 4,
2941 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 2942 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 2943 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 2944 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 2945 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
2946 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2947 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 2948 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 2949 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 2950 SRP_OPT_QUEUE_SIZE = 1 << 14,
aef9ec39
RD
2951 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2952 SRP_OPT_IOC_GUID |
2953 SRP_OPT_DGID |
2954 SRP_OPT_PKEY |
2955 SRP_OPT_SERVICE_ID),
2956};
2957
a447c093 2958static const match_table_t srp_opt_tokens = {
52fb2b50
VP
2959 { SRP_OPT_ID_EXT, "id_ext=%s" },
2960 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2961 { SRP_OPT_DGID, "dgid=%s" },
2962 { SRP_OPT_PKEY, "pkey=%x" },
2963 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2964 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2965 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
0c0450db 2966 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 2967 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 2968 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
2969 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2970 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 2971 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 2972 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 2973 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
52fb2b50 2974 { SRP_OPT_ERR, NULL }
aef9ec39
RD
2975};
2976
2977static int srp_parse_options(const char *buf, struct srp_target_port *target)
2978{
2979 char *options, *sep_opt;
2980 char *p;
2981 char dgid[3];
2982 substring_t args[MAX_OPT_ARGS];
2983 int opt_mask = 0;
2984 int token;
2985 int ret = -EINVAL;
2986 int i;
2987
2988 options = kstrdup(buf, GFP_KERNEL);
2989 if (!options)
2990 return -ENOMEM;
2991
2992 sep_opt = options;
7dcf9c19 2993 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
2994 if (!*p)
2995 continue;
2996
2997 token = match_token(p, srp_opt_tokens, args);
2998 opt_mask |= token;
2999
3000 switch (token) {
3001 case SRP_OPT_ID_EXT:
3002 p = match_strdup(args);
a20f3a6d
IR
3003 if (!p) {
3004 ret = -ENOMEM;
3005 goto out;
3006 }
aef9ec39
RD
3007 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3008 kfree(p);
3009 break;
3010
3011 case SRP_OPT_IOC_GUID:
3012 p = match_strdup(args);
a20f3a6d
IR
3013 if (!p) {
3014 ret = -ENOMEM;
3015 goto out;
3016 }
aef9ec39
RD
3017 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3018 kfree(p);
3019 break;
3020
3021 case SRP_OPT_DGID:
3022 p = match_strdup(args);
a20f3a6d
IR
3023 if (!p) {
3024 ret = -ENOMEM;
3025 goto out;
3026 }
aef9ec39 3027 if (strlen(p) != 32) {
e0bda7d8 3028 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3029 kfree(p);
aef9ec39
RD
3030 goto out;
3031 }
3032
3033 for (i = 0; i < 16; ++i) {
747fe000
BVA
3034 strlcpy(dgid, p + i * 2, sizeof(dgid));
3035 if (sscanf(dgid, "%hhx",
3036 &target->orig_dgid.raw[i]) < 1) {
3037 ret = -EINVAL;
3038 kfree(p);
3039 goto out;
3040 }
aef9ec39 3041 }
bf17c1c7 3042 kfree(p);
aef9ec39
RD
3043 break;
3044
3045 case SRP_OPT_PKEY:
3046 if (match_hex(args, &token)) {
e0bda7d8 3047 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3048 goto out;
3049 }
747fe000 3050 target->pkey = cpu_to_be16(token);
aef9ec39
RD
3051 break;
3052
3053 case SRP_OPT_SERVICE_ID:
3054 p = match_strdup(args);
a20f3a6d
IR
3055 if (!p) {
3056 ret = -ENOMEM;
3057 goto out;
3058 }
aef9ec39
RD
3059 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3060 kfree(p);
3061 break;
3062
3063 case SRP_OPT_MAX_SECT:
3064 if (match_int(args, &token)) {
e0bda7d8 3065 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3066 goto out;
3067 }
3068 target->scsi_host->max_sectors = token;
3069 break;
3070
4d73f95f
BVA
3071 case SRP_OPT_QUEUE_SIZE:
3072 if (match_int(args, &token) || token < 1) {
3073 pr_warn("bad queue_size parameter '%s'\n", p);
3074 goto out;
3075 }
3076 target->scsi_host->can_queue = token;
3077 target->queue_size = token + SRP_RSP_SQ_SIZE +
3078 SRP_TSK_MGMT_SQ_SIZE;
3079 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3080 target->scsi_host->cmd_per_lun = token;
3081 break;
3082
52fb2b50 3083 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3084 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3085 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3086 p);
52fb2b50
VP
3087 goto out;
3088 }
4d73f95f 3089 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3090 break;
3091
0c0450db
R
3092 case SRP_OPT_IO_CLASS:
3093 if (match_hex(args, &token)) {
e0bda7d8 3094 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3095 goto out;
3096 }
3097 if (token != SRP_REV10_IB_IO_CLASS &&
3098 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3099 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3100 token, SRP_REV10_IB_IO_CLASS,
3101 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3102 goto out;
3103 }
3104 target->io_class = token;
3105 break;
3106
01cb9bcb
IR
3107 case SRP_OPT_INITIATOR_EXT:
3108 p = match_strdup(args);
a20f3a6d
IR
3109 if (!p) {
3110 ret = -ENOMEM;
3111 goto out;
3112 }
01cb9bcb
IR
3113 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3114 kfree(p);
3115 break;
3116
49248644
DD
3117 case SRP_OPT_CMD_SG_ENTRIES:
3118 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3119 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3120 p);
49248644
DD
3121 goto out;
3122 }
3123 target->cmd_sg_cnt = token;
3124 break;
3125
c07d424d
DD
3126 case SRP_OPT_ALLOW_EXT_SG:
3127 if (match_int(args, &token)) {
e0bda7d8 3128 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3129 goto out;
3130 }
3131 target->allow_ext_sg = !!token;
3132 break;
3133
3134 case SRP_OPT_SG_TABLESIZE:
3135 if (match_int(args, &token) || token < 1 ||
3136 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
e0bda7d8
BVA
3137 pr_warn("bad max sg_tablesize parameter '%s'\n",
3138 p);
c07d424d
DD
3139 goto out;
3140 }
3141 target->sg_tablesize = token;
3142 break;
3143
4b5e5f41
BVA
3144 case SRP_OPT_COMP_VECTOR:
3145 if (match_int(args, &token) || token < 0) {
3146 pr_warn("bad comp_vector parameter '%s'\n", p);
3147 goto out;
3148 }
3149 target->comp_vector = token;
3150 break;
3151
7bb312e4
VP
3152 case SRP_OPT_TL_RETRY_COUNT:
3153 if (match_int(args, &token) || token < 2 || token > 7) {
3154 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3155 p);
3156 goto out;
3157 }
3158 target->tl_retry_count = token;
3159 break;
3160
aef9ec39 3161 default:
e0bda7d8
BVA
3162 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3163 p);
aef9ec39
RD
3164 goto out;
3165 }
3166 }
3167
3168 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3169 ret = 0;
3170 else
3171 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3172 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3173 !(srp_opt_tokens[i].token & opt_mask))
e0bda7d8
BVA
3174 pr_warn("target creation request is missing parameter '%s'\n",
3175 srp_opt_tokens[i].pattern);
aef9ec39 3176
4d73f95f
BVA
3177 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3178 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3179 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3180 target->scsi_host->cmd_per_lun,
3181 target->scsi_host->can_queue);
3182
aef9ec39
RD
3183out:
3184 kfree(options);
3185 return ret;
3186}
3187
ee959b00
TJ
3188static ssize_t srp_create_target(struct device *dev,
3189 struct device_attribute *attr,
aef9ec39
RD
3190 const char *buf, size_t count)
3191{
3192 struct srp_host *host =
ee959b00 3193 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3194 struct Scsi_Host *target_host;
3195 struct srp_target_port *target;
509c07bc 3196 struct srp_rdma_ch *ch;
d1b4289e
BVA
3197 struct srp_device *srp_dev = host->srp_dev;
3198 struct ib_device *ibdev = srp_dev->dev;
d92c0da7
BVA
3199 int ret, node_idx, node, cpu, i;
3200 bool multich = false;
aef9ec39
RD
3201
3202 target_host = scsi_host_alloc(&srp_template,
3203 sizeof (struct srp_target_port));
3204 if (!target_host)
3205 return -ENOMEM;
3206
49248644 3207 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3208 target_host->max_channel = 0;
3209 target_host->max_id = 1;
985aa495 3210 target_host->max_lun = -1LL;
3c8edf0e 3211 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
5f068992 3212
aef9ec39 3213 target = host_to_target(target_host);
aef9ec39 3214
49248644
DD
3215 target->io_class = SRP_REV16A_IB_IO_CLASS;
3216 target->scsi_host = target_host;
3217 target->srp_host = host;
e6bf5f48 3218 target->lkey = host->srp_dev->pd->local_dma_lkey;
03f6fb93 3219 target->global_mr = host->srp_dev->global_mr;
49248644 3220 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3221 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3222 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3223 target->tl_retry_count = 7;
4d73f95f 3224 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3225
34aa654e
BVA
3226 /*
 3227 	 * Prevent the SCSI host from being removed by srp_remove_target()
 3228 	 * before this function returns.
3229 */
3230 scsi_host_get(target->scsi_host);
3231
2d7091bc
BVA
3232 mutex_lock(&host->add_target_mutex);
3233
aef9ec39
RD
3234 ret = srp_parse_options(buf, target);
3235 if (ret)
fb49c8bb 3236 goto out;
aef9ec39 3237
4d73f95f
BVA
3238 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3239
96fc248a
BVA
3240 if (!srp_conn_unique(target->srp_host, target)) {
3241 shost_printk(KERN_INFO, target->scsi_host,
3242 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3243 be64_to_cpu(target->id_ext),
3244 be64_to_cpu(target->ioc_guid),
3245 be64_to_cpu(target->initiator_ext));
3246 ret = -EEXIST;
fb49c8bb 3247 goto out;
96fc248a
BVA
3248 }
3249
5cfb1782 3250 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3251 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3252 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3253 target->sg_tablesize = target->cmd_sg_cnt;
3254 }
3255
3256 target_host->sg_tablesize = target->sg_tablesize;
3257 target->indirect_size = target->sg_tablesize *
3258 sizeof (struct srp_direct_buf);
49248644
DD
3259 target->max_iu_len = sizeof (struct srp_cmd) +
3260 sizeof (struct srp_indirect_buf) +
3261 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3262
c1120f89 3263 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3264 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3265 spin_lock_init(&target->lock);
55ee3ab2 3266 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
2088ca66 3267 if (ret)
fb49c8bb 3268 goto out;
aef9ec39 3269
d92c0da7
BVA
3270 ret = -ENOMEM;
3271 target->ch_count = max_t(unsigned, num_online_nodes(),
3272 min(ch_count ? :
3273 min(4 * num_online_nodes(),
3274 ibdev->num_comp_vectors),
3275 num_online_cpus()));
3276 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3277 GFP_KERNEL);
3278 if (!target->ch)
fb49c8bb 3279 goto out;
aef9ec39 3280
d92c0da7
BVA
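	/*
	 * Descriptive note: the nested loops below spread the RDMA channels
	 * evenly over the online NUMA nodes and pick, for each channel, a
	 * completion vector from the slice of vectors assigned to its node.
	 */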
3281 node_idx = 0;
3282 for_each_online_node(node) {
3283 const int ch_start = (node_idx * target->ch_count /
3284 num_online_nodes());
3285 const int ch_end = ((node_idx + 1) * target->ch_count /
3286 num_online_nodes());
3287 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3288 num_online_nodes() + target->comp_vector)
3289 % ibdev->num_comp_vectors;
3290 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3291 num_online_nodes() + target->comp_vector)
3292 % ibdev->num_comp_vectors;
3293 int cpu_idx = 0;
3294
3295 for_each_online_cpu(cpu) {
3296 if (cpu_to_node(cpu) != node)
3297 continue;
3298 if (ch_start + cpu_idx >= ch_end)
3299 continue;
3300 ch = &target->ch[ch_start + cpu_idx];
3301 ch->target = target;
3302 ch->comp_vector = cv_start == cv_end ? cv_start :
3303 cv_start + cpu_idx % (cv_end - cv_start);
3304 spin_lock_init(&ch->lock);
3305 INIT_LIST_HEAD(&ch->free_tx);
3306 ret = srp_new_cm_id(ch);
3307 if (ret)
3308 goto err_disconnect;
aef9ec39 3309
d92c0da7
BVA
3310 ret = srp_create_ch_ib(ch);
3311 if (ret)
3312 goto err_disconnect;
3313
3314 ret = srp_alloc_req_data(ch);
3315 if (ret)
3316 goto err_disconnect;
3317
3318 ret = srp_connect_ch(ch, multich);
3319 if (ret) {
3320 shost_printk(KERN_ERR, target->scsi_host,
3321 PFX "Connection %d/%d failed\n",
3322 ch_start + cpu_idx,
3323 target->ch_count);
3324 if (node_idx == 0 && cpu_idx == 0) {
3325 goto err_disconnect;
3326 } else {
3327 srp_free_ch_ib(target, ch);
3328 srp_free_req_data(target, ch);
3329 target->ch_count = ch - target->ch;
c257ea6f 3330 goto connected;
d92c0da7
BVA
3331 }
3332 }
3333
3334 multich = true;
3335 cpu_idx++;
3336 }
3337 node_idx++;
aef9ec39
RD
3338 }
3339
c257ea6f 3340connected:
d92c0da7
BVA
3341 target->scsi_host->nr_hw_queues = target->ch_count;
3342
aef9ec39
RD
3343 ret = srp_add_target(host, target);
3344 if (ret)
3345 goto err_disconnect;
3346
34aa654e
BVA
3347 if (target->state != SRP_TARGET_REMOVED) {
3348 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3349 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3350 be64_to_cpu(target->id_ext),
3351 be64_to_cpu(target->ioc_guid),
747fe000 3352 be16_to_cpu(target->pkey),
34aa654e 3353 be64_to_cpu(target->service_id),
747fe000 3354 target->sgid.raw, target->orig_dgid.raw);
34aa654e 3355 }
e7ffde01 3356
2d7091bc
BVA
3357 ret = count;
3358
3359out:
3360 mutex_unlock(&host->add_target_mutex);
34aa654e
BVA
3361
3362 scsi_host_put(target->scsi_host);
bc44bd1d
BVA
3363 if (ret < 0)
3364 scsi_host_put(target->scsi_host);
34aa654e 3365
2d7091bc 3366 return ret;
aef9ec39
RD
3367
3368err_disconnect:
3369 srp_disconnect_target(target);
3370
d92c0da7
BVA
3371 for (i = 0; i < target->ch_count; i++) {
3372 ch = &target->ch[i];
3373 srp_free_ch_ib(target, ch);
3374 srp_free_req_data(target, ch);
3375 }
aef9ec39 3376
d92c0da7 3377 kfree(target->ch);
2d7091bc 3378 goto out;
aef9ec39
RD
3379}
3380
ee959b00 3381static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
aef9ec39 3382
ee959b00
TJ
3383static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3384 char *buf)
aef9ec39 3385{
ee959b00 3386 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3387
05321937 3388 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
aef9ec39
RD
3389}
3390
ee959b00 3391static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
aef9ec39 3392
ee959b00
TJ
3393static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3394 char *buf)
aef9ec39 3395{
ee959b00 3396 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39
RD
3397
3398 return sprintf(buf, "%d\n", host->port);
3399}
3400
ee959b00 3401static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
aef9ec39 3402
f5358a17 3403static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3404{
3405 struct srp_host *host;
3406
3407 host = kzalloc(sizeof *host, GFP_KERNEL);
3408 if (!host)
3409 return NULL;
3410
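/*
 * Each (HCA, port) pair gets its own srp_host, registered as class
 * device "srp-<ibdev>-<port>" with the add_target, ibdev and port
 * sysfs attributes created below.
 */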
3411 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3412 spin_lock_init(&host->target_lock);
aef9ec39 3413 init_completion(&host->released);
2d7091bc 3414 mutex_init(&host->add_target_mutex);
05321937 3415 host->srp_dev = device;
aef9ec39
RD
3416 host->port = port;
3417
ee959b00
TJ
3418 host->dev.class = &srp_class;
3419 host->dev.parent = device->dev->dma_device;
d927e38c 3420 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 3421
ee959b00 3422 if (device_register(&host->dev))
f5358a17 3423 goto free_host;
ee959b00 3424 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3425 goto err_class;
ee959b00 3426 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3427 goto err_class;
ee959b00 3428 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3429 goto err_class;
3430
3431 return host;
3432
3433err_class:
ee959b00 3434 device_unregister(&host->dev);
aef9ec39 3435
f5358a17 3436free_host:
aef9ec39
RD
3437 kfree(host);
3438
3439 return NULL;
3440}
3441
3442static void srp_add_one(struct ib_device *device)
3443{
f5358a17
RD
3444 struct srp_device *srp_dev;
3445 struct ib_device_attr *dev_attr;
aef9ec39 3446 struct srp_host *host;
4139032b 3447 int mr_page_shift, p;
52ede08f 3448 u64 max_pages_per_mr;
aef9ec39 3449
f5358a17
RD
3450 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3451 if (!dev_attr)
cf311cd4 3452 return;
aef9ec39 3453
f5358a17 3454 if (ib_query_device(device, dev_attr)) {
e0bda7d8 3455 pr_warn("Query device failed for %s\n", device->name);
f5358a17
RD
3456 goto free_attr;
3457 }
3458
3459 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3460 if (!srp_dev)
3461 goto free_attr;
3462
d1b4289e
BVA
3463 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3464 device->map_phys_fmr && device->unmap_fmr);
5cfb1782
BVA
3465 srp_dev->has_fr = (dev_attr->device_cap_flags &
3466 IB_DEVICE_MEM_MGT_EXTENSIONS);
3467 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3468 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3469
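/*
 * Prefer fast registration (FR) over FMR when both are supported and
 * prefer_fr is set (the default); fall back to FMR otherwise.  Exactly
 * one of use_fast_reg and use_fmr ends up true unless the HCA supports
 * neither.
 */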
3470 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3471 (!srp_dev->has_fmr || prefer_fr));
002f1567 3472 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
d1b4289e 3473
f5358a17
RD
3474 /*
3475 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3476 * minimum of 4096 bytes. We're unlikely to build large sglists
3477 * out of smaller entries.
f5358a17 3478 */
52ede08f
BVA
3479 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3480 srp_dev->mr_page_size = 1 << mr_page_shift;
3481 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3482 max_pages_per_mr = dev_attr->max_mr_size;
3483 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3484 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3485 max_pages_per_mr);
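/*
 * Illustrative example only: a page_size_cap of 0xfffff000 yields
 * mr_page_shift = 12, i.e. a 4096-byte MR page size, and with
 * max_mr_size = 4 GiB this allows 1048576 pages per MR before the
 * SRP_MAX_PAGES_PER_MR clamp (and, for fast registration, the
 * max_fast_reg_page_list_len clamp applied below).
 */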
5cfb1782
BVA
3486 if (srp_dev->use_fast_reg) {
3487 srp_dev->max_pages_per_mr =
3488 min_t(u32, srp_dev->max_pages_per_mr,
3489 dev_attr->max_fast_reg_page_list_len);
3490 }
52ede08f
BVA
3491 srp_dev->mr_max_size = srp_dev->mr_page_size *
3492 srp_dev->max_pages_per_mr;
5cfb1782 3493 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
52ede08f 3494 device->name, mr_page_shift, dev_attr->max_mr_size,
5cfb1782 3495 dev_attr->max_fast_reg_page_list_len,
52ede08f 3496 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3497
3498 INIT_LIST_HEAD(&srp_dev->dev_list);
3499
3500 srp_dev->dev = device;
3501 srp_dev->pd = ib_alloc_pd(device);
3502 if (IS_ERR(srp_dev->pd))
3503 goto free_dev;
3504
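/*
 * A global rkey (DMA MR with remote access) is only set up when
 * register_always is off or when the HCA supports neither FMR nor
 * fast registration; otherwise all buffers are registered per
 * request and srp_dev->global_mr stays NULL.
 */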
03f6fb93
BVA
3505 if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3506 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3507 IB_ACCESS_LOCAL_WRITE |
3508 IB_ACCESS_REMOTE_READ |
3509 IB_ACCESS_REMOTE_WRITE);
3510 if (IS_ERR(srp_dev->global_mr))
3511 goto err_pd;
3512 } else {
3513 srp_dev->global_mr = NULL;
3514 }
f5358a17 3515
4139032b 3516 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
f5358a17 3517 host = srp_add_port(srp_dev, p);
aef9ec39 3518 if (host)
f5358a17 3519 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
3520 }
3521
f5358a17
RD
3522 ib_set_client_data(device, &srp_client, srp_dev);
3523
3524 goto free_attr;
3525
3526err_pd:
3527 ib_dealloc_pd(srp_dev->pd);
3528
3529free_dev:
3530 kfree(srp_dev);
3531
3532free_attr:
3533 kfree(dev_attr);
aef9ec39
RD
3534}
3535
7c1eb45a 3536static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 3537{
f5358a17 3538 struct srp_device *srp_dev;
aef9ec39 3539 struct srp_host *host, *tmp_host;
ef6c49d8 3540 struct srp_target_port *target;
aef9ec39 3541
7c1eb45a 3542 srp_dev = client_data;
1fe0cb84
DB
3543 if (!srp_dev)
3544 return;
aef9ec39 3545
f5358a17 3546 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 3547 device_unregister(&host->dev);
aef9ec39
RD
3548 /*
3549 * Wait for the sysfs entry to go away, so that no new
3550 * target ports can be created.
3551 */
3552 wait_for_completion(&host->released);
3553
3554 /*
ef6c49d8 3555 * Remove all target ports.
aef9ec39 3556 */
b3589fd4 3557 spin_lock(&host->target_lock);
ef6c49d8
BVA
3558 list_for_each_entry(target, &host->target_list, list)
3559 srp_queue_remove_work(target);
b3589fd4 3560 spin_unlock(&host->target_lock);
aef9ec39
RD
3561
3562 /*
bcc05910 3563 * Wait for tl_err and target port removal tasks.
aef9ec39 3564 */
ef6c49d8 3565 flush_workqueue(system_long_wq);
bcc05910 3566 flush_workqueue(srp_remove_wq);
aef9ec39 3567
aef9ec39
RD
3568 kfree(host);
3569 }
3570
03f6fb93
BVA
3571 if (srp_dev->global_mr)
3572 ib_dereg_mr(srp_dev->global_mr);
f5358a17
RD
3573 ib_dealloc_pd(srp_dev->pd);
3574
3575 kfree(srp_dev);
aef9ec39
RD
3576}
3577
3236822b 3578static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
3579 .has_rport_state = true,
3580 .reset_timer_if_blocked = true,
a95cadb9 3581 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
3582 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3583 .dev_loss_tmo = &srp_dev_loss_tmo,
3584 .reconnect = srp_rport_reconnect,
dc1bdbd9 3585 .rport_delete = srp_rport_delete,
ed9b2264 3586 .terminate_rport_io = srp_terminate_io,
3236822b
FT
3587};
3588
aef9ec39
RD
3589static int __init srp_init_module(void)
3590{
3591 int ret;
3592
dcb4cb85 3593 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
dd5e6e38 3594
49248644 3595 if (srp_sg_tablesize) {
e0bda7d8 3596 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
3597 if (!cmd_sg_entries)
3598 cmd_sg_entries = srp_sg_tablesize;
3599 }
3600
3601 if (!cmd_sg_entries)
3602 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3603
3604 if (cmd_sg_entries > 255) {
e0bda7d8 3605 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 3606 cmd_sg_entries = 255;
1e89a194
DD
3607 }
3608
c07d424d
DD
3609 if (!indirect_sg_entries)
3610 indirect_sg_entries = cmd_sg_entries;
3611 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
3612 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3613 cmd_sg_entries);
c07d424d
DD
3614 indirect_sg_entries = cmd_sg_entries;
3615 }
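/*
 * At this point cmd_sg_entries is within [1, 255] and
 * indirect_sg_entries is at least cmd_sg_entries.
 */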
3616
bcc05910 3617 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
3618 if (!srp_remove_wq) {
3619 ret = -ENOMEM;
bcc05910
BVA
3620 goto out;
3621 }
3622
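/* srp_attach_transport() signals failure by returning NULL, hence the preset error code. */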
3623 ret = -ENOMEM;
3236822b
FT
3624 ib_srp_transport_template =
3625 srp_attach_transport(&ib_srp_transport_functions);
3626 if (!ib_srp_transport_template)
bcc05910 3627 goto destroy_wq;
3236822b 3628
aef9ec39
RD
3629 ret = class_register(&srp_class);
3630 if (ret) {
e0bda7d8 3631 pr_err("couldn't register class infiniband_srp\n");
bcc05910 3632 goto release_tr;
aef9ec39
RD
3633 }
3634
c1a0b23b
MT
3635 ib_sa_register_client(&srp_sa_client);
3636
aef9ec39
RD
3637 ret = ib_register_client(&srp_client);
3638 if (ret) {
e0bda7d8 3639 pr_err("couldn't register IB client\n");
bcc05910 3640 goto unreg_sa;
aef9ec39
RD
3641 }
3642
bcc05910
BVA
3643out:
3644 return ret;
3645
3646unreg_sa:
3647 ib_sa_unregister_client(&srp_sa_client);
3648 class_unregister(&srp_class);
3649
3650release_tr:
3651 srp_release_transport(ib_srp_transport_template);
3652
3653destroy_wq:
3654 destroy_workqueue(srp_remove_wq);
3655 goto out;
aef9ec39
RD
3656}
3657
3658static void __exit srp_cleanup_module(void)
3659{
3660 ib_unregister_client(&srp_client);
c1a0b23b 3661 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 3662 class_unregister(&srp_class);
3236822b 3663 srp_release_transport(ib_srp_transport_template);
bcc05910 3664 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
3665}
3666
3667module_init(srp_init_module);
3668module_exit(srp_cleanup_module);