drivers/infiniband/ulp/srp/ib_srp.c
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/string.h>
40#include <linux/parser.h>
41#include <linux/random.h>
42#include <linux/jiffies.h>
43#include <linux/lockdep.h>
44#include <linux/inet.h>
45#include <rdma/ib_cache.h>
46
47#include <linux/atomic.h>
48
49#include <scsi/scsi.h>
50#include <scsi/scsi_device.h>
51#include <scsi/scsi_dbg.h>
52#include <scsi/scsi_tcq.h>
53#include <scsi/srp.h>
54#include <scsi/scsi_transport_srp.h>
55
56#include "ib_srp.h"
57
58#define DRV_NAME "ib_srp"
59#define PFX DRV_NAME ": "
60
61MODULE_AUTHOR("Roland Dreier");
62MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63MODULE_LICENSE("Dual BSD/GPL");
64
65#if !defined(CONFIG_DYNAMIC_DEBUG)
66#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67#define DYNAMIC_DEBUG_BRANCH(descriptor) false
68#endif
69
70static unsigned int srp_sg_tablesize;
71static unsigned int cmd_sg_entries;
72static unsigned int indirect_sg_entries;
73static bool allow_ext_sg;
74static bool register_always = true;
75static bool never_register;
76static int topspin_workarounds = 1;
77
78module_param(srp_sg_tablesize, uint, 0444);
79MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
80
81module_param(cmd_sg_entries, uint, 0444);
82MODULE_PARM_DESC(cmd_sg_entries,
83 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
84
85module_param(indirect_sg_entries, uint, 0444);
86MODULE_PARM_DESC(indirect_sg_entries,
87 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
88
89module_param(allow_ext_sg, bool, 0444);
90MODULE_PARM_DESC(allow_ext_sg,
91 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
92
93module_param(topspin_workarounds, int, 0444);
94MODULE_PARM_DESC(topspin_workarounds,
95 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
96
97module_param(register_always, bool, 0444);
98MODULE_PARM_DESC(register_always,
99 "Use memory registration even for contiguous memory regions");
100
101module_param(never_register, bool, 0444);
102MODULE_PARM_DESC(never_register, "Never register memory");
103
104static const struct kernel_param_ops srp_tmo_ops;
105
106static int srp_reconnect_delay = 10;
107module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
108 S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
110
111static int srp_fast_io_fail_tmo = 15;
112module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
113 S_IRUGO | S_IWUSR);
114MODULE_PARM_DESC(fast_io_fail_tmo,
115 "Number of seconds between the observation of a transport"
116 " layer error and failing all I/O. \"off\" means that this"
117 " functionality is disabled.");
118
119static int srp_dev_loss_tmo = 600;
120module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
121 S_IRUGO | S_IWUSR);
122MODULE_PARM_DESC(dev_loss_tmo,
123 "Maximum number of seconds that the SRP transport should"
124 " insulate transport layer errors. After this time has been"
125 " exceeded the SCSI host is removed. Should be"
126 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
127 " if fast_io_fail_tmo has not been set. \"off\" means that"
128 " this functionality is disabled.");
129
130static bool srp_use_imm_data = true;
131module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
132MODULE_PARM_DESC(use_imm_data,
133 "Whether or not to request permission to use immediate data during SRP login.");
134
135static unsigned int srp_max_imm_data = 8 * 1024;
136module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
137MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
138
139static unsigned ch_count;
140module_param(ch_count, uint, 0444);
141MODULE_PARM_DESC(ch_count,
142 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
143
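/*
 * Example (hypothetical values, for illustration only): all of the above are
 * ordinary module parameters, so they can be set at load time, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=255 ch_count=4 use_imm_data=0
 *
 * and they are also exposed under /sys/module/ib_srp/parameters/.
 */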
144static int srp_add_one(struct ib_device *device);
145static void srp_remove_one(struct ib_device *device, void *client_data);
146static void srp_rename_dev(struct ib_device *device, void *client_data);
147static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
148static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
149 const char *opname);
150static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
151 const struct ib_cm_event *event);
152static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
153 struct rdma_cm_event *event);
154
155static struct scsi_transport_template *ib_srp_transport_template;
156static struct workqueue_struct *srp_remove_wq;
157
158static struct ib_client srp_client = {
159 .name = "srp",
160 .add = srp_add_one,
161 .remove = srp_remove_one,
162 .rename = srp_rename_dev
163};
164
165static struct ib_sa_client srp_sa_client;
166
167static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
168{
169 int tmo = *(int *)kp->arg;
170
171 if (tmo >= 0)
172 return sysfs_emit(buffer, "%d\n", tmo);
173 else
174 return sysfs_emit(buffer, "off\n");
175}
176
177static int srp_tmo_set(const char *val, const struct kernel_param *kp)
178{
179 int tmo, res;
180
181 res = srp_parse_tmo(&tmo, val);
182 if (res)
183 goto out;
184
185 if (kp->arg == &srp_reconnect_delay)
186 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
187 srp_dev_loss_tmo);
188 else if (kp->arg == &srp_fast_io_fail_tmo)
189 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
190 else
191 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
192 tmo);
193 if (res)
194 goto out;
195 *(int *)kp->arg = tmo;
196
197out:
198 return res;
199}
200
201static const struct kernel_param_ops srp_tmo_ops = {
202 .get = srp_tmo_get,
203 .set = srp_tmo_set,
204};
205
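/*
 * srp_tmo_set() above cross-checks a new value against the other two
 * timeouts via srp_tmo_valid() before accepting it. Example (hypothetical
 * value): "echo 20 > /sys/module/ib_srp/parameters/fast_io_fail_tmo" is only
 * accepted if 20 is consistent with the current reconnect_delay and
 * dev_loss_tmo settings; writing "off" disables the fast_io_fail mechanism.
 */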
206static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
207{
208 return (struct srp_target_port *) host->hostdata;
209}
210
211static const char *srp_target_info(struct Scsi_Host *host)
212{
213 return host_to_target(host)->target_name;
214}
215
216static int srp_target_is_topspin(struct srp_target_port *target)
217{
218 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
219 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
220
221 return topspin_workarounds &&
222 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
223 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
224}
225
226static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
227 gfp_t gfp_mask,
228 enum dma_data_direction direction)
229{
230 struct srp_iu *iu;
231
232 iu = kmalloc(sizeof *iu, gfp_mask);
233 if (!iu)
234 goto out;
235
236 iu->buf = kzalloc(size, gfp_mask);
237 if (!iu->buf)
238 goto out_free_iu;
239
240 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
241 direction);
242 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
243 goto out_free_buf;
244
245 iu->size = size;
246 iu->direction = direction;
247
248 return iu;
249
250out_free_buf:
251 kfree(iu->buf);
252out_free_iu:
253 kfree(iu);
254out:
255 return NULL;
256}
257
258static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
259{
260 if (!iu)
261 return;
262
263 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
264 iu->direction);
265 kfree(iu->buf);
266 kfree(iu);
267}
268
269static void srp_qp_event(struct ib_event *event, void *context)
270{
271 pr_debug("QP event %s (%d)\n",
272 ib_event_msg(event->event), event->event);
273}
274
275static int srp_init_ib_qp(struct srp_target_port *target,
276 struct ib_qp *qp)
277{
278 struct ib_qp_attr *attr;
279 int ret;
280
281 attr = kmalloc(sizeof *attr, GFP_KERNEL);
282 if (!attr)
283 return -ENOMEM;
284
285 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
286 target->srp_host->port,
287 be16_to_cpu(target->ib_cm.pkey),
288 &attr->pkey_index);
289 if (ret)
290 goto out;
291
292 attr->qp_state = IB_QPS_INIT;
293 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
294 IB_ACCESS_REMOTE_WRITE);
295 attr->port_num = target->srp_host->port;
296
297 ret = ib_modify_qp(qp, attr,
298 IB_QP_STATE |
299 IB_QP_PKEY_INDEX |
300 IB_QP_ACCESS_FLAGS |
301 IB_QP_PORT);
302
303out:
304 kfree(attr);
305 return ret;
306}
307
308static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
309{
310 struct srp_target_port *target = ch->target;
311 struct ib_cm_id *new_cm_id;
312
313 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
314 srp_ib_cm_handler, ch);
315 if (IS_ERR(new_cm_id))
316 return PTR_ERR(new_cm_id);
317
318 if (ch->ib_cm.cm_id)
319 ib_destroy_cm_id(ch->ib_cm.cm_id);
320 ch->ib_cm.cm_id = new_cm_id;
321 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
322 target->srp_host->port))
323 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
324 else
325 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
326 ch->ib_cm.path.sgid = target->sgid;
327 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
328 ch->ib_cm.path.pkey = target->ib_cm.pkey;
329 ch->ib_cm.path.service_id = target->ib_cm.service_id;
330
331 return 0;
332}
333
334static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
335{
336 struct srp_target_port *target = ch->target;
337 struct rdma_cm_id *new_cm_id;
338 int ret;
339
340 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
341 RDMA_PS_TCP, IB_QPT_RC);
342 if (IS_ERR(new_cm_id)) {
343 ret = PTR_ERR(new_cm_id);
344 new_cm_id = NULL;
345 goto out;
346 }
347
348 init_completion(&ch->done);
349 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
350 &target->rdma_cm.src.sa : NULL,
351 &target->rdma_cm.dst.sa,
352 SRP_PATH_REC_TIMEOUT_MS);
353 if (ret) {
354 pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
355 &target->rdma_cm.src, &target->rdma_cm.dst, ret);
356 goto out;
357 }
358 ret = wait_for_completion_interruptible(&ch->done);
359 if (ret < 0)
360 goto out;
361
362 ret = ch->status;
363 if (ret) {
364 pr_err("Resolving address %pISpsc failed (%d)\n",
365 &target->rdma_cm.dst, ret);
366 goto out;
367 }
368
369 swap(ch->rdma_cm.cm_id, new_cm_id);
370
371out:
372 if (new_cm_id)
373 rdma_destroy_id(new_cm_id);
374
375 return ret;
376}
377
378static int srp_new_cm_id(struct srp_rdma_ch *ch)
379{
380 struct srp_target_port *target = ch->target;
381
382 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
383 srp_new_ib_cm_id(ch);
384}
385
386/**
387 * srp_destroy_fr_pool() - free the resources owned by a pool
388 * @pool: Fast registration pool to be destroyed.
389 */
390static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
391{
392 int i;
393 struct srp_fr_desc *d;
394
395 if (!pool)
396 return;
397
398 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
399 if (d->mr)
400 ib_dereg_mr(d->mr);
401 }
402 kfree(pool);
403}
404
405/**
406 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
407 * @device: IB device to allocate fast registration descriptors for.
408 * @pd: Protection domain associated with the FR descriptors.
409 * @pool_size: Number of descriptors to allocate.
410 * @max_page_list_len: Maximum fast registration work request page list length.
411 */
412static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
413 struct ib_pd *pd, int pool_size,
414 int max_page_list_len)
415{
416 struct srp_fr_pool *pool;
417 struct srp_fr_desc *d;
418 struct ib_mr *mr;
419 int i, ret = -EINVAL;
420 enum ib_mr_type mr_type;
421
422 if (pool_size <= 0)
423 goto err;
424 ret = -ENOMEM;
425 pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
426 if (!pool)
427 goto err;
428 pool->size = pool_size;
429 pool->max_page_list_len = max_page_list_len;
430 spin_lock_init(&pool->lock);
431 INIT_LIST_HEAD(&pool->free_list);
432
433 if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
434 mr_type = IB_MR_TYPE_SG_GAPS;
435 else
436 mr_type = IB_MR_TYPE_MEM_REG;
437
438 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
439 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
440 if (IS_ERR(mr)) {
441 ret = PTR_ERR(mr);
442 if (ret == -ENOMEM)
443 pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
444 dev_name(&device->dev));
445 goto destroy_pool;
446 }
447 d->mr = mr;
448 list_add_tail(&d->entry, &pool->free_list);
449 }
450
451out:
452 return pool;
453
454destroy_pool:
455 srp_destroy_fr_pool(pool);
456
457err:
458 pool = ERR_PTR(ret);
459 goto out;
460}
461
462/**
463 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
464 * @pool: Pool to obtain descriptor from.
465 */
466static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
467{
468 struct srp_fr_desc *d = NULL;
469 unsigned long flags;
470
471 spin_lock_irqsave(&pool->lock, flags);
472 if (!list_empty(&pool->free_list)) {
473 d = list_first_entry(&pool->free_list, typeof(*d), entry);
474 list_del(&d->entry);
475 }
476 spin_unlock_irqrestore(&pool->lock, flags);
477
478 return d;
479}
480
481/**
482 * srp_fr_pool_put() - put an FR descriptor back in the free list
483 * @pool: Pool the descriptor was allocated from.
484 * @desc: Pointer to an array of fast registration descriptor pointers.
485 * @n: Number of descriptors to put back.
486 *
487 * Note: The caller must already have queued an invalidation request for
488 * desc->mr->rkey before calling this function.
489 */
490static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
491 int n)
492{
493 unsigned long flags;
494 int i;
495
496 spin_lock_irqsave(&pool->lock, flags);
497 for (i = 0; i < n; i++)
498 list_add(&desc[i]->entry, &pool->free_list);
499 spin_unlock_irqrestore(&pool->lock, flags);
500}
501
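/*
 * Typical fast-registration descriptor life cycle in this driver:
 * srp_fr_pool_get() hands out a descriptor, srp_map_finish_fr() posts an
 * IB_WR_REG_MR work request that registers the S/G list under the
 * descriptor's rkey, and once the command is done srp_inv_rkey() posts an
 * IB_WR_LOCAL_INV before srp_fr_pool_put() returns the descriptor to the
 * free list.
 */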
502static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
503{
504 struct srp_device *dev = target->srp_host->srp_dev;
505
506 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
507 dev->max_pages_per_mr);
508}
509
510/**
511 * srp_destroy_qp() - destroy an RDMA queue pair
512 * @ch: SRP RDMA channel.
513 *
514 * Drain the qp before destroying it. This prevents the receive
515 * completion handler from accessing the queue pair while it is
516 * being destroyed.
517 */
518static void srp_destroy_qp(struct srp_rdma_ch *ch)
519{
520 spin_lock_irq(&ch->lock);
521 ib_process_cq_direct(ch->send_cq, -1);
522 spin_unlock_irq(&ch->lock);
523
524 ib_drain_qp(ch->qp);
525 ib_destroy_qp(ch->qp);
526}
527
528static int srp_create_ch_ib(struct srp_rdma_ch *ch)
529{
530 struct srp_target_port *target = ch->target;
531 struct srp_device *dev = target->srp_host->srp_dev;
532 const struct ib_device_attr *attr = &dev->dev->attrs;
533 struct ib_qp_init_attr *init_attr;
534 struct ib_cq *recv_cq, *send_cq;
535 struct ib_qp *qp;
536 struct srp_fr_pool *fr_pool = NULL;
537 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
538 int ret;
539
540 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
541 if (!init_attr)
542 return -ENOMEM;
543
544 /* queue_size + 1 for ib_drain_rq() */
545 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
546 ch->comp_vector, IB_POLL_SOFTIRQ);
547 if (IS_ERR(recv_cq)) {
548 ret = PTR_ERR(recv_cq);
549 goto err;
550 }
551
552 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
553 ch->comp_vector, IB_POLL_DIRECT);
554 if (IS_ERR(send_cq)) {
555 ret = PTR_ERR(send_cq);
556 goto err_recv_cq;
557 }
558
559 init_attr->event_handler = srp_qp_event;
560 init_attr->cap.max_send_wr = m * target->queue_size;
561 init_attr->cap.max_recv_wr = target->queue_size + 1;
562 init_attr->cap.max_recv_sge = 1;
563 init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
564 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
565 init_attr->qp_type = IB_QPT_RC;
566 init_attr->send_cq = send_cq;
567 init_attr->recv_cq = recv_cq;
568
569 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
570
571 if (target->using_rdma_cm) {
572 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
573 qp = ch->rdma_cm.cm_id->qp;
574 } else {
575 qp = ib_create_qp(dev->pd, init_attr);
576 if (!IS_ERR(qp)) {
577 ret = srp_init_ib_qp(target, qp);
578 if (ret)
579 ib_destroy_qp(qp);
580 } else {
581 ret = PTR_ERR(qp);
582 }
583 }
584 if (ret) {
585 pr_err("QP creation failed for dev %s: %d\n",
586 dev_name(&dev->dev->dev), ret);
587 goto err_send_cq;
588 }
589
590 if (dev->use_fast_reg) {
591 fr_pool = srp_alloc_fr_pool(target);
592 if (IS_ERR(fr_pool)) {
593 ret = PTR_ERR(fr_pool);
594 shost_printk(KERN_WARNING, target->scsi_host, PFX
595 "FR pool allocation failed (%d)\n", ret);
596 goto err_qp;
597 }
598 }
599
600 if (ch->qp)
601 srp_destroy_qp(ch);
602 if (ch->recv_cq)
603 ib_free_cq(ch->recv_cq);
604 if (ch->send_cq)
605 ib_free_cq(ch->send_cq);
606
607 ch->qp = qp;
608 ch->recv_cq = recv_cq;
609 ch->send_cq = send_cq;
610
611 if (dev->use_fast_reg) {
612 if (ch->fr_pool)
613 srp_destroy_fr_pool(ch->fr_pool);
614 ch->fr_pool = fr_pool;
615 }
616
617 kfree(init_attr);
618 return 0;
619
620err_qp:
621 if (target->using_rdma_cm)
622 rdma_destroy_qp(ch->rdma_cm.cm_id);
623 else
624 ib_destroy_qp(qp);
625
626err_send_cq:
627 ib_free_cq(send_cq);
628
629err_recv_cq:
630 ib_free_cq(recv_cq);
631
632err:
633 kfree(init_attr);
634 return ret;
635}
636
637/*
638 * Note: this function may be called without srp_alloc_iu_bufs() having been
639 * invoked. Hence the ch->[rt]x_ring checks.
640 */
641static void srp_free_ch_ib(struct srp_target_port *target,
642 struct srp_rdma_ch *ch)
643{
644 struct srp_device *dev = target->srp_host->srp_dev;
645 int i;
646
647 if (!ch->target)
648 return;
649
650 if (target->using_rdma_cm) {
651 if (ch->rdma_cm.cm_id) {
652 rdma_destroy_id(ch->rdma_cm.cm_id);
653 ch->rdma_cm.cm_id = NULL;
654 }
655 } else {
656 if (ch->ib_cm.cm_id) {
657 ib_destroy_cm_id(ch->ib_cm.cm_id);
658 ch->ib_cm.cm_id = NULL;
659 }
660 }
661
662 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
663 if (!ch->qp)
664 return;
665
666 if (dev->use_fast_reg) {
667 if (ch->fr_pool)
668 srp_destroy_fr_pool(ch->fr_pool);
669 }
670
671 srp_destroy_qp(ch);
672 ib_free_cq(ch->send_cq);
673 ib_free_cq(ch->recv_cq);
674
675 /*
676 * Prevent the SCSI error handler from using this channel after it
677 * has been freed. The SCSI error handler may continue trying to
678 * perform recovery actions after scsi_remove_host() has
679 * returned.
680 */
681 ch->target = NULL;
682
683 ch->qp = NULL;
684 ch->send_cq = ch->recv_cq = NULL;
685
686 if (ch->rx_ring) {
687 for (i = 0; i < target->queue_size; ++i)
688 srp_free_iu(target->srp_host, ch->rx_ring[i]);
689 kfree(ch->rx_ring);
690 ch->rx_ring = NULL;
691 }
692 if (ch->tx_ring) {
693 for (i = 0; i < target->queue_size; ++i)
694 srp_free_iu(target->srp_host, ch->tx_ring[i]);
695 kfree(ch->tx_ring);
696 ch->tx_ring = NULL;
697 }
698}
699
700static void srp_path_rec_completion(int status,
701 struct sa_path_rec *pathrec,
702 void *ch_ptr)
703{
704 struct srp_rdma_ch *ch = ch_ptr;
705 struct srp_target_port *target = ch->target;
706
707 ch->status = status;
708 if (status)
709 shost_printk(KERN_ERR, target->scsi_host,
710 PFX "Got failed path rec status %d\n", status);
711 else
712 ch->ib_cm.path = *pathrec;
713 complete(&ch->done);
714}
715
716static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
717{
718 struct srp_target_port *target = ch->target;
719 int ret;
720
721 ch->ib_cm.path.numb_path = 1;
722
723 init_completion(&ch->done);
724
725 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
726 target->srp_host->srp_dev->dev,
727 target->srp_host->port,
728 &ch->ib_cm.path,
729 IB_SA_PATH_REC_SERVICE_ID |
730 IB_SA_PATH_REC_DGID |
731 IB_SA_PATH_REC_SGID |
732 IB_SA_PATH_REC_NUMB_PATH |
733 IB_SA_PATH_REC_PKEY,
734 SRP_PATH_REC_TIMEOUT_MS,
735 GFP_KERNEL,
736 srp_path_rec_completion,
737 ch, &ch->ib_cm.path_query);
738 if (ch->ib_cm.path_query_id < 0)
739 return ch->ib_cm.path_query_id;
740
741 ret = wait_for_completion_interruptible(&ch->done);
742 if (ret < 0)
743 return ret;
744
745 if (ch->status < 0)
746 shost_printk(KERN_WARNING, target->scsi_host,
747 PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
748 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
749 be16_to_cpu(target->ib_cm.pkey),
750 be64_to_cpu(target->ib_cm.service_id));
751
752 return ch->status;
753}
754
755static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
756{
757 struct srp_target_port *target = ch->target;
758 int ret;
759
760 init_completion(&ch->done);
761
762 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
763 if (ret)
764 return ret;
765
766 wait_for_completion_interruptible(&ch->done);
767
768 if (ch->status != 0)
769 shost_printk(KERN_WARNING, target->scsi_host,
770 PFX "Path resolution failed\n");
771
772 return ch->status;
773}
774
775static int srp_lookup_path(struct srp_rdma_ch *ch)
776{
777 struct srp_target_port *target = ch->target;
778
779 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
780 srp_ib_lookup_path(ch);
781}
782
783static u8 srp_get_subnet_timeout(struct srp_host *host)
784{
785 struct ib_port_attr attr;
786 int ret;
787 u8 subnet_timeout = 18;
788
789 ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
790 if (ret == 0)
791 subnet_timeout = attr.subnet_timeout;
792
793 if (unlikely(subnet_timeout < 15))
794 pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
795 dev_name(&host->srp_dev->dev->dev), subnet_timeout);
796
797 return subnet_timeout;
798}
799
800static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
801 bool multich)
802{
803 struct srp_target_port *target = ch->target;
804 struct {
805 struct rdma_conn_param rdma_param;
806 struct srp_login_req_rdma rdma_req;
807 struct ib_cm_req_param ib_param;
808 struct srp_login_req ib_req;
809 } *req = NULL;
810 char *ipi, *tpi;
811 int status;
812
813 req = kzalloc(sizeof *req, GFP_KERNEL);
814 if (!req)
815 return -ENOMEM;
816
817 req->ib_param.flow_control = 1;
818 req->ib_param.retry_count = target->tl_retry_count;
819
820 /*
821 * Pick some arbitrary defaults here; we could make these
822 * module parameters if anyone cared about setting them.
823 */
824 req->ib_param.responder_resources = 4;
825 req->ib_param.rnr_retry_count = 7;
826 req->ib_param.max_cm_retries = 15;
827
828 req->ib_req.opcode = SRP_LOGIN_REQ;
829 req->ib_req.tag = 0;
830 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
831 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
832 SRP_BUF_FORMAT_INDIRECT);
833 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
834 SRP_MULTICHAN_SINGLE);
835 if (srp_use_imm_data) {
836 req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
837 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
838 }
839
840 if (target->using_rdma_cm) {
841 req->rdma_param.flow_control = req->ib_param.flow_control;
842 req->rdma_param.responder_resources =
843 req->ib_param.responder_resources;
844 req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
845 req->rdma_param.retry_count = req->ib_param.retry_count;
846 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
847 req->rdma_param.private_data = &req->rdma_req;
848 req->rdma_param.private_data_len = sizeof(req->rdma_req);
849
850 req->rdma_req.opcode = req->ib_req.opcode;
851 req->rdma_req.tag = req->ib_req.tag;
852 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
853 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
854 req->rdma_req.req_flags = req->ib_req.req_flags;
855 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
856
857 ipi = req->rdma_req.initiator_port_id;
858 tpi = req->rdma_req.target_port_id;
859 } else {
860 u8 subnet_timeout;
861
862 subnet_timeout = srp_get_subnet_timeout(target->srp_host);
863
864 req->ib_param.primary_path = &ch->ib_cm.path;
865 req->ib_param.alternate_path = NULL;
866 req->ib_param.service_id = target->ib_cm.service_id;
867 get_random_bytes(&req->ib_param.starting_psn, 4);
868 req->ib_param.starting_psn &= 0xffffff;
869 req->ib_param.qp_num = ch->qp->qp_num;
870 req->ib_param.qp_type = ch->qp->qp_type;
871 req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
872 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
873 req->ib_param.private_data = &req->ib_req;
874 req->ib_param.private_data_len = sizeof(req->ib_req);
875
876 ipi = req->ib_req.initiator_port_id;
877 tpi = req->ib_req.target_port_id;
878 }
879
880 /*
881 * In the published SRP specification (draft rev. 16a), the
882 * port identifier format is 8 bytes of ID extension followed
883 * by 8 bytes of GUID. Older drafts put the two halves in the
884 * opposite order, so that the GUID comes first.
885 *
886 * Targets conforming to these obsolete drafts can be
887 * recognized by the I/O Class they report.
888 */
889 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
890 memcpy(ipi, &target->sgid.global.interface_id, 8);
891 memcpy(ipi + 8, &target->initiator_ext, 8);
892 memcpy(tpi, &target->ioc_guid, 8);
893 memcpy(tpi + 8, &target->id_ext, 8);
894 } else {
895 memcpy(ipi, &target->initiator_ext, 8);
896 memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
897 memcpy(tpi, &target->id_ext, 8);
898 memcpy(tpi + 8, &target->ioc_guid, 8);
899 }
900
901 /*
902 * Topspin/Cisco SRP targets will reject our login unless we
903 * zero out the first 8 bytes of our initiator port ID and set
904 * the second 8 bytes to the local node GUID.
905 */
906 if (srp_target_is_topspin(target)) {
907 shost_printk(KERN_DEBUG, target->scsi_host,
908 PFX "Topspin/Cisco initiator port ID workaround "
909 "activated for target GUID %016llx\n",
910 be64_to_cpu(target->ioc_guid));
911 memset(ipi, 0, 8);
912 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
913 }
914
915 if (target->using_rdma_cm)
916 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
917 else
918 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
919
920 kfree(req);
921
922 return status;
923}
924
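/*
 * When use_imm_data is set, the login request built above carries
 * SRP_IMMED_REQUESTED together with an immediate-data offset of
 * SRP_IMM_DATA_OFFSET; if the target grants this, srp_map_data() can place
 * the write payload of small commands directly inside the SRP_CMD
 * information unit instead of describing it with memory descriptors.
 */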
925static bool srp_queue_remove_work(struct srp_target_port *target)
926{
927 bool changed = false;
928
929 spin_lock_irq(&target->lock);
930 if (target->state != SRP_TARGET_REMOVED) {
931 target->state = SRP_TARGET_REMOVED;
932 changed = true;
933 }
934 spin_unlock_irq(&target->lock);
935
936 if (changed)
937 queue_work(srp_remove_wq, &target->remove_work);
938
939 return changed;
940}
941
942static void srp_disconnect_target(struct srp_target_port *target)
943{
944 struct srp_rdma_ch *ch;
945 int i, ret;
946
947 /* XXX should send SRP_I_LOGOUT request */
948
949 for (i = 0; i < target->ch_count; i++) {
950 ch = &target->ch[i];
951 ch->connected = false;
952 ret = 0;
953 if (target->using_rdma_cm) {
954 if (ch->rdma_cm.cm_id)
955 rdma_disconnect(ch->rdma_cm.cm_id);
956 } else {
957 if (ch->ib_cm.cm_id)
958 ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
959 NULL, 0);
960 }
961 if (ret < 0) {
962 shost_printk(KERN_DEBUG, target->scsi_host,
963 PFX "Sending CM DREQ failed\n");
964 }
965 }
966}
967
968static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
969{
970 struct srp_target_port *target = host_to_target(shost);
971 struct srp_device *dev = target->srp_host->srp_dev;
972 struct ib_device *ibdev = dev->dev;
973 struct srp_request *req = scsi_cmd_priv(cmd);
974
975 kfree(req->fr_list);
976 if (req->indirect_dma_addr) {
977 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
978 target->indirect_size,
979 DMA_TO_DEVICE);
980 }
981 kfree(req->indirect_desc);
982
983 return 0;
984}
985
986static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
987{
988 struct srp_target_port *target = host_to_target(shost);
989 struct srp_device *srp_dev = target->srp_host->srp_dev;
990 struct ib_device *ibdev = srp_dev->dev;
991 struct srp_request *req = scsi_cmd_priv(cmd);
992 dma_addr_t dma_addr;
993 int ret = -ENOMEM;
994
995 if (srp_dev->use_fast_reg) {
996 req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
997 GFP_KERNEL);
998 if (!req->fr_list)
999 goto out;
1000 }
1001 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1002 if (!req->indirect_desc)
1003 goto out;
1004
1005 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1006 target->indirect_size,
1007 DMA_TO_DEVICE);
1008 if (ib_dma_mapping_error(ibdev, dma_addr)) {
1009 srp_exit_cmd_priv(shost, cmd);
1010 goto out;
1011 }
1012
1013 req->indirect_dma_addr = dma_addr;
1014 ret = 0;
1015
1016out:
1017 return ret;
1018}
1019
1020/**
1021 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1022 * @shost: SCSI host whose attributes to remove from sysfs.
1023 *
1024 * Note: Any attributes defined in the host template and that did not exist
1025 * before invocation of this function will be ignored.
1026 */
1027static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1028{
1029 const struct attribute_group **g;
1030 struct attribute **attr;
1031
1032 for (g = shost->hostt->shost_groups; *g; ++g) {
1033 for (attr = (*g)->attrs; *attr; ++attr) {
1034 struct device_attribute *dev_attr =
1035 container_of(*attr, typeof(*dev_attr), attr);
1036
1037 device_remove_file(&shost->shost_dev, dev_attr);
1038 }
1039 }
1040}
1041
1042static void srp_remove_target(struct srp_target_port *target)
1043{
1044 struct srp_rdma_ch *ch;
1045 int i;
1046
1047 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1048
1049 srp_del_scsi_host_attr(target->scsi_host);
1050 srp_rport_get(target->rport);
1051 srp_remove_host(target->scsi_host);
1052 scsi_remove_host(target->scsi_host);
1053 srp_stop_rport_timers(target->rport);
1054 srp_disconnect_target(target);
1055 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1056 for (i = 0; i < target->ch_count; i++) {
1057 ch = &target->ch[i];
1058 srp_free_ch_ib(target, ch);
1059 }
1060 cancel_work_sync(&target->tl_err_work);
1061 srp_rport_put(target->rport);
1062 kfree(target->ch);
1063 target->ch = NULL;
1064
1065 spin_lock(&target->srp_host->target_lock);
1066 list_del(&target->list);
1067 spin_unlock(&target->srp_host->target_lock);
1068
1069 scsi_host_put(target->scsi_host);
1070}
1071
1072static void srp_remove_work(struct work_struct *work)
1073{
1074 struct srp_target_port *target =
1075 container_of(work, struct srp_target_port, remove_work);
1076
1077 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1078
1079 srp_remove_target(target);
1080}
1081
1082static void srp_rport_delete(struct srp_rport *rport)
1083{
1084 struct srp_target_port *target = rport->lld_data;
1085
1086 srp_queue_remove_work(target);
1087}
1088
1089/**
1090 * srp_connected_ch() - number of connected channels
1091 * @target: SRP target port.
1092 */
1093static int srp_connected_ch(struct srp_target_port *target)
1094{
1095 int i, c = 0;
1096
1097 for (i = 0; i < target->ch_count; i++)
1098 c += target->ch[i].connected;
1099
1100 return c;
1101}
1102
1103static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1104 bool multich)
1105{
1106 struct srp_target_port *target = ch->target;
1107 int ret;
1108
1109 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1110
1111 ret = srp_lookup_path(ch);
1112 if (ret)
1113 goto out;
1114
1115 while (1) {
1116 init_completion(&ch->done);
1117 ret = srp_send_req(ch, max_iu_len, multich);
1118 if (ret)
1119 goto out;
1120 ret = wait_for_completion_interruptible(&ch->done);
1121 if (ret < 0)
1122 goto out;
1123
1124 /*
1125 * The CM event handling code will set status to
1126 * SRP_PORT_REDIRECT if we get a port redirect REJ
1127 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1128 * redirect REJ back.
1129 */
1130 ret = ch->status;
1131 switch (ret) {
1132 case 0:
1133 ch->connected = true;
1134 goto out;
1135
1136 case SRP_PORT_REDIRECT:
1137 ret = srp_lookup_path(ch);
1138 if (ret)
1139 goto out;
1140 break;
1141
1142 case SRP_DLID_REDIRECT:
1143 break;
1144
1145 case SRP_STALE_CONN:
1146 shost_printk(KERN_ERR, target->scsi_host, PFX
1147 "giving up on stale connection\n");
1148 ret = -ECONNRESET;
1149 goto out;
1150
1151 default:
1152 goto out;
1153 }
1154 }
1155
1156out:
1157 return ret <= 0 ? ret : -ENODEV;
1158}
1159
1160static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1161{
1162 srp_handle_qp_err(cq, wc, "INV RKEY");
1163}
1164
1165static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1166 u32 rkey)
1167{
1168 struct ib_send_wr wr = {
1169 .opcode = IB_WR_LOCAL_INV,
1170 .next = NULL,
1171 .num_sge = 0,
1172 .send_flags = 0,
1173 .ex.invalidate_rkey = rkey,
1174 };
1175
1176 wr.wr_cqe = &req->reg_cqe;
1177 req->reg_cqe.done = srp_inv_rkey_err_done;
1178 return ib_post_send(ch->qp, &wr, NULL);
1179}
1180
1181static void srp_unmap_data(struct scsi_cmnd *scmnd,
1182 struct srp_rdma_ch *ch,
1183 struct srp_request *req)
1184{
1185 struct srp_target_port *target = ch->target;
1186 struct srp_device *dev = target->srp_host->srp_dev;
1187 struct ib_device *ibdev = dev->dev;
1188 int i, res;
1189
1190 if (!scsi_sglist(scmnd) ||
1191 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1192 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1193 return;
1194
1195 if (dev->use_fast_reg) {
1196 struct srp_fr_desc **pfr;
1197
1198 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1199 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1200 if (res < 0) {
1201 shost_printk(KERN_ERR, target->scsi_host, PFX
1202 "Queueing INV WR for rkey %#x failed (%d)\n",
1203 (*pfr)->mr->rkey, res);
1204 queue_work(system_long_wq,
1205 &target->tl_err_work);
1206 }
1207 }
1208 if (req->nmdesc)
1209 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1210 req->nmdesc);
1211 }
1212
1213 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1214 scmnd->sc_data_direction);
1215}
1216
1217/**
1218 * srp_claim_req - Take ownership of the scmnd associated with a request.
1219 * @ch: SRP RDMA channel.
1220 * @req: SRP request.
1221 * @sdev: If not NULL, only take ownership for this SCSI device.
1222 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1223 * ownership of @req->scmnd if it equals @scmnd.
1224 *
1225 * Return value:
1226 * Either NULL or a pointer to the SCSI command the caller became owner of.
1227 */
1228static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1229 struct srp_request *req,
1230 struct scsi_device *sdev,
1231 struct scsi_cmnd *scmnd)
1232{
1233 unsigned long flags;
1234
1235 spin_lock_irqsave(&ch->lock, flags);
1236 if (req->scmnd &&
1237 (!sdev || req->scmnd->device == sdev) &&
1238 (!scmnd || req->scmnd == scmnd)) {
1239 scmnd = req->scmnd;
1240 req->scmnd = NULL;
1241 } else {
1242 scmnd = NULL;
1243 }
1244 spin_unlock_irqrestore(&ch->lock, flags);
1245
1246 return scmnd;
1247}
1248
1249/**
1250 * srp_free_req() - Unmap data and adjust ch->req_lim.
1251 * @ch: SRP RDMA channel.
1252 * @req: Request to be freed.
1253 * @scmnd: SCSI command associated with @req.
1254 * @req_lim_delta: Amount to be added to @target->req_lim.
1255 */
1256static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1257 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1258{
1259 unsigned long flags;
1260
1261 srp_unmap_data(scmnd, ch, req);
1262
1263 spin_lock_irqsave(&ch->lock, flags);
1264 ch->req_lim += req_lim_delta;
1265 spin_unlock_irqrestore(&ch->lock, flags);
1266}
1267
1268static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1269 struct scsi_device *sdev, int result)
1270{
1271 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1272
1273 if (scmnd) {
1274 srp_free_req(ch, req, scmnd, 0);
1275 scmnd->result = result;
1276 scsi_done(scmnd);
1277 }
1278}
1279
1280struct srp_terminate_context {
1281 struct srp_target_port *srp_target;
1282 int scsi_result;
1283};
1284
1285static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
1286{
1287 struct srp_terminate_context *context = context_ptr;
1288 struct srp_target_port *target = context->srp_target;
1289 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
1290 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1291 struct srp_request *req = scsi_cmd_priv(scmnd);
1292
1293 srp_finish_req(ch, req, NULL, context->scsi_result);
1294
1295 return true;
1296}
1297
1298static void srp_terminate_io(struct srp_rport *rport)
1299{
1300 struct srp_target_port *target = rport->lld_data;
1301 struct srp_terminate_context context = { .srp_target = target,
1302 .scsi_result = DID_TRANSPORT_FAILFAST << 16 };
1303
1304 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
1305}
1306
1307/* Calculate maximum initiator to target information unit length. */
1308static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1309 uint32_t max_it_iu_size)
1310{
1311 uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1312 sizeof(struct srp_indirect_buf) +
1313 cmd_sg_cnt * sizeof(struct srp_direct_buf);
1314
1315 if (use_imm_data)
1316 max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1317 srp_max_imm_data);
1318
1319 if (max_it_iu_size)
1320 max_iu_len = min(max_iu_len, max_it_iu_size);
1321
1322 pr_debug("max_iu_len = %d\n", max_iu_len);
1323
1324 return max_iu_len;
1325}
1326
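/*
 * In other words: the baseline IU length grows linearly with cmd_sg_cnt (one
 * srp_direct_buf per S/G entry behind the srp_indirect_buf header), enabling
 * immediate data raises it to at least SRP_IMM_DATA_OFFSET + max_imm_data,
 * and a target-reported max_it_iu_size caps the final value.
 */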
1327/*
1328 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1329 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1330 * srp_reset_device() or srp_reset_host() calls will occur while this function
1331 * is in progress. One way to realize that is not to call this function
1332 * directly but to call srp_reconnect_rport() instead since that last function
1333 * serializes calls of this function via rport->mutex and also blocks
1334 * srp_queuecommand() calls before invoking this function.
1335 */
1336static int srp_rport_reconnect(struct srp_rport *rport)
1337{
1338 struct srp_target_port *target = rport->lld_data;
1339 struct srp_rdma_ch *ch;
1340 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1341 srp_use_imm_data,
1342 target->max_it_iu_size);
1343 int i, j, ret = 0;
1344 bool multich = false;
1345
1346 srp_disconnect_target(target);
1347
1348 if (target->state == SRP_TARGET_SCANNING)
1349 return -ENODEV;
1350
1351 /*
1352 * Now get a new local CM ID so that we avoid confusing the target in
1353 * case things are really fouled up. Doing so also ensures that all CM
1354 * callbacks will have finished before a new QP is allocated.
1355 */
1356 for (i = 0; i < target->ch_count; i++) {
1357 ch = &target->ch[i];
1358 ret += srp_new_cm_id(ch);
1359 }
1360 {
1361 struct srp_terminate_context context = {
1362 .srp_target = target, .scsi_result = DID_RESET << 16};
1363
1364 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
1365 &context);
1366 }
1367 for (i = 0; i < target->ch_count; i++) {
1368 ch = &target->ch[i];
1369 /*
1370 * Whether or not creating a new CM ID succeeded, create a new
1371 * QP. This guarantees that all completion callback function
1372 * invocations have finished before request resetting starts.
1373 */
1374 ret += srp_create_ch_ib(ch);
1375
1376 INIT_LIST_HEAD(&ch->free_tx);
1377 for (j = 0; j < target->queue_size; ++j)
1378 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1379 }
1380
1381 target->qp_in_error = false;
1382
1383 for (i = 0; i < target->ch_count; i++) {
1384 ch = &target->ch[i];
1385 if (ret)
1386 break;
1387 ret = srp_connect_ch(ch, max_iu_len, multich);
1388 multich = true;
1389 }
1390
1391 if (ret == 0)
1392 shost_printk(KERN_INFO, target->scsi_host,
1393 PFX "reconnect succeeded\n");
1394
1395 return ret;
1396}
1397
1398static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1399 unsigned int dma_len, u32 rkey)
1400{
1401 struct srp_direct_buf *desc = state->desc;
1402
1403 WARN_ON_ONCE(!dma_len);
1404
1405 desc->va = cpu_to_be64(dma_addr);
1406 desc->key = cpu_to_be32(rkey);
1407 desc->len = cpu_to_be32(dma_len);
1408
1409 state->total_len += dma_len;
1410 state->desc++;
1411 state->ndesc++;
1412}
1413
1414static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1415{
1416 srp_handle_qp_err(cq, wc, "FAST REG");
1417}
1418
1419/*
1420 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1421 * where to start in the first element. If sg_offset_p != NULL then
1422 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1423 * byte that has not yet been mapped.
1424 */
1425static int srp_map_finish_fr(struct srp_map_state *state,
1426 struct srp_request *req,
1427 struct srp_rdma_ch *ch, int sg_nents,
1428 unsigned int *sg_offset_p)
1429{
1430 struct srp_target_port *target = ch->target;
1431 struct srp_device *dev = target->srp_host->srp_dev;
1432 struct ib_reg_wr wr;
1433 struct srp_fr_desc *desc;
1434 u32 rkey;
1435 int n, err;
1436
1437 if (state->fr.next >= state->fr.end) {
1438 shost_printk(KERN_ERR, ch->target->scsi_host,
1439 PFX "Out of MRs (mr_per_cmd = %d)\n",
1440 ch->target->mr_per_cmd);
1441 return -ENOMEM;
1442 }
1443
1444 WARN_ON_ONCE(!dev->use_fast_reg);
1445
1446 if (sg_nents == 1 && target->global_rkey) {
1447 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1448
1449 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1450 sg_dma_len(state->sg) - sg_offset,
1451 target->global_rkey);
1452 if (sg_offset_p)
1453 *sg_offset_p = 0;
1454 return 1;
1455 }
1456
1457 desc = srp_fr_pool_get(ch->fr_pool);
1458 if (!desc)
1459 return -ENOMEM;
1460
1461 rkey = ib_inc_rkey(desc->mr->rkey);
1462 ib_update_fast_reg_key(desc->mr, rkey);
1463
1464 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1465 dev->mr_page_size);
1466 if (unlikely(n < 0)) {
1467 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1468 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1469 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1470 sg_offset_p ? *sg_offset_p : -1, n);
1471 return n;
1472 }
1473
1474 WARN_ON_ONCE(desc->mr->length == 0);
1475
1476 req->reg_cqe.done = srp_reg_mr_err_done;
1477
1478 wr.wr.next = NULL;
1479 wr.wr.opcode = IB_WR_REG_MR;
1480 wr.wr.wr_cqe = &req->reg_cqe;
1481 wr.wr.num_sge = 0;
1482 wr.wr.send_flags = 0;
1483 wr.mr = desc->mr;
1484 wr.key = desc->mr->rkey;
1485 wr.access = (IB_ACCESS_LOCAL_WRITE |
1486 IB_ACCESS_REMOTE_READ |
1487 IB_ACCESS_REMOTE_WRITE);
1488
1489 *state->fr.next++ = desc;
1490 state->nmdesc++;
1491
1492 srp_map_desc(state, desc->mr->iova,
1493 desc->mr->length, desc->mr->rkey);
1494
1495 err = ib_post_send(ch->qp, &wr.wr, NULL);
1496 if (unlikely(err)) {
1497 WARN_ON_ONCE(err == -ENOMEM);
1498 return err;
1499 }
1500
1501 return n;
1502}
1503
1504static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1505 struct srp_request *req, struct scatterlist *scat,
1506 int count)
1507{
1508 unsigned int sg_offset = 0;
1509
1510 state->fr.next = req->fr_list;
1511 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1512 state->sg = scat;
1513
1514 if (count == 0)
1515 return 0;
1516
1517 while (count) {
1518 int i, n;
1519
1520 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1521 if (unlikely(n < 0))
1522 return n;
1523
1524 count -= n;
1525 for (i = 0; i < n; i++)
1526 state->sg = sg_next(state->sg);
1527 }
1528
1529 return 0;
1530}
1531
1532static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1533 struct srp_request *req, struct scatterlist *scat,
1534 int count)
1535{
1536 struct srp_target_port *target = ch->target;
1537 struct scatterlist *sg;
1538 int i;
1539
1540 for_each_sg(scat, sg, count, i) {
1541 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1542 target->global_rkey);
1543 }
1544
1545 return 0;
1546}
1547
1548/*
1549 * Register the indirect data buffer descriptor with the HCA.
1550 *
1551 * Note: since the indirect data buffer descriptor has been allocated with
1552 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1553 * memory buffer.
1554 */
1555static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1556 void **next_mr, void **end_mr, u32 idb_len,
1557 __be32 *idb_rkey)
1558{
1559 struct srp_target_port *target = ch->target;
1560 struct srp_device *dev = target->srp_host->srp_dev;
1561 struct srp_map_state state;
1562 struct srp_direct_buf idb_desc;
1563 struct scatterlist idb_sg[1];
1564 int ret;
1565
1566 memset(&state, 0, sizeof(state));
1567 memset(&idb_desc, 0, sizeof(idb_desc));
1568 state.gen.next = next_mr;
1569 state.gen.end = end_mr;
1570 state.desc = &idb_desc;
1571 state.base_dma_addr = req->indirect_dma_addr;
1572 state.dma_len = idb_len;
1573
1574 if (dev->use_fast_reg) {
1575 state.sg = idb_sg;
1576 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1577 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1578#ifdef CONFIG_NEED_SG_DMA_LENGTH
1579 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1580#endif
1581 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1582 if (ret < 0)
1583 return ret;
1584 WARN_ON_ONCE(ret < 1);
1585 } else {
1586 return -EINVAL;
1587 }
1588
1589 *idb_rkey = idb_desc.key;
1590
1591 return 0;
1592}
1593
1594static void srp_check_mapping(struct srp_map_state *state,
1595 struct srp_rdma_ch *ch, struct srp_request *req,
1596 struct scatterlist *scat, int count)
1597{
1598 struct srp_device *dev = ch->target->srp_host->srp_dev;
1599 struct srp_fr_desc **pfr;
1600 u64 desc_len = 0, mr_len = 0;
1601 int i;
1602
1603 for (i = 0; i < state->ndesc; i++)
1604 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1605 if (dev->use_fast_reg)
1606 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1607 mr_len += (*pfr)->mr->length;
1608 if (desc_len != scsi_bufflen(req->scmnd) ||
1609 mr_len > scsi_bufflen(req->scmnd))
1610 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1611 scsi_bufflen(req->scmnd), desc_len, mr_len,
1612 state->ndesc, state->nmdesc);
1613}
1614
1615/**
1616 * srp_map_data() - map SCSI data buffer onto an SRP request
1617 * @scmnd: SCSI command to map
1618 * @ch: SRP RDMA channel
1619 * @req: SRP request
1620 *
1621 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1622 * mapping failed. The size of any immediate data is not included in the
1623 * return value.
1624 */
1625static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1626 struct srp_request *req)
1627{
1628 struct srp_target_port *target = ch->target;
1629 struct scatterlist *scat, *sg;
1630 struct srp_cmd *cmd = req->cmd->buf;
1631 int i, len, nents, count, ret;
1632 struct srp_device *dev;
1633 struct ib_device *ibdev;
1634 struct srp_map_state state;
1635 struct srp_indirect_buf *indirect_hdr;
1636 u64 data_len;
1637 u32 idb_len, table_len;
1638 __be32 idb_rkey;
1639 u8 fmt;
1640
1641 req->cmd->num_sge = 1;
1642
1643 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1644 return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1645
1646 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1647 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1648 shost_printk(KERN_WARNING, target->scsi_host,
1649 PFX "Unhandled data direction %d\n",
1650 scmnd->sc_data_direction);
1651 return -EINVAL;
1652 }
1653
1654 nents = scsi_sg_count(scmnd);
1655 scat = scsi_sglist(scmnd);
1656 data_len = scsi_bufflen(scmnd);
1657
1658 dev = target->srp_host->srp_dev;
1659 ibdev = dev->dev;
1660
1661 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1662 if (unlikely(count == 0))
1663 return -EIO;
1664
1665 if (ch->use_imm_data &&
1666 count <= ch->max_imm_sge &&
1667 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1668 scmnd->sc_data_direction == DMA_TO_DEVICE) {
1669 struct srp_imm_buf *buf;
1670 struct ib_sge *sge = &req->cmd->sge[1];
1671
1672 fmt = SRP_DATA_DESC_IMM;
1673 len = SRP_IMM_DATA_OFFSET;
1674 req->nmdesc = 0;
1675 buf = (void *)cmd->add_data + cmd->add_cdb_len;
1676 buf->len = cpu_to_be32(data_len);
1677 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1678 for_each_sg(scat, sg, count, i) {
1679 sge[i].addr = sg_dma_address(sg);
1680 sge[i].length = sg_dma_len(sg);
882981f4
BVA
1681 sge[i].lkey = target->lkey;
1682 }
1683 req->cmd->num_sge += count;
1684 goto map_complete;
1685 }
1686
f5358a17 1687 fmt = SRP_DATA_DESC_DIRECT;
482fffc4
BVA
1688 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1689 sizeof(struct srp_direct_buf);
aef9ec39 1690
cee687b6 1691 if (count == 1 && target->global_rkey) {
f5358a17
RD
1692 /*
1693 * The midlayer only generated a single gather/scatter
1694 * entry, or DMA mapping coalesced everything to a
1695 * single entry. So a direct descriptor along with
1696 * the DMA MR suffices.
1697 */
482fffc4 1698 struct srp_direct_buf *buf;
aef9ec39 1699
482fffc4 1700 buf = (void *)cmd->add_data + cmd->add_cdb_len;
a163afc8 1701 buf->va = cpu_to_be64(sg_dma_address(scat));
cee687b6 1702 buf->key = cpu_to_be32(target->global_rkey);
a163afc8 1703 buf->len = cpu_to_be32(sg_dma_len(scat));
8f26c9ff 1704
52ede08f 1705 req->nmdesc = 0;
8f26c9ff
DD
1706 goto map_complete;
1707 }
1708
5cfb1782
BVA
1709 /*
1710 * We have more than one scatter/gather entry, so build our indirect
1711 * descriptor table, trying to merge as many entries as we can.
8f26c9ff 1712 */
482fffc4 1713 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
8f26c9ff 1714
c07d424d
DD
1715 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1716 target->indirect_size, DMA_TO_DEVICE);
1717
8f26c9ff 1718 memset(&state, 0, sizeof(state));
9edba790 1719 state.desc = req->indirect_desc;
26630e8a 1720 if (dev->use_fast_reg)
e012f363 1721 ret = srp_map_sg_fr(&state, ch, req, scat, count);
26630e8a 1722 else
e012f363
BVA
1723 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1724 req->nmdesc = state.nmdesc;
1725 if (ret < 0)
1726 goto unmap;
cf368713 1727
509c5f33
BVA
1728 {
1729 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1730 "Memory mapping consistency check");
1a1faf7a 1731 if (DYNAMIC_DEBUG_BRANCH(ddm))
509c5f33
BVA
1732 srp_check_mapping(&state, ch, req, scat, count);
1733 }
cf368713 1734
c07d424d
DD
 1735	/* We've mapped the request; now pull as much of the indirect
1736 * descriptor table as we can into the command buffer. If this
1737 * target is not using an external indirect table, we are
1738 * guaranteed to fit into the command, as the SCSI layer won't
1739 * give us more S/G entries than we allow.
8f26c9ff 1740 */
8f26c9ff 1741 if (state.ndesc == 1) {
5cfb1782
BVA
1742 /*
1743 * Memory registration collapsed the sg-list into one entry,
8f26c9ff
DD
1744 * so use a direct descriptor.
1745 */
482fffc4 1746 struct srp_direct_buf *buf;
cf368713 1747
482fffc4 1748 buf = (void *)cmd->add_data + cmd->add_cdb_len;
c07d424d 1749 *buf = req->indirect_desc[0];
8f26c9ff 1750 goto map_complete;
aef9ec39
RD
1751 }
1752
c07d424d
DD
1753 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1754 !target->allow_ext_sg)) {
1755 shost_printk(KERN_ERR, target->scsi_host,
1756 "Could not fit S/G list into SRP_CMD\n");
e012f363
BVA
1757 ret = -EIO;
1758 goto unmap;
c07d424d
DD
1759 }
1760
1761 count = min(state.ndesc, target->cmd_sg_cnt);
8f26c9ff 1762 table_len = state.ndesc * sizeof (struct srp_direct_buf);
330179f2 1763 idb_len = sizeof(struct srp_indirect_buf) + table_len;
8f26c9ff
DD
1764
1765 fmt = SRP_DATA_DESC_INDIRECT;
482fffc4
BVA
1766 len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1767 sizeof(struct srp_indirect_buf);
c07d424d 1768 len += count * sizeof (struct srp_direct_buf);
8f26c9ff 1769
c07d424d
DD
1770 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1771 count * sizeof (struct srp_direct_buf));
8f26c9ff 1772
cee687b6 1773 if (!target->global_rkey) {
330179f2
BVA
1774 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1775 idb_len, &idb_rkey);
1776 if (ret < 0)
e012f363 1777 goto unmap;
330179f2
BVA
1778 req->nmdesc++;
1779 } else {
cee687b6 1780 idb_rkey = cpu_to_be32(target->global_rkey);
330179f2
BVA
1781 }
1782
c07d424d 1783 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
330179f2 1784 indirect_hdr->table_desc.key = idb_rkey;
8f26c9ff
DD
1785 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1786 indirect_hdr->len = cpu_to_be32(state.total_len);
1787
1788 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
c07d424d 1789 cmd->data_out_desc_cnt = count;
8f26c9ff 1790 else
c07d424d
DD
1791 cmd->data_in_desc_cnt = count;
1792
1793 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1794 DMA_TO_DEVICE);
8f26c9ff
DD
1795
1796map_complete:
aef9ec39
RD
1797 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1798 cmd->buf_fmt = fmt << 4;
1799 else
1800 cmd->buf_fmt = fmt;
1801
aef9ec39 1802 return len;
e012f363
BVA
1803
1804unmap:
1805 srp_unmap_data(scmnd, ch, req);
ffc548bb
BVA
1806 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1807 ret = -E2BIG;
e012f363 1808 return ret;
aef9ec39
RD
1809}
1810
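/*
 * Illustrative user-space sketch (not driver code): how the SRP_CMD IU length
 * returned by srp_map_data() above is built up for the three descriptor
 * formats (immediate data, direct, indirect). The struct sizes are taken as
 * parameters instead of the real <scsi/srp.h> definitions so that this
 * compiles stand-alone; the numbers in main() are examples only.
 */
#include <stddef.h>
#include <stdio.h>

enum ex_fmt { EX_FMT_IMM, EX_FMT_DIRECT, EX_FMT_INDIRECT };

static size_t ex_srp_cmd_iu_len(enum ex_fmt fmt, size_t srp_cmd_size,
				size_t add_cdb_len, size_t direct_buf_size,
				size_t indirect_buf_size, size_t in_iu_desc,
				size_t imm_data_offset)
{
	switch (fmt) {
	case EX_FMT_IMM:	/* immediate data starts at a fixed offset */
		return imm_data_offset;
	case EX_FMT_DIRECT:	/* one srp_direct_buf after the (padded) CDB */
		return srp_cmd_size + add_cdb_len + direct_buf_size;
	case EX_FMT_INDIRECT:	/* indirect header + descriptors kept in the IU */
		return srp_cmd_size + add_cdb_len + indirect_buf_size +
			in_iu_desc * direct_buf_size;
	}
	return 0;
}

int main(void)
{
	printf("direct: %zu bytes\n",
	       ex_srp_cmd_iu_len(EX_FMT_DIRECT, 48, 0, 16, 0, 0, 0));
	printf("indirect, 4 in-IU descriptors: %zu bytes\n",
	       ex_srp_cmd_iu_len(EX_FMT_INDIRECT, 48, 0, 16, 20, 4, 0));
	return 0;
}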
76c75b25
BVA
1811/*
1812 * Return an IU and possible credit to the free pool
1813 */
509c07bc 1814static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
76c75b25
BVA
1815 enum srp_iu_type iu_type)
1816{
1817 unsigned long flags;
1818
509c07bc
BVA
1819 spin_lock_irqsave(&ch->lock, flags);
1820 list_add(&iu->list, &ch->free_tx);
76c75b25 1821 if (iu_type != SRP_IU_RSP)
509c07bc
BVA
1822 ++ch->req_lim;
1823 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25
BVA
1824}
1825
05a1d750 1826/*
509c07bc 1827 * Must be called with ch->lock held to protect req_lim and free_tx.
e9684678 1828 * If IU is not sent, it must be returned using srp_put_tx_iu().
05a1d750
DD
1829 *
1830 * Note:
1831 * An upper limit for the number of allocated information units for each
1832 * request type is:
1833 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1834 * more than Scsi_Host.can_queue requests.
1835 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1836 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1837 * one unanswered SRP request to an initiator.
1838 */
509c07bc 1839static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
05a1d750
DD
1840 enum srp_iu_type iu_type)
1841{
509c07bc 1842 struct srp_target_port *target = ch->target;
05a1d750
DD
1843 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1844 struct srp_iu *iu;
1845
93c76dbb
BVA
1846 lockdep_assert_held(&ch->lock);
1847
1dc7b1f1 1848 ib_process_cq_direct(ch->send_cq, -1);
05a1d750 1849
509c07bc 1850 if (list_empty(&ch->free_tx))
05a1d750
DD
1851 return NULL;
1852
1853 /* Initiator responses to target requests do not consume credits */
76c75b25 1854 if (iu_type != SRP_IU_RSP) {
509c07bc 1855 if (ch->req_lim <= rsv) {
76c75b25
BVA
1856 ++target->zero_req_lim;
1857 return NULL;
1858 }
1859
509c07bc 1860 --ch->req_lim;
05a1d750
DD
1861 }
1862
509c07bc 1863 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
76c75b25 1864 list_del(&iu->list);
05a1d750
DD
1865 return iu;
1866}
1867
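/*
 * Illustrative user-space sketch (not driver code) of the credit check in
 * __srp_get_tx_iu() above: SRP_IU_RSP never consumes a request-limit credit,
 * and non-task-management IUs leave SRP_TSK_MGMT_SQ_SIZE credits in reserve
 * so that aborts/LUN resets can still be sent under full command load.
 * The reserve value below is an assumption made for the example only.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_TSK_MGMT_RESERVE 1	/* stands in for SRP_TSK_MGMT_SQ_SIZE */

enum ex_iu_type { EX_IU_CMD, EX_IU_TSK_MGMT, EX_IU_RSP };

static bool ex_take_credit(int *req_lim, enum ex_iu_type type)
{
	int rsv = (type == EX_IU_TSK_MGMT) ? 0 : EX_TSK_MGMT_RESERVE;

	if (type == EX_IU_RSP)		/* responses are free */
		return true;
	if (*req_lim <= rsv)		/* would eat into the reserve */
		return false;
	--*req_lim;
	return true;
}

int main(void)
{
	int req_lim = 2;

	printf("cmd #1: %d\n", ex_take_credit(&req_lim, EX_IU_CMD));	/* 1 */
	printf("cmd #2: %d\n", ex_take_credit(&req_lim, EX_IU_CMD));	/* 0: reserve kept */
	printf("tsk mgmt: %d\n", ex_take_credit(&req_lim, EX_IU_TSK_MGMT)); /* 1 */
	return 0;
}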
9294000d
BVA
1868/*
1869 * Note: if this function is called from inside ib_drain_sq() then it will
1870 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1871 * with status IB_WC_SUCCESS then that's a bug.
1872 */
1dc7b1f1
CH
1873static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1874{
1875 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1876 struct srp_rdma_ch *ch = cq->cq_context;
1877
1878 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1879 srp_handle_qp_err(cq, wc, "SEND");
1880 return;
1881 }
1882
93c76dbb
BVA
1883 lockdep_assert_held(&ch->lock);
1884
1dc7b1f1
CH
1885 list_add(&iu->list, &ch->free_tx);
1886}
1887
882981f4
BVA
1888/**
1889 * srp_post_send() - send an SRP information unit
1890 * @ch: RDMA channel over which to send the information unit.
1891 * @iu: Information unit to send.
1892 * @len: Length of the information unit excluding immediate data.
1893 */
509c07bc 1894static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
05a1d750 1895{
509c07bc 1896 struct srp_target_port *target = ch->target;
71347b0c 1897 struct ib_send_wr wr;
05a1d750 1898
882981f4
BVA
1899 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1900 return -EINVAL;
1901
1902 iu->sge[0].addr = iu->dma;
1903 iu->sge[0].length = len;
1904 iu->sge[0].lkey = target->lkey;
05a1d750 1905
1dc7b1f1
CH
1906 iu->cqe.done = srp_send_done;
1907
05a1d750 1908 wr.next = NULL;
1dc7b1f1 1909 wr.wr_cqe = &iu->cqe;
882981f4
BVA
1910 wr.sg_list = &iu->sge[0];
1911 wr.num_sge = iu->num_sge;
05a1d750
DD
1912 wr.opcode = IB_WR_SEND;
1913 wr.send_flags = IB_SEND_SIGNALED;
1914
71347b0c 1915 return ib_post_send(ch->qp, &wr, NULL);
05a1d750
DD
1916}
1917
509c07bc 1918static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
c996bb47 1919{
509c07bc 1920 struct srp_target_port *target = ch->target;
71347b0c 1921 struct ib_recv_wr wr;
dcb4cb85 1922 struct ib_sge list;
c996bb47
BVA
1923
1924 list.addr = iu->dma;
1925 list.length = iu->size;
9af76271 1926 list.lkey = target->lkey;
c996bb47 1927
1dc7b1f1
CH
1928 iu->cqe.done = srp_recv_done;
1929
c996bb47 1930 wr.next = NULL;
1dc7b1f1 1931 wr.wr_cqe = &iu->cqe;
c996bb47
BVA
1932 wr.sg_list = &list;
1933 wr.num_sge = 1;
1934
71347b0c 1935 return ib_post_recv(ch->qp, &wr, NULL);
c996bb47
BVA
1936}
1937
509c07bc 1938static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
aef9ec39 1939{
509c07bc 1940 struct srp_target_port *target = ch->target;
aef9ec39
RD
1941 struct srp_request *req;
1942 struct scsi_cmnd *scmnd;
1943 unsigned long flags;
aef9ec39 1944
aef9ec39 1945 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
509c07bc
BVA
1946 spin_lock_irqsave(&ch->lock, flags);
1947 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
0a6fdbde
BVA
1948 if (rsp->tag == ch->tsk_mgmt_tag) {
1949 ch->tsk_mgmt_status = -1;
1950 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1951 ch->tsk_mgmt_status = rsp->data[3];
1952 complete(&ch->tsk_mgmt_done);
1953 } else {
1954 shost_printk(KERN_ERR, target->scsi_host,
1955 "Received tsk mgmt response too late for tag %#llx\n",
1956 rsp->tag);
1957 }
509c07bc 1958 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 1959 } else {
77f2c1a4 1960 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
ad215aae
BVA
1961 if (scmnd) {
1962 req = scsi_cmd_priv(scmnd);
77f2c1a4 1963 scmnd = srp_claim_req(ch, req, NULL, scmnd);
6cb72bc1 1964 } else {
7aa54bd7 1965 shost_printk(KERN_ERR, target->scsi_host,
d92c0da7
BVA
1966 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1967 rsp->tag, ch - target->ch, ch->qp->qp_num);
22032991 1968
509c07bc
BVA
1969 spin_lock_irqsave(&ch->lock, flags);
1970 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1971 spin_unlock_irqrestore(&ch->lock, flags);
22032991
BVA
1972
1973 return;
1974 }
aef9ec39
RD
1975 scmnd->result = rsp->status;
1976
1977 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1978 memcpy(scmnd->sense_buffer, rsp->data +
1979 be32_to_cpu(rsp->resp_data_len),
1980 min_t(int, be32_to_cpu(rsp->sense_data_len),
1981 SCSI_SENSE_BUFFERSIZE));
1982 }
1983
e714531a 1984 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
bb350d1d 1985 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
e714531a
BVA
1986 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1987 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1988 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1989 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1990 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1991 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
aef9ec39 1992
509c07bc 1993 srp_free_req(ch, req, scmnd,
22032991
BVA
1994 be32_to_cpu(rsp->req_lim_delta));
1995
5f9ae9ee 1996 scsi_done(scmnd);
aef9ec39 1997 }
aef9ec39
RD
1998}
1999
509c07bc 2000static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
bb12588a
DD
2001 void *rsp, int len)
2002{
509c07bc 2003 struct srp_target_port *target = ch->target;
76c75b25 2004 struct ib_device *dev = target->srp_host->srp_dev->dev;
bb12588a
DD
2005 unsigned long flags;
2006 struct srp_iu *iu;
76c75b25 2007 int err;
bb12588a 2008
509c07bc
BVA
2009 spin_lock_irqsave(&ch->lock, flags);
2010 ch->req_lim += req_delta;
2011 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2012 spin_unlock_irqrestore(&ch->lock, flags);
76c75b25 2013
bb12588a
DD
2014 if (!iu) {
2015 shost_printk(KERN_ERR, target->scsi_host, PFX
2016 "no IU available to send response\n");
76c75b25 2017 return 1;
bb12588a
DD
2018 }
2019
882981f4 2020 iu->num_sge = 1;
bb12588a
DD
2021 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2022 memcpy(iu->buf, rsp, len);
2023 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2024
509c07bc 2025 err = srp_post_send(ch, iu, len);
76c75b25 2026 if (err) {
bb12588a
DD
2027 shost_printk(KERN_ERR, target->scsi_host, PFX
2028 "unable to post response: %d\n", err);
509c07bc 2029 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
76c75b25 2030 }
bb12588a 2031
bb12588a
DD
2032 return err;
2033}
2034
509c07bc 2035static void srp_process_cred_req(struct srp_rdma_ch *ch,
bb12588a
DD
2036 struct srp_cred_req *req)
2037{
2038 struct srp_cred_rsp rsp = {
2039 .opcode = SRP_CRED_RSP,
2040 .tag = req->tag,
2041 };
2042 s32 delta = be32_to_cpu(req->req_lim_delta);
2043
509c07bc
BVA
2044 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2045 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
bb12588a
DD
2046 "problems processing SRP_CRED_REQ\n");
2047}
2048
509c07bc 2049static void srp_process_aer_req(struct srp_rdma_ch *ch,
bb12588a
DD
2050 struct srp_aer_req *req)
2051{
509c07bc 2052 struct srp_target_port *target = ch->target;
bb12588a
DD
2053 struct srp_aer_rsp rsp = {
2054 .opcode = SRP_AER_RSP,
2055 .tag = req->tag,
2056 };
2057 s32 delta = be32_to_cpu(req->req_lim_delta);
2058
2059 shost_printk(KERN_ERR, target->scsi_host, PFX
985aa495 2060 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
bb12588a 2061
509c07bc 2062 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
bb12588a
DD
2063 shost_printk(KERN_ERR, target->scsi_host, PFX
2064 "problems processing SRP_AER_REQ\n");
2065}
2066
1dc7b1f1 2067static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
aef9ec39 2068{
1dc7b1f1
CH
2069 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2070 struct srp_rdma_ch *ch = cq->cq_context;
509c07bc 2071 struct srp_target_port *target = ch->target;
dcb4cb85 2072 struct ib_device *dev = target->srp_host->srp_dev->dev;
c996bb47 2073 int res;
aef9ec39
RD
2074 u8 opcode;
2075
1dc7b1f1
CH
2076 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2077 srp_handle_qp_err(cq, wc, "RECV");
2078 return;
2079 }
2080
509c07bc 2081 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2082 DMA_FROM_DEVICE);
aef9ec39
RD
2083
2084 opcode = *(u8 *) iu->buf;
2085
2086 if (0) {
7aa54bd7
DD
2087 shost_printk(KERN_ERR, target->scsi_host,
2088 PFX "recv completion, opcode 0x%02x\n", opcode);
7a700811
BVA
2089 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2090 iu->buf, wc->byte_len, true);
aef9ec39
RD
2091 }
2092
2093 switch (opcode) {
2094 case SRP_RSP:
509c07bc 2095 srp_process_rsp(ch, iu->buf);
aef9ec39
RD
2096 break;
2097
bb12588a 2098 case SRP_CRED_REQ:
509c07bc 2099 srp_process_cred_req(ch, iu->buf);
bb12588a
DD
2100 break;
2101
2102 case SRP_AER_REQ:
509c07bc 2103 srp_process_aer_req(ch, iu->buf);
bb12588a
DD
2104 break;
2105
aef9ec39
RD
2106 case SRP_T_LOGOUT:
2107 /* XXX Handle target logout */
7aa54bd7
DD
2108 shost_printk(KERN_WARNING, target->scsi_host,
2109 PFX "Got target logout request\n");
aef9ec39
RD
2110 break;
2111
2112 default:
7aa54bd7
DD
2113 shost_printk(KERN_WARNING, target->scsi_host,
2114 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
aef9ec39
RD
2115 break;
2116 }
2117
509c07bc 2118 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
85507bcc 2119 DMA_FROM_DEVICE);
c996bb47 2120
509c07bc 2121 res = srp_post_recv(ch, iu);
c996bb47
BVA
2122 if (res != 0)
2123 shost_printk(KERN_ERR, target->scsi_host,
2124 PFX "Recv failed with error code %d\n", res);
aef9ec39
RD
2125}
2126
c1120f89
BVA
2127/**
2128 * srp_tl_err_work() - handle a transport layer error
af24663b 2129 * @work: Work structure embedded in an SRP target port.
c1120f89
BVA
2130 *
2131 * Note: This function may get invoked before the rport has been created,
2132 * hence the target->rport test.
2133 */
2134static void srp_tl_err_work(struct work_struct *work)
2135{
2136 struct srp_target_port *target;
2137
2138 target = container_of(work, struct srp_target_port, tl_err_work);
2139 if (target->rport)
2140 srp_start_tl_fail_timers(target->rport);
2141}
2142
1dc7b1f1
CH
2143static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2144 const char *opname)
948d1e88 2145{
1dc7b1f1 2146 struct srp_rdma_ch *ch = cq->cq_context;
7dad6b2e
BVA
2147 struct srp_target_port *target = ch->target;
2148
c014c8cd 2149 if (ch->connected && !target->qp_in_error) {
1dc7b1f1
CH
2150 shost_printk(KERN_ERR, target->scsi_host,
2151 PFX "failed %s status %s (%d) for CQE %p\n",
2152 opname, ib_wc_status_msg(wc->status), wc->status,
2153 wc->wr_cqe);
c1120f89 2154 queue_work(system_long_wq, &target->tl_err_work);
4f0af697 2155 }
948d1e88
BVA
2156 target->qp_in_error = true;
2157}
2158
76c75b25 2159static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
aef9ec39 2160{
9c5274ee 2161 struct request *rq = scsi_cmd_to_rq(scmnd);
76c75b25 2162 struct srp_target_port *target = host_to_target(shost);
509c07bc 2163 struct srp_rdma_ch *ch;
ad215aae 2164 struct srp_request *req = scsi_cmd_priv(scmnd);
aef9ec39
RD
2165 struct srp_iu *iu;
2166 struct srp_cmd *cmd;
85507bcc 2167 struct ib_device *dev;
76c75b25 2168 unsigned long flags;
77f2c1a4 2169 u32 tag;
d1b4289e 2170 int len, ret;
aef9ec39 2171
d1b4289e
BVA
2172 scmnd->result = srp_chkready(target->rport);
2173 if (unlikely(scmnd->result))
2174 goto err;
2ce19e72 2175
9c5274ee
BVA
2176 WARN_ON_ONCE(rq->tag < 0);
2177 tag = blk_mq_unique_tag(rq);
d92c0da7 2178 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
509c07bc
BVA
2179
2180 spin_lock_irqsave(&ch->lock, flags);
2181 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
509c07bc 2182 spin_unlock_irqrestore(&ch->lock, flags);
aef9ec39 2183
77f2c1a4
BVA
2184 if (!iu)
2185 goto err;
2186
05321937 2187 dev = target->srp_host->srp_dev->dev;
513d5647 2188 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
85507bcc 2189 DMA_TO_DEVICE);
aef9ec39 2190
aef9ec39
RD
2191 cmd = iu->buf;
2192 memset(cmd, 0, sizeof *cmd);
2193
2194 cmd->opcode = SRP_CMD;
985aa495 2195 int_to_scsilun(scmnd->device->lun, &cmd->lun);
77f2c1a4 2196 cmd->tag = tag;
aef9ec39 2197 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
482fffc4
BVA
2198 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2199 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2200 4);
2201 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2202 goto err_iu;
2203 }
aef9ec39 2204
aef9ec39
RD
2205 req->scmnd = scmnd;
2206 req->cmd = iu;
aef9ec39 2207
509c07bc 2208 len = srp_map_data(scmnd, ch, req);
aef9ec39 2209 if (len < 0) {
7aa54bd7 2210 shost_printk(KERN_ERR, target->scsi_host,
d1b4289e
BVA
2211 PFX "Failed to map data (%d)\n", len);
2212 /*
2213 * If we ran out of memory descriptors (-ENOMEM) because an
2214 * application is queuing many requests with more than
52ede08f 2215 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
d1b4289e
BVA
2216 * to reduce queue depth temporarily.
2217 */
2218 scmnd->result = len == -ENOMEM ?
3d45cefc 2219 DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
76c75b25 2220 goto err_iu;
aef9ec39
RD
2221 }
2222
513d5647 2223 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
85507bcc 2224 DMA_TO_DEVICE);
aef9ec39 2225
509c07bc 2226 if (srp_post_send(ch, iu, len)) {
7aa54bd7 2227 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2ee00f6a 2228 scmnd->result = DID_ERROR << 16;
aef9ec39
RD
2229 goto err_unmap;
2230 }
2231
fd561412 2232 return 0;
aef9ec39
RD
2233
2234err_unmap:
509c07bc 2235 srp_unmap_data(scmnd, ch, req);
aef9ec39 2236
76c75b25 2237err_iu:
509c07bc 2238 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
76c75b25 2239
024ca901
BVA
2240 /*
 2241	 * Prevent the loops that iterate over the request ring from
 2242	 * encountering a dangling SCSI command pointer.
2243 */
2244 req->scmnd = NULL;
2245
d1b4289e
BVA
2246err:
2247 if (scmnd->result) {
5f9ae9ee 2248 scsi_done(scmnd);
d1b4289e
BVA
2249 ret = 0;
2250 } else {
2251 ret = SCSI_MLQUEUE_HOST_BUSY;
2252 }
a95cadb9 2253
fd561412 2254 return ret;
aef9ec39
RD
2255}
2256
4d73f95f
BVA
2257/*
2258 * Note: the resources allocated in this function are freed in
509c07bc 2259 * srp_free_ch_ib().
4d73f95f 2260 */
509c07bc 2261static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
aef9ec39 2262{
509c07bc 2263 struct srp_target_port *target = ch->target;
aef9ec39
RD
2264 int i;
2265
509c07bc
BVA
2266 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2267 GFP_KERNEL);
2268 if (!ch->rx_ring)
4d73f95f 2269 goto err_no_ring;
509c07bc
BVA
2270 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2271 GFP_KERNEL);
2272 if (!ch->tx_ring)
4d73f95f
BVA
2273 goto err_no_ring;
2274
2275 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2276 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2277 ch->max_ti_iu_len,
2278 GFP_KERNEL, DMA_FROM_DEVICE);
2279 if (!ch->rx_ring[i])
aef9ec39
RD
2280 goto err;
2281 }
2282
4d73f95f 2283 for (i = 0; i < target->queue_size; ++i) {
509c07bc 2284 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
513d5647 2285 ch->max_it_iu_len,
509c07bc
BVA
2286 GFP_KERNEL, DMA_TO_DEVICE);
2287 if (!ch->tx_ring[i])
aef9ec39 2288 goto err;
dcb4cb85 2289
509c07bc 2290 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
aef9ec39
RD
2291 }
2292
2293 return 0;
2294
2295err:
4d73f95f 2296 for (i = 0; i < target->queue_size; ++i) {
509c07bc
BVA
2297 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2298 srp_free_iu(target->srp_host, ch->tx_ring[i]);
aef9ec39
RD
2299 }
2300
4d73f95f
BVA
2301
2302err_no_ring:
509c07bc
BVA
2303 kfree(ch->tx_ring);
2304 ch->tx_ring = NULL;
2305 kfree(ch->rx_ring);
2306 ch->rx_ring = NULL;
4d73f95f 2307
aef9ec39
RD
2308 return -ENOMEM;
2309}
2310
c9b03c1a
BVA
2311static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2312{
2313 uint64_t T_tr_ns, max_compl_time_ms;
2314 uint32_t rq_tmo_jiffies;
2315
2316 /*
2317 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2318 * table 91), both the QP timeout and the retry count have to be set
 2319	 * for RC QPs during the RTR to RTS transition.
2320 */
2321 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2322 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2323
2324 /*
2325 * Set target->rq_tmo_jiffies to one second more than the largest time
2326 * it can take before an error completion is generated. See also
2327 * C9-140..142 in the IBTA spec for more information about how to
2328 * convert the QP Local ACK Timeout value to nanoseconds.
2329 */
2330 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2331 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2332 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2333 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2334
2335 return rq_tmo_jiffies;
2336}
2337
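/*
 * Stand-alone sketch (not driver code) of the timeout math above: the IBTA
 * local ACK timeout value t encodes 4.096 us * 2^t, the worst case before an
 * error completion is roughly retry_cnt * 4 * T_tr, and the driver adds one
 * second of slack before converting to jiffies. HZ=250 is an assumption made
 * for this example; the kernel uses msecs_to_jiffies() instead.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_HZ 250ULL

static uint64_t ex_rq_tmo_jiffies(unsigned int timeout, unsigned int retry_cnt)
{
	uint64_t t_tr_ns = 4096ULL << timeout;		/* 4096 ns * 2^timeout */
	uint64_t max_compl_ms = retry_cnt * 4 * t_tr_ns / 1000000ULL;

	return (max_compl_ms + 1000) * EX_HZ / 1000;	/* + 1 s, to jiffies */
}

int main(void)
{
	/* e.g. timeout=14 (~67 ms per try) and retry_cnt=7 -> ~2.9 s total */
	printf("%llu jiffies\n",
	       (unsigned long long)ex_rq_tmo_jiffies(14, 7));
	return 0;
}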
961e0be8 2338static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
e6300cbd 2339 const struct srp_login_rsp *lrsp,
509c07bc 2340 struct srp_rdma_ch *ch)
961e0be8 2341{
509c07bc 2342 struct srp_target_port *target = ch->target;
961e0be8
DD
2343 struct ib_qp_attr *qp_attr = NULL;
2344 int attr_mask = 0;
19f31343 2345 int ret = 0;
961e0be8
DD
2346 int i;
2347
2348 if (lrsp->opcode == SRP_LOGIN_RSP) {
509c07bc
BVA
2349 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2350 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
0fbb37dd
SG
2351 ch->use_imm_data = srp_use_imm_data &&
2352 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
882981f4 2353 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
b2e872f4
HL
2354 ch->use_imm_data,
2355 target->max_it_iu_size);
513d5647
BVA
2356 WARN_ON_ONCE(ch->max_it_iu_len >
2357 be32_to_cpu(lrsp->max_it_iu_len));
961e0be8 2358
882981f4
BVA
2359 if (ch->use_imm_data)
2360 shost_printk(KERN_DEBUG, target->scsi_host,
2361 PFX "using immediate data\n");
961e0be8
DD
2362
2363 /*
2364 * Reserve credits for task management so we don't
2365 * bounce requests back to the SCSI mid-layer.
2366 */
2367 target->scsi_host->can_queue
509c07bc 2368 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
961e0be8 2369 target->scsi_host->can_queue);
4d73f95f
BVA
2370 target->scsi_host->cmd_per_lun
2371 = min_t(int, target->scsi_host->can_queue,
2372 target->scsi_host->cmd_per_lun);
961e0be8
DD
2373 } else {
2374 shost_printk(KERN_WARNING, target->scsi_host,
2375 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2376 ret = -ECONNRESET;
2377 goto error;
2378 }
2379
509c07bc
BVA
2380 if (!ch->rx_ring) {
2381 ret = srp_alloc_iu_bufs(ch);
961e0be8
DD
2382 if (ret)
2383 goto error;
2384 }
2385
4d73f95f 2386 for (i = 0; i < target->queue_size; i++) {
509c07bc
BVA
2387 struct srp_iu *iu = ch->rx_ring[i];
2388
2389 ret = srp_post_recv(ch, iu);
961e0be8 2390 if (ret)
19f31343 2391 goto error;
961e0be8
DD
2392 }
2393
19f31343
BVA
2394 if (!target->using_rdma_cm) {
2395 ret = -ENOMEM;
2396 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2397 if (!qp_attr)
2398 goto error;
2399
2400 qp_attr->qp_state = IB_QPS_RTR;
2401 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2402 if (ret)
2403 goto error_free;
2404
2405 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2406 if (ret)
2407 goto error_free;
961e0be8 2408
19f31343
BVA
2409 qp_attr->qp_state = IB_QPS_RTS;
2410 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2411 if (ret)
2412 goto error_free;
c9b03c1a 2413
19f31343 2414 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
961e0be8 2415
19f31343
BVA
2416 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2417 if (ret)
2418 goto error_free;
2419
2420 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2421 }
961e0be8
DD
2422
2423error_free:
2424 kfree(qp_attr);
2425
2426error:
509c07bc 2427 ch->status = ret;
961e0be8
DD
2428}
2429
19f31343 2430static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
e7ff98ae 2431 const struct ib_cm_event *event,
19f31343 2432 struct srp_rdma_ch *ch)
aef9ec39 2433{
509c07bc 2434 struct srp_target_port *target = ch->target;
7aa54bd7 2435 struct Scsi_Host *shost = target->scsi_host;
aef9ec39
RD
2436 struct ib_class_port_info *cpi;
2437 int opcode;
19f31343 2438 u16 dlid;
aef9ec39
RD
2439
2440 switch (event->param.rej_rcvd.reason) {
2441 case IB_CM_REJ_PORT_CM_REDIRECT:
2442 cpi = event->param.rej_rcvd.ari;
19f31343
BVA
2443 dlid = be16_to_cpu(cpi->redirect_lid);
2444 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2445 ch->ib_cm.path.pkey = cpi->redirect_pkey;
aef9ec39 2446 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
19f31343 2447 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
aef9ec39 2448
19f31343 2449 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
aef9ec39
RD
2450 break;
2451
2452 case IB_CM_REJ_PORT_REDIRECT:
5d7cbfd6 2453 if (srp_target_is_topspin(target)) {
19f31343
BVA
2454 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2455
aef9ec39
RD
2456 /*
2457 * Topspin/Cisco SRP gateways incorrectly send
2458 * reject reason code 25 when they mean 24
2459 * (port redirect).
2460 */
19f31343 2461 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
aef9ec39 2462
7aa54bd7
DD
2463 shost_printk(KERN_DEBUG, shost,
2464 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
19f31343
BVA
2465 be64_to_cpu(dgid->global.subnet_prefix),
2466 be64_to_cpu(dgid->global.interface_id));
aef9ec39 2467
509c07bc 2468 ch->status = SRP_PORT_REDIRECT;
aef9ec39 2469 } else {
7aa54bd7
DD
2470 shost_printk(KERN_WARNING, shost,
2471 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
509c07bc 2472 ch->status = -ECONNRESET;
aef9ec39
RD
2473 }
2474 break;
2475
2476 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
7aa54bd7
DD
2477 shost_printk(KERN_WARNING, shost,
2478 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
509c07bc 2479 ch->status = -ECONNRESET;
aef9ec39
RD
2480 break;
2481
2482 case IB_CM_REJ_CONSUMER_DEFINED:
2483 opcode = *(u8 *) event->private_data;
2484 if (opcode == SRP_LOGIN_REJ) {
2485 struct srp_login_rej *rej = event->private_data;
2486 u32 reason = be32_to_cpu(rej->reason);
2487
2488 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
7aa54bd7
DD
2489 shost_printk(KERN_WARNING, shost,
2490 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
aef9ec39 2491 else
e7ffde01
BVA
2492 shost_printk(KERN_WARNING, shost, PFX
2493 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
747fe000 2494 target->sgid.raw,
19f31343
BVA
2495 target->ib_cm.orig_dgid.raw,
2496 reason);
aef9ec39 2497 } else
7aa54bd7
DD
2498 shost_printk(KERN_WARNING, shost,
2499 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2500 " opcode 0x%02x\n", opcode);
509c07bc 2501 ch->status = -ECONNRESET;
aef9ec39
RD
2502 break;
2503
9fe4bcf4
DD
2504 case IB_CM_REJ_STALE_CONN:
2505 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
509c07bc 2506 ch->status = SRP_STALE_CONN;
9fe4bcf4
DD
2507 break;
2508
aef9ec39 2509 default:
7aa54bd7
DD
2510 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2511 event->param.rej_rcvd.reason);
509c07bc 2512 ch->status = -ECONNRESET;
aef9ec39
RD
2513 }
2514}
2515
e7ff98ae
PP
2516static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2517 const struct ib_cm_event *event)
aef9ec39 2518{
509c07bc
BVA
2519 struct srp_rdma_ch *ch = cm_id->context;
2520 struct srp_target_port *target = ch->target;
aef9ec39 2521 int comp = 0;
aef9ec39
RD
2522
2523 switch (event->event) {
2524 case IB_CM_REQ_ERROR:
7aa54bd7
DD
2525 shost_printk(KERN_DEBUG, target->scsi_host,
2526 PFX "Sending CM REQ failed\n");
aef9ec39 2527 comp = 1;
509c07bc 2528 ch->status = -ECONNRESET;
aef9ec39
RD
2529 break;
2530
2531 case IB_CM_REP_RECEIVED:
2532 comp = 1;
509c07bc 2533 srp_cm_rep_handler(cm_id, event->private_data, ch);
aef9ec39
RD
2534 break;
2535
2536 case IB_CM_REJ_RECEIVED:
7aa54bd7 2537 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
aef9ec39
RD
2538 comp = 1;
2539
19f31343 2540 srp_ib_cm_rej_handler(cm_id, event, ch);
aef9ec39
RD
2541 break;
2542
b7ac4ab4 2543 case IB_CM_DREQ_RECEIVED:
7aa54bd7
DD
2544 shost_printk(KERN_WARNING, target->scsi_host,
2545 PFX "DREQ received - connection closed\n");
c014c8cd 2546 ch->connected = false;
b7ac4ab4 2547 if (ib_send_cm_drep(cm_id, NULL, 0))
7aa54bd7
DD
2548 shost_printk(KERN_ERR, target->scsi_host,
2549 PFX "Sending CM DREP failed\n");
c1120f89 2550 queue_work(system_long_wq, &target->tl_err_work);
aef9ec39
RD
2551 break;
2552
2553 case IB_CM_TIMEWAIT_EXIT:
7aa54bd7
DD
2554 shost_printk(KERN_ERR, target->scsi_host,
2555 PFX "connection closed\n");
ac72d766 2556 comp = 1;
aef9ec39 2557
509c07bc 2558 ch->status = 0;
aef9ec39
RD
2559 break;
2560
b7ac4ab4
IR
2561 case IB_CM_MRA_RECEIVED:
2562 case IB_CM_DREQ_ERROR:
2563 case IB_CM_DREP_RECEIVED:
2564 break;
2565
aef9ec39 2566 default:
7aa54bd7
DD
2567 shost_printk(KERN_WARNING, target->scsi_host,
2568 PFX "Unhandled CM event %d\n", event->event);
aef9ec39
RD
2569 break;
2570 }
2571
2572 if (comp)
509c07bc 2573 complete(&ch->done);
aef9ec39 2574
aef9ec39
RD
2575 return 0;
2576}
2577
19f31343
BVA
2578static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2579 struct rdma_cm_event *event)
2580{
2581 struct srp_target_port *target = ch->target;
2582 struct Scsi_Host *shost = target->scsi_host;
2583 int opcode;
2584
2585 switch (event->status) {
2586 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2587 shost_printk(KERN_WARNING, shost,
2588 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2589 ch->status = -ECONNRESET;
2590 break;
2591
2592 case IB_CM_REJ_CONSUMER_DEFINED:
2593 opcode = *(u8 *) event->param.conn.private_data;
2594 if (opcode == SRP_LOGIN_REJ) {
2595 struct srp_login_rej *rej =
2596 (struct srp_login_rej *)
2597 event->param.conn.private_data;
2598 u32 reason = be32_to_cpu(rej->reason);
2599
2600 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2601 shost_printk(KERN_WARNING, shost,
2602 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2603 else
2604 shost_printk(KERN_WARNING, shost,
2605 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2606 } else {
2607 shost_printk(KERN_WARNING, shost,
2608 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2609 opcode);
2610 }
2611 ch->status = -ECONNRESET;
2612 break;
2613
2614 case IB_CM_REJ_STALE_CONN:
2615 shost_printk(KERN_WARNING, shost,
2616 " REJ reason: stale connection\n");
2617 ch->status = SRP_STALE_CONN;
2618 break;
2619
2620 default:
2621 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2622 event->status);
2623 ch->status = -ECONNRESET;
2624 break;
2625 }
2626}
2627
2628static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2629 struct rdma_cm_event *event)
2630{
2631 struct srp_rdma_ch *ch = cm_id->context;
2632 struct srp_target_port *target = ch->target;
2633 int comp = 0;
2634
2635 switch (event->event) {
2636 case RDMA_CM_EVENT_ADDR_RESOLVED:
2637 ch->status = 0;
2638 comp = 1;
2639 break;
2640
2641 case RDMA_CM_EVENT_ADDR_ERROR:
2642 ch->status = -ENXIO;
2643 comp = 1;
2644 break;
2645
2646 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2647 ch->status = 0;
2648 comp = 1;
2649 break;
2650
2651 case RDMA_CM_EVENT_ROUTE_ERROR:
2652 case RDMA_CM_EVENT_UNREACHABLE:
2653 ch->status = -EHOSTUNREACH;
2654 comp = 1;
2655 break;
2656
2657 case RDMA_CM_EVENT_CONNECT_ERROR:
2658 shost_printk(KERN_DEBUG, target->scsi_host,
2659 PFX "Sending CM REQ failed\n");
2660 comp = 1;
2661 ch->status = -ECONNRESET;
2662 break;
2663
2664 case RDMA_CM_EVENT_ESTABLISHED:
2665 comp = 1;
2666 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2667 break;
2668
2669 case RDMA_CM_EVENT_REJECTED:
2670 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2671 comp = 1;
2672
2673 srp_rdma_cm_rej_handler(ch, event);
2674 break;
2675
2676 case RDMA_CM_EVENT_DISCONNECTED:
2677 if (ch->connected) {
2678 shost_printk(KERN_WARNING, target->scsi_host,
2679 PFX "received DREQ\n");
2680 rdma_disconnect(ch->rdma_cm.cm_id);
2681 comp = 1;
2682 ch->status = 0;
2683 queue_work(system_long_wq, &target->tl_err_work);
2684 }
2685 break;
2686
2687 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2688 shost_printk(KERN_ERR, target->scsi_host,
2689 PFX "connection closed\n");
2690
2691 comp = 1;
2692 ch->status = 0;
2693 break;
2694
2695 default:
2696 shost_printk(KERN_WARNING, target->scsi_host,
2697 PFX "Unhandled CM event %d\n", event->event);
2698 break;
2699 }
2700
2701 if (comp)
2702 complete(&ch->done);
2703
2704 return 0;
2705}
2706
71444b97
JW
2707/**
2708 * srp_change_queue_depth - setting device queue depth
2709 * @sdev: scsi device struct
2710 * @qdepth: requested queue depth
71444b97
JW
2711 *
2712 * Returns queue depth.
2713 */
2714static int
db5ed4df 2715srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
71444b97 2716{
c40ecc12 2717 if (!sdev->tagged_supported)
1e6f2416 2718 qdepth = 1;
db5ed4df 2719 return scsi_change_queue_depth(sdev, qdepth);
71444b97
JW
2720}
2721
985aa495 2722static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
0a6fdbde 2723 u8 func, u8 *status)
aef9ec39 2724{
509c07bc 2725 struct srp_target_port *target = ch->target;
a95cadb9 2726 struct srp_rport *rport = target->rport;
19081f31 2727 struct ib_device *dev = target->srp_host->srp_dev->dev;
aef9ec39
RD
2728 struct srp_iu *iu;
2729 struct srp_tsk_mgmt *tsk_mgmt;
0a6fdbde 2730 int res;
aef9ec39 2731
c014c8cd 2732 if (!ch->connected || target->qp_in_error)
3780d1f0
BVA
2733 return -1;
2734
a95cadb9 2735 /*
509c07bc 2736	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
a95cadb9
BVA
 2737	 * invoked while a task management function is being sent.
2738 */
2739 mutex_lock(&rport->mutex);
509c07bc
BVA
2740 spin_lock_irq(&ch->lock);
2741 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2742 spin_unlock_irq(&ch->lock);
76c75b25 2743
a95cadb9
BVA
2744 if (!iu) {
2745 mutex_unlock(&rport->mutex);
2746
76c75b25 2747 return -1;
a95cadb9 2748 }
aef9ec39 2749
882981f4
BVA
2750 iu->num_sge = 1;
2751
19081f31
DD
2752 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2753 DMA_TO_DEVICE);
aef9ec39
RD
2754 tsk_mgmt = iu->buf;
2755 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2756
2757 tsk_mgmt->opcode = SRP_TSK_MGMT;
985aa495 2758 int_to_scsilun(lun, &tsk_mgmt->lun);
aef9ec39 2759 tsk_mgmt->tsk_mgmt_func = func;
f8b6e31e 2760 tsk_mgmt->task_tag = req_tag;
aef9ec39 2761
0a6fdbde
BVA
2762 spin_lock_irq(&ch->lock);
2763 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2764 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2765 spin_unlock_irq(&ch->lock);
2766
2767 init_completion(&ch->tsk_mgmt_done);
2768
19081f31
DD
2769 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2770 DMA_TO_DEVICE);
509c07bc
BVA
2771 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2772 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
a95cadb9
BVA
2773 mutex_unlock(&rport->mutex);
2774
76c75b25
BVA
2775 return -1;
2776 }
0a6fdbde
BVA
2777 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2778 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2779 if (res > 0 && status)
2780 *status = ch->tsk_mgmt_status;
a95cadb9 2781 mutex_unlock(&rport->mutex);
d945e1df 2782
0a6fdbde 2783 WARN_ON_ONCE(res < 0);
aef9ec39 2784
0a6fdbde 2785 return res > 0 ? 0 : -1;
d945e1df
RD
2786}
2787
aef9ec39
RD
2788static int srp_abort(struct scsi_cmnd *scmnd)
2789{
d945e1df 2790 struct srp_target_port *target = host_to_target(scmnd->device->host);
f8b6e31e 2791 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
77f2c1a4 2792 u32 tag;
d92c0da7 2793 u16 ch_idx;
509c07bc 2794 struct srp_rdma_ch *ch;
086f44f5 2795 int ret;
d945e1df 2796
7aa54bd7 2797 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
aef9ec39 2798
d92c0da7 2799 if (!req)
99b6697a 2800 return SUCCESS;
9c5274ee 2801 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
d92c0da7
BVA
2802 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2803 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2804 return SUCCESS;
2805 ch = &target->ch[ch_idx];
2806 if (!srp_claim_req(ch, req, NULL, scmnd))
2807 return SUCCESS;
2808 shost_printk(KERN_ERR, target->scsi_host,
2809 "Sending SRP abort for tag %#x\n", tag);
77f2c1a4 2810 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
0a6fdbde 2811 SRP_TSK_ABORT_TASK, NULL) == 0)
086f44f5 2812 ret = SUCCESS;
ed9b2264 2813 else if (target->rport->state == SRP_RPORT_LOST)
99e1c139 2814 ret = FAST_IO_FAIL;
086f44f5
BVA
2815 else
2816 ret = FAILED;
e68088e7
BVA
2817 if (ret == SUCCESS) {
2818 srp_free_req(ch, req, scmnd, 0);
2819 scmnd->result = DID_ABORT << 16;
5f9ae9ee 2820 scsi_done(scmnd);
e68088e7 2821 }
d945e1df 2822
086f44f5 2823 return ret;
aef9ec39
RD
2824}
2825
2826static int srp_reset_device(struct scsi_cmnd *scmnd)
2827{
d945e1df 2828 struct srp_target_port *target = host_to_target(scmnd->device->host);
d92c0da7 2829 struct srp_rdma_ch *ch;
0a6fdbde 2830 u8 status;
d945e1df 2831
7aa54bd7 2832 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
aef9ec39 2833
d92c0da7 2834 ch = &target->ch[0];
509c07bc 2835 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
0a6fdbde 2836 SRP_TSK_LUN_RESET, &status))
d945e1df 2837 return FAILED;
0a6fdbde 2838 if (status)
d945e1df
RD
2839 return FAILED;
2840
d945e1df 2841 return SUCCESS;
aef9ec39
RD
2842}
2843
2844static int srp_reset_host(struct scsi_cmnd *scmnd)
2845{
2846 struct srp_target_port *target = host_to_target(scmnd->device->host);
aef9ec39 2847
7aa54bd7 2848 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
aef9ec39 2849
ed9b2264 2850 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
aef9ec39
RD
2851}
2852
b0780ee5
BVA
2853static int srp_target_alloc(struct scsi_target *starget)
2854{
2855 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2856 struct srp_target_port *target = host_to_target(shost);
2857
2858 if (target->target_can_queue)
2859 starget->can_queue = target->target_can_queue;
2860 return 0;
2861}
2862
c9b03c1a
BVA
2863static int srp_slave_configure(struct scsi_device *sdev)
2864{
2865 struct Scsi_Host *shost = sdev->host;
2866 struct srp_target_port *target = host_to_target(shost);
2867 struct request_queue *q = sdev->request_queue;
2868 unsigned long timeout;
2869
2870 if (sdev->type == TYPE_DISK) {
2871 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2872 blk_queue_rq_timeout(q, timeout);
2873 }
2874
2875 return 0;
2876}
2877
33e82346 2878static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
ee959b00 2879 char *buf)
6ecb0c84 2880{
ee959b00 2881 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2882
1c7fd726 2883 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
6ecb0c84
RD
2884}
2885
33e82346
Y
2886static DEVICE_ATTR_RO(id_ext);
2887
2888static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
ee959b00 2889 char *buf)
6ecb0c84 2890{
ee959b00 2891 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2892
1c7fd726 2893 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
6ecb0c84
RD
2894}
2895
33e82346
Y
2896static DEVICE_ATTR_RO(ioc_guid);
2897
2898static ssize_t service_id_show(struct device *dev,
ee959b00 2899 struct device_attribute *attr, char *buf)
6ecb0c84 2900{
ee959b00 2901 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2902
19f31343
BVA
2903 if (target->using_rdma_cm)
2904 return -ENOENT;
1c7fd726
JP
2905 return sysfs_emit(buf, "0x%016llx\n",
2906 be64_to_cpu(target->ib_cm.service_id));
6ecb0c84
RD
2907}
2908
33e82346
Y
2909static DEVICE_ATTR_RO(service_id);
2910
2911static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
ee959b00 2912 char *buf)
6ecb0c84 2913{
ee959b00 2914 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6ecb0c84 2915
19f31343
BVA
2916 if (target->using_rdma_cm)
2917 return -ENOENT;
45808361 2918
1c7fd726 2919 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
6ecb0c84
RD
2920}
2921
33e82346
Y
2922static DEVICE_ATTR_RO(pkey);
2923
2924static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
848b3082
BVA
2925 char *buf)
2926{
2927 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2928
1c7fd726 2929 return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
848b3082
BVA
2930}
2931
33e82346
Y
2932static DEVICE_ATTR_RO(sgid);
2933
2934static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
ee959b00 2935 char *buf)
6ecb0c84 2936{
ee959b00 2937 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7 2938 struct srp_rdma_ch *ch = &target->ch[0];
6ecb0c84 2939
19f31343
BVA
2940 if (target->using_rdma_cm)
2941 return -ENOENT;
45808361 2942
1c7fd726 2943 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
6ecb0c84
RD
2944}
2945
33e82346
Y
2946static DEVICE_ATTR_RO(dgid);
2947
2948static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2949 char *buf)
3633b3d0 2950{
ee959b00 2951 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3633b3d0 2952
19f31343
BVA
2953 if (target->using_rdma_cm)
2954 return -ENOENT;
45808361 2955
1c7fd726 2956 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
3633b3d0
IR
2957}
2958
33e82346
Y
2959static DEVICE_ATTR_RO(orig_dgid);
2960
2961static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2962 char *buf)
89de7486
BVA
2963{
2964 struct srp_target_port *target = host_to_target(class_to_shost(dev));
d92c0da7
BVA
2965 struct srp_rdma_ch *ch;
2966 int i, req_lim = INT_MAX;
89de7486 2967
d92c0da7
BVA
2968 for (i = 0; i < target->ch_count; i++) {
2969 ch = &target->ch[i];
2970 req_lim = min(req_lim, ch->req_lim);
2971 }
45808361 2972
1c7fd726 2973 return sysfs_emit(buf, "%d\n", req_lim);
89de7486
BVA
2974}
2975
33e82346
Y
2976static DEVICE_ATTR_RO(req_lim);
2977
2978static ssize_t zero_req_lim_show(struct device *dev,
ee959b00 2979 struct device_attribute *attr, char *buf)
6bfa24fa 2980{
ee959b00 2981 struct srp_target_port *target = host_to_target(class_to_shost(dev));
6bfa24fa 2982
1c7fd726 2983 return sysfs_emit(buf, "%d\n", target->zero_req_lim);
6bfa24fa
RD
2984}
2985
33e82346
Y
2986static DEVICE_ATTR_RO(zero_req_lim);
2987
2988static ssize_t local_ib_port_show(struct device *dev,
ee959b00 2989 struct device_attribute *attr, char *buf)
ded7f1a1 2990{
ee959b00 2991 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 2992
1c7fd726 2993 return sysfs_emit(buf, "%d\n", target->srp_host->port);
ded7f1a1
IR
2994}
2995
33e82346
Y
2996static DEVICE_ATTR_RO(local_ib_port);
2997
2998static ssize_t local_ib_device_show(struct device *dev,
ee959b00 2999 struct device_attribute *attr, char *buf)
ded7f1a1 3000{
ee959b00 3001 struct srp_target_port *target = host_to_target(class_to_shost(dev));
ded7f1a1 3002
1c7fd726
JP
3003 return sysfs_emit(buf, "%s\n",
3004 dev_name(&target->srp_host->srp_dev->dev->dev));
ded7f1a1
IR
3005}
3006
33e82346
Y
3007static DEVICE_ATTR_RO(local_ib_device);
3008
3009static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
d92c0da7
BVA
3010 char *buf)
3011{
3012 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3013
1c7fd726 3014 return sysfs_emit(buf, "%d\n", target->ch_count);
d92c0da7
BVA
3015}
3016
33e82346
Y
3017static DEVICE_ATTR_RO(ch_count);
3018
3019static ssize_t comp_vector_show(struct device *dev,
4b5e5f41
BVA
3020 struct device_attribute *attr, char *buf)
3021{
3022 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3023
1c7fd726 3024 return sysfs_emit(buf, "%d\n", target->comp_vector);
4b5e5f41
BVA
3025}
3026
33e82346
Y
3027static DEVICE_ATTR_RO(comp_vector);
3028
3029static ssize_t tl_retry_count_show(struct device *dev,
7bb312e4
VP
3030 struct device_attribute *attr, char *buf)
3031{
3032 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3033
1c7fd726 3034 return sysfs_emit(buf, "%d\n", target->tl_retry_count);
7bb312e4
VP
3035}
3036
33e82346
Y
3037static DEVICE_ATTR_RO(tl_retry_count);
3038
3039static ssize_t cmd_sg_entries_show(struct device *dev,
49248644
DD
3040 struct device_attribute *attr, char *buf)
3041{
3042 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3043
1c7fd726 3044 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
49248644
DD
3045}
3046
33e82346
Y
3047static DEVICE_ATTR_RO(cmd_sg_entries);
3048
3049static ssize_t allow_ext_sg_show(struct device *dev,
c07d424d
DD
3050 struct device_attribute *attr, char *buf)
3051{
3052 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3053
1c7fd726 3054 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
c07d424d
DD
3055}
3056
33e82346 3057static DEVICE_ATTR_RO(allow_ext_sg);
ee959b00 3058
a3cf94c9
BVA
3059static struct attribute *srp_host_attrs[] = {
3060 &dev_attr_id_ext.attr,
3061 &dev_attr_ioc_guid.attr,
3062 &dev_attr_service_id.attr,
3063 &dev_attr_pkey.attr,
3064 &dev_attr_sgid.attr,
3065 &dev_attr_dgid.attr,
3066 &dev_attr_orig_dgid.attr,
3067 &dev_attr_req_lim.attr,
3068 &dev_attr_zero_req_lim.attr,
3069 &dev_attr_local_ib_port.attr,
3070 &dev_attr_local_ib_device.attr,
3071 &dev_attr_ch_count.attr,
3072 &dev_attr_comp_vector.attr,
3073 &dev_attr_tl_retry_count.attr,
3074 &dev_attr_cmd_sg_entries.attr,
3075 &dev_attr_allow_ext_sg.attr,
6ecb0c84
RD
3076 NULL
3077};
3078
a3cf94c9
BVA
3079ATTRIBUTE_GROUPS(srp_host);
3080
aef9ec39
RD
3081static struct scsi_host_template srp_template = {
3082 .module = THIS_MODULE,
b7f008fd
RD
3083 .name = "InfiniBand SRP initiator",
3084 .proc_name = DRV_NAME,
b0780ee5 3085 .target_alloc = srp_target_alloc,
c9b03c1a 3086 .slave_configure = srp_slave_configure,
aef9ec39 3087 .info = srp_target_info,
ad215aae
BVA
3088 .init_cmd_priv = srp_init_cmd_priv,
3089 .exit_cmd_priv = srp_exit_cmd_priv,
aef9ec39 3090 .queuecommand = srp_queuecommand,
71444b97 3091 .change_queue_depth = srp_change_queue_depth,
b6a05c82 3092 .eh_timed_out = srp_timed_out,
aef9ec39
RD
3093 .eh_abort_handler = srp_abort,
3094 .eh_device_reset_handler = srp_reset_device,
3095 .eh_host_reset_handler = srp_reset_host,
2742c1da 3096 .skip_settle_delay = true,
49248644 3097 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
4d73f95f 3098 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
aef9ec39 3099 .this_id = -1,
4d73f95f 3100 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
a3cf94c9 3101 .shost_groups = srp_host_groups,
c40ecc12 3102 .track_queue_depth = 1,
ad215aae 3103 .cmd_size = sizeof(struct srp_request),
aef9ec39
RD
3104};
3105
34aa654e
BVA
3106static int srp_sdev_count(struct Scsi_Host *host)
3107{
3108 struct scsi_device *sdev;
3109 int c = 0;
3110
3111 shost_for_each_device(sdev, host)
3112 c++;
3113
3114 return c;
3115}
3116
bc44bd1d
BVA
3117/*
3118 * Return values:
3119 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3120 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3121 * removal has been scheduled.
3122 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3123 */
aef9ec39
RD
3124static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3125{
3236822b
FT
3126 struct srp_rport_identifiers ids;
3127 struct srp_rport *rport;
3128
34aa654e 3129 target->state = SRP_TARGET_SCANNING;
aef9ec39 3130 sprintf(target->target_name, "SRP.T10:%016llX",
45c37cad 3131 be64_to_cpu(target->id_ext));
aef9ec39 3132
dee2b82a 3133 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
aef9ec39
RD
3134 return -ENODEV;
3135
3236822b
FT
3136 memcpy(ids.port_id, &target->id_ext, 8);
3137 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
aebd5e47 3138 ids.roles = SRP_RPORT_ROLE_TARGET;
3236822b
FT
3139 rport = srp_rport_add(target->scsi_host, &ids);
3140 if (IS_ERR(rport)) {
3141 scsi_remove_host(target->scsi_host);
3142 return PTR_ERR(rport);
3143 }
3144
dc1bdbd9 3145 rport->lld_data = target;
9dd69a60 3146 target->rport = rport;
dc1bdbd9 3147
b3589fd4 3148 spin_lock(&host->target_lock);
aef9ec39 3149 list_add_tail(&target->list, &host->target_list);
b3589fd4 3150 spin_unlock(&host->target_lock);
aef9ec39 3151
aef9ec39 3152 scsi_scan_target(&target->scsi_host->shost_gendev,
1d645088 3153 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
aef9ec39 3154
c014c8cd
BVA
3155 if (srp_connected_ch(target) < target->ch_count ||
3156 target->qp_in_error) {
34aa654e
BVA
3157 shost_printk(KERN_INFO, target->scsi_host,
3158 PFX "SCSI scan failed - removing SCSI host\n");
3159 srp_queue_remove_work(target);
3160 goto out;
3161 }
3162
cf1acab7 3163 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
34aa654e
BVA
3164 dev_name(&target->scsi_host->shost_gendev),
3165 srp_sdev_count(target->scsi_host));
3166
3167 spin_lock_irq(&target->lock);
3168 if (target->state == SRP_TARGET_SCANNING)
3169 target->state = SRP_TARGET_LIVE;
3170 spin_unlock_irq(&target->lock);
3171
3172out:
aef9ec39
RD
3173 return 0;
3174}
3175
ee959b00 3176static void srp_release_dev(struct device *dev)
aef9ec39
RD
3177{
3178 struct srp_host *host =
ee959b00 3179 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3180
3181 complete(&host->released);
3182}
3183
3184static struct class srp_class = {
3185 .name = "infiniband_srp",
ee959b00 3186 .dev_release = srp_release_dev
aef9ec39
RD
3187};
3188
96fc248a
BVA
3189/**
3190 * srp_conn_unique() - check whether the connection to a target is unique
af24663b
BVA
3191 * @host: SRP host.
3192 * @target: SRP target port.
96fc248a
BVA
3193 */
3194static bool srp_conn_unique(struct srp_host *host,
3195 struct srp_target_port *target)
3196{
3197 struct srp_target_port *t;
3198 bool ret = false;
3199
3200 if (target->state == SRP_TARGET_REMOVED)
3201 goto out;
3202
3203 ret = true;
3204
3205 spin_lock(&host->target_lock);
3206 list_for_each_entry(t, &host->target_list, list) {
3207 if (t != target &&
3208 target->id_ext == t->id_ext &&
3209 target->ioc_guid == t->ioc_guid &&
3210 target->initiator_ext == t->initiator_ext) {
3211 ret = false;
3212 break;
3213 }
3214 }
3215 spin_unlock(&host->target_lock);
3216
3217out:
3218 return ret;
3219}
3220
aef9ec39
RD
3221/*
3222 * Target ports are added by writing
3223 *
3224 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3225 * pkey=<P_Key>,service_id=<service ID>
19f31343
BVA
3226 * or
3227 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3228 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
aef9ec39
RD
3229 *
3230 * to the add_target sysfs attribute.
3231 */
3232enum {
3233 SRP_OPT_ERR = 0,
3234 SRP_OPT_ID_EXT = 1 << 0,
3235 SRP_OPT_IOC_GUID = 1 << 1,
3236 SRP_OPT_DGID = 1 << 2,
3237 SRP_OPT_PKEY = 1 << 3,
3238 SRP_OPT_SERVICE_ID = 1 << 4,
3239 SRP_OPT_MAX_SECT = 1 << 5,
52fb2b50 3240 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
0c0450db 3241 SRP_OPT_IO_CLASS = 1 << 7,
01cb9bcb 3242 SRP_OPT_INITIATOR_EXT = 1 << 8,
49248644 3243 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
c07d424d
DD
3244 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3245 SRP_OPT_SG_TABLESIZE = 1 << 11,
4b5e5f41 3246 SRP_OPT_COMP_VECTOR = 1 << 12,
7bb312e4 3247 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
4d73f95f 3248 SRP_OPT_QUEUE_SIZE = 1 << 14,
19f31343
BVA
3249 SRP_OPT_IP_SRC = 1 << 15,
3250 SRP_OPT_IP_DEST = 1 << 16,
b0780ee5 3251 SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
547ed331 3252 SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
87fee61c 3253 SRP_OPT_CH_COUNT = 1 << 19,
19f31343
BVA
3254};
3255
3256static unsigned int srp_opt_mandatory[] = {
3257 SRP_OPT_ID_EXT |
3258 SRP_OPT_IOC_GUID |
3259 SRP_OPT_DGID |
3260 SRP_OPT_PKEY |
3261 SRP_OPT_SERVICE_ID,
3262 SRP_OPT_ID_EXT |
3263 SRP_OPT_IOC_GUID |
3264 SRP_OPT_IP_DEST,
aef9ec39
RD
3265};
3266
a447c093 3267static const match_table_t srp_opt_tokens = {
52fb2b50
VP
3268 { SRP_OPT_ID_EXT, "id_ext=%s" },
3269 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3270 { SRP_OPT_DGID, "dgid=%s" },
3271 { SRP_OPT_PKEY, "pkey=%x" },
3272 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3273 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3274 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
b0780ee5 3275 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
0c0450db 3276 { SRP_OPT_IO_CLASS, "io_class=%x" },
01cb9bcb 3277 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
49248644 3278 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
c07d424d
DD
3279 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3280 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
4b5e5f41 3281 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
7bb312e4 3282 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
4d73f95f 3283 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
19f31343
BVA
3284 { SRP_OPT_IP_SRC, "src=%s" },
3285 { SRP_OPT_IP_DEST, "dest=%s" },
547ed331 3286 { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
87fee61c 3287 { SRP_OPT_CH_COUNT, "ch_count=%u", },
52fb2b50 3288 { SRP_OPT_ERR, NULL }
aef9ec39
RD
3289};
3290
c62adb7d
BVA
3291/**
3292 * srp_parse_in - parse an IP address and port number combination
e37df2d5
BVA
3293 * @net: [in] Network namespace.
3294 * @sa: [out] Address family, IP address and port number.
3295 * @addr_port_str: [in] IP address and port number.
bcef5b72 3296 * @has_port: [out] Whether or not @addr_port_str includes a port number.
c62adb7d
BVA
3297 *
3298 * Parse the following address formats:
3299 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3300 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3301 */
19f31343 3302static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
bcef5b72 3303 const char *addr_port_str, bool *has_port)
19f31343 3304{
c62adb7d
BVA
3305 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3306 char *port_str;
19f31343
BVA
3307 int ret;
3308
3309 if (!addr)
3310 return -ENOMEM;
c62adb7d 3311 port_str = strrchr(addr, ':');
bcef5b72
BVA
3312 if (port_str && strchr(port_str, ']'))
3313 port_str = NULL;
3314 if (port_str)
3315 *port_str++ = '\0';
3316 if (has_port)
3317 *has_port = port_str != NULL;
c62adb7d
BVA
3318 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3319 if (ret && addr[0]) {
3320 addr_end = addr + strlen(addr) - 1;
3321 if (addr[0] == '[' && *addr_end == ']') {
3322 *addr_end = '\0';
3323 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3324 port_str, sa);
3325 }
3326 }
19f31343 3327 kfree(addr);
c62adb7d 3328 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
19f31343
BVA
3329 return ret;
3330}
3331
3332static int srp_parse_options(struct net *net, const char *buf,
3333 struct srp_target_port *target)
aef9ec39
RD
3334{
3335 char *options, *sep_opt;
3336 char *p;
aef9ec39 3337 substring_t args[MAX_OPT_ARGS];
2a174df0 3338 unsigned long long ull;
bcef5b72 3339 bool has_port;
aef9ec39
RD
3340 int opt_mask = 0;
3341 int token;
3342 int ret = -EINVAL;
3343 int i;
3344
3345 options = kstrdup(buf, GFP_KERNEL);
3346 if (!options)
3347 return -ENOMEM;
3348
3349 sep_opt = options;
7dcf9c19 3350 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
aef9ec39
RD
3351 if (!*p)
3352 continue;
3353
3354 token = match_token(p, srp_opt_tokens, args);
3355 opt_mask |= token;
3356
3357 switch (token) {
3358 case SRP_OPT_ID_EXT:
3359 p = match_strdup(args);
a20f3a6d
IR
3360 if (!p) {
3361 ret = -ENOMEM;
3362 goto out;
3363 }
2a174df0
BVA
3364 ret = kstrtoull(p, 16, &ull);
3365 if (ret) {
3366 pr_warn("invalid id_ext parameter '%s'\n", p);
3367 kfree(p);
3368 goto out;
3369 }
3370 target->id_ext = cpu_to_be64(ull);
aef9ec39
RD
3371 kfree(p);
3372 break;
3373
3374 case SRP_OPT_IOC_GUID:
3375 p = match_strdup(args);
a20f3a6d
IR
3376 if (!p) {
3377 ret = -ENOMEM;
3378 goto out;
3379 }
2a174df0
BVA
3380 ret = kstrtoull(p, 16, &ull);
3381 if (ret) {
3382 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3383 kfree(p);
3384 goto out;
3385 }
3386 target->ioc_guid = cpu_to_be64(ull);
aef9ec39
RD
3387 kfree(p);
3388 break;
3389
3390 case SRP_OPT_DGID:
3391 p = match_strdup(args);
a20f3a6d
IR
3392 if (!p) {
3393 ret = -ENOMEM;
3394 goto out;
3395 }
aef9ec39 3396 if (strlen(p) != 32) {
e0bda7d8 3397 pr_warn("bad dest GID parameter '%s'\n", p);
ce1823f0 3398 kfree(p);
aef9ec39
RD
3399 goto out;
3400 }
3401
19f31343 3402 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
bf17c1c7 3403 kfree(p);
e711f968
AS
3404 if (ret < 0)
3405 goto out;
aef9ec39
RD
3406 break;
3407
3408 case SRP_OPT_PKEY:
3409 if (match_hex(args, &token)) {
e0bda7d8 3410 pr_warn("bad P_Key parameter '%s'\n", p);
aef9ec39
RD
3411 goto out;
3412 }
19f31343 3413 target->ib_cm.pkey = cpu_to_be16(token);
aef9ec39
RD
3414 break;
3415
3416 case SRP_OPT_SERVICE_ID:
3417 p = match_strdup(args);
a20f3a6d
IR
3418 if (!p) {
3419 ret = -ENOMEM;
3420 goto out;
3421 }
2a174df0
BVA
3422 ret = kstrtoull(p, 16, &ull);
3423 if (ret) {
3424 pr_warn("bad service_id parameter '%s'\n", p);
3425 kfree(p);
3426 goto out;
3427 }
19f31343
BVA
3428 target->ib_cm.service_id = cpu_to_be64(ull);
3429 kfree(p);
3430 break;
3431
3432 case SRP_OPT_IP_SRC:
3433 p = match_strdup(args);
3434 if (!p) {
3435 ret = -ENOMEM;
3436 goto out;
3437 }
bcef5b72
BVA
3438 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3439 NULL);
19f31343
BVA
3440 if (ret < 0) {
3441 pr_warn("bad source parameter '%s'\n", p);
3442 kfree(p);
3443 goto out;
3444 }
3445 target->rdma_cm.src_specified = true;
3446 kfree(p);
3447 break;
3448
3449 case SRP_OPT_IP_DEST:
3450 p = match_strdup(args);
3451 if (!p) {
3452 ret = -ENOMEM;
3453 goto out;
3454 }
bcef5b72
BVA
3455 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3456 &has_port);
3457 if (!has_port)
3458 ret = -EINVAL;
19f31343
BVA
3459 if (ret < 0) {
3460 pr_warn("bad dest parameter '%s'\n", p);
3461 kfree(p);
3462 goto out;
3463 }
3464 target->using_rdma_cm = true;
aef9ec39
RD
3465 kfree(p);
3466 break;
3467
3468 case SRP_OPT_MAX_SECT:
3469 if (match_int(args, &token)) {
e0bda7d8 3470 pr_warn("bad max sect parameter '%s'\n", p);
aef9ec39
RD
3471 goto out;
3472 }
3473 target->scsi_host->max_sectors = token;
3474 break;
3475
4d73f95f
BVA
3476 case SRP_OPT_QUEUE_SIZE:
3477 if (match_int(args, &token) || token < 1) {
3478 pr_warn("bad queue_size parameter '%s'\n", p);
3479 goto out;
3480 }
3481 target->scsi_host->can_queue = token;
3482 target->queue_size = token + SRP_RSP_SQ_SIZE +
3483 SRP_TSK_MGMT_SQ_SIZE;
3484 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3485 target->scsi_host->cmd_per_lun = token;
3486 break;
3487
52fb2b50 3488 case SRP_OPT_MAX_CMD_PER_LUN:
4d73f95f 3489 if (match_int(args, &token) || token < 1) {
e0bda7d8
BVA
3490 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3491 p);
52fb2b50
VP
3492 goto out;
3493 }
4d73f95f 3494 target->scsi_host->cmd_per_lun = token;
52fb2b50
VP
3495 break;
3496
b0780ee5
BVA
3497 case SRP_OPT_TARGET_CAN_QUEUE:
3498 if (match_int(args, &token) || token < 1) {
3499 pr_warn("bad max target_can_queue parameter '%s'\n",
3500 p);
3501 goto out;
3502 }
3503 target->target_can_queue = token;
3504 break;
3505
0c0450db
R
3506 case SRP_OPT_IO_CLASS:
3507 if (match_hex(args, &token)) {
e0bda7d8 3508 pr_warn("bad IO class parameter '%s'\n", p);
0c0450db
R
3509 goto out;
3510 }
3511 if (token != SRP_REV10_IB_IO_CLASS &&
3512 token != SRP_REV16A_IB_IO_CLASS) {
e0bda7d8
BVA
3513 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3514 token, SRP_REV10_IB_IO_CLASS,
3515 SRP_REV16A_IB_IO_CLASS);
0c0450db
R
3516 goto out;
3517 }
3518 target->io_class = token;
3519 break;
3520
01cb9bcb
IR
3521 case SRP_OPT_INITIATOR_EXT:
3522 p = match_strdup(args);
a20f3a6d
IR
3523 if (!p) {
3524 ret = -ENOMEM;
3525 goto out;
3526 }
2a174df0
BVA
3527 ret = kstrtoull(p, 16, &ull);
3528 if (ret) {
3529 pr_warn("bad initiator_ext value '%s'\n", p);
3530 kfree(p);
3531 goto out;
3532 }
3533 target->initiator_ext = cpu_to_be64(ull);
01cb9bcb
IR
3534 kfree(p);
3535 break;
3536
49248644
DD
3537 case SRP_OPT_CMD_SG_ENTRIES:
3538 if (match_int(args, &token) || token < 1 || token > 255) {
e0bda7d8
BVA
3539 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3540 p);
49248644
DD
3541 goto out;
3542 }
3543 target->cmd_sg_cnt = token;
3544 break;
3545
c07d424d
DD
3546 case SRP_OPT_ALLOW_EXT_SG:
3547 if (match_int(args, &token)) {
e0bda7d8 3548 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
c07d424d
DD
3549 goto out;
3550 }
3551 target->allow_ext_sg = !!token;
3552 break;
3553
3554 case SRP_OPT_SG_TABLESIZE:
3555 if (match_int(args, &token) || token < 1 ||
65e8617f 3556 token > SG_MAX_SEGMENTS) {
e0bda7d8
BVA
3557 pr_warn("bad max sg_tablesize parameter '%s'\n",
3558 p);
c07d424d
DD
3559 goto out;
3560 }
3561 target->sg_tablesize = token;
3562 break;
3563
4b5e5f41
BVA
3564 case SRP_OPT_COMP_VECTOR:
3565 if (match_int(args, &token) || token < 0) {
3566 pr_warn("bad comp_vector parameter '%s'\n", p);
3567 goto out;
3568 }
3569 target->comp_vector = token;
3570 break;
3571
7bb312e4
VP
3572 case SRP_OPT_TL_RETRY_COUNT:
3573 if (match_int(args, &token) || token < 2 || token > 7) {
3574 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3575 p);
3576 goto out;
3577 }
3578 target->tl_retry_count = token;
3579 break;
3580
547ed331
HL
3581 case SRP_OPT_MAX_IT_IU_SIZE:
3582 if (match_int(args, &token) || token < 0) {
3583 pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3584 goto out;
3585 }
3586 target->max_it_iu_size = token;
3587 break;
3588
87fee61c
BVA
3589 case SRP_OPT_CH_COUNT:
3590 if (match_int(args, &token) || token < 1) {
3591 pr_warn("bad channel count '%s'\n", p);
3592 goto out;
3593 }
3594 target->ch_count = token;
3595 break;
3596
aef9ec39 3597 default:
e0bda7d8
BVA
3598 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3599 p);
aef9ec39
RD
3600 goto out;
3601 }
3602 }
3603
19f31343
BVA
3604 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3605 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3606 ret = 0;
3607 break;
3608 }
3609 }
3610 if (ret)
3611 pr_warn("target creation request is missing one or more parameters\n");
aef9ec39 3612
4d73f95f
BVA
3613 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3614 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3615 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3616 target->scsi_host->cmd_per_lun,
3617 target->scsi_host->can_queue);
3618
aef9ec39
RD
3619out:
3620 kfree(options);
3621 return ret;
3622}
3623
33e82346
Y
3624static ssize_t add_target_store(struct device *dev,
3625 struct device_attribute *attr, const char *buf,
3626 size_t count)
aef9ec39
RD
3627{
3628 struct srp_host *host =
ee959b00 3629 container_of(dev, struct srp_host, dev);
aef9ec39
RD
3630 struct Scsi_Host *target_host;
3631 struct srp_target_port *target;
509c07bc 3632 struct srp_rdma_ch *ch;
d1b4289e
BVA
3633 struct srp_device *srp_dev = host->srp_dev;
3634 struct ib_device *ibdev = srp_dev->dev;
2b5715fc 3635 int ret, i, ch_idx;
509c5f33 3636 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
d92c0da7 3637 bool multich = false;
513d5647 3638 uint32_t max_iu_len;
aef9ec39
RD
3639
3640 target_host = scsi_host_alloc(&srp_template,
3641 sizeof (struct srp_target_port));
3642 if (!target_host)
3643 return -ENOMEM;
3644
49248644 3645 target_host->transportt = ib_srp_transport_template;
fd1b6c4a
BVA
3646 target_host->max_channel = 0;
3647 target_host->max_id = 1;
985aa495 3648 target_host->max_lun = -1LL;
3c8edf0e 3649 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
0b5cb330 3650 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
5f068992 3651
e945c653 3652 if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
8c175d31
CH
3653 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3654
aef9ec39 3655 target = host_to_target(target_host);
aef9ec39 3656
19f31343 3657 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
49248644
DD
3658 target->io_class = SRP_REV16A_IB_IO_CLASS;
3659 target->scsi_host = target_host;
3660 target->srp_host = host;
e6bf5f48 3661 target->lkey = host->srp_dev->pd->local_dma_lkey;
cee687b6 3662 target->global_rkey = host->srp_dev->global_rkey;
49248644 3663 target->cmd_sg_cnt = cmd_sg_entries;
c07d424d
DD
3664 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3665 target->allow_ext_sg = allow_ext_sg;
7bb312e4 3666 target->tl_retry_count = 7;
4d73f95f 3667 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
aef9ec39 3668
34aa654e
BVA
3669 /*
3670 * Prevent the SCSI host from being removed by srp_remove_target()
3671 * before this function returns.
3672 */
3673 scsi_host_get(target->scsi_host);
3674
4fa354c9
BVA
3675 ret = mutex_lock_interruptible(&host->add_target_mutex);
3676 if (ret < 0)
3677 goto put;
2d7091bc 3678
19f31343 3679 ret = srp_parse_options(target->net, buf, target);
aef9ec39 3680 if (ret)
fb49c8bb 3681 goto out;
aef9ec39 3682
96fc248a 3683 if (!srp_conn_unique(target->srp_host, target)) {
19f31343 3684 if (target->using_rdma_cm) {
19f31343 3685 shost_printk(KERN_INFO, target->scsi_host,
7da09af9 3686 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
19f31343
BVA
3687 be64_to_cpu(target->id_ext),
3688 be64_to_cpu(target->ioc_guid),
7da09af9 3689 &target->rdma_cm.dst);
19f31343
BVA
3690 } else {
3691 shost_printk(KERN_INFO, target->scsi_host,
3692 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3693 be64_to_cpu(target->id_ext),
3694 be64_to_cpu(target->ioc_guid),
3695 be64_to_cpu(target->initiator_ext));
3696 }
96fc248a 3697 ret = -EEXIST;
fb49c8bb 3698 goto out;
96fc248a
BVA
3699 }
3700
f273ad4f 3701 if (!srp_dev->has_fr && !target->allow_ext_sg &&
d1b4289e 3702 target->cmd_sg_cnt < target->sg_tablesize) {
5cfb1782 3703 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
c07d424d
DD
3704 target->sg_tablesize = target->cmd_sg_cnt;
3705 }
3706
f273ad4f 3707 if (srp_dev->use_fast_reg) {
e945c653
JG
3708 bool gaps_reg = ibdev->attrs.kernel_cap_flags &
3709 IBK_SG_GAPS_REG;
fbd36818 3710
509c5f33
BVA
3711 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3712 (ilog2(srp_dev->mr_page_size) - 9);
fbd36818
SG
3713 if (!gaps_reg) {
3714 /*
f273ad4f
MG
3715 * FR can only map one HCA page per entry. If the start
3716 * address is not aligned on a HCA page boundary, two
3717 * entries will be used for the head and the tail
3718 * although these two entries combined contain at most
3719 * one HCA page of data. Hence the "+ 1" in the
3720 * calculation below.
fbd36818
SG
3721 *
3722 * The indirect data buffer descriptor is contiguous
3723 * so the memory for that buffer will only be
3724 * registered if register_always is true. Hence add
3725 * one to mr_per_cmd if register_always has been set.
3726 */
3727 mr_per_cmd = register_always +
3728 (target->scsi_host->max_sectors + 1 +
3729 max_sectors_per_mr - 1) / max_sectors_per_mr;
3730 } else {
3731 mr_per_cmd = register_always +
3732 (target->sg_tablesize +
3733 srp_dev->max_pages_per_mr - 1) /
3734 srp_dev->max_pages_per_mr;
3735 }
509c5f33 3736 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
fbd36818 3737 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
509c5f33
BVA
3738 max_sectors_per_mr, mr_per_cmd);
3739 }
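/*
 * Worked example (illustrative numbers only): with a 4 KiB mr_page_size
 * and max_pages_per_mr = 256, max_sectors_per_mr = 256 << (12 - 9) = 2048.
 * For max_sectors = 1024 with register_always enabled, the !gaps_reg
 * branch above yields mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
 */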
3740
c07d424d 3741 target_host->sg_tablesize = target->sg_tablesize;
509c5f33
BVA
3742 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3743 target->mr_per_cmd = mr_per_cmd;
c07d424d
DD
3744 target->indirect_size = target->sg_tablesize *
3745 sizeof (struct srp_direct_buf);
b2e872f4
HL
3746 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3747 srp_use_imm_data,
3748 target->max_it_iu_size);
49248644 3749
c1120f89 3750 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
ef6c49d8 3751 INIT_WORK(&target->remove_work, srp_remove_work);
8f26c9ff 3752 spin_lock_init(&target->lock);
1dfce294 3753 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
2088ca66 3754 if (ret)
fb49c8bb 3755 goto out;
aef9ec39 3756
d92c0da7 3757 ret = -ENOMEM;
2b5715fc 3758 if (target->ch_count == 0) {
87fee61c 3759 target->ch_count =
2b5715fc
NMC
3760 min(ch_count ?:
3761 max(4 * num_online_nodes(),
3762 ibdev->num_comp_vectors),
3763 num_online_cpus());
3764 }
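/*
 * Example (illustrative numbers): when neither the ch_count module
 * parameter nor the ch_count login option is set, a 2-node, 32-CPU system
 * whose HCA exposes 16 completion vectors gets
 * ch_count = min(max(4 * 2, 16), 32) = 16 channels.
 */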
3765
d92c0da7
BVA
3766 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3767 GFP_KERNEL);
3768 if (!target->ch)
fb49c8bb 3769 goto out;
aef9ec39 3770
2b5715fc
NMC
3771 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3772 ch = &target->ch[ch_idx];
3773 ch->target = target;
3774 ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3775 spin_lock_init(&ch->lock);
3776 INIT_LIST_HEAD(&ch->free_tx);
3777 ret = srp_new_cm_id(ch);
3778 if (ret)
3779 goto err_disconnect;
aef9ec39 3780
2b5715fc
NMC
3781 ret = srp_create_ch_ib(ch);
3782 if (ret)
3783 goto err_disconnect;
d92c0da7 3784
2b5715fc
NMC
3785 ret = srp_connect_ch(ch, max_iu_len, multich);
3786 if (ret) {
3787 char dst[64];
d92c0da7 3788
2b5715fc
NMC
3789 if (target->using_rdma_cm)
3790 snprintf(dst, sizeof(dst), "%pIS",
3791 &target->rdma_cm.dst);
3792 else
3793 snprintf(dst, sizeof(dst), "%pI6",
3794 target->ib_cm.orig_dgid.raw);
3795 shost_printk(KERN_ERR, target->scsi_host,
3796 PFX "Connection %d/%d to %s failed\n",
3797 ch_idx,
3798 target->ch_count, dst);
3799 if (ch_idx == 0) {
3800 goto free_ch;
3801 } else {
3802 srp_free_ch_ib(target, ch);
2b5715fc
NMC
3803 target->ch_count = ch - target->ch;
3804 goto connected;
3805 }
d92c0da7 3806 }
2b5715fc 3807 multich = true;
aef9ec39
RD
3808 }
3809
c257ea6f 3810connected:
d92c0da7
BVA
3811 target->scsi_host->nr_hw_queues = target->ch_count;
3812
aef9ec39
RD
3813 ret = srp_add_target(host, target);
3814 if (ret)
3815 goto err_disconnect;
3816
34aa654e 3817 if (target->state != SRP_TARGET_REMOVED) {
19f31343 3818 if (target->using_rdma_cm) {
19f31343 3819 shost_printk(KERN_DEBUG, target->scsi_host, PFX
7da09af9 3820 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
19f31343
BVA
3821 be64_to_cpu(target->id_ext),
3822 be64_to_cpu(target->ioc_guid),
7da09af9 3823 target->sgid.raw, &target->rdma_cm.dst);
19f31343
BVA
3824 } else {
3825 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3826 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3827 be64_to_cpu(target->id_ext),
3828 be64_to_cpu(target->ioc_guid),
3829 be16_to_cpu(target->ib_cm.pkey),
3830 be64_to_cpu(target->ib_cm.service_id),
3831 target->sgid.raw,
3832 target->ib_cm.orig_dgid.raw);
3833 }
34aa654e 3834 }
e7ffde01 3835
2d7091bc
BVA
3836 ret = count;
3837
3838out:
3839 mutex_unlock(&host->add_target_mutex);
34aa654e 3840
4fa354c9 3841put:
34aa654e 3842 scsi_host_put(target->scsi_host);
19f31343
BVA
3843 if (ret < 0) {
3844 /*
3845 * If a call to srp_remove_target() has not been scheduled,
3846 * drop the network namespace reference that was obtained
3847 * earlier in this function.
3848 */
3849 if (target->state != SRP_TARGET_REMOVED)
3850 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
bc44bd1d 3851 scsi_host_put(target->scsi_host);
19f31343 3852 }
34aa654e 3853
2d7091bc 3854 return ret;
aef9ec39
RD
3855
3856err_disconnect:
3857 srp_disconnect_target(target);
3858
b02c1536 3859free_ch:
d92c0da7
BVA
3860 for (i = 0; i < target->ch_count; i++) {
3861 ch = &target->ch[i];
3862 srp_free_ch_ib(target, ch);
d92c0da7 3863 }
aef9ec39 3864
d92c0da7 3865 kfree(target->ch);
2d7091bc 3866 goto out;
aef9ec39
RD
3867}
3868
33e82346 3869static DEVICE_ATTR_WO(add_target);
aef9ec39 3870
33e82346 3871static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
ee959b00 3872 char *buf)
aef9ec39 3873{
ee959b00 3874 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3875
1c7fd726 3876 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
aef9ec39
RD
3877}
3878
33e82346 3879static DEVICE_ATTR_RO(ibdev);
aef9ec39 3880
33e82346 3881static ssize_t port_show(struct device *dev, struct device_attribute *attr,
ee959b00 3882 char *buf)
aef9ec39 3883{
ee959b00 3884 struct srp_host *host = container_of(dev, struct srp_host, dev);
aef9ec39 3885
1c7fd726 3886 return sysfs_emit(buf, "%d\n", host->port);
aef9ec39
RD
3887}
3888
33e82346 3889static DEVICE_ATTR_RO(port);
aef9ec39 3890
f5358a17 3891static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
3892{
3893 struct srp_host *host;
3894
3895 host = kzalloc(sizeof *host, GFP_KERNEL);
3896 if (!host)
3897 return NULL;
3898
3899 INIT_LIST_HEAD(&host->target_list);
b3589fd4 3900 spin_lock_init(&host->target_lock);
aef9ec39 3901 init_completion(&host->released);
2d7091bc 3902 mutex_init(&host->add_target_mutex);
05321937 3903 host->srp_dev = device;
aef9ec39
RD
3904 host->port = port;
3905
ee959b00 3906 host->dev.class = &srp_class;
dee2b82a 3907 host->dev.parent = device->dev->dev.parent;
6c854111
JG
3908 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3909 port);
aef9ec39 3910
ee959b00 3911 if (device_register(&host->dev))
f5358a17 3912 goto free_host;
ee959b00 3913 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 3914 goto err_class;
ee959b00 3915 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 3916 goto err_class;
ee959b00 3917 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
3918 goto err_class;
3919
3920 return host;
3921
3922err_class:
ee959b00 3923 device_unregister(&host->dev);
aef9ec39 3924
f5358a17 3925free_host:
aef9ec39
RD
3926 kfree(host);
3927
3928 return NULL;
3929}
3930
dc1435c0
LR
3931static void srp_rename_dev(struct ib_device *device, void *client_data)
3932{
3933 struct srp_device *srp_dev = client_data;
3934 struct srp_host *host, *tmp_host;
3935
3936 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3937 char name[IB_DEVICE_NAME_MAX + 8];
3938
3939 snprintf(name, sizeof(name), "srp-%s-%d",
3940 dev_name(&device->dev), host->port);
3941 device_rename(&host->dev, name);
3942 }
3943}
3944
11a0ae4c 3945static int srp_add_one(struct ib_device *device)
aef9ec39 3946{
f5358a17 3947 struct srp_device *srp_dev;
042dd765 3948 struct ib_device_attr *attr = &device->attrs;
aef9ec39 3949 struct srp_host *host;
ea1075ed
JG
3950 int mr_page_shift;
3951 unsigned int p;
52ede08f 3952 u64 max_pages_per_mr;
5f071777 3953 unsigned int flags = 0;
aef9ec39 3954
249f0656 3955 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
f5358a17 3956 if (!srp_dev)
11a0ae4c 3957 return -ENOMEM;
f5358a17
RD
3958
3959 /*
3960 * Use the smallest page size supported by the HCA, down to a
8f26c9ff
DD
3961 * minimum of 4096 bytes. We're unlikely to build large sglists
3962 * out of smaller entries.
f5358a17 3963 */
042dd765 3964 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
52ede08f
BVA
3965 srp_dev->mr_page_size = 1 << mr_page_shift;
3966 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
042dd765 3967 max_pages_per_mr = attr->max_mr_size;
52ede08f 3968 do_div(max_pages_per_mr, srp_dev->mr_page_size);
509c5f33 3969 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
042dd765 3970 attr->max_mr_size, srp_dev->mr_page_size,
509c5f33 3971 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
52ede08f
BVA
3972 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3973 max_pages_per_mr);
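/*
 * Example (illustrative numbers): an HCA whose smallest supported page
 * size is 4 KiB (lowest set bit of page_size_cap is bit 12) gives
 * mr_page_shift = max(12, 13 - 1) = 12, i.e. mr_page_size = 4096 and
 * mr_page_mask = ~0xfffULL; with max_mr_size = 4 GiB,
 * max_pages_per_mr = 2^32 / 4096 = 1048576 before the
 * SRP_MAX_PAGES_PER_MR clamp above.
 */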
835ee624 3974
042dd765 3975 srp_dev->has_fr = (attr->device_cap_flags &
835ee624 3976 IB_DEVICE_MEM_MGT_EXTENSIONS);
f273ad4f
MG
3977 if (!never_register && !srp_dev->has_fr)
3978 dev_warn(&device->dev, "FR is not supported\n");
3979 else if (!never_register &&
3980 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
3981 srp_dev->use_fast_reg = srp_dev->has_fr;
835ee624 3982
f273ad4f 3983 if (never_register || !register_always || !srp_dev->has_fr)
5f071777
CH
3984 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3985
5cfb1782
BVA
3986 if (srp_dev->use_fast_reg) {
3987 srp_dev->max_pages_per_mr =
3988 min_t(u32, srp_dev->max_pages_per_mr,
042dd765 3989 attr->max_fast_reg_page_list_len);
5cfb1782 3990 }
52ede08f
BVA
3991 srp_dev->mr_max_size = srp_dev->mr_page_size *
3992 srp_dev->max_pages_per_mr;
4a061b28 3993 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
6c854111 3994 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
042dd765 3995 attr->max_fast_reg_page_list_len,
52ede08f 3996 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
f5358a17
RD
3997
3998 INIT_LIST_HEAD(&srp_dev->dev_list);
3999
4000 srp_dev->dev = device;
5f071777 4001 srp_dev->pd = ib_alloc_pd(device, flags);
11a0ae4c
JG
4002 if (IS_ERR(srp_dev->pd)) {
4003 int ret = PTR_ERR(srp_dev->pd);
4004
4005 kfree(srp_dev);
4006 return ret;
4007 }
f5358a17 4008
cee687b6
BVA
4009 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4010 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4011 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4012 }
f5358a17 4013
ea1075ed 4014 rdma_for_each_port (device, p) {
f5358a17 4015 host = srp_add_port(srp_dev, p);
aef9ec39 4016 if (host)
f5358a17 4017 list_add_tail(&host->list, &srp_dev->dev_list);
aef9ec39
RD
4018 }
4019
f5358a17 4020 ib_set_client_data(device, &srp_client, srp_dev);
11a0ae4c 4021 return 0;
aef9ec39
RD
4022}
4023
7c1eb45a 4024static void srp_remove_one(struct ib_device *device, void *client_data)
aef9ec39 4025{
f5358a17 4026 struct srp_device *srp_dev;
aef9ec39 4027 struct srp_host *host, *tmp_host;
ef6c49d8 4028 struct srp_target_port *target;
aef9ec39 4029
7c1eb45a 4030 srp_dev = client_data;
aef9ec39 4031
f5358a17 4032 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
ee959b00 4033 device_unregister(&host->dev);
aef9ec39
RD
4034 /*
4035 * Wait for the sysfs entry to go away, so that no new
4036 * target ports can be created.
4037 */
4038 wait_for_completion(&host->released);
4039
4040 /*
ef6c49d8 4041 * Remove all target ports.
aef9ec39 4042 */
b3589fd4 4043 spin_lock(&host->target_lock);
ef6c49d8
BVA
4044 list_for_each_entry(target, &host->target_list, list)
4045 srp_queue_remove_work(target);
b3589fd4 4046 spin_unlock(&host->target_lock);
aef9ec39
RD
4047
4048 /*
081bdc9f
BVA
4049 * srp_queue_remove_work() queues a call to
4050 * srp_remove_target(). The latter function cancels
4051 * target->tl_err_work so waiting for the remove works to
4052 * finish is sufficient.
aef9ec39 4053 */
bcc05910 4054 flush_workqueue(srp_remove_wq);
aef9ec39 4055
aef9ec39
RD
4056 kfree(host);
4057 }
4058
f5358a17
RD
4059 ib_dealloc_pd(srp_dev->pd);
4060
4061 kfree(srp_dev);
aef9ec39
RD
4062}
4063
3236822b 4064static struct srp_function_template ib_srp_transport_functions = {
ed9b2264
BVA
4065 .has_rport_state = true,
4066 .reset_timer_if_blocked = true,
a95cadb9 4067 .reconnect_delay = &srp_reconnect_delay,
ed9b2264
BVA
4068 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4069 .dev_loss_tmo = &srp_dev_loss_tmo,
4070 .reconnect = srp_rport_reconnect,
dc1bdbd9 4071 .rport_delete = srp_rport_delete,
ed9b2264 4072 .terminate_rport_io = srp_terminate_io,
3236822b
FT
4073};
4074
aef9ec39
RD
4075static int __init srp_init_module(void)
4076{
4077 int ret;
4078
c838de1a
BVA
4079 BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4080 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
16d14e01 4081 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
c838de1a 4082 BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
16d14e01
BVA
4083 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4084 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
c838de1a 4085 BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
16d14e01 4086
49248644 4087 if (srp_sg_tablesize) {
e0bda7d8 4088 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
49248644
DD
4089 if (!cmd_sg_entries)
4090 cmd_sg_entries = srp_sg_tablesize;
4091 }
4092
4093 if (!cmd_sg_entries)
4094 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4095
4096 if (cmd_sg_entries > 255) {
e0bda7d8 4097 pr_warn("Clamping cmd_sg_entries to 255\n");
49248644 4098 cmd_sg_entries = 255;
1e89a194
DD
4099 }
4100
c07d424d
DD
4101 if (!indirect_sg_entries)
4102 indirect_sg_entries = cmd_sg_entries;
4103 else if (indirect_sg_entries < cmd_sg_entries) {
e0bda7d8
BVA
4104 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4105 cmd_sg_entries);
c07d424d
DD
4106 indirect_sg_entries = cmd_sg_entries;
4107 }
4108
0a475ef4
IR
4109 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4110 pr_warn("Clamping indirect_sg_entries to %u\n",
4111 SG_MAX_SEGMENTS);
4112 indirect_sg_entries = SG_MAX_SEGMENTS;
4113 }
4114
bcc05910 4115 srp_remove_wq = create_workqueue("srp_remove");
da05be29
WY
4116 if (!srp_remove_wq) {
4117 ret = -ENOMEM;
bcc05910
BVA
4118 goto out;
4119 }
4120
4121 ret = -ENOMEM;
3236822b
FT
4122 ib_srp_transport_template =
4123 srp_attach_transport(&ib_srp_transport_functions);
4124 if (!ib_srp_transport_template)
bcc05910 4125 goto destroy_wq;
3236822b 4126
aef9ec39
RD
4127 ret = class_register(&srp_class);
4128 if (ret) {
e0bda7d8 4129 pr_err("couldn't register class infiniband_srp\n");
bcc05910 4130 goto release_tr;
aef9ec39
RD
4131 }
4132
c1a0b23b
MT
4133 ib_sa_register_client(&srp_sa_client);
4134
aef9ec39
RD
4135 ret = ib_register_client(&srp_client);
4136 if (ret) {
e0bda7d8 4137 pr_err("couldn't register IB client\n");
bcc05910 4138 goto unreg_sa;
aef9ec39
RD
4139 }
4140
bcc05910
BVA
4141out:
4142 return ret;
4143
4144unreg_sa:
4145 ib_sa_unregister_client(&srp_sa_client);
4146 class_unregister(&srp_class);
4147
4148release_tr:
4149 srp_release_transport(ib_srp_transport_template);
4150
4151destroy_wq:
4152 destroy_workqueue(srp_remove_wq);
4153 goto out;
aef9ec39
RD
4154}
4155
4156static void __exit srp_cleanup_module(void)
4157{
4158 ib_unregister_client(&srp_client);
c1a0b23b 4159 ib_sa_unregister_client(&srp_sa_client);
aef9ec39 4160 class_unregister(&srp_class);
3236822b 4161 srp_release_transport(ib_srp_transport_template);
bcc05910 4162 destroy_workqueue(srp_remove_wq);
aef9ec39
RD
4163}
4164
4165module_init(srp_init_module);
4166module_exit(srp_cleanup_module);