1 From: Hannes Reinecke <hare@suse.de>
2 Date: Wed, 17 Sep 2008 16:24:37 +0200
3 Subject: libfc: a modular software Fibre Channel implementation
4 References: FATE#303913
5
6 Signed-off-by: Robert Love <robert.w.love@intel.com>
7 Signed-off-by: Chris Leech <christopher.leech@intel.com>
8 Signed-off-by: Vasu Dev <vasu.dev@intel.com>
9 Signed-off-by: Yi Zou <yi.zou@intel.com>
10 Signed-off-by: Steve Ma <steve.ma@intel.com>
11 Signed-off-by: Hannes Reinecke <hare@suse.de>
12 ---
13 drivers/scsi/Kconfig | 6 +
14 drivers/scsi/Makefile | 1 +
15 drivers/scsi/libfc/Makefile | 12 +
16 drivers/scsi/libfc/fc_attr.c | 129 +++
17 drivers/scsi/libfc/fc_exch.c | 2028 ++++++++++++++++++++++++++++++++++++++
18 drivers/scsi/libfc/fc_fcp.c | 2173 +++++++++++++++++++++++++++++++++++++++++
19 drivers/scsi/libfc/fc_frame.c | 88 ++
20 drivers/scsi/libfc/fc_lport.c | 926 ++++++++++++++++++
21 drivers/scsi/libfc/fc_ns.c | 1283 ++++++++++++++++++++++++
22 drivers/scsi/libfc/fc_rport.c | 1301 ++++++++++++++++++++++++
23 10 files changed, 7947 insertions(+), 0 deletions(-)
24 create mode 100644 drivers/scsi/libfc/Makefile
25 create mode 100644 drivers/scsi/libfc/fc_attr.c
26 create mode 100644 drivers/scsi/libfc/fc_exch.c
27 create mode 100644 drivers/scsi/libfc/fc_fcp.c
28 create mode 100644 drivers/scsi/libfc/fc_frame.c
29 create mode 100644 drivers/scsi/libfc/fc_lport.c
30 create mode 100644 drivers/scsi/libfc/fc_ns.c
31 create mode 100644 drivers/scsi/libfc/fc_rport.c
32
33 diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
34 index 4e0322b..bd480d2 100644
35 --- a/drivers/scsi/Kconfig
36 +++ b/drivers/scsi/Kconfig
37 @@ -328,6 +328,12 @@ menuconfig SCSI_LOWLEVEL
38
39 if SCSI_LOWLEVEL && SCSI
40
41 +config LIBFC
42 + tristate "LibFC module"
43 + depends on SCSI && SCSI_FC_ATTRS
44 + ---help---
45 + Fibre Channel library module
46 +
47 config ISCSI_TCP
48 tristate "iSCSI Initiator over TCP/IP"
49 depends on SCSI && INET
50 diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
51 index 72fd504..9158dc6 100644
52 --- a/drivers/scsi/Makefile
53 +++ b/drivers/scsi/Makefile
54 @@ -36,6 +36,7 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
55 obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
56 obj-$(CONFIG_SCSI_DH) += device_handler/
57
58 +obj-$(CONFIG_LIBFC) += libfc/
59 obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
60 obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
61 obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
62 diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
63 new file mode 100644
64 index 0000000..0a31ca2
65 --- /dev/null
66 +++ b/drivers/scsi/libfc/Makefile
67 @@ -0,0 +1,12 @@
68 +# $Id: Makefile
69 +
70 +obj-$(CONFIG_LIBFC) += libfc.o
71 +
72 +libfc-objs := \
73 + fc_ns.o \
74 + fc_exch.o \
75 + fc_frame.o \
76 + fc_lport.o \
77 + fc_rport.o \
78 + fc_attr.o \
79 + fc_fcp.o
80 diff --git a/drivers/scsi/libfc/fc_attr.c b/drivers/scsi/libfc/fc_attr.c
81 new file mode 100644
82 index 0000000..d73f39e
83 --- /dev/null
84 +++ b/drivers/scsi/libfc/fc_attr.c
85 @@ -0,0 +1,129 @@
86 +/*
87 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
88 + *
89 + * This program is free software; you can redistribute it and/or modify it
90 + * under the terms and conditions of the GNU General Public License,
91 + * version 2, as published by the Free Software Foundation.
92 + *
93 + * This program is distributed in the hope it will be useful, but WITHOUT
94 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
95 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
96 + * more details.
97 + *
98 + * You should have received a copy of the GNU General Public License along with
99 + * this program; if not, write to the Free Software Foundation, Inc.,
100 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
101 + *
102 + * Maintained at www.Open-FCoE.org
103 + */
104 +
105 +#include <linux/kernel.h>
106 +#include <linux/types.h>
107 +
108 +#include <scsi/scsi_host.h>
109 +
110 +#include <scsi/libfc/libfc.h>
111 +
112 +MODULE_AUTHOR("Open-FCoE.org");
113 +MODULE_DESCRIPTION("libfc");
114 +MODULE_LICENSE("GPL");
115 +
116 +void fc_get_host_port_id(struct Scsi_Host *shost)
117 +{
118 + struct fc_lport *lp = shost_priv(shost);
119 +
120 + fc_host_port_id(shost) = fc_lport_get_fid(lp);
121 +}
122 +EXPORT_SYMBOL(fc_get_host_port_id);
123 +
124 +void fc_get_host_speed(struct Scsi_Host *shost)
125 +{
126 +	/*
127 +	 * This should be obtained from the DCE or Ethernet driver.
128 +	 */
129 +	fc_host_speed(shost) = 1;	/* for now it is 1G */
130 +}
131 +EXPORT_SYMBOL(fc_get_host_speed);
132 +
133 +void fc_get_host_port_type(struct Scsi_Host *shost)
134 +{
135 + fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
136 +}
137 +EXPORT_SYMBOL(fc_get_host_port_type);
138 +
139 +void fc_get_host_fabric_name(struct Scsi_Host *shost)
140 +{
141 + struct fc_lport *lp = shost_priv(shost);
142 +
143 + fc_host_fabric_name(shost) = lp->wwnn;
144 +}
145 +EXPORT_SYMBOL(fc_get_host_fabric_name);
146 +
147 +void fc_attr_init(struct fc_lport *lp)
148 +{
149 + fc_host_node_name(lp->host) = lp->wwnn;
150 + fc_host_port_name(lp->host) = lp->wwpn;
151 + fc_host_supported_classes(lp->host) = FC_COS_CLASS3;
152 + memset(fc_host_supported_fc4s(lp->host), 0,
153 + sizeof(fc_host_supported_fc4s(lp->host)));
154 + fc_host_supported_fc4s(lp->host)[2] = 1;
155 + fc_host_supported_fc4s(lp->host)[7] = 1;
156 + /* This value is also unchanging */
157 + memset(fc_host_active_fc4s(lp->host), 0,
158 + sizeof(fc_host_active_fc4s(lp->host)));
159 + fc_host_active_fc4s(lp->host)[2] = 1;
160 + fc_host_active_fc4s(lp->host)[7] = 1;
161 + fc_host_maxframe_size(lp->host) = lp->mfs;
162 +}
163 +EXPORT_SYMBOL(fc_attr_init);
164 +
165 +void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
166 +{
167 + if (timeout)
168 + rport->dev_loss_tmo = timeout + 5;
169 + else
170 + rport->dev_loss_tmo = 30;
171 +
172 +}
173 +EXPORT_SYMBOL(fc_set_rport_loss_tmo);
174 +
175 +struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
176 +{
177 + int i;
178 + struct fc_host_statistics *fcoe_stats;
179 + struct fc_lport *lp = shost_priv(shost);
180 + struct timespec v0, v1;
181 +
182 + fcoe_stats = &lp->host_stats;
183 + memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
184 +
185 + jiffies_to_timespec(jiffies, &v0);
186 + jiffies_to_timespec(lp->boot_time, &v1);
187 + fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
188 +
189 + for_each_online_cpu(i) {
190 + struct fcoe_dev_stats *stats = lp->dev_stats[i];
191 + if (stats == NULL)
192 + continue;
193 + fcoe_stats->tx_frames += stats->TxFrames;
194 + fcoe_stats->tx_words += stats->TxWords;
195 + fcoe_stats->rx_frames += stats->RxFrames;
196 + fcoe_stats->rx_words += stats->RxWords;
197 + fcoe_stats->error_frames += stats->ErrorFrames;
198 + fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
199 + fcoe_stats->fcp_input_requests += stats->InputRequests;
200 + fcoe_stats->fcp_output_requests += stats->OutputRequests;
201 + fcoe_stats->fcp_control_requests += stats->ControlRequests;
202 + fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
203 + fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
204 + fcoe_stats->link_failure_count += stats->LinkFailureCount;
205 + }
206 + fcoe_stats->lip_count = -1;
207 + fcoe_stats->nos_count = -1;
208 + fcoe_stats->loss_of_sync_count = -1;
209 + fcoe_stats->loss_of_signal_count = -1;
210 + fcoe_stats->prim_seq_protocol_err_count = -1;
211 + fcoe_stats->dumped_frames = -1;
212 + return fcoe_stats;
213 +}
214 +EXPORT_SYMBOL(fc_get_host_stats);
215 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
216 new file mode 100644
217 index 0000000..11a03bd
218 --- /dev/null
219 +++ b/drivers/scsi/libfc/fc_exch.c
220 @@ -0,0 +1,2028 @@
221 +/*
222 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
223 + *
224 + * This program is free software; you can redistribute it and/or modify it
225 + * under the terms and conditions of the GNU General Public License,
226 + * version 2, as published by the Free Software Foundation.
227 + *
228 + * This program is distributed in the hope it will be useful, but WITHOUT
229 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
230 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
231 + * more details.
232 + *
233 + * You should have received a copy of the GNU General Public License along with
234 + * this program; if not, write to the Free Software Foundation, Inc.,
235 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
236 + *
237 + * Maintained at www.Open-FCoE.org
238 + */
239 +
240 +/*
241 + * Fibre Channel exchange and sequence handling.
242 + */
243 +
244 +#include <linux/timer.h>
245 +#include <linux/gfp.h>
246 +#include <linux/err.h>
247 +
248 +#include <scsi/fc/fc_fc2.h>
249 +
250 +#include <scsi/libfc/libfc.h>
251 +
252 +#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
253 +
254 +/*
255 + * fc_exch_debug can be set in debugger or at compile time to get more logs.
256 + */
257 +static int fc_exch_debug;
258 +static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
259 +
260 +/*
261 + * Structure and function definitions for managing Fibre Channel Exchanges
262 + * and Sequences.
263 + *
264 + * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
265 + *
266 + * fc_exch_mgr holds the exchange state for an N port
267 + *
268 + * fc_exch holds state for one exchange and links to its active sequence.
269 + *
270 + * fc_seq holds the state for an individual sequence.
271 + */
272 +
273 +/*
274 + * Sequence.
275 + */
276 +struct fc_seq {
277 + u8 id; /* seq ID */
278 + u16 ssb_stat; /* status flags for sequence status block */
279 + u16 cnt; /* frames sent so far on sequence */
280 + u32 f_ctl; /* F_CTL flags for frames */
281 + u32 rec_data; /* FC-4 value for REC */
282 +};
283 +
284 +struct fc_exch;
285 +
286 +#define FC_EX_DONE (1 << 0) /* ep is completed */
287 +#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */
288 +
289 +/*
290 + * Exchange.
291 + *
292 + * Locking notes: The ex_lock protects changes to the following fields:
293 + *	esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl,
294 + *	seq_id, and
295 + *	sequence allocation.
296 + */
297 +struct fc_exch {
298 + struct fc_exch_mgr *em; /* exchange manager */
299 + u32 state; /* internal driver state */
300 + u16 xid; /* our exchange ID */
301 + struct list_head ex_list; /* free or busy list linkage */
302 + spinlock_t ex_lock; /* lock covering exchange state */
303 + atomic_t ex_refcnt; /* reference counter */
304 + struct timer_list ex_timer; /* timer for upper level protocols */
305 + struct fc_lport *lp; /* fc device instance */
306 + u16 oxid; /* originator's exchange ID */
307 + u16 rxid; /* responder's exchange ID */
308 + u32 oid; /* originator's FCID */
309 + u32 sid; /* source FCID */
310 + u32 did; /* destination FCID */
311 + u32 esb_stat; /* exchange status for ESB */
312 + u32 r_a_tov; /* r_a_tov from rport (msec) */
313 + u8 seq_id; /* next sequence ID to use */
314 + u32 f_ctl; /* F_CTL flags for sequences */
315 + u8 fh_type; /* frame type */
316 + enum fc_class class; /* class of service */
317 + struct fc_seq seq; /* single sequence */
318 + struct fc_exch *aborted_ep; /* ref to ep rrq is cleaning up */
319 +
320 + /*
321 + * Handler for responses to this current exchange.
322 + */
323 + void (*resp)(struct fc_seq *, struct fc_frame *, void *);
324 + void *resp_arg; /* 3rd arg for exchange resp handler */
325 +};
326 +
327 +/*
328 + * Exchange manager.
329 + *
330 + * This structure is the center for creating exchanges and sequences.
331 + * It manages the allocation of exchange IDs.
332 + */
333 +struct fc_exch_mgr {
334 + enum fc_class class; /* default class for sequences */
335 + spinlock_t em_lock; /* exchange manager lock */
336 + u16 last_xid; /* last allocated exchange ID */
337 + u16 min_xid; /* min exchange ID */
338 + u16 max_xid; /* max exchange ID */
339 + u32 total_exches; /* total allocated exchanges */
340 + struct list_head ex_list; /* allocated exchanges list */
341 + struct fc_lport *lp; /* fc device instance */
342 + mempool_t *ep_pool; /* reserve ep's */
343 +
344 + /*
345 +	 * Currently the exchange mgr stats are updated but not used.
346 +	 * The stats can either be exposed via sysfs or removed
347 +	 * altogether if they remain unused. XXX
348 + */
349 + struct {
350 + atomic_t no_free_exch;
351 + atomic_t no_free_exch_xid;
352 + atomic_t xid_not_found;
353 + atomic_t xid_busy;
354 + atomic_t seq_not_found;
355 + atomic_t non_bls_resp;
356 + } stats;
357 + struct fc_exch **exches; /* for exch pointers indexed by xid */
358 +};
359 +
360 +#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
361 +#define fc_exch_next_xid(mp, id) (((id) == (mp)->max_xid) ? (mp)->min_xid : (id) + 1)
362 +
363 +static void fc_exch_rrq(struct fc_exch *);
364 +static void fc_seq_ls_acc(struct fc_seq *);
365 +static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
366 + enum fc_els_rjt_explan);
367 +static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
368 +static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
369 +static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
370 +
371 +/*
372 + * Internal implementation notes.
373 + *
374 + * There is one exchange manager by default in libfc, but an LLD may
375 + * choose to have one per CPU. There is one sequence manager per exchange
376 + * manager, and the two are currently never separated.
377 + *
378 + * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
379 + * assigned by the Sequence Initiator that shall be unique for a specific
380 + * D_ID and S_ID pair while the Sequence is open." Note that it isn't
381 + * qualified by exchange ID, which one might think it would be.
382 + * In practice this limits the number of open sequences and exchanges to 256
383 + * per session. For most targets we could treat this limit as per exchange.
384 + *
385 + * The exchange and its sequence are freed when the last sequence is received.
386 + * It's possible for the remote port to leave an exchange open without
387 + * sending any sequences.
388 + *
389 + * Notes on reference counts:
390 + *
391 + * Exchanges are reference counted and an exchange is freed when its
392 + * reference count becomes zero.
393 + *
394 + * Timeouts:
395 + * Sequences are timed out for E_D_TOV and R_A_TOV.
396 + *
397 + * Sequence event handling:
398 + *
399 + * The following events may occur on initiator sequences:
400 + *
401 + * Send.
402 + * For now, the whole thing is sent.
403 + * Receive ACK
404 + * This applies only to class F.
405 + * The sequence is marked complete.
406 + * ULP completion.
407 + * The upper layer calls fc_exch_done() when done
408 + * with exchange and sequence tuple.
409 + * RX-inferred completion.
410 + * When we receive the next sequence on the same exchange, we can
411 + * retire the previous sequence ID. (XXX not implemented).
412 + * Timeout.
413 + * R_A_TOV frees the sequence ID. If we're waiting for ACK,
414 + * E_D_TOV causes abort and calls upper layer response handler
415 + * with FC_EX_TIMEOUT error.
416 + * Receive RJT
417 + * XXX defer.
418 + * Send ABTS
419 + * On timeout.
420 + *
421 + * The following events may occur on recipient sequences:
422 + *
423 + * Receive
424 + * Allocate sequence for first frame received.
425 + * Hold during receive handler.
426 + * Release when final frame received.
427 + * Keep status of last N of these for the ELS RES command. XXX TBD.
428 + * Receive ABTS
429 + * Deallocate sequence
430 + * Send RJT
431 + * Deallocate
432 + *
433 + * For now, we neglect conditions where only part of a sequence was
434 + * received or transmitted, or where out-of-order receipt is detected.
435 + */
436 +
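
/*
 * A minimal sketch of the hold/release discipline described above. The
 * function below is illustrative only and not part of the original code;
 * it assumes fc_exch_hold(), fc_exch_timer_set() and fc_exch_release()
 * (all defined later in this file) are in scope, and the 2-second timeout
 * is an arbitrary example. Each asynchronous user of an exchange - the
 * allocating caller, a pending timer, a recovery qualifier - owns one
 * hold; the exchange is freed only when the last hold is dropped.
 */
static void example_exch_refcounting(struct fc_exch *ep)
{
	fc_exch_hold(ep);			/* take our own reference */
	fc_exch_timer_set(ep, 2 * 1000);	/* timer path takes its own hold */
	/*
	 * ... when the timer fires, fc_exch_timeout() drops the timer's
	 * hold via fc_exch_release().
	 */
	fc_exch_release(ep);	/* drop ours; ep is freed at refcnt zero */
}
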
437 +/*
438 + * Locking notes:
439 + *
440 + * The EM code runs in a per-CPU worker thread.
441 + *
442 + * To protect against concurrency between worker thread code and timers,
443 + * sequence allocation and deallocation must be locked.
444 + *  - exchange refcnt can be manipulated atomically without locks.
445 + *  - sequence allocation must be protected by the exch lock.
446 + */
447 +
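
/*
 * A minimal sketch of the locking rule above; the function below is
 * illustrative and not part of the original code. Reference counts are
 * manipulated atomically without the lock, while sequence allocation
 * and exchange state changes take ex_lock.
 */
static void example_locked_seq_alloc(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);	/* refcnt: atomic, no lock needed */

	spin_lock_bh(&ep->ex_lock);	/* sequence allocation: ex_lock */
	fc_seq_start_next_locked(&ep->seq);
	spin_unlock_bh(&ep->ex_lock);

	fc_exch_release(ep);		/* drop the reference taken above */
}
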
448 +/*
449 + * opcode names for debugging.
450 + */
451 +static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
452 +
453 +#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
454 +
455 +static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
456 + unsigned int max_index)
457 +{
458 + const char *name = NULL;
459 +
460 + if (op < max_index)
461 + name = table[op];
462 + if (!name)
463 + name = "unknown";
464 + return name;
465 +}
466 +
467 +static const char *fc_exch_rctl_name(unsigned int op)
468 +{
469 + return fc_exch_name_lookup(op, fc_exch_rctl_names,
470 + FC_TABLE_SIZE(fc_exch_rctl_names));
471 +}
472 +
473 +/*
474 + * Hold an exchange - keep it from being freed.
475 + */
476 +static void fc_exch_hold(struct fc_exch *ep)
477 +{
478 + atomic_inc(&ep->ex_refcnt);
479 +}
480 +
481 +/*
482 + * Fill in frame header.
483 + *
484 + * The following fields are the responsibility of this routine:
485 + * d_id, s_id, df_ctl, oxid, rxid, cs_ctl, seq_id
486 + *
487 + * The following fields are handled by the caller.
488 + * r_ctl, type, f_ctl, seq_cnt, parm_offset
489 + *
490 + * That should be a complete list.
491 + *
492 + * We may be the originator or responder to the sequence.
493 + */
494 +static void fc_seq_fill_hdr(struct fc_seq *sp, struct fc_frame *fp)
495 +{
496 + struct fc_frame_header *fh = fc_frame_header_get(fp);
497 + struct fc_exch *ep;
498 +
499 + ep = fc_seq_exch(sp);
500 +
501 + hton24(fh->fh_s_id, ep->sid);
502 + hton24(fh->fh_d_id, ep->did);
503 + fh->fh_ox_id = htons(ep->oxid);
504 + fh->fh_rx_id = htons(ep->rxid);
505 + fh->fh_seq_id = sp->id;
506 + fh->fh_cs_ctl = 0;
507 + fh->fh_df_ctl = 0;
508 +}
509 +
510 +/*
511 + * Release a reference to an exchange.
512 + * If the refcnt goes to zero and the exchange is complete, it is freed.
513 + */
514 +static void fc_exch_release(struct fc_exch *ep)
515 +{
516 + struct fc_exch_mgr *mp;
517 +
518 + if (atomic_dec_and_test(&ep->ex_refcnt)) {
519 + mp = ep->em;
520 + if (ep->lp->tt.exch_put)
521 + ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
522 + WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
523 + WARN_ON(timer_pending(&ep->ex_timer));
524 + mempool_free(ep, mp->ep_pool);
525 + }
526 +}
527 +
528 +static int fc_exch_done_locked(struct fc_exch *ep)
529 +{
530 + int rc = 1;
531 +
532 + /*
533 + * We must check for completion in case there are two threads
534 + * trying to complete this. But the rrq code will reuse the
535 + * ep, and in that case we only clear the resp and set it as
536 + * complete, so it can be reused by the timer to send the rrq.
537 + */
538 + ep->resp = NULL;
539 + if (ep->state & FC_EX_DONE)
540 + return rc;
541 + ep->esb_stat |= ESB_ST_COMPLETE;
542 +
543 + if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
544 + ep->state |= FC_EX_DONE;
545 + if (del_timer(&ep->ex_timer))
546 + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
547 + atomic_dec(&ep->ex_refcnt); /* drop hold from alloc */
548 + rc = 0;
549 + }
550 + return rc;
551 +}
552 +
553 +static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
554 +{
555 + struct fc_exch_mgr *mp;
556 +
557 + mp = ep->em;
558 + spin_lock_bh(&mp->em_lock);
559 + WARN_ON(mp->total_exches <= 0);
560 + mp->total_exches--;
561 + mp->exches[ep->xid - mp->min_xid] = NULL;
562 + list_del(&ep->ex_list);
563 + spin_unlock_bh(&mp->em_lock);
564 +}
565 +
566 +/*
567 + * Internal version of fc_exch_timer_set - used with lock held.
568 + */
569 +static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
570 + unsigned int timer_msec)
571 +{
572 + if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
573 + return;
574 +
575 + if (!mod_timer(&ep->ex_timer, jiffies + msecs_to_jiffies(timer_msec)))
576 + fc_exch_hold(ep); /* hold for timer */
577 +}
578 +
579 +/*
580 + * Set timer for an exchange.
581 + * The time is a minimum delay in milliseconds until the timer fires.
582 + * Used for upper level protocols to time out the exchange.
583 + * The timer is cancelled when it fires or when the exchange completes.
584 + * If the exchange is already complete or being reset, no timer is set.
585 + */
586 +static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
587 +{
588 + spin_lock_bh(&ep->ex_lock);
589 + fc_exch_timer_set_locked(ep, timer_msec);
590 + spin_unlock_bh(&ep->ex_lock);
591 +}
592 +
593 +int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
594 +{
595 + struct fc_seq *sp;
596 + struct fc_exch *ep;
597 + struct fc_frame *fp;
598 + int error;
599 +
600 + ep = fc_seq_exch(req_sp);
601 +
602 + spin_lock_bh(&ep->ex_lock);
603 + if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
604 + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
605 + spin_unlock_bh(&ep->ex_lock);
606 + return -ENXIO;
607 + }
608 +
609 + /*
610 + * Send the abort on a new sequence if possible.
611 + */
612 + sp = fc_seq_start_next_locked(&ep->seq);
613 + if (!sp) {
614 + spin_unlock_bh(&ep->ex_lock);
615 + return -ENOMEM;
616 + }
617 +
618 + sp->f_ctl |= FC_FC_SEQ_INIT;
619 + ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
620 + if (timer_msec)
621 + fc_exch_timer_set_locked(ep, timer_msec);
622 + spin_unlock_bh(&ep->ex_lock);
623 +
624 + /*
625 + * If not logged into the fabric, don't send ABTS but leave
626 + * sequence active until next timeout.
627 + */
628 + if (!ep->sid)
629 + return 0;
630 +
631 + /*
632 + * Send an abort for the sequence that timed out.
633 + */
634 + fp = fc_frame_alloc(ep->lp, 0);
635 + if (fp) {
636 + fc_frame_setup(fp, FC_RCTL_BA_ABTS, FC_TYPE_BLS);
637 + error = fc_seq_send(ep->lp, sp, fp, FC_FC_END_SEQ);
638 + } else
639 + error = -ENOBUFS;
640 + return error;
641 +}
642 +EXPORT_SYMBOL(fc_seq_exch_abort);
643 +
644 +/*
645 + * Exchange timeout - handle exchange timer expiration.
646 + * The timer will have been cancelled before this is called.
647 + */
648 +static void fc_exch_timeout(unsigned long ep_arg)
649 +{
650 + struct fc_exch *ep = (struct fc_exch *)ep_arg;
651 + struct fc_seq *sp = &ep->seq;
652 + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
653 + void *arg;
654 + u32 e_stat;
655 + int rc = 1;
656 +
657 + spin_lock_bh(&ep->ex_lock);
658 + if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
659 + goto unlock;
660 +
661 + e_stat = ep->esb_stat;
662 + if (e_stat & ESB_ST_COMPLETE) {
663 + ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
664 + spin_unlock_bh(&ep->ex_lock);
665 + if (e_stat & ESB_ST_REC_QUAL)
666 + fc_exch_rrq(ep);
667 + goto done;
668 + } else {
669 + resp = ep->resp;
670 + arg = ep->resp_arg;
671 + ep->resp = NULL;
672 + if (e_stat & ESB_ST_ABNORMAL)
673 + rc = fc_exch_done_locked(ep);
674 + spin_unlock_bh(&ep->ex_lock);
675 + if (!rc)
676 + fc_exch_mgr_delete_ep(ep);
677 + if (resp)
678 + resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
679 + fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
680 + goto done;
681 + }
682 +unlock:
683 + spin_unlock_bh(&ep->ex_lock);
684 +done:
685 + /*
686 + * This release matches the hold taken when the timer was set.
687 + */
688 + fc_exch_release(ep);
689 +}
690 +
691 +/*
692 + * Allocate a sequence.
693 + *
694 + * We don't support multiple originated sequences on the same exchange.
695 + * By implication, any previously originated sequence on this exchange
696 + * is complete, and we reallocate the same sequence.
697 + */
698 +static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
699 +{
700 + struct fc_seq *sp;
701 +
702 + sp = &ep->seq;
703 + sp->ssb_stat = 0;
704 + sp->f_ctl = 0;
705 + sp->cnt = 0;
706 + sp->id = seq_id;
707 + return sp;
708 +}
709 +
710 +/*
711 + * Allocate an exchange.
712 + *
713 + * If the supplied xid is zero, assign the next free exchange ID
714 + * from the exchange manager; otherwise use the supplied xid.
715 + */
716 +struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
717 +{
718 + struct fc_exch *ep = NULL;
719 + u16 min_xid, max_xid;
720 +
721 + min_xid = mp->min_xid;
722 + max_xid = mp->max_xid;
723 + /*
724 + * if xid is supplied then verify its xid range
725 + */
726 + if (xid) {
727 + if (unlikely((xid < min_xid) || (xid > max_xid))) {
728 + FC_DBG("Invalid xid 0x:%x\n", xid);
729 + goto out;
730 + }
731 + if (unlikely(mp->exches[xid - min_xid] != NULL)) {
732 + FC_DBG("xid 0x:%x is already in use\n", xid);
733 + goto out;
734 + }
735 + }
736 +
737 + /*
738 + * Allocate new exchange
739 + */
740 + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
741 + if (!ep) {
742 + atomic_inc(&mp->stats.no_free_exch);
743 + goto out;
744 + }
745 + memset(ep, 0, sizeof(*ep));
746 +
747 + spin_lock_bh(&mp->em_lock);
748 +
749 + /*
750 + * if xid is zero then assign next free exchange ID
751 + */
752 + if (!xid) {
753 + xid = fc_exch_next_xid(mp, mp->last_xid);
754 + /*
755 + * find next free xid using linear search
756 + */
757 + while (mp->exches[xid - min_xid] != NULL) {
758 + if (xid == mp->last_xid)
759 + break;
760 + xid = fc_exch_next_xid(mp, xid);
761 + }
762 +
763 + if (likely(mp->exches[xid - min_xid] == NULL)) {
764 + mp->last_xid = xid;
765 + } else {
766 + spin_unlock_bh(&mp->em_lock);
767 + atomic_inc(&mp->stats.no_free_exch_xid);
768 + mempool_free(ep, mp->ep_pool);
769 + goto out;
770 + }
771 + }
772 +
773 + mp->exches[xid - min_xid] = ep;
774 + list_add_tail(&ep->ex_list, &mp->ex_list);
775 + fc_seq_alloc(ep, ep->seq_id++);
776 + mp->total_exches++;
777 + spin_unlock_bh(&mp->em_lock);
778 +
779 + /*
780 + * update exchange
781 + */
782 + ep->oxid = ep->xid = xid;
783 + ep->em = mp;
784 + ep->lp = mp->lp;
785 + ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
786 + ep->rxid = FC_XID_UNKNOWN;
787 + ep->class = mp->class;
788 +
789 + spin_lock_init(&ep->ex_lock);
790 + setup_timer(&ep->ex_timer, fc_exch_timeout, (unsigned long)ep);
791 +
792 + fc_exch_hold(ep); /* hold for caller */
793 +out:
794 + return ep;
795 +}
796 +EXPORT_SYMBOL(fc_exch_alloc);
797 +
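/*
 * A short usage sketch (illustrative, not part of the original code):
 * callers normally pass xid 0 and let the manager assign the next free
 * exchange ID; a specific xid is only passed when the caller must pin a
 * known ID. "mp" is assumed to be an initialized exchange manager.
 */
static struct fc_exch *example_exch_alloc(struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;

	ep = fc_exch_alloc(mp, 0);	/* 0: assign next free exchange ID */
	if (!ep)
		return NULL;		/* pool exhausted or no free xid */
	/* ... use ep; the hold taken for the caller is dropped later */
	return ep;
}
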
798 +/*
799 + * Lookup and hold an exchange.
800 + */
801 +static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
802 +{
803 + struct fc_exch *ep = NULL;
804 +
805 + if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
806 + spin_lock_bh(&mp->em_lock);
807 + ep = mp->exches[xid - mp->min_xid];
808 + if (ep) {
809 + fc_exch_hold(ep);
810 + WARN_ON(ep->xid != xid);
811 + }
812 + spin_unlock_bh(&mp->em_lock);
813 + }
814 + return ep;
815 +}
816 +
817 +void fc_exch_done(struct fc_seq *sp)
818 +{
819 + struct fc_exch *ep = fc_seq_exch(sp);
820 + int rc;
821 +
822 + spin_lock_bh(&ep->ex_lock);
823 + rc = fc_exch_done_locked(ep);
824 + spin_unlock_bh(&ep->ex_lock);
825 + if (!rc)
826 + fc_exch_mgr_delete_ep(ep);
827 +}
828 +EXPORT_SYMBOL(fc_exch_done);
829 +
830 +/*
831 + * Allocate a new exchange as responder.
832 + * Sets the responder ID in the frame header.
833 + */
834 +static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
835 +{
836 + struct fc_exch *ep;
837 + struct fc_frame_header *fh;
838 + u16 rxid;
839 +
840 + ep = mp->lp->tt.exch_get(mp->lp, fp);
841 + if (ep) {
842 + ep->class = fc_frame_class(fp);
843 +
844 + /*
845 + * Set EX_CTX indicating we're responding on this exchange.
846 + */
847 + ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
848 + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
849 + fh = fc_frame_header_get(fp);
850 + ep->sid = ntoh24(fh->fh_d_id);
851 + ep->did = ntoh24(fh->fh_s_id);
852 + ep->oid = ep->did;
853 +
854 + /*
855 + * Allocated exchange has placed the XID in the
856 + * originator field. Move it to the responder field,
857 + * and set the originator XID from the frame.
858 + */
859 + ep->rxid = ep->xid;
860 + ep->oxid = ntohs(fh->fh_ox_id);
861 + ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
862 + if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
863 + ep->esb_stat &= ~ESB_ST_SEQ_INIT;
864 +
865 + /*
866 + * Set the responder ID in the frame header.
867 + * The old one should've been 0xffff.
868 + * If it isn't, don't assign one.
869 + * Incoming basic link service frames may specify
870 + * a referenced RX_ID.
871 + */
872 + if (fh->fh_type != FC_TYPE_BLS) {
873 + rxid = ntohs(fh->fh_rx_id);
874 + WARN_ON(rxid != FC_XID_UNKNOWN);
875 + fh->fh_rx_id = htons(ep->rxid);
876 + }
877 + }
878 + return ep;
879 +}
880 +
881 +/*
882 + * Find a sequence for receive where the other end is originating the sequence.
883 + * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
884 + * on the ep that should be released by the caller.
885 + */
886 +static enum fc_pf_rjt_reason
887 +fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
888 +{
889 + struct fc_frame_header *fh = fc_frame_header_get(fp);
890 + struct fc_exch *ep = NULL, *new_ep = NULL;
891 + struct fc_seq *sp = NULL;
892 + enum fc_pf_rjt_reason reject = FC_RJT_NONE;
893 + u32 f_ctl;
894 + u16 xid;
895 +
896 + f_ctl = ntoh24(fh->fh_f_ctl);
897 + WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
898 +
899 + /*
900 + * Lookup or create the exchange if we will be creating the sequence.
901 + */
902 + if (f_ctl & FC_FC_EX_CTX) {
903 + xid = ntohs(fh->fh_ox_id); /* we originated exch */
904 + ep = fc_exch_find(mp, xid);
905 + if (!ep) {
906 + atomic_inc(&mp->stats.xid_not_found);
907 + reject = FC_RJT_OX_ID;
908 + goto out;
909 + }
910 + if (ep->rxid == FC_XID_UNKNOWN)
911 + ep->rxid = ntohs(fh->fh_rx_id);
912 + else if (ep->rxid != ntohs(fh->fh_rx_id)) {
913 + reject = FC_RJT_OX_ID;
914 + goto rel;
915 + }
916 + } else {
917 + xid = ntohs(fh->fh_rx_id); /* we are the responder */
918 +
919 + /*
920 + * Special case for MDS issuing an ELS TEST with a
921 + * bad rxid of 0.
922 + * XXX take this out once we do the proper reject.
923 + */
924 + if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
925 + fc_frame_payload_op(fp) == ELS_TEST) {
926 + fh->fh_rx_id = htons(FC_XID_UNKNOWN);
927 + xid = FC_XID_UNKNOWN;
928 + }
929 +
930 + /*
931 + * new sequence - find the exchange
932 + */
933 + ep = fc_exch_find(mp, xid);
934 + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
935 + if (ep) {
936 + atomic_inc(&mp->stats.xid_busy);
937 + reject = FC_RJT_RX_ID;
938 + goto rel;
939 + }
940 + new_ep = ep = fc_exch_resp(mp, fp);
941 + if (!ep) {
942 + reject = FC_RJT_EXCH_EST; /* XXX */
943 + goto out;
944 + }
945 + fc_exch_hold(ep); /* Additional hold for caller */
946 + xid = ep->xid; /* get our XID */
947 + } else if (!ep) {
948 + atomic_inc(&mp->stats.xid_not_found);
949 + reject = FC_RJT_RX_ID; /* XID not found */
950 + goto out;
951 + }
952 + }
953 +
954 + /*
955 + * At this point, we have the exchange held.
956 + * Find or create the sequence.
957 + */
958 + if (fc_sof_is_init(fr_sof(fp))) {
959 + sp = fc_seq_start_next(&ep->seq);
960 + if (!sp) {
961 + reject = FC_RJT_SEQ_XS; /* exchange shortage */
962 + goto rel;
963 + }
964 + sp->id = fh->fh_seq_id;
965 + sp->ssb_stat |= SSB_ST_RESP;
966 + } else {
967 + sp = &ep->seq;
968 + if (sp->id != fh->fh_seq_id) {
969 + atomic_inc(&mp->stats.seq_not_found);
970 + reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
971 + goto rel;
972 + }
973 + }
974 + WARN_ON(ep != fc_seq_exch(sp));
975 +
976 + if (f_ctl & FC_FC_SEQ_INIT)
977 + ep->esb_stat |= ESB_ST_SEQ_INIT;
978 +
979 + fr_seq(fp) = sp;
980 +out:
981 + return reject;
982 +rel:
983 + fc_exch_release(ep);
984 + if (new_ep)
985 + fc_exch_release(new_ep);
986 + return reject;
987 +}
988 +
989 +/*
990 + * Find the sequence for a frame being received.
991 + * We originated the sequence, so it should be found.
992 + * We may or may not have originated the exchange.
993 + * Does not hold the sequence for the caller.
994 + */
995 +static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
996 + struct fc_frame *fp)
997 +{
998 + struct fc_frame_header *fh = fc_frame_header_get(fp);
999 + struct fc_exch *ep;
1000 + struct fc_seq *sp = NULL;
1001 + u32 f_ctl;
1002 + u16 xid;
1003 +
1004 + f_ctl = ntoh24(fh->fh_f_ctl);
1005 + WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1006 + xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1007 + ep = fc_exch_find(mp, xid);
1008 + if (!ep)
1009 + return NULL;
1010 + if (ep->seq.id == fh->fh_seq_id) {
1011 + /*
1012 + * Save the RX_ID if we didn't previously know it.
1013 + */
1014 + sp = &ep->seq;
1015 + if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1016 + ep->rxid == FC_XID_UNKNOWN) {
1017 + ep->rxid = ntohs(fh->fh_rx_id);
1018 + }
1019 + }
1020 + fc_exch_release(ep);
1021 + return sp;
1022 +}
1023 +
1024 +/*
1025 + * Set addresses for an exchange.
1026 + * Note this must be done before the first sequence of the exchange is sent.
1027 + */
1028 +static void fc_exch_set_addr(struct fc_exch *ep,
1029 + u32 orig_id, u32 resp_id)
1030 +{
1031 + ep->oid = orig_id;
1032 + if (ep->esb_stat & ESB_ST_RESP) {
1033 + ep->sid = resp_id;
1034 + ep->did = orig_id;
1035 + } else {
1036 + ep->sid = orig_id;
1037 + ep->did = resp_id;
1038 + }
1039 +}
1040 +
1041 +static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
1042 +{
1043 + struct fc_exch *ep = fc_seq_exch(sp);
1044 +
1045 + sp = fc_seq_alloc(ep, ep->seq_id++);
1046 + if (fc_exch_debug)
1047 + FC_DBG("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
1048 + ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
1049 + return sp;
1050 +}
1051 +/*
1052 + * Allocate a new sequence on the same exchange as the supplied sequence.
1053 + * This will never return NULL.
1054 + */
1055 +struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
1056 +{
1057 + struct fc_exch *ep = fc_seq_exch(sp);
1058 +
1059 + spin_lock_bh(&ep->ex_lock);
1060 + WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
1061 + sp = fc_seq_start_next_locked(sp);
1062 + spin_unlock_bh(&ep->ex_lock);
1063 +
1064 + return sp;
1065 +}
1066 +EXPORT_SYMBOL(fc_seq_start_next);
1067 +
1068 +int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
1069 + struct fc_frame *fp, u32 f_ctl)
1070 +{
1071 + struct fc_exch *ep;
1072 + struct fc_frame_header *fh;
1073 + enum fc_class class;
1074 + u16 fill = 0;
1075 + int error;
1076 +
1077 + ep = fc_seq_exch(sp);
1078 + WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
1079 +
1080 + fc_seq_fill_hdr(sp, fp);
1081 + fh = fc_frame_header_get(fp);
1082 + class = ep->class;
1083 + fr_sof(fp) = class;
1084 + if (sp->cnt)
1085 + fr_sof(fp) = fc_sof_normal(class);
1086 +
1087 + if (f_ctl & FC_FC_END_SEQ) {
1088 + fr_eof(fp) = FC_EOF_T;
1089 + if (fc_sof_needs_ack(class))
1090 + fr_eof(fp) = FC_EOF_N;
1091 + /*
1092 + * Form f_ctl.
1093 + * The number of fill bytes to make the length a 4-byte
1094 + * multiple is the low order 2-bits of the f_ctl.
1095 + * The fill itself will have been cleared by the frame
1096 + * allocation.
1097 + * After this, the length will be even, as expected by
1098 + * the transport. Don't include the fill in the f_ctl
1099 + * saved in the sequence.
1100 + */
1101 + fill = fr_len(fp) & 3;
1102 + if (fill) {
1103 + fill = 4 - fill;
1104 + /* TODO, this may be a problem with fragmented skb */
1105 + skb_put(fp_skb(fp), fill);
1106 + }
1107 + f_ctl |= sp->f_ctl | ep->f_ctl;
1108 + } else {
1109 + WARN_ON(fr_len(fp) % 4 != 0);	/* no pad on non-last frames */
1110 + f_ctl |= sp->f_ctl | ep->f_ctl;
1111 + f_ctl &= ~FC_FC_SEQ_INIT;
1112 + fr_eof(fp) = FC_EOF_N;
1113 + }
1114 +
1115 + hton24(fh->fh_f_ctl, f_ctl | fill);
1116 + fh->fh_seq_cnt = htons(sp->cnt++);
1117 +
1118 + /*
1119 + * Send the frame.
1120 + */
1121 + error = lp->tt.frame_send(lp, fp);
1122 +
1123 + /*
1124 + * Update the exchange and sequence flags,
1125 + * assuming all frames for the sequence have been sent.
1126 + * We can only be called to send once for each sequence.
1127 + */
1128 + spin_lock_bh(&ep->ex_lock);
1129 + sp->f_ctl = f_ctl; /* save for possible abort */
1130 + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1131 + if (f_ctl & FC_FC_END_SEQ) {
1132 + if (f_ctl & FC_FC_SEQ_INIT)
1133 + ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1134 + }
1135 + spin_unlock_bh(&ep->ex_lock);
1136 + return error;
1137 +}
1138 +EXPORT_SYMBOL(fc_seq_send);
1139 +
1140 +void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
1141 + struct fc_seq_els_data *els_data)
1142 +{
1143 + switch (els_cmd) {
1144 + case ELS_LS_RJT:
1145 + fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
1146 + break;
1147 + case ELS_LS_ACC:
1148 + fc_seq_ls_acc(sp);
1149 + break;
1150 + case ELS_RRQ:
1151 + fc_exch_els_rrq(sp, els_data->fp);
1152 + break;
1153 + case ELS_REC:
1154 + fc_exch_els_rec(sp, els_data->fp);
1155 + break;
1156 + default:
1157 + FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
1158 + }
1159 +}
1160 +EXPORT_SYMBOL(fc_seq_els_rsp_send);
1161 +
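/*
 * A usage sketch (illustrative, not part of the original code): reject
 * an incoming ELS request on its sequence via the dispatcher above.
 * ELS_RJT_UNSUP and ELS_EXPL_NONE are example values from fc_els.h.
 */
static void example_els_reject(struct fc_seq *sp)
{
	struct fc_seq_els_data rjt_data;

	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_UNSUP;
	rjt_data.explan = ELS_EXPL_NONE;
	fc_seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
}
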
1162 +/*
1163 + * Send a sequence, which is also the last sequence in the exchange.
1164 + */
1165 +static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1166 + enum fc_rctl rctl, enum fc_fh_type fh_type)
1167 +{
1168 + u32 f_ctl;
1169 +
1170 + fc_frame_setup(fp, rctl, fh_type);
1171 + f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1172 + fc_seq_send(fc_seq_exch(sp)->lp, sp, fp, f_ctl);
1173 +}
1174 +
1175 +/*
1176 + * Send ACK_1 (or equiv.) indicating we received something.
1177 + * The frame we're acking is supplied.
1178 + */
1179 +static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
1180 +{
1181 + struct fc_frame *fp;
1182 + struct fc_frame_header *rx_fh;
1183 + struct fc_frame_header *fh;
1184 + struct fc_lport *lp = fc_seq_exch(sp)->lp;
1185 + unsigned int f_ctl;
1186 +
1187 + /*
1188 + * Don't send ACKs for class 3.
1189 + */
1190 + if (fc_sof_needs_ack(fr_sof(rx_fp))) {
1191 + fp = fc_frame_alloc(lp, 0);
1192 +		/* allocation failure is non-fatal here; just skip the ACK */
1193 +		if (!fp)
1194 +			return;
1195 +
1196 + fc_seq_fill_hdr(sp, fp);
1197 + fh = fc_frame_header_get(fp);
1198 + fh->fh_r_ctl = FC_RCTL_ACK_1;
1199 + fh->fh_type = FC_TYPE_BLS;
1200 +
1201 + /*
1202 + * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1203 + * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1204 + * Bits 9-8 are meaningful (retransmitted or unidirectional).
1205 + * Last ACK uses bits 7-6 (continue sequence),
1206 + * bits 5-4 are meaningful (what kind of ACK to use).
1207 + */
1208 + rx_fh = fc_frame_header_get(rx_fp);
1209 + f_ctl = ntoh24(rx_fh->fh_f_ctl);
1210 + f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1211 + FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
1212 + FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
1213 + FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1214 + f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1215 + hton24(fh->fh_f_ctl, f_ctl);
1216 +
1217 + fh->fh_seq_id = rx_fh->fh_seq_id;
1218 + fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1219 + fh->fh_parm_offset = htonl(1); /* ack single frame */
1220 +
1221 + fr_sof(fp) = fr_sof(rx_fp);
1222 + if (f_ctl & FC_FC_END_SEQ)
1223 + fr_eof(fp) = FC_EOF_T;
1224 + else
1225 + fr_eof(fp) = FC_EOF_N;
1226 +
1227 + (void) lp->tt.frame_send(lp, fp);
1228 + }
1229 +}
1230 +
1231 +/*
1232 + * Send BLS Reject.
1233 + * This is for rejecting BA_ABTS only.
1234 + */
1235 +static void
1236 +fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
1237 + enum fc_ba_rjt_explan explan)
1238 +{
1239 + struct fc_frame *fp;
1240 + struct fc_frame_header *rx_fh;
1241 + struct fc_frame_header *fh;
1242 + struct fc_ba_rjt *rp;
1243 + struct fc_lport *lp;
1244 + unsigned int f_ctl;
1245 +
1246 + lp = fr_dev(rx_fp);
1247 + fp = fc_frame_alloc(lp, sizeof(*rp));
1248 + if (!fp)
1249 + return;
1250 + fh = fc_frame_header_get(fp);
1251 + rx_fh = fc_frame_header_get(rx_fp);
1252 +
1253 + memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1254 +
1255 + rp = fc_frame_payload_get(fp, sizeof(*rp));
1256 + rp->br_reason = reason;
1257 + rp->br_explan = explan;
1258 +
1259 + /*
1260 + * seq_id, cs_ctl, df_ctl and param/offset are zero.
1261 + */
1262 + memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1263 + memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1264 + fh->fh_ox_id = rx_fh->fh_rx_id;
1265 + fh->fh_rx_id = rx_fh->fh_ox_id;
1266 + fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1267 + fh->fh_r_ctl = FC_RCTL_BA_RJT;
1268 + fh->fh_type = FC_TYPE_BLS;
1269 +
1270 + /*
1271 + * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1272 + * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1273 + * Bits 9-8 are meaningful (retransmitted or unidirectional).
1274 + * Last ACK uses bits 7-6 (continue sequence),
1275 + * bits 5-4 are meaningful (what kind of ACK to use).
1276 + * Always set LAST_SEQ, END_SEQ.
1277 + */
1278 + f_ctl = ntoh24(rx_fh->fh_f_ctl);
1279 + f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1280 + FC_FC_END_CONN | FC_FC_SEQ_INIT |
1281 + FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1282 + f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1283 + f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1284 + f_ctl &= ~FC_FC_FIRST_SEQ;
1285 + hton24(fh->fh_f_ctl, f_ctl);
1286 +
1287 + fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1288 + fr_eof(fp) = FC_EOF_T;
1289 + if (fc_sof_needs_ack(fr_sof(fp)))
1290 + fr_eof(fp) = FC_EOF_N;
1291 +
1292 + (void) lp->tt.frame_send(lp, fp);
1293 +}
1294 +
1295 +/*
1296 + * Handle an incoming ABTS. This would be for target mode usually,
1297 + * but could be due to lost FCP transfer ready, confirm or RRQ.
1298 + * We always handle this as an exchange abort, ignoring the parameter.
1299 + */
1300 +static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1301 +{
1302 + struct fc_frame *fp;
1303 + struct fc_ba_acc *ap;
1304 + struct fc_frame_header *fh;
1305 + struct fc_seq *sp;
1306 +
1307 + if (!ep)
1308 + goto reject;
1309 + spin_lock_bh(&ep->ex_lock);
1310 + if (ep->esb_stat & ESB_ST_COMPLETE) {
1311 + spin_unlock_bh(&ep->ex_lock);
1312 + goto reject;
1313 + }
1314 + if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1315 + fc_exch_hold(ep); /* hold for REC_QUAL */
1316 + ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1317 + fc_exch_timer_set_locked(ep, ep->r_a_tov);
1318 +
1319 + fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1320 + if (!fp) {
1321 + spin_unlock_bh(&ep->ex_lock);
1322 + goto free;
1323 + }
1324 + fh = fc_frame_header_get(fp);
1325 + ap = fc_frame_payload_get(fp, sizeof(*ap));
1326 + memset(ap, 0, sizeof(*ap));
1327 + sp = &ep->seq;
1328 + ap->ba_high_seq_cnt = htons(0xffff);
1329 + if (sp->ssb_stat & SSB_ST_RESP) {
1330 + ap->ba_seq_id = sp->id;
1331 + ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1332 + ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1333 + ap->ba_low_seq_cnt = htons(sp->cnt);
1334 + }
1335 + sp = fc_seq_start_next(sp);
1336 + spin_unlock_bh(&ep->ex_lock);
1337 + fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1338 + fc_frame_free(rx_fp);
1339 + return;
1340 +
1341 +reject:
1342 + fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1343 +free:
1344 + fc_frame_free(rx_fp);
1345 +}
1346 +
1347 +/*
1348 + * Handle receive where the other end is originating the sequence.
1349 + */
1350 +static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1351 + struct fc_frame *fp)
1352 +{
1353 + struct fc_frame_header *fh = fc_frame_header_get(fp);
1354 + struct fc_seq *sp = NULL;
1355 + struct fc_exch *ep = NULL;
1356 + enum fc_sof sof;
1357 + enum fc_eof eof;
1358 + u32 f_ctl;
1359 + enum fc_pf_rjt_reason reject;
1360 +
1361 + fr_seq(fp) = NULL;
1362 + reject = fc_seq_lookup_recip(mp, fp);
1363 + if (reject == FC_RJT_NONE) {
1364 + sp = fr_seq(fp); /* sequence will be held */
1365 + ep = fc_seq_exch(sp);
1366 + sof = fr_sof(fp);
1367 + eof = fr_eof(fp);
1368 + f_ctl = ntoh24(fh->fh_f_ctl);
1369 + fc_seq_send_ack(sp, fp);
1370 +
1371 + /*
1372 + * Call the receive function.
1373 + *
1374 + * The receive function may allocate a new sequence
1375 + * over the old one, so we shouldn't change the
1376 + * sequence after this.
1377 + *
1378 + * The frame will be freed by the receive function.
1379 + * If new exch resp handler is valid then call that
1380 + * first.
1381 + */
1382 + if (ep->resp)
1383 + ep->resp(sp, fp, ep->resp_arg);
1384 + else
1385 + lp->tt.lport_recv(lp, sp, fp);
1386 + fc_exch_release(ep); /* release from lookup */
1387 + } else {
1388 + if (fc_exch_debug)
1389 + FC_DBG("exch/seq lookup failed: reject %x\n", reject);
1390 + fc_frame_free(fp);
1391 + }
1392 +}
1393 +
1394 +/*
1395 + * Handle receive where the other end is originating the sequence in
1396 + * response to our exchange.
1397 + */
1398 +static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1399 +{
1400 + struct fc_frame_header *fh = fc_frame_header_get(fp);
1401 + struct fc_seq *sp;
1402 + struct fc_exch *ep;
1403 + enum fc_sof sof;
1404 + u32 f_ctl;
1405 + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1406 + void *ex_resp_arg;
1407 + int rc;
1408 +
1409 + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1410 + if (!ep) {
1411 + atomic_inc(&mp->stats.xid_not_found);
1412 + goto out;
1413 + }
1414 + if (ep->rxid == FC_XID_UNKNOWN)
1415 + ep->rxid = ntohs(fh->fh_rx_id);
1416 + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1417 + atomic_inc(&mp->stats.xid_not_found);
1418 + goto rel;
1419 + }
1420 + if (ep->did != ntoh24(fh->fh_s_id) &&
1421 + ep->did != FC_FID_FLOGI) {
1422 + atomic_inc(&mp->stats.xid_not_found);
1423 + goto rel;
1424 + }
1425 + sof = fr_sof(fp);
1426 + if (fc_sof_is_init(sof)) {
1427 + sp = fc_seq_start_next(&ep->seq);
1428 + sp->id = fh->fh_seq_id;
1429 + sp->ssb_stat |= SSB_ST_RESP;
1430 + } else {
1431 + sp = &ep->seq;
1432 + if (sp->id != fh->fh_seq_id) {
1433 + atomic_inc(&mp->stats.seq_not_found);
1434 + goto rel;
1435 + }
1436 + }
1437 + f_ctl = ntoh24(fh->fh_f_ctl);
1438 + fr_seq(fp) = sp;
1439 + if (f_ctl & FC_FC_SEQ_INIT)
1440 + ep->esb_stat |= ESB_ST_SEQ_INIT;
1441 +
1442 + if (fc_sof_needs_ack(sof))
1443 + fc_seq_send_ack(sp, fp);
1444 + resp = ep->resp;
1445 + ex_resp_arg = ep->resp_arg;
1446 +
1447 + if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1448 + (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1449 + (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1450 + spin_lock_bh(&ep->ex_lock);
1451 + rc = fc_exch_done_locked(ep);
1452 + WARN_ON(fc_seq_exch(sp) != ep);
1453 + spin_unlock_bh(&ep->ex_lock);
1454 + if (!rc)
1455 + fc_exch_mgr_delete_ep(ep);
1456 + }
1457 +
1458 + /*
1459 + * Call the receive function.
1460 + * The sequence is held (has a refcnt) for us,
1461 + * but not for the receive function.
1462 + *
1463 + * The receive function may allocate a new sequence
1464 + * over the old one, so we shouldn't change the
1465 + * sequence after this.
1466 + *
1467 + * The frame will be freed by the receive function.
1468 + * If new exch resp handler is valid then call that
1469 + * first.
1470 + */
1471 + if (resp)
1472 + resp(sp, fp, ex_resp_arg);
1473 + else
1474 + fc_frame_free(fp);
1475 + fc_exch_release(ep);
1476 + return;
1477 +rel:
1478 + fc_exch_release(ep);
1479 +out:
1480 + fc_frame_free(fp);
1481 +}
1482 +
1483 +/*
1484 + * Handle receive for a sequence where other end is responding to our sequence.
1485 + */
1486 +static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1487 +{
1488 + struct fc_seq *sp;
1489 +
1490 + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1491 + if (!sp) {
1492 + atomic_inc(&mp->stats.xid_not_found);
1493 + if (fc_exch_debug)
1494 + FC_DBG("seq lookup failed\n");
1495 + } else {
1496 + atomic_inc(&mp->stats.non_bls_resp);
1497 + if (fc_exch_debug)
1498 + FC_DBG("non-BLS response to sequence");
1499 + }
1500 + fc_frame_free(fp);
1501 +}
1502 +
1503 +/*
1504 + * Handle the response to an ABTS for exchange or sequence.
1505 + * This can be BA_ACC or BA_RJT.
1506 + */
1507 +static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1508 +{
1509 + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1510 + void *ex_resp_arg;
1511 + struct fc_frame_header *fh;
1512 + struct fc_ba_acc *ap;
1513 + struct fc_seq *sp;
1514 + u16 low;
1515 + u16 high;
1516 + int rc = 1, has_rec = 0;
1517 +
1518 + fh = fc_frame_header_get(fp);
1519 + if (fc_exch_debug)
1520 + FC_DBG("exch: BLS rctl %x - %s\n",
1521 + fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
1522 +
1523 + if (del_timer_sync(&ep->ex_timer))
1524 + fc_exch_release(ep); /* release from pending timer hold */
1525 +
1526 + spin_lock_bh(&ep->ex_lock);
1527 + switch (fh->fh_r_ctl) {
1528 + case FC_RCTL_BA_ACC:
1529 + ap = fc_frame_payload_get(fp, sizeof(*ap));
1530 + if (!ap)
1531 + break;
1532 +
1533 + /*
1534 + * Decide whether to establish a Recovery Qualifier.
1535 + * We do this if there is a non-empty SEQ_CNT range and
1536 + * SEQ_ID is the same as the one we aborted.
1537 + */
1538 + low = ntohs(ap->ba_low_seq_cnt);
1539 + high = ntohs(ap->ba_high_seq_cnt);
1540 + if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1541 + (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1542 + ap->ba_seq_id == ep->seq_id) && low != high) {
1543 + ep->esb_stat |= ESB_ST_REC_QUAL;
1544 + fc_exch_hold(ep); /* hold for recovery qualifier */
1545 + has_rec = 1;
1546 + }
1547 + break;
1548 + case FC_RCTL_BA_RJT:
1549 + break;
1550 + default:
1551 + break;
1552 + }
1553 +
1554 + resp = ep->resp;
1555 + ex_resp_arg = ep->resp_arg;
1556 +
1557 +	/* XXX do we need to do some other checks here? Can we reuse more
1558 +	 * of fc_exch_recv_seq_resp?
1559 + */
1560 + sp = &ep->seq;
1561 + /*
1562 + * do we want to check END_SEQ as well as LAST_SEQ here?
1563 + */
1564 + if (fh->fh_type != FC_TYPE_FCP &&
1565 + ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1566 + rc = fc_exch_done_locked(ep);
1567 + spin_unlock_bh(&ep->ex_lock);
1568 + if (!rc)
1569 + fc_exch_mgr_delete_ep(ep);
1570 +
1571 + if (resp)
1572 + resp(sp, fp, ex_resp_arg);
1573 + else
1574 + fc_frame_free(fp);
1575 +
1576 + if (has_rec)
1577 + fc_exch_timer_set(ep, ep->r_a_tov);
1578 +
1579 +}
1580 +
1581 +/*
1582 + * Receive BLS sequence.
1583 + * This is always a sequence initiated by the remote side.
1584 + * We may be either the originator or recipient of the exchange.
1585 + */
1586 +static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1587 +{
1588 + struct fc_frame_header *fh;
1589 + struct fc_exch *ep;
1590 + u32 f_ctl;
1591 +
1592 + fh = fc_frame_header_get(fp);
1593 + f_ctl = ntoh24(fh->fh_f_ctl);
1594 + fr_seq(fp) = NULL;
1595 +
1596 + ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1597 + ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1598 + if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1599 + spin_lock_bh(&ep->ex_lock);
1600 + ep->esb_stat |= ESB_ST_SEQ_INIT;
1601 + spin_unlock_bh(&ep->ex_lock);
1602 + }
1603 + if (f_ctl & FC_FC_SEQ_CTX) {
1604 + /*
1605 + * A response to a sequence we initiated.
1606 + * This should only be ACKs for class 2 or F.
1607 + */
1608 + switch (fh->fh_r_ctl) {
1609 + case FC_RCTL_ACK_1:
1610 + case FC_RCTL_ACK_0:
1611 + break;
1612 + default:
1613 + if (fc_exch_debug)
1614 + FC_DBG("BLS rctl %x - %s received",
1615 + fh->fh_r_ctl,
1616 + fc_exch_rctl_name(fh->fh_r_ctl));
1617 + break;
1618 + }
1619 + fc_frame_free(fp);
1620 + } else {
1621 + switch (fh->fh_r_ctl) {
1622 + case FC_RCTL_BA_RJT:
1623 + case FC_RCTL_BA_ACC:
1624 + if (ep)
1625 + fc_exch_abts_resp(ep, fp);
1626 + else
1627 + fc_frame_free(fp);
1628 + break;
1629 + case FC_RCTL_BA_ABTS:
1630 + fc_exch_recv_abts(ep, fp);
1631 + break;
1632 + default: /* ignore junk */
1633 + fc_frame_free(fp);
1634 + break;
1635 + }
1636 + }
1637 + if (ep)
1638 + fc_exch_release(ep); /* release hold taken by fc_exch_find */
1639 +}
1640 +
1641 +/*
1642 + * Accept sequence with LS_ACC.
1643 + * If this fails due to allocation or transmit congestion, assume the
1644 + * originator will repeat the sequence.
1645 + */
1646 +static void fc_seq_ls_acc(struct fc_seq *req_sp)
1647 +{
1648 + struct fc_seq *sp;
1649 + struct fc_els_ls_acc *acc;
1650 + struct fc_frame *fp;
1651 +
1652 + sp = fc_seq_start_next(req_sp);
1653 + fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1654 + if (fp) {
1655 + acc = fc_frame_payload_get(fp, sizeof(*acc));
1656 + memset(acc, 0, sizeof(*acc));
1657 + acc->la_cmd = ELS_LS_ACC;
1658 + fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1659 + }
1660 +}
1661 +
1662 +/*
1663 + * Reject sequence with ELS LS_RJT.
1664 + * If this fails due to allocation or transmit congestion, assume the
1665 + * originator will repeat the sequence.
1666 + */
1667 +static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1668 + enum fc_els_rjt_explan explan)
1669 +{
1670 + struct fc_seq *sp;
1671 + struct fc_els_ls_rjt *rjt;
1672 + struct fc_frame *fp;
1673 +
1674 + sp = fc_seq_start_next(req_sp);
1675 + fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1676 + if (fp) {
1677 + rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1678 + memset(rjt, 0, sizeof(*rjt));
1679 + rjt->er_cmd = ELS_LS_RJT;
1680 + rjt->er_reason = reason;
1681 + rjt->er_explan = explan;
1682 + fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1683 + }
1684 +}
1685 +
1686 +static void fc_exch_reset(struct fc_exch *ep)
1687 +{
1688 + struct fc_seq *sp;
1689 + void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1690 + void *arg;
1691 + int rc = 1;
1692 +
1693 + spin_lock_bh(&ep->ex_lock);
1694 + ep->state |= FC_EX_RST_CLEANUP;
1695 + /*
1696 + * we really want to call del_timer_sync, but cannot because
1697 + * the lport calls us with the lport lock held (some resp
1698 + * functions can also grab the lport lock which could cause
1699 + * a deadlock).
1700 + */
1701 + if (del_timer(&ep->ex_timer))
1702 + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1703 + resp = ep->resp;
1704 + ep->resp = NULL;
1705 + if (ep->esb_stat & ESB_ST_REC_QUAL)
1706 + atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1707 + ep->esb_stat &= ~ESB_ST_REC_QUAL;
1708 + arg = ep->resp_arg;
1709 + sp = &ep->seq;
1710 +
1711 + if (ep->fh_type != FC_TYPE_FCP)
1712 + rc = fc_exch_done_locked(ep);
1713 + spin_unlock_bh(&ep->ex_lock);
1714 + if (!rc)
1715 + fc_exch_mgr_delete_ep(ep);
1716 +
1717 + if (resp)
1718 + resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1719 +}
1720 +
1721 +/*
1722 + * Reset an exchange manager, releasing all sequences and exchanges.
1723 + * If sid is non-zero, reset only exchanges we source from that FID.
1724 + * If did is non-zero, reset only exchanges destined to that FID.
1725 + */
1726 +void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
1727 +{
1728 + struct fc_exch *ep;
1729 + struct fc_exch *next;
1730 +
1731 + spin_lock_bh(&mp->em_lock);
1732 +restart:
1733 + list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1734 + if ((sid == 0 || sid == ep->sid) &&
1735 + (did == 0 || did == ep->did)) {
1736 + fc_exch_hold(ep);
1737 + spin_unlock_bh(&mp->em_lock);
1738 +
1739 + fc_exch_reset(ep);
1740 +
1741 + fc_exch_release(ep);
1742 + spin_lock_bh(&mp->em_lock);
1743 +
1744 + /*
1745 + * must restart the loop in case multiple eps were
1746 + * released while the lock was dropped.
1747 + */
1748 + goto restart;
1749 + }
1750 + }
1751 + spin_unlock_bh(&mp->em_lock);
1752 +}
1753 +EXPORT_SYMBOL(fc_exch_mgr_reset);
1754 +
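/*
 * A usage sketch (illustrative, not part of the original code): on a
 * link failure an LLD would typically reset every exchange in the
 * manager; sid == 0 and did == 0 match all exchanges, as described in
 * the comment above fc_exch_mgr_reset().
 */
static void example_link_reset(struct fc_exch_mgr *mp)
{
	fc_exch_mgr_reset(mp, 0, 0);	/* abort and clean up all exchanges */
}
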
1755 +void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid)
1756 +{
1757 + struct fc_exch *ep;
1758 +
1759 + ep = fc_seq_exch(sp);
1760 + *oxid = ep->oxid;
1761 + *rxid = ep->rxid;
1762 +}
1763 +EXPORT_SYMBOL(fc_seq_get_xids);
1764 +
1765 +void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data)
1766 +{
1767 + sp->rec_data = rec_data;
1768 +}
1769 +EXPORT_SYMBOL(fc_seq_set_rec_data);
1770 +
1771 +/*
1772 + * Handle incoming ELS REC - Read Exchange Concise.
1773 + * Note that the requesting port may be different than the S_ID in the request.
1774 + */
1775 +static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1776 +{
1777 + struct fc_frame *fp;
1778 + struct fc_exch *ep;
1779 + struct fc_exch_mgr *em;
1780 + struct fc_els_rec *rp;
1781 + struct fc_els_rec_acc *acc;
1782 + enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1783 + enum fc_els_rjt_explan explan;
1784 + u32 sid;
1785 + u16 rxid;
1786 + u16 oxid;
1787 +
1788 + rp = fc_frame_payload_get(rfp, sizeof(*rp));
1789 + explan = ELS_EXPL_INV_LEN;
1790 + if (!rp)
1791 + goto reject;
1792 + sid = ntoh24(rp->rec_s_id);
1793 + rxid = ntohs(rp->rec_rx_id);
1794 + oxid = ntohs(rp->rec_ox_id);
1795 +
1796 + /*
1797 + * Currently it's hard to find the local S_ID from the exchange
1798 + * manager. This will eventually be fixed, but for now it's easier
1799 + * to lookup the subject exchange twice, once as if we were
1800 + * the initiator, and then again if we weren't.
1801 + */
1802 + em = fc_seq_exch(sp)->em;
1803 + ep = fc_exch_find(em, oxid);
1804 + explan = ELS_EXPL_OXID_RXID;
1805 + if (ep && ep->oid == sid) {
1806 + if (ep->rxid != FC_XID_UNKNOWN &&
1807 + rxid != FC_XID_UNKNOWN &&
1808 + ep->rxid != rxid)
1809 + goto rel;
1810 + } else {
1811 + if (ep)
1812 + fc_exch_release(ep);
1813 + ep = NULL;
1814 + if (rxid != FC_XID_UNKNOWN)
1815 + ep = fc_exch_find(em, rxid);
1816 + if (!ep)
1817 + goto reject;
1818 + }
1819 +
1820 + fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1821 + if (!fp) {
1822 + fc_exch_done(sp);
1823 + goto out;
1824 + }
1825 + sp = fc_seq_start_next(sp);
1826 + acc = fc_frame_payload_get(fp, sizeof(*acc));
1827 + memset(acc, 0, sizeof(*acc));
1828 + acc->reca_cmd = ELS_LS_ACC;
1829 + acc->reca_ox_id = rp->rec_ox_id;
1830 + memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1831 + acc->reca_rx_id = htons(ep->rxid);
1832 + if (ep->sid == ep->oid)
1833 + hton24(acc->reca_rfid, ep->did);
1834 + else
1835 + hton24(acc->reca_rfid, ep->sid);
1836 + acc->reca_fc4value = htonl(ep->seq.rec_data);
1837 + acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1838 + ESB_ST_SEQ_INIT |
1839 + ESB_ST_COMPLETE));
1840 + sp = fc_seq_start_next(sp);
1841 + fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1842 +out:
1843 + fc_exch_release(ep);
1844 + fc_frame_free(rfp);
1845 + return;
1846 +
1847 +rel:
1848 + fc_exch_release(ep);
1849 +reject:
1850 + fc_seq_ls_rjt(sp, reason, explan);
1851 + fc_frame_free(rfp);
1852 +}
1853 +
1854 +/*
1855 + * Handle response from RRQ.
1856 + * Not much to do here, really.
1857 + * Should report errors.
1858 + *
1859 + * TODO: fix error handler.
1860 + */
1861 +static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1862 +{
1863 + struct fc_exch *ep = fc_seq_exch(sp);
1864 + struct fc_exch *aborted_ep;
1865 +
1866 + unsigned int op;
1867 +
1868 + if (IS_ERR(fp)) {
1869 + int err = PTR_ERR(fp);
1870 +
1871 + if (err == -FC_EX_CLOSED)
1872 + goto cleanup;
1873 + FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
1874 + return;
1875 + }
1876 +
1877 + op = fc_frame_payload_op(fp);
1878 + fc_frame_free(fp);
1879 +
1880 + switch (op) {
1881 + case ELS_LS_RJT:
1882 + FC_DBG("LS_RJT for RRQ");
1883 + /* fall through */
1884 + case ELS_LS_ACC:
1885 + goto cleanup;
1886 + default:
1887 + FC_DBG("unexpected response op %x for RRQ", op);
1888 + return;
1889 + }
1890 +
1891 +cleanup:
1892 + spin_lock_bh(&ep->ex_lock);
1893 + aborted_ep = ep->aborted_ep;
1894 + ep->aborted_ep = NULL;
1895 + spin_unlock_bh(&ep->ex_lock);
1896 +
1897 + if (aborted_ep) {
1898 + fc_exch_done(&aborted_ep->seq);
1899 + /* drop hold for rec qual */
1900 + fc_exch_release(aborted_ep);
1901 + }
1902 +}
1903 +
1904 +/*
1905 + * Send ELS RRQ - Reinstate Recovery Qualifier.
1906 + * This tells the remote port to stop blocking the use of
1907 + * the exchange and the seq_cnt range.
1908 + */
1909 +static void fc_exch_rrq(struct fc_exch *ep)
1910 +{
1911 + struct fc_lport *lp;
1912 + struct fc_els_rrq *rrq;
1913 + struct fc_frame *fp;
1914 + struct fc_seq *rrq_sp;
1915 + struct fc_exch *rrq_ep;
1916 + u32 did;
1917 +
1918 + lp = ep->lp;
1919 +
1920 + fp = fc_frame_alloc(lp, sizeof(*rrq));
1921 + if (!fp)
1922 + return;
1923 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
1924 + rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1925 + memset(rrq, 0, sizeof(*rrq));
1926 + rrq->rrq_cmd = ELS_RRQ;
1927 + hton24(rrq->rrq_s_id, ep->sid);
1928 + rrq->rrq_ox_id = htons(ep->oxid);
1929 + rrq->rrq_rx_id = htons(ep->rxid);
1930 +
1931 + did = ep->did;
1932 + if (ep->esb_stat & ESB_ST_RESP)
1933 + did = ep->sid;
1934 + rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, ep, lp->e_d_tov,
1935 + lp->fid, did, FC_FC_SEQ_INIT | FC_FC_END_SEQ);
1936 + if (!rrq_sp) {
1937 + spin_lock_bh(&ep->ex_lock);
1938 + ep->esb_stat |= ESB_ST_REC_QUAL;
1939 + fc_exch_timer_set_locked(ep, ep->r_a_tov);
1940 + spin_unlock_bh(&ep->ex_lock);
1941 + return;
1942 + }
1943 +
1944 + rrq_ep = fc_seq_exch(rrq_sp);
1945 + rrq_ep->aborted_ep = ep;
1946 +}
1947 +
1948 +
1949 +/*
1950 + * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
1951 + */
1952 +static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1953 +{
1954 + struct fc_exch *ep; /* request or subject exchange */
1955 + struct fc_els_rrq *rp;
1956 + u32 sid;
1957 + u16 xid;
1958 + enum fc_els_rjt_explan explan;
1959 +
1960 + rp = fc_frame_payload_get(fp, sizeof(*rp));
1961 + explan = ELS_EXPL_INV_LEN;
1962 + if (!rp)
1963 + goto reject;
1964 +
1965 + /*
1966 + * lookup subject exchange.
1967 + */
1968 + ep = fc_seq_exch(sp);
1969 + sid = ntoh24(rp->rrq_s_id); /* subject source */
1970 + xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1971 + ep = fc_exch_find(ep->em, xid);
1972 +
1973 + explan = ELS_EXPL_OXID_RXID;
1974 + if (!ep)
1975 + goto reject;
1976 + spin_lock_bh(&ep->ex_lock);
1977 + if (ep->oxid != ntohs(rp->rrq_ox_id))
1978 + goto unlock_reject;
1979 + if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1980 + ep->rxid != FC_XID_UNKNOWN)
1981 + goto unlock_reject;
1982 + explan = ELS_EXPL_SID;
1983 + if (ep->sid != sid)
1984 + goto unlock_reject;
1985 +
1986 + /*
1987 + * Clear Recovery Qualifier state, and cancel timer if complete.
1988 + */
1989 + if (ep->esb_stat & ESB_ST_REC_QUAL) {
1990 + ep->esb_stat &= ~ESB_ST_REC_QUAL;
1991 + atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1992 + }
1993 + if ((ep->esb_stat & ESB_ST_COMPLETE) && (del_timer(&ep->ex_timer)))
1994 + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1995 +
1996 + spin_unlock_bh(&ep->ex_lock);
1997 +
1998 + /*
1999 + * Send LS_ACC.
2000 + */
2001 + fc_seq_ls_acc(sp);
2002 + fc_frame_free(fp);
2003 + return;
2004 +
2005 +unlock_reject:
2006 + spin_unlock_bh(&ep->ex_lock);
2007 + fc_exch_release(ep); /* drop hold from fc_exch_find */
2008 +reject:
2009 + fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
2010 + fc_frame_free(fp);
2011 +}
2012 +
2013 +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
2014 + enum fc_class class,
2015 + u16 min_xid, u16 max_xid)
2016 +{
2017 + struct fc_exch_mgr *mp;
2018 + size_t len;
2019 +
2020 + if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
2021 + FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n",
2022 + min_xid, max_xid);
2023 + return NULL;
2024 + }
2025 +
2026 + /*
2027 + * Memory needed for the EM
2028 + */
2029 + len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
2030 + len += sizeof(struct fc_exch_mgr);
2031 +
2032 + mp = kzalloc(len, GFP_ATOMIC);
2033 + if (!mp)
2034 + return NULL;
2035 + mp->class = class;
2036 + mp->total_exches = 0;
2037 + mp->exches = (struct fc_exch **)(mp + 1);
2038 + mp->last_xid = min_xid - 1;
2039 + mp->min_xid = min_xid;
2040 + mp->max_xid = max_xid;
2041 + mp->lp = lp;
2042 + INIT_LIST_HEAD(&mp->ex_list);
2043 + spin_lock_init(&mp->em_lock);
2044 +
2045 + mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2046 + if (!mp->ep_pool)
2047 + goto free_mp;
2048 +
2049 + return mp;
2050 +
2051 +free_mp:
2052 + kfree(mp);
2053 + return NULL;
2054 +}
2055 +EXPORT_SYMBOL(fc_exch_mgr_alloc);
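
fc_exch_mgr_alloc() sizes a single allocation so the exchange pointer table sits directly behind the manager struct; `mp + 1` is therefore the first table slot. In outline (the xid variable below is illustrative; NULL and bounds checks omitted):

        /* one block: [ struct fc_exch_mgr | struct fc_exch *[max-min+1] ] */
        size_t len = sizeof(struct fc_exch_mgr) +
                     (max_xid - min_xid + 1) * sizeof(struct fc_exch *);
        struct fc_exch_mgr *mp = kzalloc(len, GFP_ATOMIC);

        mp->exches = (struct fc_exch **)(mp + 1);  /* table starts after struct */
        /* the slot for a given xid is indexed relative to min_xid */
        struct fc_exch **slot = &mp->exches[xid - mp->min_xid];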
2056 +
2057 +void fc_exch_mgr_free(struct fc_exch_mgr *mp)
2058 +{
2059 + WARN_ON(!mp);
2060 + /*
2061 + * The total exch count must be zero
2062 + * before freeing exchange manager.
2063 + */
2064 + WARN_ON(mp->total_exches != 0);
2065 + mempool_destroy(mp->ep_pool);
2066 + kfree(mp);
2067 +}
2068 +EXPORT_SYMBOL(fc_exch_mgr_free);
2069 +
2070 +struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
2071 +{
2072 + if (!lp || !lp->emp)
2073 + return NULL;
2074 + return fc_exch_alloc(lp->emp, 0);
2075 +}
2076 +EXPORT_SYMBOL(fc_exch_get);
2077 +
2078 +struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
2079 + struct fc_frame *fp,
2080 + void (*resp)(struct fc_seq *,
2081 + struct fc_frame *fp,
2082 + void *arg),
2083 + void *resp_arg, u32 timer_msec,
2084 + u32 sid, u32 did, u32 f_ctl)
2085 +{
2086 + struct fc_exch *ep;
2087 + struct fc_seq *sp = NULL;
2088 + struct fc_frame_header *fh;
2089 + u16 fill;
2090 +
2091 + ep = lp->tt.exch_get(lp, fp);
2092 + if (!ep) {
2093 + fc_frame_free(fp);
2094 + return NULL;
2095 + }
2096 + ep->esb_stat |= ESB_ST_SEQ_INIT;
2097 + fc_exch_set_addr(ep, sid, did);
2098 + ep->resp = resp;
2099 + ep->resp_arg = resp_arg;
2100 + ep->r_a_tov = FC_DEF_R_A_TOV;
2101 + ep->lp = lp;
2102 + sp = &ep->seq;
2103 + WARN_ON((sp->f_ctl & FC_FC_END_SEQ) != 0);
2104 +
2105 + fr_sof(fp) = ep->class;
2106 + if (sp->cnt)
2107 + fr_sof(fp) = fc_sof_normal(ep->class);
2108 + fr_eof(fp) = FC_EOF_T;
2109 + if (fc_sof_needs_ack(ep->class))
2110 + fr_eof(fp) = FC_EOF_N;
2111 +
2112 + fc_seq_fill_hdr(sp, fp);
2113 + /*
2114 + * Form f_ctl.
2115 + * The number of fill bytes to make the length a 4-byte multiple is
2116 + * the low order 2-bits of the f_ctl. The fill itself will have been
2117 + * cleared by the frame allocation.
2118 + * After this, the length will be even, as expected by the transport.
2119 + * Don't include the fill in the f_ctl saved in the sequence.
2120 + */
2121 + fill = fr_len(fp) & 3;
2122 + if (fill) {
2123 + fill = 4 - fill;
2124 + /* TODO, this may be a problem with fragmented skb */
2125 + skb_put(fp_skb(fp), fill);
2126 + }
2127 + f_ctl |= ep->f_ctl;
2128 + fh = fc_frame_header_get(fp);
2129 + hton24(fh->fh_f_ctl, f_ctl | fill);
2130 + fh->fh_seq_cnt = htons(sp->cnt++);
2131 +
2132 + if (unlikely(lp->tt.frame_send(lp, fp)))
2133 + goto err;
2134 +
2135 + spin_lock_bh(&ep->ex_lock);
2136 + if (timer_msec)
2137 + fc_exch_timer_set_locked(ep, timer_msec);
2138 + sp->f_ctl = f_ctl; /* save for possible abort */
2139 + ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
2140 + ep->fh_type = fh->fh_type; /* save for possible timeout handling */
2141 +
2142 + if (f_ctl & FC_FC_SEQ_INIT)
2143 + ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2144 + spin_unlock_bh(&ep->ex_lock);
2145 + return sp;
2146 +err:
2147 + fc_exch_done(sp);
2148 + return NULL;
2149 +}
2150 +EXPORT_SYMBOL(fc_exch_seq_send);
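
The fill logic above pads the payload to a 4-byte multiple and carries the pad count in the two low-order bits of f_ctl, which the receive path strips again via FC_FC_FILL() (see fc_exch_recv() below). Worked through for a 13-byte payload:

        u16 fill = fr_len(fp) & 3;      /* 13 & 3 = 1 byte past a boundary */
        if (fill) {
                fill = 4 - fill;        /* 3 pad bytes needed */
                skb_put(fp_skb(fp), fill);
        }
        hton24(fh->fh_f_ctl, f_ctl | fill);     /* pad count in f_ctl[1:0] */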
2151 +
2152 +/*
2153 + * Receive a frame
2154 + */
2155 +void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
2156 + struct fc_frame *fp)
2157 +{
2158 + struct fc_frame_header *fh = fc_frame_header_get(fp);
2159 + u32 f_ctl;
2160 +
2161 + if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
2162 + FC_DBG("fc_lport or EM is not allocated and configured");
2163 + fc_frame_free(fp);
2164 + return;
2165 + }
2166 +
2167 + /*
2168 + * If frame is marked invalid, just drop it.
2169 + */
2170 + f_ctl = ntoh24(fh->fh_f_ctl);
2171 + switch (fr_eof(fp)) {
2172 + case FC_EOF_T:
2173 + if (f_ctl & FC_FC_END_SEQ)
2174 + skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
2175 + /* fall through */
2176 + case FC_EOF_N:
2177 + if (fh->fh_type == FC_TYPE_BLS)
2178 + fc_exch_recv_bls(mp, fp);
2179 + else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2180 + FC_FC_EX_CTX)
2181 + fc_exch_recv_seq_resp(mp, fp);
2182 + else if (f_ctl & FC_FC_SEQ_CTX)
2183 + fc_exch_recv_resp(mp, fp);
2184 + else
2185 + fc_exch_recv_req(lp, mp, fp);
2186 + break;
2187 + default:
2188 + FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
2189 + fc_frame_free(fp);
2190 + break;
2191 + }
2192 +}
2193 +EXPORT_SYMBOL(fc_exch_recv);
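
fc_exch_recv() dispatches first on frame type (BLS) and then on the exchange/sequence context bits of f_ctl. The routing, restated:

        BLS frame                        ->  fc_exch_recv_bls()
        EX_CTX set, SEQ_CTX clear        ->  fc_exch_recv_seq_resp()
        SEQ_CTX set                      ->  fc_exch_recv_resp()
        both clear (new request to us)   ->  fc_exch_recv_req()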
2194 +
2195 +int fc_exch_init(struct fc_lport *lp)
2196 +{
2197 + if (!lp->tt.exch_get) {
2198 + /*
2199 + * exch_put() should be NULL if
2200 + * exch_get() is NULL
2201 + */
2202 + WARN_ON(lp->tt.exch_put);
2203 + lp->tt.exch_get = fc_exch_get;
2204 + }
2205 +
2206 + if (!lp->tt.seq_start_next)
2207 + lp->tt.seq_start_next = fc_seq_start_next;
2208 +
2209 + if (!lp->tt.exch_seq_send)
2210 + lp->tt.exch_seq_send = fc_exch_seq_send;
2211 +
2212 + if (!lp->tt.seq_send)
2213 + lp->tt.seq_send = fc_seq_send;
2214 +
2215 + if (!lp->tt.seq_els_rsp_send)
2216 + lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
2217 +
2218 + if (!lp->tt.exch_done)
2219 + lp->tt.exch_done = fc_exch_done;
2220 +
2221 + if (!lp->tt.exch_mgr_reset)
2222 + lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
2223 +
2224 + if (!lp->tt.seq_exch_abort)
2225 + lp->tt.seq_exch_abort = fc_seq_exch_abort;
2226 +
2227 + if (!lp->tt.seq_get_xids)
2228 + lp->tt.seq_get_xids = fc_seq_get_xids;
2229 +
2230 + if (!lp->tt.seq_set_rec_data)
2231 + lp->tt.seq_set_rec_data = fc_seq_set_rec_data;
2232 + return 0;
2233 +}
2234 +EXPORT_SYMBOL(fc_exch_init);
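
fc_exch_init() follows the libfc transport-template convention: any lp->tt hook the low-level driver left NULL is filled in with the libfc default, so a driver overrides only what it needs. Schematically (my_exch_get/my_exch_put are illustrative driver functions, not part of this patch):

        /* LLD setup, before calling fc_exch_init() */
        lport->tt.exch_get = my_exch_get;       /* driver override */
        lport->tt.exch_put = my_exch_put;       /* must pair with exch_get */
        fc_exch_init(lport);    /* fills the remaining hooks with defaults */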
2235 +
2236 +int fc_setup_exch_mgr(void)
2237 +{
2238 + fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2239 + 0, SLAB_HWCACHE_ALIGN, NULL);
2240 + if (!fc_em_cachep)
2241 + return -ENOMEM;
2242 + return 0;
2243 +}
2244 +
2245 +void fc_destroy_exch_mgr(void)
2246 +{
2247 + kmem_cache_destroy(fc_em_cachep);
2248 +}
2249 diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
2250 new file mode 100644
2251 index 0000000..9402eba
2252 --- /dev/null
2253 +++ b/drivers/scsi/libfc/fc_fcp.c
2254 @@ -0,0 +1,2174 @@
2255 +/*
2256 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
2257 + * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
2258 + * Copyright(c) 2008 Mike Christie
2259 + *
2260 + * This program is free software; you can redistribute it and/or modify it
2261 + * under the terms and conditions of the GNU General Public License,
2262 + * version 2, as published by the Free Software Foundation.
2263 + *
2264 + * This program is distributed in the hope it will be useful, but WITHOUT
2265 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2266 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2267 + * more details.
2268 + *
2269 + * You should have received a copy of the GNU General Public License along with
2270 + * this program; if not, write to the Free Software Foundation, Inc.,
2271 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2272 + *
2273 + * Maintained at www.Open-FCoE.org
2274 + */
2275 +
2276 +#include <linux/module.h>
2277 +#include <linux/kernel.h>
2278 +#include <linux/types.h>
2279 +#include <linux/spinlock.h>
2280 +#include <linux/scatterlist.h>
2281 +#include <linux/err.h>
2282 +#include <linux/crc32.h>
2283 +#include <linux/delay.h>
2284 +
2285 +#include <scsi/scsi_tcq.h>
2286 +#include <scsi/scsi.h>
2287 +#include <scsi/scsi_host.h>
2288 +#include <scsi/scsi_device.h>
2289 +#include <scsi/scsi_cmnd.h>
2290 +
2291 +#include <scsi/fc/fc_fc2.h>
2292 +
2293 +#include <scsi/libfc/libfc.h>
2294 +
2295 +int fc_fcp_debug;
2296 +static struct kmem_cache *scsi_pkt_cachep;
2297 +
2298 +/* SRB state definitions */
2299 +#define FC_SRB_FREE 0 /* cmd is free */
2300 +#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
2301 +#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
2302 +#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
2303 +#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
2304 +#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
2305 +#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
2306 +#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
2307 +#define FC_SRB_NOMEM (1 << 7) /* dropped due to out of memory */
2308 +
2309 +#define FC_SRB_READ (1 << 1)
2310 +#define FC_SRB_WRITE (1 << 0)
2311 +
2312 +/*
2313 + * scsi request structure, one for each scsi request
2314 + */
2315 +struct fc_fcp_pkt {
2316 + /*
2317 + * housekeeping stuff
2318 + */
2319 + struct fc_lport *lp; /* handle to hba struct */
2320 + u16 state; /* scsi_pkt state */
2321 + u16 tgt_flags; /* target flags */
2322 + atomic_t ref_cnt; /* only used by REC ELS */
2323 + spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
2324 + * if both are held at the same time */
2325 + /*
2326 + * SCSI I/O related stuff
2327 + */
2328 + struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
2329 + * under host lock */
2330 + struct list_head list; /* tracks queued commands. access under
2331 + * host lock */
2332 + /*
2333 + * timeout related stuff
2334 + */
2335 + struct timer_list timer; /* command timer */
2336 + struct completion tm_done;
2337 + int wait_for_comp;
2338 + unsigned long start_time; /* start jiffie */
2339 + unsigned long end_time; /* end jiffie */
2340 + unsigned long last_pkt_time; /* jiffies of last frame received */
2341 +
2342 + /*
2343 + * scsi cmd and data transfer information
2344 + */
2345 + u32 data_len;
2346 + /*
2347 + * transport related variables
2348 + */
2349 + struct fcp_cmnd cdb_cmd;
2350 + size_t xfer_len;
2351 + u32 xfer_contig_end; /* offset of end of contiguous xfer */
2352 + u16 max_payload; /* max payload size in bytes */
2353 +
2354 + /*
2355 + * scsi/fcp return status
2356 + */
2357 + u32 io_status; /* SCSI result upper 24 bits */
2358 + u8 cdb_status;
2359 + u8 status_code; /* FCP I/O status */
2360 + /* bit 3: underrun, bit 2: overrun */
2361 + u8 scsi_comp_flags;
2362 + u32 req_flags; /* bit 0: read, bit 1: write */
2363 + u32 scsi_resid; /* residual length */
2364 +
2365 + struct fc_rport *rport; /* remote port pointer */
2366 + struct fc_seq *seq_ptr; /* current sequence pointer */
2367 + /*
2368 + * Error Processing
2369 + */
2370 + u8 recov_retry; /* count of recovery retries */
2371 + struct fc_seq *recov_seq; /* sequence for REC or SRR */
2372 +};
2373 +
2374 +/*
2375 + * The SCp.ptr should be tested and set under the host lock. NULL indicates
2376 + * that the command has been returned to the scsi layer.
2377 + */
2378 +#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
2379 +#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
2380 +#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
2381 +#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
2382 +#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
2383 +
2384 +struct fc_fcp_internal {
2385 + mempool_t *scsi_pkt_pool;
2386 + struct list_head scsi_pkt_queue;
2387 + u8 throttled;
2388 +};
2389 +
2390 +#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
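
The CMD_SP() macro family keeps libfc's per-command state in the scratch fields scsi-ml reserves for the LLD (scsi_cmnd->SCp). The round trip, under the host lock as the comment above requires:

        /* queuecommand side: attach the fcp packet to the scsi command */
        sc_cmd->SCp.ptr = (char *)fsp;          /* CMD_SP(sc_cmd) now returns fsp */

        /* completion side: recover and detach it */
        struct fc_fcp_pkt *fsp = CMD_SP(sc_cmd);
        sc_cmd->SCp.ptr = NULL;                 /* command is back with scsi-ml */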
2391 +
2392 +/*
2393 + * function prototypes
2394 + * FC scsi I/O related functions
2395 + */
2396 +static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
2397 +static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
2398 +static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
2399 +static void fc_fcp_complete(struct fc_fcp_pkt *);
2400 +static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
2401 +static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
2402 +static void fc_timeout_error(struct fc_fcp_pkt *);
2403 +static int fc_fcp_send_cmd(struct fc_fcp_pkt *);
2404 +static void fc_fcp_timeout(unsigned long data);
2405 +static void fc_fcp_rec(struct fc_fcp_pkt *);
2406 +static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
2407 +static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
2408 +static void fc_io_compl(struct fc_fcp_pkt *);
2409 +
2410 +static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
2411 +static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
2412 +static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
2413 +
2414 +/*
2415 + * command status codes
2416 + */
2417 +#define FC_COMPLETE 0
2418 +#define FC_CMD_ABORTED 1
2419 +#define FC_CMD_RESET 2
2420 +#define FC_CMD_PLOGO 3
2421 +#define FC_SNS_RCV 4
2422 +#define FC_TRANS_ERR 5
2423 +#define FC_DATA_OVRRUN 6
2424 +#define FC_DATA_UNDRUN 7
2425 +#define FC_ERROR 8
2426 +#define FC_HRD_ERROR 9
2427 +#define FC_CMD_TIME_OUT 10
2428 +
2429 +/*
2430 + * Error recovery timeout values.
2431 + */
2432 +#define FC_SCSI_ER_TIMEOUT (10 * HZ)
2433 +#define FC_SCSI_TM_TOV (10 * HZ)
2434 +#define FC_SCSI_REC_TOV (2 * HZ)
2435 +#define FC_HOST_RESET_TIMEOUT (30 * HZ)
2436 +
2437 +#define FC_MAX_ERROR_CNT 5
2438 +#define FC_MAX_RECOV_RETRY 3
2439 +
2440 +#define FC_FCP_DFLT_QUEUE_DEPTH 32
2441 +
2442 +/**
2443 + * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
2444 + * @lp: fc lport struct
2445 + * @gfp: gfp flags for allocation
2446 + *
2447 + * This is used by upper layer scsi driver.
2448 + * Return Value : scsi_pkt structure or null on allocation failure.
2449 + * Context : call from process context. no locking required.
2450 + */
2451 +static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
2452 +{
2453 + struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2454 + struct fc_fcp_pkt *sp;
2455 +
2456 + sp = mempool_alloc(si->scsi_pkt_pool, gfp);
2457 + if (sp) {
2458 + memset(sp, 0, sizeof(*sp));
2459 + sp->lp = lp;
2460 + atomic_set(&sp->ref_cnt, 1);
2461 + init_timer(&sp->timer);
2462 + INIT_LIST_HEAD(&sp->list);
2463 + }
2464 + return sp;
2465 +}
2466 +
2467 +/**
2468 + * fc_fcp_pkt_release - release hold on scsi_pkt packet
2469 + * @sp: fcp packet struct
2470 + *
2471 + * This is used by upper layer scsi driver.
2472 + * Context : call from process and interrupt context.
2473 + * no locking required
2474 + */
2475 +static void fc_fcp_pkt_release(struct fc_fcp_pkt *sp)
2476 +{
2477 + if (atomic_dec_and_test(&sp->ref_cnt)) {
2478 + struct fc_fcp_internal *si = fc_get_scsi_internal(sp->lp);
2479 +
2480 + mempool_free(sp, si->scsi_pkt_pool);
2481 + }
2482 +}
2483 +
2484 +static void fc_fcp_pkt_hold(struct fc_fcp_pkt *sp)
2485 +{
2486 + atomic_inc(&sp->ref_cnt);
2487 +}
2488 +
2489 +/**
2490 + * fc_fcp_lock_pkt - lock a packet and get a ref to it.
2491 + * @fsp: fcp packet
2492 + *
2493 + * We should only return error if we return a command to scsi-ml before
2494 + * getting a response. This can happen when we send an abort but do
2495 + * not wait for the response, and the abort and the command end up
2496 + * passing each other on the wire/network-layer.
2497 + *
2498 + * Note: this function locks the packet and gets a reference to allow
2499 + * callers to call the completion function while the lock is held and
2500 + * not have to worry about the packet's refcount.
2501 + *
2502 + * TODO: Maybe we should just have callers grab/release the lock and
2503 + * have a function that they call to verify the fsp and grab a ref if
2504 + * needed.
2505 + */
2506 +static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
2507 +{
2508 + spin_lock_bh(&fsp->scsi_pkt_lock);
2509 + if (!fsp->cmd) {
2510 + spin_unlock_bh(&fsp->scsi_pkt_lock);
2511 + FC_DBG("Invalid scsi cmd pointer on fcp packet.\n");
2512 + return -EINVAL;
2513 + }
2514 +
2515 + fc_fcp_pkt_hold(fsp);
2516 + return 0;
2517 +}
2518 +
2519 +static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
2520 +{
2521 + spin_unlock_bh(&fsp->scsi_pkt_lock);
2522 + fc_fcp_pkt_release(fsp);
2523 +}
2524 +
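Together, fc_fcp_lock_pkt()/fc_fcp_unlock_pkt() give callers a combined validate-lock-and-hold operation, so completion code can run under the lock without the packet being freed underneath it. The caller shape used throughout this file:

        if (fc_fcp_lock_pkt(fsp))       /* fails once cmd is back with scsi-ml */
                return;
        /* ... inspect or complete fsp under scsi_pkt_lock ... */
        fc_fcp_unlock_pkt(fsp);         /* unlock, then drop the extra hold */
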
2525 +static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
2526 +{
2527 + if (!(fsp->state & FC_SRB_COMPL))
2528 + mod_timer(&fsp->timer, jiffies + delay);
2529 +}
2530 +
2531 +static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
2532 +{
2533 + if (!fsp->seq_ptr)
2534 + return -EINVAL;
2535 +
2536 + fsp->state |= FC_SRB_ABORT_PENDING;
2537 + return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
2538 +}
2539 +
2540 +/*
2541 + * Retry command.
2542 + * An abort isn't needed.
2543 + */
2544 +static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
2545 +{
2546 + if (fsp->seq_ptr) {
2547 + fsp->lp->tt.exch_done(fsp->seq_ptr);
2548 + fsp->seq_ptr = NULL;
2549 + }
2550 +
2551 + fsp->state &= ~FC_SRB_ABORT_PENDING;
2552 + fsp->io_status = SUGGEST_RETRY << 24;
2553 + fsp->status_code = FC_ERROR;
2554 + fc_fcp_complete(fsp);
2555 +}
2556 +
2557 +/*
2558 + * Receive SCSI data from target.
2559 + * Called after receiving solicited data.
2560 + */
2561 +static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
2562 +{
2563 + struct scsi_cmnd *sc = fsp->cmd;
2564 + struct fc_lport *lp = fsp->lp;
2565 + struct fcoe_dev_stats *sp;
2566 + struct fc_frame_header *fh;
2567 + size_t start_offset;
2568 + size_t offset;
2569 + u32 crc;
2570 + u32 copy_len = 0;
2571 + size_t len;
2572 + void *buf;
2573 + struct scatterlist *sg;
2574 + size_t remaining;
2575 +
2576 + fh = fc_frame_header_get(fp);
2577 + offset = ntohl(fh->fh_parm_offset);
2578 + start_offset = offset;
2579 + len = fr_len(fp) - sizeof(*fh);
2580 + buf = fc_frame_payload_get(fp, 0);
2581 +
2582 + if (offset + len > fsp->data_len) {
2583 + /*
2584 + * this should never happen
2585 + */
2586 + if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
2587 + fc_frame_crc_check(fp))
2588 + goto crc_err;
2589 + if (fc_fcp_debug) {
2590 + FC_DBG("data received past end. "
2591 + "len %zx offset %zx "
2592 + "data_len %x\n", len, offset, fsp->data_len);
2593 + }
2594 + fc_fcp_retry_cmd(fsp);
2595 + return;
2596 + }
2597 + if (offset != fsp->xfer_len)
2598 + fsp->state |= FC_SRB_DISCONTIG;
2599 +
2600 + crc = 0;
2601 + if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
2602 + crc = crc32(~0, (u8 *) fh, sizeof(*fh));
2603 +
2604 + sg = scsi_sglist(sc);
2605 + remaining = len;
2606 +
2607 + while (remaining > 0 && sg) {
2608 + size_t off;
2609 + void *page_addr;
2610 + size_t sg_bytes;
2611 +
2612 + if (offset >= sg->length) {
2613 + offset -= sg->length;
2614 + sg = sg_next(sg);
2615 + continue;
2616 + }
2617 + sg_bytes = min(remaining, sg->length - offset);
2618 +
2619 + /*
2620 + * The scatterlist item may be bigger than PAGE_SIZE,
2621 + * but we are limited to mapping PAGE_SIZE at a time.
2622 + */
2623 + off = offset + sg->offset;
2624 + sg_bytes = min(sg_bytes, (size_t)
2625 + (PAGE_SIZE - (off & ~PAGE_MASK)));
2626 + page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
2627 + KM_SOFTIRQ0);
2628 + if (!page_addr)
2629 + break; /* XXX panic? */
2630 +
2631 + if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
2632 + crc = crc32(crc, buf, sg_bytes);
2633 + memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
2634 + sg_bytes);
2635 +
2636 + kunmap_atomic(page_addr, KM_SOFTIRQ0);
2637 + buf += sg_bytes;
2638 + offset += sg_bytes;
2639 + remaining -= sg_bytes;
2640 + copy_len += sg_bytes;
2641 + }
2642 +
2643 + if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
2644 + buf = fc_frame_payload_get(fp, 0);
2645 + if (len % 4) {
2646 + crc = crc32(crc, buf + len, 4 - (len % 4));
2647 + len += 4 - (len % 4);
2648 + }
2649 +
2650 + if (~crc != le32_to_cpu(*(__le32 *)(buf + len))) {
2651 +crc_err:
2652 + sp = lp->dev_stats[smp_processor_id()];
2653 + sp->ErrorFrames++;
2654 + if (sp->InvalidCRCCount++ < 5)
2655 + FC_DBG("CRC error on data frame\n");
2656 + /*
2657 + * Assume the frame is total garbage.
2658 + * We may have copied it over the good part
2659 + * of the buffer.
2660 + * If so, we need to retry the entire operation.
2661 + * Otherwise, ignore it.
2662 + */
2663 + if (fsp->state & FC_SRB_DISCONTIG)
2664 + fc_fcp_retry_cmd(fsp);
2665 + return;
2666 + }
2667 + }
2668 +
2669 + if (fsp->xfer_contig_end == start_offset)
2670 + fsp->xfer_contig_end += copy_len;
2671 + fsp->xfer_len += copy_len;
2672 +
2673 + /*
2674 + * In the very rare event that this data arrived after the response
2675 + * and completes the transfer, call the completion handler.
2676 + */
2677 + if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
2678 + fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
2679 + fc_fcp_complete(fsp);
2680 +}
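
When the LLD leaves the FC CRC unverified (FCPHF_CRC_UNCHECKED), the receive path accumulates crc32 over the header and payload, extends it across the 0-3 fill bytes so the covered length is again a 4-byte multiple, and compares the complement with the trailing CRC word. Condensed from the code above:

        u32 crc = crc32(~0, (u8 *)fh, sizeof(*fh));     /* seed over header */
        crc = crc32(crc, buf, len);                     /* payload */
        if (len % 4) {                                  /* cover fill bytes */
                crc = crc32(crc, buf + len, 4 - (len % 4));
                len += 4 - (len % 4);
        }
        /* the frame carries ~CRC, little-endian, right after the padding */
        if (~crc != le32_to_cpu(*(__le32 *)(buf + len)))
                goto crc_err;                           /* frame is garbage */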
2681 +
2682 +/*
2683 + * Send SCSI data to target.
2684 + * Called after receiving a Transfer Ready data descriptor.
2685 + */
2686 +static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
2687 + size_t offset, size_t len,
2688 + struct fc_frame *oldfp, int sg_supp)
2689 +{
2690 + struct scsi_cmnd *sc;
2691 + struct scatterlist *sg;
2692 + struct fc_frame *fp = NULL;
2693 + struct fc_lport *lp = fsp->lp;
2694 + size_t remaining;
2695 + size_t mfs;
2696 + size_t tlen;
2697 + size_t sg_bytes;
2698 + size_t frame_offset;
2699 + int error;
2700 + void *data = NULL;
2701 + void *page_addr;
2702 + int using_sg = sg_supp;
2703 + u32 f_ctl;
2704 +
2705 + if (unlikely(offset + len > fsp->data_len)) {
2706 + /*
2707 + * this should never happen
2708 + */
2709 + if (fc_fcp_debug) {
2710 + FC_DBG("xfer-ready past end. len %zx offset %zx\n",
2711 + len, offset);
2712 + }
2713 + fc_fcp_send_abort(fsp);
2714 + return 0;
2715 + } else if (offset != fsp->xfer_len) {
2716 + /*
2717 + * Out of Order Data Request - no problem, but unexpected.
2718 + */
2719 + if (fc_fcp_debug) {
2720 + FC_DBG("xfer-ready non-contiguous. "
2721 + "len %zx offset %zx\n", len, offset);
2722 + }
2723 + }
2724 + mfs = fsp->max_payload;
2725 + WARN_ON(mfs > FC_MAX_PAYLOAD);
2726 + WARN_ON(mfs < FC_MIN_MAX_PAYLOAD);
2727 + if (mfs > 512)
2728 + mfs &= ~(512 - 1); /* round down to block size */
2729 + WARN_ON(mfs < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
2730 + WARN_ON(len <= 0);
2731 + sc = fsp->cmd;
2732 +
2733 + remaining = len;
2734 + frame_offset = offset;
2735 + tlen = 0;
2736 + sp = lp->tt.seq_start_next(sp);
2737 + f_ctl = FC_FC_REL_OFF;
2738 + WARN_ON(!sp);
2739 +
2740 + /*
2741 + * If a get_page()/put_page() will fail, don't use sg lists
2742 + * in the fc_frame structure.
2743 + *
2744 + * The put_page() may be long after the I/O has completed
2745 + * in the case of FCoE, since the network driver does it
2746 + * via free_skb(). See the test in free_pages_check().
2747 + *
2748 + * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
2749 + */
2750 + if (using_sg) {
2751 + for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
2752 + if (page_count(sg_page(sg)) == 0 ||
2753 + (sg_page(sg)->flags & (1 << PG_lru |
2754 + 1 << PG_private |
2755 + 1 << PG_locked |
2756 + 1 << PG_active |
2757 + 1 << PG_slab |
2758 + 1 << PG_swapcache |
2759 + 1 << PG_writeback |
2760 + 1 << PG_reserved |
2761 + 1 << PG_buddy))) {
2762 + using_sg = 0;
2763 + break;
2764 + }
2765 + }
2766 + }
2767 + sg = scsi_sglist(sc);
2768 +
2769 + while (remaining > 0 && sg) {
2770 + if (offset >= sg->length) {
2771 + offset -= sg->length;
2772 + sg = sg_next(sg);
2773 + continue;
2774 + }
2775 + if (!fp) {
2776 + tlen = min(mfs, remaining);
2777 +
2778 + /*
2779 + * TODO. Temporary workaround. fc_seq_send() can't
2780 + * handle odd lengths in non-linear skbs.
2781 + * This will be the final fragment only.
2782 + */
2783 + if (tlen % 4)
2784 + using_sg = 0;
2785 + if (using_sg) {
2786 + fp = _fc_frame_alloc(lp, 0);
2787 + if (!fp)
2788 + return -ENOMEM;
2789 + } else {
2790 + fp = fc_frame_alloc(lp, tlen);
2791 + if (!fp)
2792 + return -ENOMEM;
2793 +
2794 + data = (void *)(fr_hdr(fp)) +
2795 + sizeof(struct fc_frame_header);
2796 + }
2797 + fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP);
2798 + fc_frame_set_offset(fp, frame_offset);
2799 + }
2800 + sg_bytes = min(tlen, sg->length - offset);
2801 + if (using_sg) {
2802 + WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
2803 + FC_FRAME_SG_LEN);
2804 + get_page(sg_page(sg));
2805 + skb_fill_page_desc(fp_skb(fp),
2806 + skb_shinfo(fp_skb(fp))->nr_frags,
2807 + sg_page(sg), sg->offset + offset,
2808 + sg_bytes);
2809 + fp_skb(fp)->data_len += sg_bytes;
2810 + fr_len(fp) += sg_bytes;
2811 + fp_skb(fp)->truesize += PAGE_SIZE;
2812 + } else {
2813 + size_t off = offset + sg->offset;
2814 +
2815 + /*
2816 + * The scatterlist item may be bigger than PAGE_SIZE,
2817 + * but we must not cross pages inside the kmap.
2818 + */
2819 + sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
2820 + (off & ~PAGE_MASK)));
2821 + page_addr = kmap_atomic(sg_page(sg) +
2822 + (off >> PAGE_SHIFT),
2823 + KM_SOFTIRQ0);
2824 + memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
2825 + sg_bytes);
2826 + kunmap_atomic(page_addr, KM_SOFTIRQ0);
2827 + data += sg_bytes;
2828 + }
2829 + offset += sg_bytes;
2830 + frame_offset += sg_bytes;
2831 + tlen -= sg_bytes;
2832 + remaining -= sg_bytes;
2833 +
2834 + if (remaining == 0) {
2835 + /*
2836 + * Send a request sequence with
2837 + * transfer sequence initiative.
2838 + */
2839 + f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
2840 + error = lp->tt.seq_send(lp, sp, fp, f_ctl);
2841 + } else if (tlen == 0) {
2842 + /*
2843 + * send this fragment of the sequence.
2844 + */
2845 + error = lp->tt.seq_send(lp, sp, fp, f_ctl);
2846 + } else {
2847 + continue;
2848 + }
2849 + fp = NULL;
2850 +
2851 + if (error) {
2852 + WARN_ON(1); /* send error should be rare */
2853 + fc_fcp_retry_cmd(fsp);
2854 + return 0;
2855 + }
2856 + }
2857 + fsp->xfer_len += len; /* premature count? */
2858 + return 0;
2859 +}
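
Both data paths clamp each copy so a single kmap_atomic() never crosses a page boundary, even when one scatterlist element spans several pages. The clamp in isolation (dest is an illustrative destination buffer):

        size_t off = offset + sg->offset;       /* absolute offset into the sg */
        sg_bytes = min(sg_bytes,
                       (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
        page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
                                KM_SOFTIRQ0);
        memcpy(dest, (char *)page_addr + (off & ~PAGE_MASK), sg_bytes);
        kunmap_atomic(page_addr, KM_SOFTIRQ0);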
2860 +
2861 +static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame_header *fh)
2862 +{
2863 + /*
2864 + * we will let the command timeout and scsi-ml escalate if
2865 + * the abort was rejected
2866 + */
2867 + if (fh->fh_r_ctl == FC_RCTL_BA_ACC) {
2868 + fsp->state |= FC_SRB_ABORTED;
2869 + fsp->state &= ~FC_SRB_ABORT_PENDING;
2870 +
2871 + if (fsp->wait_for_comp)
2872 + complete(&fsp->tm_done);
2873 + else
2874 + fc_fcp_complete(fsp);
2875 + }
2876 +}
2877 +
2878 +/*
2879 + * fc_fcp_reduce_can_queue - drop can_queue
2880 + * @lp: lport to drop queueing for
2881 + *
2882 + * If we are getting memory allocation failures, then we may
2883 + * be trying to execute too many commands. We let the running
2884 + * commands complete or timeout, then try again with a reduced
2885 + * can_queue. Eventually we will hit the point where we run
2886 + * on all reserved structs.
2887 + */
2888 +static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
2889 +{
2890 + struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2891 + unsigned long flags;
2892 + int can_queue;
2893 +
2894 + spin_lock_irqsave(lp->host->host_lock, flags);
2895 + if (si->throttled)
2896 + goto done;
2897 + si->throttled = 1;
2898 +
2899 + can_queue = lp->host->can_queue;
2900 + can_queue >>= 1;
2901 + if (!can_queue)
2902 + can_queue = 1;
2903 + lp->host->can_queue = can_queue;
2904 + shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
2905 + "Reducing can_queue to %d.\n", can_queue);
2906 +done:
2907 + spin_unlock_irqrestore(lp->host->host_lock, flags);
2908 +}
2909 +
2910 +/*
2911 + * exch mgr calls this routine to process scsi
2912 + * exchanges.
2913 + *
2914 + * Return : None
2915 + * Context : called from Soft IRQ context
2916 + * must not be called holding the list lock
2917 + */
2918 +static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
2919 +{
2920 + struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
2921 + struct fc_lport *lp;
2922 + struct fc_frame_header *fh;
2923 + struct fc_data_desc *dd;
2924 + u8 r_ctl;
2925 + int rc = 0;
2926 +
2927 + if (IS_ERR(fp))
2928 + goto errout;
2929 +
2930 + fh = fc_frame_header_get(fp);
2931 + r_ctl = fh->fh_r_ctl;
2932 + lp = fsp->lp;
2933 +
2934 + if (!(lp->state & LPORT_ST_READY))
2935 + goto out;
2936 + if (fc_fcp_lock_pkt(fsp))
2937 + goto out;
2938 + fsp->last_pkt_time = jiffies;
2939 +
2940 + if (fh->fh_type == FC_TYPE_BLS) {
2941 + fc_fcp_abts_resp(fsp, fh);
2942 + goto unlock;
2943 + }
2944 +
2945 + if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
2946 + goto unlock;
2947 +
2948 + if (r_ctl == FC_RCTL_DD_DATA_DESC) {
2949 + /*
2950 + * received XFER RDY from the target
2951 + * need to send data to the target
2952 + */
2953 + WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
2954 + dd = fc_frame_payload_get(fp, sizeof(*dd));
2955 + WARN_ON(!dd);
2956 +
2957 + rc = fc_fcp_send_data(fsp, sp,
2958 + (size_t) ntohl(dd->dd_offset),
2959 + (size_t) ntohl(dd->dd_len), fp,
2960 + lp->capabilities & TRANS_C_SG);
2961 + if (!rc)
2962 + lp->tt.seq_set_rec_data(sp, fsp->xfer_len);
2963 + else if (rc == -ENOMEM)
2964 + fsp->state |= FC_SRB_NOMEM;
2965 + } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
2966 + /*
2967 + * received a DATA frame
2968 + * next we will copy the data to the system buffer
2969 + */
2970 + WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
2971 + fc_fcp_recv_data(fsp, fp);
2972 + lp->tt.seq_set_rec_data(sp, fsp->xfer_contig_end);
2973 + } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
2974 + WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
2975 +
2976 + fc_fcp_resp(fsp, fp);
2977 + } else {
2978 + FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
2979 + }
2980 +unlock:
2981 + fc_fcp_unlock_pkt(fsp);
2982 +out:
2983 + fc_frame_free(fp);
2984 +errout:
2985 + if (IS_ERR(fp))
2986 + fc_fcp_error(fsp, fp);
2987 + else if (rc == -ENOMEM)
2988 + fc_fcp_reduce_can_queue(lp);
2989 +}
2990 +
2991 +static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
2992 +{
2993 + struct fc_frame_header *fh;
2994 + struct fcp_resp *fc_rp;
2995 + struct fcp_resp_ext *rp_ex;
2996 + struct fcp_resp_rsp_info *fc_rp_info;
2997 + u32 plen;
2998 + u32 expected_len;
2999 + u32 respl = 0;
3000 + u32 snsl = 0;
3001 + u8 flags = 0;
3002 +
3003 + plen = fr_len(fp);
3004 + fh = (struct fc_frame_header *)fr_hdr(fp);
3005 + if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
3006 + goto len_err;
3007 + plen -= sizeof(*fh);
3008 + fc_rp = (struct fcp_resp *)(fh + 1);
3009 + fsp->cdb_status = fc_rp->fr_status;
3010 + flags = fc_rp->fr_flags;
3011 + fsp->scsi_comp_flags = flags;
3012 + expected_len = fsp->data_len;
3013 +
3014 + if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
3015 + rp_ex = (void *)(fc_rp + 1);
3016 + if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
3017 + if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
3018 + goto len_err;
3019 + fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
3020 + if (flags & FCP_RSP_LEN_VAL) {
3021 + respl = ntohl(rp_ex->fr_rsp_len);
3022 + if (respl != sizeof(*fc_rp_info))
3023 + goto len_err;
3024 + if (fsp->wait_for_comp) {
3025 + /* Abuse cdb_status for rsp code */
3026 + fsp->cdb_status = fc_rp_info->rsp_code;
3027 + complete(&fsp->tm_done);
3028 + /*
3029 + * tmfs will not have any scsi cmd so
3030 + * exit here
3031 + */
3032 + return;
3033 + } else
3034 + goto err;
3035 + }
3036 + if (flags & FCP_SNS_LEN_VAL) {
3037 + snsl = ntohl(rp_ex->fr_sns_len);
3038 + if (snsl > SCSI_SENSE_BUFFERSIZE)
3039 + snsl = SCSI_SENSE_BUFFERSIZE;
3040 + memcpy(fsp->cmd->sense_buffer,
3041 + (char *)fc_rp_info + respl, snsl);
3042 + }
3043 + }
3044 + if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
3045 + if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
3046 + goto len_err;
3047 + if (flags & FCP_RESID_UNDER) {
3048 + fsp->scsi_resid = ntohl(rp_ex->fr_resid);
3049 + /*
3050 + * The cmnd->underflow is the minimum number of
3051 + * bytes that must be transferred for this
3052 + * command. Provided a sense condition is not
3053 + * present, make sure the actual amount
3054 + * transferred is at least the underflow value
3055 + * or fail.
3056 + */
3057 + if (!(flags & FCP_SNS_LEN_VAL) &&
3058 + (fc_rp->fr_status == 0) &&
3059 + (scsi_bufflen(fsp->cmd) -
3060 + fsp->scsi_resid) < fsp->cmd->underflow)
3061 + goto err;
3062 + expected_len -= fsp->scsi_resid;
3063 + } else {
3064 + fsp->status_code = FC_ERROR;
3065 + }
3066 + }
3067 + }
3068 + fsp->state |= FC_SRB_RCV_STATUS;
3069 +
3070 + /*
3071 + * Check for missing or extra data frames.
3072 + */
3073 + if (unlikely(fsp->xfer_len != expected_len)) {
3074 + if (fsp->xfer_len < expected_len) {
3075 + /*
3076 + * Some data may be queued locally.
3077 + * Wait at least one jiffy to see if it is delivered.
3078 + * If this expires without data, we may do SRR.
3079 + */
3080 + fc_fcp_timer_set(fsp, 2);
3081 + return;
3082 + }
3083 + fsp->status_code = FC_DATA_OVRRUN;
3084 + FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
3085 + "data len %x\n",
3086 + fsp->rport->port_id,
3087 + fsp->xfer_len, expected_len, fsp->data_len);
3088 + }
3089 + fc_fcp_complete(fsp);
3090 + return;
3091 +
3092 +len_err:
3093 + FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
3094 + flags, fr_len(fp), respl, snsl);
3095 +err:
3096 + fsp->status_code = FC_ERROR;
3097 + fc_fcp_complete(fsp);
3098 +}
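
fc_fcp_resp() walks the FCP_RSP payload in wire order: the fixed fcp_resp, the optional extension, fr_rsp_len bytes of response info (where the rsp_code lives), and finally fr_sns_len bytes of sense data. The pointer arithmetic, schematically (validity flags and length checks as in the code above):

        struct fcp_resp *fc_rp = (struct fcp_resp *)(fh + 1);
        struct fcp_resp_ext *rp_ex = (void *)(fc_rp + 1);       /* if LEN flags set */
        struct fcp_resp_rsp_info *info = (void *)(rp_ex + 1);   /* fr_rsp_len bytes */
        /* sense data follows the response-info area when both are present */
        char *sense = (char *)info + ntohl(rp_ex->fr_rsp_len);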
3099 +
3100 +/**
3101 + * fc_fcp_complete - complete processing of a fcp packet
3102 + * @fsp: fcp packet
3103 + *
3104 + * This function may sleep if a timer is pending. The packet lock must be
3105 + * held, and the host lock must not be held.
3106 + */
3107 +static void fc_fcp_complete(struct fc_fcp_pkt *fsp)
3108 +{
3109 + struct fc_lport *lp = fsp->lp;
3110 + struct fc_seq *sp;
3111 + u32 f_ctl;
3112 +
3113 + if (fsp->state & FC_SRB_ABORT_PENDING)
3114 + return;
3115 +
3116 + if (fsp->state & FC_SRB_ABORTED) {
3117 + if (!fsp->status_code)
3118 + fsp->status_code = FC_CMD_ABORTED;
3119 + } else {
3120 + /*
3121 + * Test for transport underrun, independent of response
3122 + * underrun status.
3123 + */
3124 + if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
3125 + (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
3126 + fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
3127 + fsp->status_code = FC_DATA_UNDRUN;
3128 + fsp->io_status = SUGGEST_RETRY << 24;
3129 + }
3130 + }
3131 +
3132 + sp = fsp->seq_ptr;
3133 + if (sp) {
3134 + fsp->seq_ptr = NULL;
3135 + if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
3136 + struct fc_frame *conf_frame;
3137 + struct fc_seq *csp;
3138 +
3139 + csp = lp->tt.seq_start_next(sp);
3140 + conf_frame = fc_frame_alloc(fsp->lp, 0);
3141 + if (conf_frame) {
3142 + fc_frame_setup(conf_frame,
3143 + FC_RCTL_DD_SOL_CTL, FC_TYPE_FCP);
3144 + f_ctl = FC_FC_SEQ_INIT;
3145 + f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
3146 + lp->tt.seq_send(lp, csp, conf_frame, f_ctl);
3147 + }
3148 + }
3149 + lp->tt.exch_done(sp);
3150 + }
3151 + fc_io_compl(fsp);
3152 +}
3153 +
3154 +static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
3155 +{
3156 + struct fc_lport *lp = fsp->lp;
3157 +
3158 + if (fsp->seq_ptr) {
3159 + lp->tt.exch_done(fsp->seq_ptr);
3160 + fsp->seq_ptr = NULL;
3161 + }
3162 + fsp->status_code = error;
3163 +}
3164 +
3165 +/**
3166 + * fc_fcp_cleanup_each_cmd - run fn on each active command
3167 + * @lp: logical port
3168 + * @id: target id
3169 + * @lun: lun
3170 + * @error: fsp status code
3171 + *
3172 + * If lun or id is -1, they are ignored.
3173 + */
3174 +static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
3175 + unsigned int lun, int error)
3176 +{
3177 + struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
3178 + struct fc_fcp_pkt *fsp;
3179 + struct scsi_cmnd *sc_cmd;
3180 + unsigned long flags;
3181 +
3182 + spin_lock_irqsave(lp->host->host_lock, flags);
3183 +restart:
3184 + list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
3185 + sc_cmd = fsp->cmd;
3186 + if (id != -1 && scmd_id(sc_cmd) != id)
3187 + continue;
3188 +
3189 + if (lun != -1 && sc_cmd->device->lun != lun)
3190 + continue;
3191 +
3192 + fc_fcp_pkt_hold(fsp);
3193 + spin_unlock_irqrestore(lp->host->host_lock, flags);
3194 +
3195 + if (!fc_fcp_lock_pkt(fsp)) {
3196 + fc_fcp_cleanup_cmd(fsp, error);
3197 + fc_io_compl(fsp);
3198 + fc_fcp_unlock_pkt(fsp);
3199 + }
3200 +
3201 + fc_fcp_pkt_release(fsp);
3202 + spin_lock_irqsave(lp->host->host_lock, flags);
3203 + /*
3204 + * while we dropped the lock multiple pkts could
3205 + * have been released, so we have to start over.
3206 + */
3207 + goto restart;
3208 + }
3209 + spin_unlock_irqrestore(lp->host->host_lock, flags);
3210 +}
3211 +
3212 +static void fc_fcp_abort_io(struct fc_lport *lp)
3213 +{
3214 + fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
3215 +}
3216 +
3217 +/**
3218 + * fc_fcp_pkt_send - send a fcp packet to the lower level.
3219 + * @lp: fc lport
3220 + * @fsp: fc packet.
3221 + *
3222 + * This is called by upper layer protocol.
3223 + * Return : zero for success and -1 for failure
3224 + * Context : called from queuecommand which can be called from process
3225 + * or scsi soft irq.
3226 + * Locks : called with the host lock and irqs disabled.
3227 + */
3228 +static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
3229 +{
3230 + struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
3231 + int rc;
3232 +
3233 + fsp->cmd->SCp.ptr = (char *)fsp;
3234 + fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
3235 + fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
3236 +
3237 + int_to_scsilun(fsp->cmd->device->lun,
3238 + (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
3239 + memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
3240 + list_add_tail(&fsp->list, &si->scsi_pkt_queue);
3241 +
3242 + spin_unlock_irq(lp->host->host_lock);
3243 + rc = fc_fcp_send_cmd(fsp);
3244 + spin_lock_irq(lp->host->host_lock);
3245 + if (rc)
3246 + list_del(&fsp->list);
3247 +
3248 + return rc;
3249 +}
3250 +
3251 +static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp)
3252 +{
3253 + struct fc_lport *lp;
3254 + struct fc_frame *fp;
3255 + struct fc_seq *sp;
3256 + struct fc_rport *rport;
3257 + struct fc_rport_libfc_priv *rp;
3258 + int rc = 0;
3259 +
3260 + if (fc_fcp_lock_pkt(fsp))
3261 + return 0;
3262 +
3263 + if (fsp->state & FC_SRB_COMPL)
3264 + goto unlock;
3265 +
3266 + lp = fsp->lp;
3267 + fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
3268 + if (!fp) {
3269 + rc = -1;
3270 + goto unlock;
3271 + }
3272 +
3273 + memcpy(fc_frame_payload_get(fp, sizeof(fsp->cdb_cmd)),
3274 + &fsp->cdb_cmd, sizeof(fsp->cdb_cmd));
3275 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
3276 + fc_frame_set_offset(fp, 0);
3277 + rport = fsp->rport;
3278 + fsp->max_payload = rport->maxframe_size;
3279 + rp = rport->dd_data;
3280 + sp = lp->tt.exch_seq_send(lp, fp,
3281 + fc_fcp_recv,
3282 + fsp, 0,
3283 + rp->local_port->fid,
3284 + rport->port_id,
3285 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3286 + if (!sp) {
3287 + fc_frame_free(fp);
3288 + rc = -1;
3289 + goto unlock;
3290 + }
3291 + fsp->seq_ptr = sp;
3292 +
3293 + setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
3294 + fc_fcp_timer_set(fsp,
3295 + (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
3296 + FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
3297 +unlock:
3298 + fc_fcp_unlock_pkt(fsp);
3299 + return rc;
3300 +}
3301 +
3302 +/*
3303 + * transport error handler
3304 + */
3305 +static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
3306 +{
3307 + int error = PTR_ERR(fp);
3308 +
3309 + if (fc_fcp_lock_pkt(fsp))
3310 + return;
3311 +
3312 + switch (error) {
3313 + case -FC_EX_CLOSED:
3314 + fc_fcp_retry_cmd(fsp);
3315 + goto unlock;
3316 + default:
3317 + FC_DBG("unknown error %ld\n", PTR_ERR(fp));
3318 + }
3319 + /*
3320 + * clear abort pending, because the lower layer
3321 + * decided to force completion.
3322 + */
3323 + fsp->state &= ~FC_SRB_ABORT_PENDING;
3324 + fsp->status_code = FC_CMD_PLOGO;
3325 + fc_fcp_complete(fsp);
3326 +unlock:
3327 + fc_fcp_unlock_pkt(fsp);
3328 +}
3329 +
3330 +/*
3331 + * Scsi abort handler - sends an abort
3332 + * and then waits for abort completion
3333 + */
3334 +static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
3335 +{
3336 + int rc = FAILED;
3337 +
3338 + if (fc_fcp_send_abort(fsp))
3339 + return FAILED;
3340 +
3341 + init_completion(&fsp->tm_done);
3342 + fsp->wait_for_comp = 1;
3343 +
3344 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3345 + rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
3346 + spin_lock_bh(&fsp->scsi_pkt_lock);
3347 + fsp->wait_for_comp = 0;
3348 +
3349 + if (!rc) {
3350 + FC_DBG("target abort cmd failed\n");
3351 + rc = FAILED;
3352 + } else if (fsp->state & FC_SRB_ABORTED) {
3353 + FC_DBG("target abort cmd passed\n");
3354 + rc = SUCCESS;
3355 + fc_fcp_complete(fsp);
3356 + }
3357 +
3358 + return rc;
3359 +}
3360 +
3361 +/*
3362 + * Retry LUN reset after resource allocation failed.
3363 + */
3364 +static void fc_lun_reset_send(unsigned long data)
3365 +{
3366 + struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
3367 + const size_t len = sizeof(fsp->cdb_cmd);
3368 + struct fc_lport *lp = fsp->lp;
3369 + struct fc_frame *fp;
3370 + struct fc_seq *sp;
3371 + struct fc_rport *rport;
3372 + struct fc_rport_libfc_priv *rp;
3373 +
3374 + spin_lock_bh(&fsp->scsi_pkt_lock);
3375 + if (fsp->state & FC_SRB_COMPL)
3376 + goto unlock;
3377 +
3378 + fp = fc_frame_alloc(lp, len);
3379 + if (!fp)
3380 + goto retry;
3381 + memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
3382 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
3383 + fc_frame_set_offset(fp, 0);
3384 + rport = fsp->rport;
3385 + rp = rport->dd_data;
3386 + sp = lp->tt.exch_seq_send(lp, fp,
3387 + fc_tm_done,
3388 + fsp, 0,
3389 + rp->local_port->fid,
3390 + rport->port_id,
3391 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3392 +
3393 + if (sp) {
3394 + fsp->seq_ptr = sp;
3395 + goto unlock;
3396 + }
3397 + /*
3398 + * Exchange or frame allocation failed. Set timer and retry.
3399 + */
3400 + fc_frame_free(fp);
3401 +retry:
3402 + setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
3403 + fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
3404 +unlock:
3405 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3406 +}
3407 +
3408 +/*
3409 + * Scsi device reset handler - sends a LUN RESET to the device
3410 + * and waits for the reset reply
3411 + */
3412 +static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
3413 + unsigned int id, unsigned int lun)
3414 +{
3415 + int rc;
3416 +
3417 + fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
3418 + fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
3419 + int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
3420 +
3421 + fsp->wait_for_comp = 1;
3422 + init_completion(&fsp->tm_done);
3423 +
3424 + fc_lun_reset_send((unsigned long)fsp);
3425 +
3426 + /*
3427 + * wait for completion of reset
3428 + * after that make sure all commands are terminated
3429 + */
3430 + rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
3431 +
3432 + spin_lock_bh(&fsp->scsi_pkt_lock);
3433 + fsp->state |= FC_SRB_COMPL;
3434 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3435 +
3436 + del_timer_sync(&fsp->timer);
3437 +
3438 + spin_lock_bh(&fsp->scsi_pkt_lock);
3439 + if (fsp->seq_ptr) {
3440 + /* TODO:
3441 + * if the exch resp function is running and trying to grab
3442 + * the scsi_pkt_lock, this could free the exch from under
3443 + * it and it could allow the fsp to be freed from under
3444 + * fc_tm_done.
3445 + */
3446 + lp->tt.exch_done(fsp->seq_ptr);
3447 + fsp->seq_ptr = NULL;
3448 + }
3449 + fsp->wait_for_comp = 0;
3450 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3451 +
3452 + if (!rc) {
3453 + FC_DBG("lun reset failed\n");
3454 + return FAILED;
3455 + }
3456 +
3457 + /* cdb_status holds the tmf's rsp code */
3458 + if (fsp->cdb_status != FCP_TMF_CMPL)
3459 + return FAILED;
3460 +
3461 + FC_DBG("lun reset to lun %u completed\n", lun);
3462 + fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
3463 + return SUCCESS;
3464 +}
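
fc_lun_reset() shows the synchronous TMF pattern used here: fire the request, sleep on a completion with a timeout, then tear down the timer and exchange regardless of outcome before inspecting the rsp code (which fc_fcp_resp() stashed in cdb_status). In outline:

        fsp->wait_for_comp = 1;
        init_completion(&fsp->tm_done);
        fc_lun_reset_send((unsigned long)fsp);  /* retries itself via timer */
        rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
        /* ... tear down timer and exchange under scsi_pkt_lock ... */
        if (!rc || fsp->cdb_status != FCP_TMF_CMPL)
                return FAILED;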
3465 +
3466 +/*
3467 + * Task Management response handler
3468 + */
3469 +static void fc_tm_done(struct fc_seq *sp, struct fc_frame *fp, void *arg)
3470 +{
3471 + struct fc_fcp_pkt *fsp = arg;
3472 + struct fc_frame_header *fh;
3473 +
3474 + spin_lock_bh(&fsp->scsi_pkt_lock);
3475 + if (IS_ERR(fp)) {
3476 + /*
3477 + * If there is an error just let it time out or wait
3478 + * for the TMF to be aborted if it timed out.
3479 + *
3480 + * scsi-eh will escalate when either happens.
3481 + */
3482 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3483 + return;
3484 + }
3485 +
3486 + /*
3487 + * raced with eh timeout handler.
3488 + *
3489 + * TODO: If this happens we could be freeing the fsp right now and
3490 + * would oops. Next patches will fix this race.
3491 + */
3492 + if ((fsp->state & FC_SRB_COMPL) || !fsp->seq_ptr ||
3493 + !fsp->wait_for_comp) {
3494 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3495 + return;
3496 + }
3497 +
3498 + fh = fc_frame_header_get(fp);
3499 + if (fh->fh_type != FC_TYPE_BLS)
3500 + fc_fcp_resp(fsp, fp);
3501 + fsp->seq_ptr = NULL;
3502 + fsp->lp->tt.exch_done(sp);
3503 + fc_frame_free(fp);
3504 + spin_unlock_bh(&fsp->scsi_pkt_lock);
3505 +}
3506 +
3507 +static void fc_fcp_cleanup(struct fc_lport *lp)
3508 +{
3509 + fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
3510 +}
3511 +
3512 +/*
3513 + * fc_fcp_timeout: called by OS timer function.
3514 + *
3515 + * The timer has been inactivated and must be reactivated if desired
3516 + * using fc_fcp_timer_set().
3517 + *
3518 + * Algorithm:
3519 + *
3520 + * If REC is supported, just issue it, and return. The REC exchange will
3521 + * complete or time out, and recovery can continue at that point.
3522 + *
3523 + * Otherwise, if the response has been received without all the data,
3524 + * it has been ER_TIMEOUT since the response was received.
3525 + *
3526 + * If the response has not been received,
3527 + * we see if data was received recently. If it has been, we continue waiting,
3528 + * otherwise, we abort the command.
3529 + */
3530 +static void fc_fcp_timeout(unsigned long data)
3531 +{
3532 + struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
3533 + struct fc_rport *rport = fsp->rport;
3534 + struct fc_rport_libfc_priv *rp = rport->dd_data;
3535 +
3536 + if (fc_fcp_lock_pkt(fsp))
3537 + return;
3538 +
3539 + if (fsp->state & FC_SRB_COMPL)
3540 + goto unlock;
3541 + fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
3542 +
3543 + if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
3544 + fc_fcp_rec(fsp);
3545 + /* TODO: change this to time_before/after */
3546 + else if (jiffies - fsp->last_pkt_time < FC_SCSI_ER_TIMEOUT / 2)
3547 + fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
3548 + else if (fsp->state & FC_SRB_RCV_STATUS)
3549 + fc_fcp_complete(fsp);
3550 + else
3551 + fc_timeout_error(fsp);
3552 +
3553 + fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
3554 +unlock:
3555 + fc_fcp_unlock_pkt(fsp);
3556 +}
3557 +
3558 +/*
3559 + * Send a REC ELS request
3560 + */
3561 +static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
3562 +{
3563 + struct fc_lport *lp;
3564 + struct fc_seq *sp;
3565 + struct fc_frame *fp;
3566 + struct fc_els_rec *rec;
3567 + struct fc_rport *rport;
3568 + struct fc_rport_libfc_priv *rp;
3569 + u16 ox_id;
3570 + u16 rx_id;
3571 +
3572 + lp = fsp->lp;
3573 + rport = fsp->rport;
3574 + rp = rport->dd_data;
3575 + sp = fsp->seq_ptr;
3576 + if (!sp || rp->rp_state != RPORT_ST_READY) {
3577 + fsp->status_code = FC_HRD_ERROR;
3578 + fsp->io_status = SUGGEST_RETRY << 24;
3579 + fc_fcp_complete(fsp);
3580 + return;
3581 + }
3582 + lp->tt.seq_get_xids(sp, &ox_id, &rx_id);
3583 + fp = fc_frame_alloc(lp, sizeof(*rec));
3584 + if (!fp)
3585 + goto retry;
3586 +
3587 + rec = fc_frame_payload_get(fp, sizeof(*rec));
3588 + memset(rec, 0, sizeof(*rec));
3589 + rec->rec_cmd = ELS_REC;
3590 + hton24(rec->rec_s_id, lp->fid);
3591 + rec->rec_ox_id = htons(ox_id);
3592 + rec->rec_rx_id = htons(rx_id);
3593 +
3594 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
3595 + fc_frame_set_offset(fp, 0);
3596 + sp = lp->tt.exch_seq_send(lp, fp,
3597 + fc_fcp_rec_resp,
3598 + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
3599 + rp->local_port->fid,
3600 + rport->port_id,
3601 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3602 +
3603 + if (sp) {
3604 + fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
3605 + return;
3606 + } else
3607 + fc_frame_free(fp);
3608 +retry:
3609 + if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
3610 + fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
3611 + else
3612 + fc_timeout_error(fsp);
3613 +}
3614 +
3615 +/*
3616 + * Receive handler for REC ELS frame
3617 + * If it is a reject, let the scsi layer handle the timeout.
3618 + * If it is an LS_ACC and the I/O is not yet complete, set the
3619 + * timer and return; otherwise complete the exchange and tell
3620 + * the scsi layer to restart the I/O.
3621 + */
3622 +static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
3623 +{
3624 + struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
3625 + struct fc_els_rec_acc *recp;
3626 + struct fc_els_ls_rjt *rjt;
3627 + u32 e_stat;
3628 + u8 opcode;
3629 + u32 offset;
3630 + enum dma_data_direction data_dir;
3631 + enum fc_rctl r_ctl;
3632 + struct fc_rport_libfc_priv *rp;
3633 +
3634 + if (IS_ERR(fp)) {
3635 + fc_fcp_rec_error(fsp, fp);
3636 + return;
3637 + }
3638 +
3639 + if (fc_fcp_lock_pkt(fsp))
3640 + goto out;
3641 +
3642 + fsp->recov_retry = 0;
3643 + opcode = fc_frame_payload_op(fp);
3644 + if (opcode == ELS_LS_RJT) {
3645 + rjt = fc_frame_payload_get(fp, sizeof(*rjt));
3646 + switch (rjt->er_reason) {
3647 + default:
3648 + if (fc_fcp_debug)
3649 + FC_DBG("device %x unexpected REC reject "
3650 + "reason %d expl %d\n",
3651 + fsp->rport->port_id, rjt->er_reason,
3652 + rjt->er_explan);
3653 + /* fall through */
3654 +
3655 + case ELS_RJT_UNSUP:
3656 + if (fc_fcp_debug)
3657 + FC_DBG("device does not support REC\n");
3658 + rp = fsp->rport->dd_data;
3659 + rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
3660 + /* fall through */
3661 +
3662 + case ELS_RJT_LOGIC:
3663 + case ELS_RJT_UNAB:
3664 + /*
3665 + * If no data transfer, the command frame got dropped
3666 + * so we just retry. If data was transferred, we
3667 + * lost the response but the target has no record,
3668 + * so we abort and retry.
3669 + */
3670 + if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
3671 + fsp->xfer_len == 0) {
3672 + fc_fcp_retry_cmd(fsp);
3673 + break;
3674 + }
3675 + fc_timeout_error(fsp);
3676 + break;
3677 + }
3678 + } else if (opcode == ELS_LS_ACC) {
3679 + if (fsp->state & FC_SRB_ABORTED)
3680 + goto unlock_out;
3681 +
3682 + data_dir = fsp->cmd->sc_data_direction;
3683 + recp = fc_frame_payload_get(fp, sizeof(*recp));
3684 + offset = ntohl(recp->reca_fc4value);
3685 + e_stat = ntohl(recp->reca_e_stat);
3686 +
3687 + if (e_stat & ESB_ST_COMPLETE) {
3688 +
3689 + /*
3690 + * The exchange is complete.
3691 + *
3692 + * For output, we must've lost the response.
3693 + * For input, all data must've been sent.
3694 + * We may have lost the response
3695 + * (and a confirmation was requested) and maybe
3696 + * some data.
3697 + *
3698 + * If all data received, send SRR
3699 + * asking for response. If partial data received,
3700 + * or gaps, SRR requests data at start of gap.
3701 + * Recovery via SRR relies on in-order-delivery.
3702 + */
3703 + if (data_dir == DMA_TO_DEVICE) {
3704 + r_ctl = FC_RCTL_DD_CMD_STATUS;
3705 + } else if (fsp->xfer_contig_end == offset) {
3706 + r_ctl = FC_RCTL_DD_CMD_STATUS;
3707 + } else {
3708 + offset = fsp->xfer_contig_end;
3709 + r_ctl = FC_RCTL_DD_SOL_DATA;
3710 + }
3711 + fc_fcp_srr(fsp, r_ctl, offset);
3712 + } else if (e_stat & ESB_ST_SEQ_INIT) {
3713 +
3714 + /*
3715 + * The remote port has the initiative, so just
3716 + * keep waiting for it to complete.
3717 + */
3718 + fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
3719 + } else {
3720 +
3721 + /*
3722 + * The exchange is incomplete, we have seq. initiative.
3723 + * Lost response with requested confirmation,
3724 + * lost confirmation, lost transfer ready or
3725 + * lost write data.
3726 + *
3727 + * For output, if not all data was received, ask
3728 + * for transfer ready to be repeated.
3729 + *
3730 + * If we received or sent all the data, send SRR to
3731 + * request response.
3732 + *
3733 + * If we lost a response, we may have lost some read
3734 + * data as well.
3735 + */
3736 + r_ctl = FC_RCTL_DD_SOL_DATA;
3737 + if (data_dir == DMA_TO_DEVICE) {
3738 + r_ctl = FC_RCTL_DD_CMD_STATUS;
3739 + if (offset < fsp->data_len)
3740 + r_ctl = FC_RCTL_DD_DATA_DESC;
3741 + } else if (offset == fsp->xfer_contig_end) {
3742 + r_ctl = FC_RCTL_DD_CMD_STATUS;
3743 + } else if (fsp->xfer_contig_end < offset) {
3744 + offset = fsp->xfer_contig_end;
3745 + }
3746 + fc_fcp_srr(fsp, r_ctl, offset);
3747 + }
3748 + }
3749 +unlock_out:
3750 + fc_fcp_unlock_pkt(fsp);
3751 +out:
3752 + fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
3753 + fc_frame_free(fp);
3754 +}
3755 +
3756 +/*
3757 + * Handle error response or timeout for REC exchange.
3758 + */
3759 +static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
3760 +{
3761 + int error = PTR_ERR(fp);
3762 +
3763 + if (fc_fcp_lock_pkt(fsp))
3764 + goto out;
3765 +
3766 + switch (error) {
3767 + case -FC_EX_CLOSED:
3768 + fc_fcp_retry_cmd(fsp);
3769 + break;
3770 +
3771 + default:
3772 + FC_DBG("REC %p fid %x error unexpected error %d\n",
3773 + fsp, fsp->rport->port_id, error);
3774 + fsp->status_code = FC_CMD_PLOGO;
3775 + /* fall through */
3776 +
3777 + case -FC_EX_TIMEOUT:
3778 + /*
3779 + * Assume REC or LS_ACC was lost.
3780 + * The exchange manager will have aborted REC, so retry.
3781 + */
3782 + FC_DBG("REC fid %x error error %d retry %d/%d\n",
3783 + fsp->rport->port_id, error, fsp->recov_retry,
3784 + FC_MAX_RECOV_RETRY);
3785 + if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
3786 + fc_fcp_rec(fsp);
3787 + else
3788 + fc_timeout_error(fsp);
3789 + break;
3790 + }
3791 + fc_fcp_unlock_pkt(fsp);
3792 +out:
3793 + fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
3794 +}
3795 +
3796 +/*
3797 + * Timeout error routine:
3798 + * aborts the I/O, closes the exchange and
3799 + * sends a completion notification to the scsi layer.
3800 + */
3801 +static void fc_timeout_error(struct fc_fcp_pkt *fsp)
3802 +{
3803 + fsp->status_code = FC_CMD_TIME_OUT;
3804 + fsp->cdb_status = 0;
3805 + fsp->io_status = 0;
3806 + /*
3807 + * if this fails then we let the scsi command timer fire and
3808 + * scsi-ml escalate.
3809 + */
3810 + fc_fcp_send_abort(fsp);
3811 +}
3812 +
3813 +/*
3814 + * Sequence retransmission request.
3815 + * This is called after receiving status but insufficient data, or
3816 + * when expecting status but the request has timed out.
3817 + */
3818 +static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
3819 +{
3820 + struct fc_lport *lp = fsp->lp;
3821 + struct fc_rport *rport;
3822 + struct fc_rport_libfc_priv *rp;
3823 + struct fc_seq *sp;
3824 + struct fcp_srr *srr;
3825 + struct fc_frame *fp;
3826 + u8 cdb_op;
3827 + u16 ox_id;
3828 + u16 rx_id;
3829 +
3830 + rport = fsp->rport;
3831 + rp = rport->dd_data;
3832 + cdb_op = fsp->cdb_cmd.fc_cdb[0];
3833 + lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
3834 +
3835 + if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
3836 + goto retry; /* shouldn't happen */
3837 + fp = fc_frame_alloc(lp, sizeof(*srr));
3838 + if (!fp)
3839 + goto retry;
3840 +
3841 + srr = fc_frame_payload_get(fp, sizeof(*srr));
3842 + memset(srr, 0, sizeof(*srr));
3843 + srr->srr_op = ELS_SRR;
3844 + srr->srr_ox_id = htons(ox_id);
3845 + srr->srr_rx_id = htons(rx_id);
3846 + srr->srr_r_ctl = r_ctl;
3847 + srr->srr_rel_off = htonl(offset);
3848 +
3849 + fc_frame_setup(fp, FC_RCTL_ELS4_REQ, FC_TYPE_FCP);
3850 + fc_frame_set_offset(fp, 0);
3851 + sp = lp->tt.exch_seq_send(lp, fp,
3852 + fc_fcp_srr_resp,
3853 + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
3854 + rp->local_port->fid,
3855 + rport->port_id,
3856 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3857 + if (!sp) {
3858 + fc_frame_free(fp);
3859 + goto retry;
3860 + }
3861 + fsp->recov_seq = sp;
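+	/* rewind the transfer counters to the retransmission point so
+	 * redelivered data is accounted for only once */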
3862 + fsp->xfer_len = offset;
3863 + fsp->xfer_contig_end = offset;
3864 + fsp->state &= ~FC_SRB_RCV_STATUS;
3865 + fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
3866 + return;
3867 +retry:
3868 + fc_fcp_retry_cmd(fsp);
3869 +}
3870 +
3871 +/*
3872 + * Handle response from SRR.
3873 + */
3874 +static void fc_fcp_srr_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
3875 +{
3876 + struct fc_fcp_pkt *fsp = arg;
3877 + struct fc_frame_header *fh;
3878 + u16 ox_id;
3879 + u16 rx_id;
3880 +
3881 + if (IS_ERR(fp)) {
3882 + fc_fcp_srr_error(fsp, fp);
3883 + return;
3884 + }
3885 +
3886 + if (fc_fcp_lock_pkt(fsp))
3887 + goto out;
3888 +
3889 + fh = fc_frame_header_get(fp);
3890 + /*
3891 + * BUG? fc_fcp_srr_error calls exch_done which would release
3892 + * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
3893 + * then fc_exch_timeout would be sending an abort. The exch_done
3894 + * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
3895 + * an abort response though.
3896 + */
3897 + if (fh->fh_type == FC_TYPE_BLS) {
3898 + fc_fcp_unlock_pkt(fsp);
3899 + return;
3900 + }
3901 +
3902 + fsp->recov_seq = NULL;
3903 +
3904 + fsp->lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
3905 + switch (fc_frame_payload_op(fp)) {
3906 + case ELS_LS_ACC:
3907 + fsp->recov_retry = 0;
3908 + fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
3909 + break;
3910 + case ELS_LS_RJT:
3911 + default:
3912 + fc_timeout_error(fsp);
3913 + break;
3914 + }
3915 + fc_fcp_unlock_pkt(fsp);
3916 + fsp->lp->tt.exch_done(sp);
3917 +out:
3918 + fc_frame_free(fp);
3919 + fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
3920 +}
3921 +
3922 +static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
3923 +{
3924 + if (fc_fcp_lock_pkt(fsp))
3925 + goto out;
3926 + fsp->lp->tt.exch_done(fsp->recov_seq);
3927 + fsp->recov_seq = NULL;
3928 + switch (PTR_ERR(fp)) {
3929 + case -FC_EX_TIMEOUT:
3930 + if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
3931 + fc_fcp_rec(fsp);
3932 + else
3933 + fc_timeout_error(fsp);
3934 + break;
3935 + case -FC_EX_CLOSED: /* e.g., link failure */
3936 + /* fall through */
3937 + default:
3938 + fc_fcp_retry_cmd(fsp);
3939 + break;
3940 + }
3941 + fc_fcp_unlock_pkt(fsp);
3942 +out:
3943 + fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
3944 +}
3945 +
3946 +static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
3947 +{
3948 + /* lock ? */
3949 + return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
3950 +}
3951 +
3952 +/**
3953 + * fc_queuecommand - The queuecommand function of the scsi template
3954 + * @cmd: struct scsi_cmnd to be executed
3955 + * @done: Callback function to be called when cmd is completed
3956 + *
3957 + * This is the I/O strategy routine, called by the scsi layer
3958 + * while holding the host_lock.
3959 + */
3960 +int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
3961 +{
3962 + struct fc_lport *lp;
3963 + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
3964 + struct fc_fcp_pkt *sp;
3965 + struct fc_rport_libfc_priv *rp;
3966 + int rval;
3967 + int rc = 0;
3968 + struct fcoe_dev_stats *stats;
3969 +
3970 + lp = shost_priv(sc_cmd->device->host);
3971 +
3972 + rval = fc_remote_port_chkready(rport);
3973 + if (rval) {
3974 + sc_cmd->result = rval;
3975 + done(sc_cmd);
3976 + goto out;
3977 + }
3978 +
3979 + if (!*(struct fc_remote_port **)rport->dd_data) {
3980 + /*
3981 + * rport is transitioning from blocked/deleted to
3982 + * online
3983 + */
3984 + sc_cmd->result = DID_IMM_RETRY << 16;
3985 + done(sc_cmd);
3986 + goto out;
3987 + }
3988 +
3989 + rp = rport->dd_data;
3990 +
3991 + if (!fc_fcp_lport_queue_ready(lp)) {
3992 + rc = SCSI_MLQUEUE_HOST_BUSY;
3993 + goto out;
3994 + }
3995 +
3996 + sp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
3997 + if (sp == NULL) {
3998 + rc = SCSI_MLQUEUE_HOST_BUSY;
3999 + goto out;
4000 + }
4001 +
4002 + /*
4003 + * build the libfc request pkt
4004 + */
4005 + sp->cmd = sc_cmd; /* save the cmd */
4006 + sp->lp = lp; /* save the softc ptr */
4007 + sp->rport = rport; /* set the remote port ptr */
4008 + sc_cmd->scsi_done = done;
4009 +
4010 + /*
4011 + * set up the transfer length
4012 + */
4013 + sp->data_len = scsi_bufflen(sc_cmd);
4014 + sp->xfer_len = 0;
4015 +
4016 + /*
4017 + * setup the data direction
4018 + */
4019 + stats = lp->dev_stats[smp_processor_id()];
4020 + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4021 + sp->req_flags = FC_SRB_READ;
4022 + stats->InputRequests++;
4023 +		stats->InputMegabytes += sp->data_len;
4024 + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
4025 + sp->req_flags = FC_SRB_WRITE;
4026 + stats->OutputRequests++;
4027 +		stats->OutputMegabytes += sp->data_len;
4028 + } else {
4029 + sp->req_flags = 0;
4030 + stats->ControlRequests++;
4031 + }
4032 +
4033 + sp->tgt_flags = rp->flags;
4034 +
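+	/* per-command timer; it is armed later via fc_fcp_timer_set() and
+	 * the handler recovers the packet pointer from timer.data */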
4035 + init_timer(&sp->timer);
4036 + sp->timer.data = (unsigned long)sp;
4037 +
4038 + /*
4039 +	 * send it to the lower layer;
4040 +	 * on failure, release the packet and report the host busy
4041 +	 * so the midlayer will requeue the command.
4042 + */
4043 + rval = fc_fcp_pkt_send(lp, sp);
4044 + if (rval != 0) {
4045 + sp->state = FC_SRB_FREE;
4046 + fc_fcp_pkt_release(sp);
4047 + rc = SCSI_MLQUEUE_HOST_BUSY;
4048 + }
4049 +out:
4050 + return rc;
4051 +}
4052 +EXPORT_SYMBOL(fc_queuecommand);
4053 +
4054 +/**
4055 + * fc_io_compl - Handle responses for completed commands
4056 + * @sp: scsi packet
4057 + *
4058 + * Translates the FCP packet status into a Linux SCSI result code.
4059 + *
4060 + * The fcp packet lock must be held when calling.
4061 + */
4062 +static void fc_io_compl(struct fc_fcp_pkt *sp)
4063 +{
4064 + struct fc_fcp_internal *si;
4065 + struct scsi_cmnd *sc_cmd;
4066 + struct fc_lport *lp;
4067 + unsigned long flags;
4068 +
4069 + sp->state |= FC_SRB_COMPL;
4070 + if (!(sp->state & FC_SRB_FCP_PROCESSING_TMO)) {
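+		/* drop the packet lock around del_timer_sync(): the timer
+		 * handler takes the same lock, so holding it here could deadlock */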
4071 + spin_unlock_bh(&sp->scsi_pkt_lock);
4072 + del_timer_sync(&sp->timer);
4073 + spin_lock_bh(&sp->scsi_pkt_lock);
4074 + }
4075 +
4076 + lp = sp->lp;
4077 + si = fc_get_scsi_internal(lp);
4078 + spin_lock_irqsave(lp->host->host_lock, flags);
4079 + if (!sp->cmd) {
4080 + spin_unlock_irqrestore(lp->host->host_lock, flags);
4081 + return;
4082 + }
4083 +
4084 + /*
4085 +	 * If a command timed out while we had to try and throttle I/O
4086 +	 * and it is now getting cleaned up, then we are about to
4087 +	 * try again, so clear the throttled flag in case we get more
4088 +	 * timeouts.
4089 + */
4090 + if (si->throttled && sp->state & FC_SRB_NOMEM)
4091 + si->throttled = 0;
4092 +
4093 + sc_cmd = sp->cmd;
4094 + sp->cmd = NULL;
4095 +
4096 + if (!sc_cmd->SCp.ptr) {
4097 + spin_unlock_irqrestore(lp->host->host_lock, flags);
4098 + return;
4099 + }
4100 +
4101 + CMD_SCSI_STATUS(sc_cmd) = sp->cdb_status;
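+	/* scsi result format: host byte (DID_*) in bits 16-23 and SCSI
+	 * status byte in bits 0-7, e.g. (DID_ERROR << 16) | 0x02 = 0x70002 */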
4102 + switch (sp->status_code) {
4103 + case FC_COMPLETE:
4104 + if (sp->cdb_status == 0) {
4105 + /*
4106 + * good I/O status
4107 + */
4108 + sc_cmd->result = DID_OK << 16;
4109 + if (sp->scsi_resid)
4110 + CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
4111 + } else if (sp->cdb_status == QUEUE_FULL) {
4112 + struct scsi_device *tmp_sdev;
4113 + struct scsi_device *sdev = sc_cmd->device;
4114 +
4115 + shost_for_each_device(tmp_sdev, sdev->host) {
4116 + if (tmp_sdev->id != sdev->id)
4117 + continue;
4118 +
4119 + if (tmp_sdev->queue_depth > 1) {
4120 + scsi_track_queue_full(tmp_sdev,
4121 + tmp_sdev->
4122 + queue_depth - 1);
4123 + }
4124 + }
4125 + sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
4126 + } else {
4127 + /*
4128 + * transport level I/O was ok but scsi
4129 + * has non zero status
4130 + */
4131 + sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
4132 + }
4133 + break;
4134 + case FC_ERROR:
4135 + if (sp->io_status & (SUGGEST_RETRY << 24))
4136 + sc_cmd->result = DID_IMM_RETRY << 16;
4137 + else
4138 + sc_cmd->result = (DID_ERROR << 16) | sp->io_status;
4139 + break;
4140 + case FC_DATA_UNDRUN:
4141 + if (sp->cdb_status == 0) {
4142 + /*
4143 +			 * scsi status is good but there was a transport-level
4144 +			 * underrun; for a read this should arguably be an error.
4145 + */
4146 + sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
4147 + } else {
4148 + /*
4149 + * scsi got underrun, this is an error
4150 + */
4151 + CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
4152 + sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
4153 + }
4154 + break;
4155 + case FC_DATA_OVRRUN:
4156 + /*
4157 + * overrun is an error
4158 + */
4159 + sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
4160 + break;
4161 + case FC_CMD_ABORTED:
4162 + sc_cmd->result = (DID_ABORT << 16) | sp->io_status;
4163 + break;
4164 + case FC_CMD_TIME_OUT:
4165 + sc_cmd->result = (DID_BUS_BUSY << 16) | sp->io_status;
4166 + break;
4167 + case FC_CMD_RESET:
4168 + sc_cmd->result = (DID_RESET << 16);
4169 + break;
4170 + case FC_HRD_ERROR:
4171 + sc_cmd->result = (DID_NO_CONNECT << 16);
4172 + break;
4173 + default:
4174 + sc_cmd->result = (DID_ERROR << 16);
4175 + break;
4176 + }
4177 +
4178 + list_del(&sp->list);
4179 + sc_cmd->SCp.ptr = NULL;
4180 + sc_cmd->scsi_done(sc_cmd);
4181 + spin_unlock_irqrestore(lp->host->host_lock, flags);
4182 +
4183 + /* release ref from initial allocation in queue command */
4184 + fc_fcp_pkt_release(sp);
4185 +}
4186 +
4187 +/**
4188 + * fc_eh_abort - Abort a command...from scsi host template
4189 + * @sc_cmd: scsi command to abort
4190 + *
4191 + * Send ABTS to the target device and wait for the response;
4192 + * sc_cmd is the pointer to the command to be aborted.
4193 + */
4194 +int fc_eh_abort(struct scsi_cmnd *sc_cmd)
4195 +{
4196 + struct fc_fcp_pkt *sp;
4197 + struct fc_lport *lp;
4198 + int rc = FAILED;
4199 + unsigned long flags;
4200 +
4201 + lp = shost_priv(sc_cmd->device->host);
4202 + if (lp->state != LPORT_ST_READY)
4203 + return rc;
4204 + else if (!(lp->link_status & FC_LINK_UP))
4205 + return rc;
4206 +
4207 + spin_lock_irqsave(lp->host->host_lock, flags);
4208 + sp = CMD_SP(sc_cmd);
4209 + if (!sp) {
4210 + /* command completed while scsi eh was setting up */
4211 + spin_unlock_irqrestore(lp->host->host_lock, flags);
4212 + return SUCCESS;
4213 + }
4214 +	/* grab a ref so the sp and sc_cmd cannot be released from under us */
4215 + fc_fcp_pkt_hold(sp);
4216 + spin_unlock_irqrestore(lp->host->host_lock, flags);
4217 +
4218 + if (fc_fcp_lock_pkt(sp)) {
4219 + /* completed while we were waiting for timer to be deleted */
4220 + rc = SUCCESS;
4221 + goto release_pkt;
4222 + }
4223 +
4224 + rc = fc_fcp_pkt_abort(lp, sp);
4225 + fc_fcp_unlock_pkt(sp);
4226 +
4227 +release_pkt:
4228 + fc_fcp_pkt_release(sp);
4229 + return rc;
4230 +}
4231 +EXPORT_SYMBOL(fc_eh_abort);
4232 +
4233 +/**
4234 + * fc_eh_device_reset - Reset a single LUN
4235 + * @sc_cmd: scsi command
4236 + *
4237 + * Called from the scsi host template to send a TM command to the target
4238 + * and wait for the response.
4239 + */
4240 +int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
4241 +{
4242 + struct fc_lport *lp;
4243 + struct fc_fcp_pkt *sp;
4244 + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
4245 + int rc = FAILED;
4246 + struct fc_rport_libfc_priv *rp;
4247 + int rval;
4248 +
4249 + rval = fc_remote_port_chkready(rport);
4250 + if (rval)
4251 + goto out;
4252 +
4253 + rp = rport->dd_data;
4254 + lp = shost_priv(sc_cmd->device->host);
4255 +
4256 + if (lp->state != LPORT_ST_READY)
4257 + return rc;
4258 +
4259 + sp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
4260 + if (sp == NULL) {
4261 + FC_DBG("could not allocate scsi_pkt\n");
4262 + sc_cmd->result = DID_NO_CONNECT << 16;
4263 + goto out;
4264 + }
4265 +
4266 + /*
4267 + * Build the libfc request pkt. Do not set the scsi cmnd, because
4268 +	 * the sc passed in is not set up for execution like when sent
4269 + * through the queuecommand callout.
4270 + */
4271 + sp->lp = lp; /* save the softc ptr */
4272 + sp->rport = rport; /* set the remote port ptr */
4273 +
4274 + /*
4275 + * flush outstanding commands
4276 + */
4277 + rc = fc_lun_reset(lp, sp, scmd_id(sc_cmd), sc_cmd->device->lun);
4278 + sp->state = FC_SRB_FREE;
4279 + fc_fcp_pkt_release(sp);
4280 +
4281 +out:
4282 + return rc;
4283 +}
4284 +EXPORT_SYMBOL(fc_eh_device_reset);
4285 +
4286 +/**
4287 + * fc_eh_host_reset - The reset function will reset the ports on the host.
4288 + * @sc_cmd: scsi command
4289 + */
4290 +int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
4291 +{
4292 + struct Scsi_Host *shost = sc_cmd->device->host;
4293 + struct fc_lport *lp = shost_priv(shost);
4294 + unsigned long wait_tmo;
4295 +
4296 + lp->tt.lport_reset(lp);
4297 + wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
4298 + while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
4299 + msleep(1000);
4300 +
4301 + if (fc_fcp_lport_queue_ready(lp)) {
4302 + shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
4303 + return SUCCESS;
4304 + } else {
4305 +		shost_printk(KERN_INFO, shost, "Host reset failed: "
4306 +			     "lport not ready.\n");
4307 + return FAILED;
4308 + }
4309 +}
4310 +EXPORT_SYMBOL(fc_eh_host_reset);
4311 +
4312 +/**
4313 + * fc_slave_alloc - configure queue depth
4314 + * @sdev: scsi device
4315 + *
4316 + * Configures queue depth based on the host's cmd_per_lun. If not set
4317 + * then we use the libfc default.
4318 + */
4319 +int fc_slave_alloc(struct scsi_device *sdev)
4320 +{
4321 + struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
4322 + int queue_depth;
4323 +
4324 + if (!rport || fc_remote_port_chkready(rport))
4325 + return -ENXIO;
4326 +
4327 + if (sdev->tagged_supported) {
4328 + if (sdev->host->hostt->cmd_per_lun)
4329 + queue_depth = sdev->host->hostt->cmd_per_lun;
4330 + else
4331 + queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
4332 + scsi_activate_tcq(sdev, queue_depth);
4333 + }
4334 + return 0;
4335 +}
4336 +EXPORT_SYMBOL(fc_slave_alloc);
4337 +
4338 +int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
4339 +{
4340 + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4341 + return sdev->queue_depth;
4342 +}
4343 +EXPORT_SYMBOL(fc_change_queue_depth);
4344 +
4345 +int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
4346 +{
4347 + if (sdev->tagged_supported) {
4348 + scsi_set_tag_type(sdev, tag_type);
4349 + if (tag_type)
4350 + scsi_activate_tcq(sdev, sdev->queue_depth);
4351 + else
4352 + scsi_deactivate_tcq(sdev, sdev->queue_depth);
4353 + } else
4354 + tag_type = 0;
4355 +
4356 + return tag_type;
4357 +}
4358 +EXPORT_SYMBOL(fc_change_queue_type);
4359 +
4360 +void fc_fcp_destroy(struct fc_lport *lp)
4361 +{
4362 + struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
4363 +
4364 + if (!list_empty(&si->scsi_pkt_queue))
4365 + printk(KERN_ERR "Leaked scsi packets.\n");
4366 +
4367 + mempool_destroy(si->scsi_pkt_pool);
4368 + kfree(si);
4369 + lp->scsi_priv = NULL;
4370 +}
4371 +EXPORT_SYMBOL(fc_fcp_destroy);
4372 +
4373 +int fc_fcp_init(struct fc_lport *lp)
4374 +{
4375 + int rc;
4376 + struct fc_fcp_internal *si;
4377 +
4378 + if (!lp->tt.scsi_cleanup)
4379 + lp->tt.scsi_cleanup = fc_fcp_cleanup;
4380 +
4381 + if (!lp->tt.scsi_abort_io)
4382 + lp->tt.scsi_abort_io = fc_fcp_abort_io;
4383 +
4384 + si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
4385 + if (!si)
4386 + return -ENOMEM;
4387 + lp->scsi_priv = si;
4388 + INIT_LIST_HEAD(&si->scsi_pkt_queue);
4389 +
4390 + si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
4391 + if (!si->scsi_pkt_pool) {
4392 + rc = -ENOMEM;
4393 + goto free_internal;
4394 + }
4395 + return 0;
4396 +
4397 +free_internal:
4398 + kfree(si);
4399 + return rc;
4400 +}
4401 +EXPORT_SYMBOL(fc_fcp_init);
4402 +
4403 +static int __init libfc_init(void)
4404 +{
4405 + int rc;
4406 +
4407 + scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
4408 + sizeof(struct fc_fcp_pkt),
4409 + 0, SLAB_HWCACHE_ALIGN, NULL);
4410 + if (scsi_pkt_cachep == NULL) {
4411 + FC_DBG("Unable to allocate SRB cache...module load failed!");
4412 + return -ENOMEM;
4413 + }
4414 +
4415 + rc = fc_setup_exch_mgr();
4416 + if (rc)
4417 + kmem_cache_destroy(scsi_pkt_cachep);
4418 + return rc;
4419 +}
4420 +
4421 +static void __exit libfc_exit(void)
4422 +{
4423 + kmem_cache_destroy(scsi_pkt_cachep);
4424 + fc_destroy_exch_mgr();
4425 +}
4426 +
4427 +module_init(libfc_init);
4428 +module_exit(libfc_exit);
4429 diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
4430 new file mode 100644
4431 index 0000000..7ba241e
4432 --- /dev/null
4433 +++ b/drivers/scsi/libfc/fc_frame.c
4434 @@ -0,0 +1,88 @@
4435 +/*
4436 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
4437 + *
4438 + * This program is free software; you can redistribute it and/or modify it
4439 + * under the terms and conditions of the GNU General Public License,
4440 + * version 2, as published by the Free Software Foundation.
4441 + *
4442 + * This program is distributed in the hope it will be useful, but WITHOUT
4443 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4444 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4445 + * more details.
4446 + *
4447 + * You should have received a copy of the GNU General Public License along with
4448 + * this program; if not, write to the Free Software Foundation, Inc.,
4449 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4450 + *
4451 + * Maintained at www.Open-FCoE.org
4452 + */
4453 +
4454 +/*
4455 + * Frame allocation.
4456 + */
4457 +#include <linux/module.h>
4458 +#include <linux/kernel.h>
4459 +#include <linux/skbuff.h>
4460 +#include <linux/crc32.h>
4461 +
4462 +#include <scsi/libfc/fc_frame.h>
4463 +
4464 +/*
4465 + * Check the CRC in a frame.
4466 + */
4467 +u32 fc_frame_crc_check(struct fc_frame *fp)
4468 +{
4469 + u32 crc;
4470 + u32 error;
4471 + const u8 *bp;
4472 + unsigned int len;
4473 +
4474 + WARN_ON(!fc_frame_is_linear(fp));
4475 + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
4476 + len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
4477 + bp = (const u8 *) fr_hdr(fp);
4478 + crc = ~crc32(~0, bp, len);
4479 + error = crc ^ *(u32 *) (bp + len);
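+	/* zero means the computed CRC matches the CRC word trailing the
+	 * (fill-padded) payload; any other value indicates corruption */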
4480 + return error;
4481 +}
4482 +EXPORT_SYMBOL(fc_frame_crc_check);
4483 +
4484 +/*
4485 + * Allocate a frame intended to be sent via fcoe_xmit.
4486 + * Get an sk_buff for the frame and set the length.
4487 + */
4488 +struct fc_frame *__fc_frame_alloc(size_t len)
4489 +{
4490 + struct fc_frame *fp;
4491 + struct sk_buff *skb;
4492 +
4493 + WARN_ON((len % sizeof(u32)) != 0);
4494 + len += sizeof(struct fc_frame_header);
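+	/* reserve head and tail room so the lower layer (e.g. FCoE) can add
+	 * its encapsulation header and trailer without reallocating */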
4495 + skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
4496 + if (!skb)
4497 + return NULL;
4498 + fp = (struct fc_frame *) skb;
4499 + fc_frame_init(fp);
4500 + skb_reserve(skb, FC_FRAME_HEADROOM);
4501 + skb_put(skb, len);
4502 + return fp;
4503 +}
4504 +EXPORT_SYMBOL(__fc_frame_alloc);
4505 +
4506 +
4507 +struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
4508 +{
4509 + struct fc_frame *fp;
4510 + size_t fill;
4511 +
4512 + fill = payload_len % 4;
4513 + if (fill != 0)
4514 + fill = 4 - fill;
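+	/* pad the payload to a 4-byte FC word boundary; the pad bytes are
+	 * zeroed below and then trimmed off so the frame length covers only
+	 * the header and payload */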
4515 + fp = __fc_frame_alloc(payload_len + fill);
4516 + if (fp) {
4517 + memset((char *) fr_hdr(fp) + payload_len, 0, fill);
4518 + /* trim is OK, we just allocated it so there are no fragments */
4519 +		skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header));
4520 + }
4521 + return fp;
4522 +}
4523 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
4524 new file mode 100644
4525 index 0000000..b390a32
4526 --- /dev/null
4527 +++ b/drivers/scsi/libfc/fc_lport.c
4528 @@ -0,0 +1,926 @@
4529 +/*
4530 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
4531 + *
4532 + * This program is free software; you can redistribute it and/or modify it
4533 + * under the terms and conditions of the GNU General Public License,
4534 + * version 2, as published by the Free Software Foundation.
4535 + *
4536 + * This program is distributed in the hope it will be useful, but WITHOUT
4537 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4538 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4539 + * more details.
4540 + *
4541 + * You should have received a copy of the GNU General Public License along with
4542 + * this program; if not, write to the Free Software Foundation, Inc.,
4543 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4544 + *
4545 + * Maintained at www.Open-FCoE.org
4546 + */
4547 +
4548 +/*
4549 + * Logical interface support.
4550 + */
4551 +
4552 +#include <linux/timer.h>
4553 +#include <asm/unaligned.h>
4554 +
4555 +#include <scsi/fc/fc_gs.h>
4556 +
4557 +#include <scsi/libfc/libfc.h>
4558 +
4559 +/* Fabric IDs to use for point-to-point mode, chosen on whims. */
4560 +#define FC_LOCAL_PTP_FID_LO 0x010101
4561 +#define FC_LOCAL_PTP_FID_HI 0x010102
4562 +
4563 +#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
4564 +
4565 +static int fc_lport_debug;
4566 +
4567 +static void fc_lport_enter_flogi(struct fc_lport *);
4568 +static void fc_lport_enter_logo(struct fc_lport *);
4569 +
4570 +static const char *fc_lport_state_names[] = {
4571 + [LPORT_ST_NONE] = "none",
4572 + [LPORT_ST_FLOGI] = "FLOGI",
4573 + [LPORT_ST_DNS] = "dNS",
4574 + [LPORT_ST_REG_PN] = "REG_PN",
4575 + [LPORT_ST_REG_FT] = "REG_FT",
4576 + [LPORT_ST_SCR] = "SCR",
4577 + [LPORT_ST_READY] = "ready",
4578 + [LPORT_ST_DNS_STOP] = "stop",
4579 + [LPORT_ST_LOGO] = "LOGO",
4580 + [LPORT_ST_RESET] = "reset",
4581 +};
4582 +
4583 +static int fc_frame_drop(struct fc_lport *lp, struct fc_frame *fp)
4584 +{
4585 + fc_frame_free(fp);
4586 + return 0;
4587 +}
4588 +
4589 +static const char *fc_lport_state(struct fc_lport *lp)
4590 +{
4591 + const char *cp;
4592 +
4593 + cp = fc_lport_state_names[lp->state];
4594 + if (!cp)
4595 + cp = "unknown";
4596 + return cp;
4597 +}
4598 +
4599 +static void fc_lport_ptp_setup(struct fc_lport *lp,
4600 + u32 remote_fid, u64 remote_wwpn,
4601 + u64 remote_wwnn)
4602 +{
4603 + struct fc_rport *rport;
4604 + struct fc_rport_identifiers ids = {
4605 + .port_id = remote_fid,
4606 + .port_name = remote_wwpn,
4607 + .node_name = remote_wwnn,
4608 + };
4609 +
4610 + /*
4611 + * if we have to create a rport the fc class can sleep so we must
4612 + * drop the lock here
4613 + */
4614 + fc_lport_unlock(lp);
4615 + rport = lp->tt.rport_lookup(lp, ids.port_id); /* lookup and hold */
4616 + if (rport == NULL)
4617 + rport = lp->tt.rport_create(lp, &ids); /* create and hold */
4618 + fc_lport_lock(lp);
4619 + if (rport) {
4620 + if (lp->ptp_rp)
4621 + fc_remote_port_delete(lp->ptp_rp);
4622 + lp->ptp_rp = rport;
4623 + fc_lport_state_enter(lp, LPORT_ST_READY);
4624 + }
4625 +}
4626 +
4627 +static void fc_lport_ptp_clear(struct fc_lport *lp)
4628 +{
4629 + if (lp->ptp_rp) {
4630 + fc_remote_port_delete(lp->ptp_rp);
4631 + lp->ptp_rp = NULL;
4632 + }
4633 +}
4634 +
4635 +/*
4636 + * Routines to support struct fc_function_template
4637 + */
4638 +void fc_get_host_port_state(struct Scsi_Host *shost)
4639 +{
4640 + struct fc_lport *lp = shost_priv(shost);
4641 +
4642 + if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
4643 + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
4644 + else
4645 + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
4646 +}
4647 +EXPORT_SYMBOL(fc_get_host_port_state);
4648 +
4649 +/*
4650 + * Fill in FLOGI command for request.
4651 + */
4652 +static void
4653 +fc_lport_flogi_fill(struct fc_lport *lp,
4654 + struct fc_els_flogi *flogi, unsigned int op)
4655 +{
4656 + struct fc_els_csp *sp;
4657 + struct fc_els_cssp *cp;
4658 +
4659 + memset(flogi, 0, sizeof(*flogi));
4660 + flogi->fl_cmd = (u8) op;
4661 + put_unaligned_be64(lp->wwpn, &flogi->fl_wwpn);
4662 + put_unaligned_be64(lp->wwnn, &flogi->fl_wwnn);
4663 + sp = &flogi->fl_csp;
4664 + sp->sp_hi_ver = 0x20;
4665 + sp->sp_lo_ver = 0x20;
4666 + sp->sp_bb_cred = htons(10); /* this gets set by gateway */
4667 + sp->sp_bb_data = htons((u16) lp->mfs);
4668 + cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
4669 + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
4670 + if (op != ELS_FLOGI) {
4671 + sp->sp_features = htons(FC_SP_FT_CIRO);
4672 + sp->sp_tot_seq = htons(255); /* seq. we accept */
4673 + sp->sp_rel_off = htons(0x1f);
4674 + sp->sp_e_d_tov = htonl(lp->e_d_tov);
4675 +
4676 + cp->cp_rdfs = htons((u16) lp->mfs);
4677 + cp->cp_con_seq = htons(255);
4678 + cp->cp_open_seq = 1;
4679 + }
4680 +}
4681 +
4682 +/*
4683 + * Set the fid. This indicates that we have a new connection to the
4684 + * fabric so we should reset our list of fc_rports. Passing a fid of
4685 + * 0 will also reset the rport list regardless of the previous fid.
4686 + */
4687 +static void fc_lport_set_fid(struct fc_lport *lp, u32 fid)
4688 +{
4689 + if (fid != 0 && lp->fid == fid)
4690 + return;
4691 +
4692 + if (fc_lport_debug)
4693 + FC_DBG("changing local port fid from %x to %x\n",
4694 + lp->fid, fid);
4695 + lp->fid = fid;
4696 + lp->tt.rport_reset_list(lp);
4697 +}
4698 +
4699 +/*
4700 + * Add a supported FC-4 type.
4701 + */
4702 +static void fc_lport_add_fc4_type(struct fc_lport *lp, enum fc_fh_type type)
4703 +{
4704 + __be32 *mp;
4705 +
4706 + mp = &lp->fcts.ff_type_map[type / FC_NS_BPW];
4707 + *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
4708 +}
4709 +
4710 +/*
4711 + * Handle received RLIR - registered link incident report.
4712 + */
4713 +static void fc_lport_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
4714 + struct fc_lport *lp)
4715 +{
4716 + lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
4717 + fc_frame_free(fp);
4718 +}
4719 +
4720 +/*
4721 + * Handle received ECHO.
4722 + */
4723 +static void fc_lport_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
4724 + struct fc_lport *lp)
4725 +{
4726 + struct fc_frame *fp;
4727 + unsigned int len;
4728 + void *pp;
4729 + void *dp;
4730 + u32 f_ctl;
4731 +
4732 + len = fr_len(in_fp) - sizeof(struct fc_frame_header);
4733 + pp = fc_frame_payload_get(in_fp, len);
4734 +
4735 + if (len < sizeof(__be32))
4736 + len = sizeof(__be32);
4737 + fp = fc_frame_alloc(lp, len);
4738 + if (fp) {
4739 + dp = fc_frame_payload_get(fp, len);
4740 + memcpy(dp, pp, len);
4741 + *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
4742 + sp = lp->tt.seq_start_next(sp);
4743 + f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
4744 + fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
4745 + lp->tt.seq_send(lp, sp, fp, f_ctl);
4746 + }
4747 + fc_frame_free(in_fp);
4748 +}
4749 +
4750 +/*
4751 + * Handle received RNID.
4752 + */
4753 +static void fc_lport_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
4754 + struct fc_lport *lp)
4755 +{
4756 + struct fc_frame *fp;
4757 + struct fc_els_rnid *req;
4758 + struct {
4759 + struct fc_els_rnid_resp rnid;
4760 + struct fc_els_rnid_cid cid;
4761 + struct fc_els_rnid_gen gen;
4762 + } *rp;
4763 + struct fc_seq_els_data rjt_data;
4764 + u8 fmt;
4765 + size_t len;
4766 + u32 f_ctl;
4767 +
4768 + req = fc_frame_payload_get(in_fp, sizeof(*req));
4769 + if (!req) {
4770 + rjt_data.fp = NULL;
4771 + rjt_data.reason = ELS_RJT_LOGIC;
4772 + rjt_data.explan = ELS_EXPL_NONE;
4773 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
4774 + } else {
4775 + fmt = req->rnid_fmt;
4776 + len = sizeof(*rp);
4777 + if (fmt != ELS_RNIDF_GEN ||
4778 + ntohl(lp->rnid_gen.rnid_atype) == 0) {
4779 + fmt = ELS_RNIDF_NONE; /* nothing to provide */
4780 + len -= sizeof(rp->gen);
4781 + }
4782 + fp = fc_frame_alloc(lp, len);
4783 + if (fp) {
4784 + rp = fc_frame_payload_get(fp, len);
4785 + memset(rp, 0, len);
4786 + rp->rnid.rnid_cmd = ELS_LS_ACC;
4787 + rp->rnid.rnid_fmt = fmt;
4788 + rp->rnid.rnid_cid_len = sizeof(rp->cid);
4789 + rp->cid.rnid_wwpn = htonll(lp->wwpn);
4790 + rp->cid.rnid_wwnn = htonll(lp->wwnn);
4791 + if (fmt == ELS_RNIDF_GEN) {
4792 + rp->rnid.rnid_sid_len = sizeof(rp->gen);
4793 + memcpy(&rp->gen, &lp->rnid_gen,
4794 + sizeof(rp->gen));
4795 + }
4796 + sp = lp->tt.seq_start_next(sp);
4797 + f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
4798 + fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
4799 + lp->tt.seq_send(lp, sp, fp, f_ctl);
4800 + }
4801 + }
4802 + fc_frame_free(in_fp);
4803 +}
4804 +
4805 +/*
4806 + * Handle received fabric logout request.
4807 + */
4808 +static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
4809 + struct fc_lport *lp)
4810 +{
4811 + lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
4812 + fc_lport_enter_reset(lp);
4813 + fc_frame_free(fp);
4814 +}
4815 +
4816 +/*
4817 + * Receive request frame
4818 + */
4819 +
4820 +int fc_fabric_login(struct fc_lport *lp)
4821 +{
4822 + int rc = -1;
4823 +
4824 + if (lp->state == LPORT_ST_NONE) {
4825 + fc_lport_lock(lp);
4826 + fc_lport_enter_reset(lp);
4827 + fc_lport_unlock(lp);
4828 + rc = 0;
4829 + }
4830 + return rc;
4831 +}
4832 +EXPORT_SYMBOL(fc_fabric_login);
4833 +
4834 +/**
4835 + * fc_linkup - link up notification
4836 + * @lp: Pointer to the fc_lport.
4837 + **/
4838 +void fc_linkup(struct fc_lport *lp)
4839 +{
4840 + if ((lp->link_status & FC_LINK_UP) != FC_LINK_UP) {
4841 + lp->link_status |= FC_LINK_UP;
4842 + fc_lport_lock(lp);
4843 + if (lp->state == LPORT_ST_RESET)
4844 + lp->tt.lport_login(lp);
4845 + fc_lport_unlock(lp);
4846 + }
4847 +}
4848 +EXPORT_SYMBOL(fc_linkup);
4849 +
4850 +/**
4851 + * fc_linkdown - link down notification
4852 + * @lp: Pointer to the fc_lport.
4853 + **/
4854 +void fc_linkdown(struct fc_lport *lp)
4855 +{
4856 + if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP) {
4857 + lp->link_status &= ~(FC_LINK_UP);
4858 + fc_lport_enter_reset(lp);
4859 + lp->tt.scsi_cleanup(lp);
4860 + }
4861 +}
4862 +EXPORT_SYMBOL(fc_linkdown);
4863 +
4864 +void fc_pause(struct fc_lport *lp)
4865 +{
4866 + lp->link_status |= FC_PAUSE;
4867 +}
4868 +EXPORT_SYMBOL(fc_pause);
4869 +
4870 +void fc_unpause(struct fc_lport *lp)
4871 +{
4872 + lp->link_status &= ~(FC_PAUSE);
4873 +}
4874 +EXPORT_SYMBOL(fc_unpause);
4875 +
4876 +int fc_fabric_logoff(struct fc_lport *lp)
4877 +{
4878 + fc_lport_lock(lp);
4879 + switch (lp->state) {
4880 + case LPORT_ST_NONE:
4881 + break;
4882 + case LPORT_ST_FLOGI:
4883 + case LPORT_ST_LOGO:
4884 + case LPORT_ST_RESET:
4885 + fc_lport_enter_reset(lp);
4886 + break;
4887 + case LPORT_ST_DNS:
4888 + case LPORT_ST_DNS_STOP:
4889 + fc_lport_enter_logo(lp);
4890 + break;
4891 + case LPORT_ST_REG_PN:
4892 + case LPORT_ST_REG_FT:
4893 + case LPORT_ST_SCR:
4894 + case LPORT_ST_READY:
4895 + lp->tt.disc_stop(lp);
4896 + break;
4897 + }
4898 + fc_lport_unlock(lp);
4899 + lp->tt.scsi_cleanup(lp);
4900 +
4901 + return 0;
4902 +}
4903 +EXPORT_SYMBOL(fc_fabric_logoff);
4904 +
4905 +/**
4906 + * fc_lport_destroy - unregister a fc_lport
4907 + * @lp: fc_lport pointer to unregister
4908 + *
4909 + * Return value:
4910 + *	Always 0
4911 + * Note:
4912 + *	Exit routine for an fc_lport instance:
4913 + *	cleans up all the allocated memory
4914 + *	and frees up other system resources.
4915 + *
4916 + **/
4917 +int fc_lport_destroy(struct fc_lport *lp)
4918 +{
4919 + fc_lport_lock(lp);
4920 + fc_lport_state_enter(lp, LPORT_ST_LOGO);
4921 + fc_lport_unlock(lp);
4922 +
4923 + cancel_delayed_work_sync(&lp->ns_disc_work);
4924 +
4925 + lp->tt.scsi_abort_io(lp);
4926 +
4927 + lp->tt.frame_send = fc_frame_drop;
4928 +
4929 + lp->tt.exch_mgr_reset(lp->emp, 0, 0);
4930 +
4931 + return 0;
4932 +}
4933 +EXPORT_SYMBOL(fc_lport_destroy);
4934 +
4935 +int fc_set_mfs(struct fc_lport *lp, u32 mfs)
4936 +{
4937 + unsigned int old_mfs;
4938 + int rc = -1;
4939 +
4940 + old_mfs = lp->mfs;
4941 +
4942 + if (mfs >= FC_MIN_MAX_FRAME) {
4943 + mfs &= ~3;
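+		/* round down to a multiple of 4 bytes (one FC word) */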
4944 + WARN_ON((size_t) mfs < FC_MIN_MAX_FRAME);
4945 + if (mfs > FC_MAX_FRAME)
4946 + mfs = FC_MAX_FRAME;
4947 + mfs -= sizeof(struct fc_frame_header);
4948 + lp->mfs = mfs;
4949 + rc = 0;
4950 + }
4951 +
4952 + if (!rc && mfs < old_mfs) {
4953 + lp->ns_disc_done = 0;
4954 + fc_lport_enter_reset(lp);
4955 + }
4956 + return rc;
4957 +}
4958 +EXPORT_SYMBOL(fc_set_mfs);
4959 +
4960 +/*
4961 + * re-enter state for retrying a request after a timeout or alloc failure.
4962 + */
4963 +static void fc_lport_enter_retry(struct fc_lport *lp)
4964 +{
4965 + switch (lp->state) {
4966 + case LPORT_ST_NONE:
4967 + case LPORT_ST_READY:
4968 + case LPORT_ST_RESET:
4969 + case LPORT_ST_DNS:
4970 + case LPORT_ST_DNS_STOP:
4971 + case LPORT_ST_REG_PN:
4972 + case LPORT_ST_REG_FT:
4973 + case LPORT_ST_SCR:
4974 + WARN_ON(1);
4975 + break;
4976 + case LPORT_ST_FLOGI:
4977 + fc_lport_enter_flogi(lp);
4978 + break;
4979 + case LPORT_ST_LOGO:
4980 + fc_lport_enter_logo(lp);
4981 + break;
4982 + }
4983 +}
4984 +
4985 +/*
4986 + * enter next state for handling an exchange reject or retry exhaustion
4987 + * in the current state.
4988 + */
4989 +static void fc_lport_enter_reject(struct fc_lport *lp)
4990 +{
4991 + switch (lp->state) {
4992 + case LPORT_ST_NONE:
4993 + case LPORT_ST_READY:
4994 + case LPORT_ST_RESET:
4995 + case LPORT_ST_REG_PN:
4996 + case LPORT_ST_REG_FT:
4997 + case LPORT_ST_SCR:
4998 + case LPORT_ST_DNS_STOP:
4999 + case LPORT_ST_DNS:
5000 + WARN_ON(1);
5001 + break;
5002 + case LPORT_ST_FLOGI:
5003 + fc_lport_enter_flogi(lp);
5004 + break;
5005 + case LPORT_ST_LOGO:
5006 + fc_lport_enter_reset(lp);
5007 + break;
5008 + }
5009 +}
5010 +
5011 +/*
5012 + * Handle resource allocation problem by retrying in a bit.
5013 + */
5014 +static void fc_lport_retry(struct fc_lport *lp)
5015 +{
5016 + if (lp->retry_count == 0)
5017 + FC_DBG("local port %6x alloc failure in state %s "
5018 + "- will retry\n", lp->fid, fc_lport_state(lp));
5019 + if (lp->retry_count < lp->max_retry_count) {
5020 + lp->retry_count++;
5021 + mod_timer(&lp->state_timer,
5022 + jiffies + msecs_to_jiffies(lp->e_d_tov));
5023 + } else {
5024 + FC_DBG("local port %6x alloc failure in state %s "
5025 + "- retries exhausted\n", lp->fid,
5026 + fc_lport_state(lp));
5027 + fc_lport_enter_reject(lp);
5028 + }
5029 +}
5030 +
5031 +/*
5032 + * A received FLOGI request indicates a point-to-point connection.
5033 + * Accept it with the common service parameters indicating our N port.
5034 + * Set up to do a PLOGI if we have the higher-number WWPN.
5035 + */
5036 +static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
5037 + struct fc_frame *rx_fp,
5038 + struct fc_lport *lp)
5039 +{
5040 + struct fc_frame *fp;
5041 + struct fc_frame_header *fh;
5042 + struct fc_seq *sp;
5043 + struct fc_els_flogi *flp;
5044 + struct fc_els_flogi *new_flp;
5045 + u64 remote_wwpn;
5046 + u32 remote_fid;
5047 + u32 local_fid;
5048 + u32 f_ctl;
5049 +
5050 + fh = fc_frame_header_get(rx_fp);
5051 + remote_fid = ntoh24(fh->fh_s_id);
5052 + flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
5053 + if (!flp)
5054 + goto out;
5055 + remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
5056 + if (remote_wwpn == lp->wwpn) {
5057 + FC_DBG("FLOGI from port with same WWPN %llx "
5058 + "possible configuration error\n", remote_wwpn);
5059 + goto out;
5060 + }
5061 + FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
5062 + fc_lport_lock(lp);
5063 +
5064 + /*
5065 + * XXX what is the right thing to do for FIDs?
5066 + * The originator might expect our S_ID to be 0xfffffe.
5067 + * But if so, both of us could end up with the same FID.
5068 + */
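+	/* the port with the larger WWPN takes the higher FID and will be
+	 * the one to initiate PLOGI */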
5069 + local_fid = FC_LOCAL_PTP_FID_LO;
5070 + if (remote_wwpn < lp->wwpn) {
5071 + local_fid = FC_LOCAL_PTP_FID_HI;
5072 + if (!remote_fid || remote_fid == local_fid)
5073 + remote_fid = FC_LOCAL_PTP_FID_LO;
5074 + } else if (!remote_fid) {
5075 + remote_fid = FC_LOCAL_PTP_FID_HI;
5076 + }
5077 + fc_lport_set_fid(lp, local_fid);
5078 +
5079 + fp = fc_frame_alloc(lp, sizeof(*flp));
5080 + if (fp) {
5081 + sp = lp->tt.seq_start_next(fr_seq(rx_fp));
5082 + new_flp = fc_frame_payload_get(fp, sizeof(*flp));
5083 + fc_lport_flogi_fill(lp, new_flp, ELS_FLOGI);
5084 + new_flp->fl_cmd = (u8) ELS_LS_ACC;
5085 +
5086 + /*
5087 + * Send the response. If this fails, the originator should
5088 + * repeat the sequence.
5089 + */
5090 + f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
5091 + fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
5092 + lp->tt.seq_send(lp, sp, fp, f_ctl);
5093 +
5094 + } else {
5095 + fc_lport_retry(lp);
5096 + }
5097 + fc_lport_ptp_setup(lp, remote_fid, remote_wwpn,
5098 + get_unaligned_be64(&flp->fl_wwnn));
5099 + fc_lport_unlock(lp);
5100 + if (lp->tt.disc_start(lp))
5101 + FC_DBG("target discovery start error\n");
5102 +out:
5103 + sp = fr_seq(rx_fp);
5104 + fc_frame_free(rx_fp);
5105 +}
5106 +
5107 +static void fc_lport_recv(struct fc_lport *lp, struct fc_seq *sp,
5108 + struct fc_frame *fp)
5109 +{
5110 + struct fc_frame_header *fh = fc_frame_header_get(fp);
5111 + void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
5112 + struct fc_rport *rport;
5113 + u32 s_id;
5114 + u32 d_id;
5115 + struct fc_seq_els_data rjt_data;
5116 +
5117 + /*
5118 + * Handle special ELS cases like FLOGI, LOGO, and
5119 + * RSCN here. These don't require a session.
5120 + * Even if we had a session, it might not be ready.
5121 + */
5122 + if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
5123 + /*
5124 + * Check opcode.
5125 + */
5126 + recv = NULL;
5127 + switch (fc_frame_payload_op(fp)) {
5128 + case ELS_FLOGI:
5129 + recv = fc_lport_recv_flogi_req;
5130 + break;
5131 + case ELS_LOGO:
5132 + fh = fc_frame_header_get(fp);
5133 + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
5134 + recv = fc_lport_recv_logo_req;
5135 + break;
5136 + case ELS_RSCN:
5137 + recv = lp->tt.disc_recv_req;
5138 + break;
5139 + case ELS_ECHO:
5140 + recv = fc_lport_echo_req;
5141 + break;
5142 + case ELS_RLIR:
5143 + recv = fc_lport_rlir_req;
5144 + break;
5145 + case ELS_RNID:
5146 + recv = fc_lport_rnid_req;
5147 + break;
5148 + }
5149 +
5150 + if (recv)
5151 + recv(sp, fp, lp);
5152 + else {
5153 + /*
5154 + * Find session.
5155 + * If this is a new incoming PLOGI, we won't find it.
5156 + */
5157 + s_id = ntoh24(fh->fh_s_id);
5158 + d_id = ntoh24(fh->fh_d_id);
5159 +
5160 + rport = lp->tt.rport_lookup(lp, s_id);
5161 + if (rport) {
5162 + lp->tt.rport_recv_req(sp, fp, rport);
5163 + put_device(&rport->dev); /* hold from lookup */
5164 + } else {
5165 + rjt_data.fp = NULL;
5166 + rjt_data.reason = ELS_RJT_UNAB;
5167 + rjt_data.explan = ELS_EXPL_NONE;
5168 + lp->tt.seq_els_rsp_send(sp,
5169 + ELS_LS_RJT, &rjt_data);
5170 + fc_frame_free(fp);
5171 + }
5172 + }
5173 + } else {
5174 + FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
5175 + fc_frame_free(fp);
5176 + }
5177 +
5178 + /*
5179 +	 * The common exch_done for all requests may not be good
5180 +	 * if any request requires a longer hold on the exchange. XXX
5181 + */
5182 + lp->tt.exch_done(sp);
5183 +}
5184 +
5185 +/*
5186 + * Put the local port back into the initial state. Reset all sessions.
5187 + * This is called after a SCSI reset or the driver is unloading
5188 + * or the program is exiting.
5189 + */
5190 +int fc_lport_enter_reset(struct fc_lport *lp)
5191 +{
5192 + if (fc_lport_debug)
5193 + FC_DBG("Processing RESET state\n");
5194 +
5195 + if (lp->dns_rp) {
5196 + fc_remote_port_delete(lp->dns_rp);
5197 + lp->dns_rp = NULL;
5198 + }
5199 + fc_lport_ptp_clear(lp);
5200 +
5201 + /*
5202 + * Setting state RESET keeps fc_lport_error() callbacks
5203 + * by exch_mgr_reset() from recursing on the lock.
5204 + * It also causes fc_lport_sess_event() to ignore events.
5205 + * The lock is held for the duration of the time in RESET state.
5206 + */
5207 + fc_lport_state_enter(lp, LPORT_ST_RESET);
5208 + lp->tt.exch_mgr_reset(lp->emp, 0, 0);
5209 + fc_lport_set_fid(lp, 0);
5210 + if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
5211 + fc_lport_enter_flogi(lp);
5212 + return 0;
5213 +}
5214 +EXPORT_SYMBOL(fc_lport_enter_reset);
5215 +
5216 +/*
5217 + * Handle errors on local port requests.
5218 + * Don't get locks if in RESET state.
5219 + * The only possible errors so far are exchange TIMEOUT and CLOSED (reset).
5220 + */
5221 +static void fc_lport_error(struct fc_lport *lp, struct fc_frame *fp)
5222 +{
5223 + if (lp->state == LPORT_ST_RESET)
5224 + return;
5225 +
5226 + fc_lport_lock(lp);
5227 + if (PTR_ERR(fp) == -FC_EX_TIMEOUT) {
5228 + if (lp->retry_count < lp->max_retry_count) {
5229 + lp->retry_count++;
5230 + fc_lport_enter_retry(lp);
5231 + } else {
5232 + fc_lport_enter_reject(lp);
5233 +
5234 + }
5235 + }
5236 + if (fc_lport_debug)
5237 + FC_DBG("error %ld retries %d limit %d\n",
5238 + PTR_ERR(fp), lp->retry_count, lp->max_retry_count);
5239 + fc_lport_unlock(lp);
5240 +}
5241 +
5242 +static void fc_lport_timeout(unsigned long lp_arg)
5243 +{
5244 + struct fc_lport *lp = (struct fc_lport *)lp_arg;
5245 +
5246 + fc_lport_lock(lp);
5247 + fc_lport_enter_retry(lp);
5248 + fc_lport_unlock(lp);
5249 +}
5250 +
5251 +static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5252 + void *lp_arg)
5253 +{
5254 + struct fc_lport *lp = lp_arg;
5255 +
5256 + if (IS_ERR(fp))
5257 + fc_lport_error(lp, fp);
5258 + else {
5259 + fc_frame_free(fp);
5260 + fc_lport_lock(lp);
5261 + fc_lport_enter_reset(lp);
5262 + fc_lport_unlock(lp);
5263 + }
5264 +}
5265 +
5266 +/* Logout of the FC fabric */
5267 +static void fc_lport_enter_logo(struct fc_lport *lp)
5268 +{
5269 + struct fc_frame *fp;
5270 + struct fc_els_logo *logo;
5271 +
5272 + if (fc_lport_debug)
5273 + FC_DBG("Processing LOGO state\n");
5274 +
5275 + fc_lport_state_enter(lp, LPORT_ST_LOGO);
5276 +
5277 + /* DNS session should be closed so we can release it here */
5278 + if (lp->dns_rp) {
5279 + fc_remote_port_delete(lp->dns_rp);
5280 + lp->dns_rp = NULL;
5281 + }
5282 +
5283 + fp = fc_frame_alloc(lp, sizeof(*logo));
5284 + if (!fp) {
5285 + FC_DBG("failed to allocate frame\n");
5286 + return;
5287 + }
5288 +
5289 + logo = fc_frame_payload_get(fp, sizeof(*logo));
5290 + memset(logo, 0, sizeof(*logo));
5291 + logo->fl_cmd = ELS_LOGO;
5292 + hton24(logo->fl_n_port_id, lp->fid);
5293 + logo->fl_n_port_wwn = htonll(lp->wwpn);
5294 +
5295 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5296 + fc_frame_set_offset(fp, 0);
5297 +
5298 + lp->tt.exch_seq_send(lp, fp,
5299 + fc_lport_logo_resp,
5300 + lp, lp->e_d_tov,
5301 + lp->fid, FC_FID_FLOGI,
5302 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
5303 +}
5304 +
5305 +static int fc_lport_logout(struct fc_lport *lp)
5306 +{
5307 + fc_lport_lock(lp);
5308 + if (lp->state != LPORT_ST_LOGO)
5309 + fc_lport_enter_logo(lp);
5310 + fc_lport_unlock(lp);
5311 + return 0;
5312 +}
5313 +
5314 +/*
5315 + * Handle incoming ELS FLOGI response.
5316 + * Save parameters of remote switch. Finish exchange.
5317 + */
5318 +static void
5319 +fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
5320 +{
5321 + struct fc_lport *lp = lp_arg;
5322 + struct fc_frame_header *fh;
5323 + struct fc_els_flogi *flp;
5324 + u32 did;
5325 + u16 csp_flags;
5326 + unsigned int r_a_tov;
5327 + unsigned int e_d_tov;
5328 + u16 mfs;
5329 +
5330 + if (IS_ERR(fp)) {
5331 + fc_lport_error(lp, fp);
5332 + return;
5333 + }
5334 +
5335 + fh = fc_frame_header_get(fp);
5336 + did = ntoh24(fh->fh_d_id);
5337 + if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
5338 + if (fc_lport_debug)
5339 + FC_DBG("assigned fid %x\n", did);
5340 + fc_lport_lock(lp);
5341 + fc_lport_set_fid(lp, did);
5342 + flp = fc_frame_payload_get(fp, sizeof(*flp));
5343 + if (flp) {
5344 + mfs = ntohs(flp->fl_csp.sp_bb_data) &
5345 + FC_SP_BB_DATA_MASK;
5346 + if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
5347 + mfs < lp->mfs)
5348 + lp->mfs = mfs;
5349 + csp_flags = ntohs(flp->fl_csp.sp_features);
5350 + r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
5351 + e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
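+			/* FC_SP_FT_EDTR indicates E_D_TOV is given in nanoseconds;
+			 * convert it to the milliseconds used internally */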
5352 + if (csp_flags & FC_SP_FT_EDTR)
5353 + e_d_tov /= 1000000;
5354 + if ((csp_flags & FC_SP_FT_FPORT) == 0) {
5355 + if (e_d_tov > lp->e_d_tov)
5356 + lp->e_d_tov = e_d_tov;
5357 + lp->r_a_tov = 2 * e_d_tov;
5358 + FC_DBG("point-to-point mode\n");
5359 + fc_lport_ptp_setup(lp, ntoh24(fh->fh_s_id),
5360 + get_unaligned_be64(
5361 + &flp->fl_wwpn),
5362 + get_unaligned_be64(
5363 + &flp->fl_wwnn));
5364 + } else {
5365 + lp->e_d_tov = e_d_tov;
5366 + lp->r_a_tov = r_a_tov;
5367 + lp->tt.dns_register(lp);
5368 + }
5369 + }
5370 + fc_lport_unlock(lp);
5371 + if (flp) {
5372 + csp_flags = ntohs(flp->fl_csp.sp_features);
5373 + if ((csp_flags & FC_SP_FT_FPORT) == 0) {
5374 + if (lp->tt.disc_start(lp))
5375 + FC_DBG("target disc start error\n");
5376 + }
5377 + }
5378 + } else {
5379 + FC_DBG("bad FLOGI response\n");
5380 + }
5381 + fc_frame_free(fp);
5382 +}
5383 +
5384 +/*
5385 + * Send ELS (extended link service) FLOGI request to peer.
5386 + */
5387 +static void fc_lport_flogi_send(struct fc_lport *lp)
5388 +{
5389 + struct fc_frame *fp;
5390 + struct fc_els_flogi *flp;
5391 +
5392 + fp = fc_frame_alloc(lp, sizeof(*flp));
5393 + if (!fp)
5394 + return fc_lport_retry(lp);
5395 +
5396 + flp = fc_frame_payload_get(fp, sizeof(*flp));
5397 + fc_lport_flogi_fill(lp, flp, ELS_FLOGI);
5398 +
5399 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5400 + fc_frame_set_offset(fp, 0);
5401 +
5402 + if (!lp->tt.exch_seq_send(lp, fp,
5403 + fc_lport_flogi_resp,
5404 + lp, lp->e_d_tov,
5405 + 0, FC_FID_FLOGI,
5406 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5407 + fc_lport_retry(lp);
5408 +
5409 +}
5410 +
5411 +void fc_lport_enter_flogi(struct fc_lport *lp)
5412 +{
5413 + if (fc_lport_debug)
5414 + FC_DBG("Processing FLOGI state\n");
5415 + fc_lport_state_enter(lp, LPORT_ST_FLOGI);
5416 + fc_lport_flogi_send(lp);
5417 +}
5418 +
5419 +/* Configure a fc_lport */
5420 +int fc_lport_config(struct fc_lport *lp)
5421 +{
5422 + setup_timer(&lp->state_timer, fc_lport_timeout, (unsigned long)lp);
5423 + spin_lock_init(&lp->state_lock);
5424 +
5425 + fc_lport_lock(lp);
5426 + fc_lport_state_enter(lp, LPORT_ST_NONE);
5427 + fc_lport_unlock(lp);
5428 +
5429 + lp->ns_disc_delay = DNS_DELAY;
5430 +
5431 + fc_lport_add_fc4_type(lp, FC_TYPE_FCP);
5432 + fc_lport_add_fc4_type(lp, FC_TYPE_CT);
5433 +
5434 + return 0;
5435 +}
5436 +EXPORT_SYMBOL(fc_lport_config);
5437 +
5438 +int fc_lport_init(struct fc_lport *lp)
5439 +{
5440 + if (!lp->tt.lport_recv)
5441 + lp->tt.lport_recv = fc_lport_recv;
5442 +
5443 + if (!lp->tt.lport_login)
5444 + lp->tt.lport_login = fc_lport_enter_reset;
5445 +
5446 + if (!lp->tt.lport_reset)
5447 + lp->tt.lport_reset = fc_lport_enter_reset;
5448 +
5449 + if (!lp->tt.lport_logout)
5450 + lp->tt.lport_logout = fc_lport_logout;
5451 +
5452 + return 0;
5453 +}
5454 +EXPORT_SYMBOL(fc_lport_init);
5455 diff --git a/drivers/scsi/libfc/fc_ns.c b/drivers/scsi/libfc/fc_ns.c
5456 new file mode 100644
5457 index 0000000..5c9272c
5458 --- /dev/null
5459 +++ b/drivers/scsi/libfc/fc_ns.c
5460 @@ -0,0 +1,1283 @@
5461 +/*
5462 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
5463 + *
5464 + * This program is free software; you can redistribute it and/or modify it
5465 + * under the terms and conditions of the GNU General Public License,
5466 + * version 2, as published by the Free Software Foundation.
5467 + *
5468 + * This program is distributed in the hope it will be useful, but WITHOUT
5469 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5470 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5471 + * more details.
5472 + *
5473 + * You should have received a copy of the GNU General Public License along with
5474 + * this program; if not, write to the Free Software Foundation, Inc.,
5475 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5476 + *
5477 + * Maintained at www.Open-FCoE.org
5478 + */
5479 +
5480 +/*
5481 + * Target Discovery
5482 + * Actually, this discovers all FC-4 remote ports, including FCP initiators.
5483 + */
5484 +
5485 +#include <linux/timer.h>
5486 +#include <linux/err.h>
5487 +#include <asm/unaligned.h>
5488 +
5489 +#include <scsi/fc/fc_gs.h>
5490 +
5491 +#include <scsi/libfc/libfc.h>
5492 +
5493 +#define FC_NS_RETRY_LIMIT 3 /* max retries */
5494 +#define FC_NS_RETRY_DELAY 500UL /* (msecs) delay */
5495 +
5496 +int fc_ns_debug;
5497 +
5498 +static void fc_ns_gpn_ft_req(struct fc_lport *);
5499 +static void fc_ns_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
5500 +static int fc_ns_new_target(struct fc_lport *, struct fc_rport *,
5501 + struct fc_rport_identifiers *);
5502 +static void fc_ns_del_target(struct fc_lport *, struct fc_rport *);
5503 +static void fc_ns_disc_done(struct fc_lport *);
5504 +static void fcdt_ns_error(struct fc_lport *, struct fc_frame *);
5505 +static void fc_ns_timeout(struct work_struct *);
5506 +
5507 +/**
5508 + * struct fc_ns_port - temporary discovery port to hold rport identifiers
5509 + * @lp: Fibre Channel host port instance
5510 + * @peers: node for list management during discovery and RSCN processing
5511 + * @ids: identifiers structure to pass to fc_remote_port_add()
5512 + */
5513 +struct fc_ns_port {
5514 + struct fc_lport *lp;
5515 + struct list_head peers;
5516 + struct fc_rport_identifiers ids;
5517 +};
5518 +
5519 +static int fc_ns_gpn_id_req(struct fc_lport *, struct fc_ns_port *);
5520 +static void fc_ns_gpn_id_resp(struct fc_seq *, struct fc_frame *, void *);
5521 +static void fc_ns_gpn_id_error(struct fc_ns_port *rp, struct fc_frame *fp);
5522 +
5523 +static int fc_ns_gnn_id_req(struct fc_lport *, struct fc_ns_port *);
5524 +static void fc_ns_gnn_id_resp(struct fc_seq *, struct fc_frame *, void *);
5525 +static void fc_ns_gnn_id_error(struct fc_ns_port *, struct fc_frame *);
5526 +static void fc_ns_enter_reg_pn(struct fc_lport *lp);
5527 +static void fc_ns_error(struct fc_lport *lp, struct fc_frame *fp);
5528 +static void fc_lport_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
5529 + unsigned int op, unsigned int req_size);
5530 +static void fc_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
5531 + void *lp_arg);
5532 +static void fc_ns_retry(struct fc_lport *lp);
5533 +static void fc_ns_single(struct fc_lport *, struct fc_ns_port *);
5534 +static int fc_ns_restart(struct fc_lport *);
5535 +
5536 +
5537 +/**
5538 + * fc_ns_rscn_req - Handle Registered State Change Notification (RSCN)
5539 + * @sp: Current sequence of the RSCN exchange
5540 + * @fp: RSCN Frame
5541 + * @lp: Fibre Channel host port instance
5542 + */
5543 +static void fc_ns_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
5544 + struct fc_lport *lp)
5545 +{
5546 + struct fc_els_rscn *rp;
5547 + struct fc_els_rscn_page *pp;
5548 + struct fc_seq_els_data rjt_data;
5549 + unsigned int len;
5550 + int redisc = 0;
5551 + enum fc_els_rscn_ev_qual ev_qual;
5552 + enum fc_els_rscn_addr_fmt fmt;
5553 + LIST_HEAD(disc_list);
5554 + struct fc_ns_port *dp, *next;
5555 +
5556 + rp = fc_frame_payload_get(fp, sizeof(*rp));
5557 +
5558 + if (!rp || rp->rscn_page_len != sizeof(*pp))
5559 + goto reject;
5560 +
5561 + len = ntohs(rp->rscn_plen);
5562 + if (len < sizeof(*rp))
5563 + goto reject;
5564 + len -= sizeof(*rp);
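+	/* the remainder of the payload is a list of RSCN pages,
+	 * one per affected address */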
5565 +
5566 + for (pp = (void *)(rp + 1); len; len -= sizeof(*pp), pp++) {
5567 + ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
5568 + ev_qual &= ELS_RSCN_EV_QUAL_MASK;
5569 + fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
5570 + fmt &= ELS_RSCN_ADDR_FMT_MASK;
5571 + /*
5572 + * if we get an address format other than port
5573 + * (area, domain, fabric), then do a full discovery
5574 + */
5575 + switch (fmt) {
5576 + case ELS_ADDR_FMT_PORT:
5577 + dp = kzalloc(sizeof(*dp), GFP_KERNEL);
5578 + if (!dp) {
5579 + redisc = 1;
5580 + break;
5581 + }
5582 + dp->lp = lp;
5583 + dp->ids.port_id = ntoh24(pp->rscn_fid);
5584 + dp->ids.port_name = -1;
5585 + dp->ids.node_name = -1;
5586 + dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
5587 + list_add_tail(&dp->peers, &disc_list);
5588 + break;
5589 + case ELS_ADDR_FMT_AREA:
5590 + case ELS_ADDR_FMT_DOM:
5591 + case ELS_ADDR_FMT_FAB:
5592 + default:
5593 + redisc = 1;
5594 + break;
5595 + }
5596 + }
5597 + lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
5598 + if (redisc) {
5599 + if (fc_ns_debug)
5600 + FC_DBG("RSCN received: rediscovering\n");
5601 + list_for_each_entry_safe(dp, next, &disc_list, peers) {
5602 + list_del(&dp->peers);
5603 + kfree(dp);
5604 + }
5605 + fc_ns_restart(lp);
5606 + } else {
5607 + if (fc_ns_debug)
5608 + FC_DBG("RSCN received: not rediscovering. "
5609 + "redisc %d state %d in_prog %d\n",
5610 + redisc, lp->state, lp->ns_disc_pending);
5611 + list_for_each_entry_safe(dp, next, &disc_list, peers) {
5612 + list_del(&dp->peers);
5613 + fc_ns_single(lp, dp);
5614 + }
5615 + }
5616 + fc_frame_free(fp);
5617 + return;
5618 +reject:
5619 + rjt_data.fp = NULL;
5620 + rjt_data.reason = ELS_RJT_LOGIC;
5621 + rjt_data.explan = ELS_EXPL_NONE;
5622 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
5623 + fc_frame_free(fp);
5624 +}
5625 +
5626 +static void fc_ns_recv_req(struct fc_seq *sp, struct fc_frame *fp,
5627 + struct fc_lport *lp)
5628 +{
5629 + switch (fc_frame_payload_op(fp)) {
5630 + case ELS_RSCN:
5631 + fc_ns_rscn_req(sp, fp, lp);
5632 + break;
5633 + default:
5634 +		FC_DBG("fc_ns received an unexpected request\n");
5635 + break;
5636 + }
5637 +}
5638 +
5639 +/**
5640 + * fc_ns_scr_resp - Handle response to State Change Register (SCR) request
5641 + * @sp: current sequence in SCR exchange
5642 + * @fp: response frame
5643 + * @lp_arg: Fibre Channel host port instance
5644 + */
5645 +static void fc_ns_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
5646 + void *lp_arg)
5647 +{
5648 + struct fc_lport *lp = lp_arg;
5649 + int err;
5650 +
5651 + if (IS_ERR(fp))
5652 + fc_ns_error(lp, fp);
5653 + else {
5654 + fc_lport_lock(lp);
5655 + fc_lport_state_enter(lp, LPORT_ST_READY);
5656 + fc_lport_unlock(lp);
5657 + err = lp->tt.disc_start(lp);
5658 + if (err)
5659 + FC_DBG("target discovery start error\n");
5660 + fc_frame_free(fp);
5661 + }
5662 +}
5663 +
5664 +/**
5665 + * fc_ns_enter_scr - Send a State Change Register (SCR) request
5666 + * @lp: Fibre Channel host port instance
5667 + */
5668 +static void fc_ns_enter_scr(struct fc_lport *lp)
5669 +{
5670 + struct fc_frame *fp;
5671 + struct fc_els_scr *scr;
5672 +
5673 + if (fc_ns_debug)
5674 + FC_DBG("Processing SCR state\n");
5675 +
5676 + fc_lport_state_enter(lp, LPORT_ST_SCR);
5677 +
5678 +	fp = fc_frame_alloc(lp, sizeof(*scr));
5679 +	if (!fp)
5680 +		return fc_ns_retry(lp);	/* don't pass a NULL frame below */
5681 +	scr = fc_frame_payload_get(fp, sizeof(*scr));
5682 +	memset(scr, 0, sizeof(*scr));
5683 +	scr->scr_cmd = ELS_SCR;
5684 +	scr->scr_reg_func = ELS_SCRF_FULL;
5685 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5686 + fc_frame_set_offset(fp, 0);
5687 +
5688 + lp->tt.exch_seq_send(lp, fp,
5689 + fc_ns_scr_resp,
5690 + lp, lp->e_d_tov,
5691 + lp->fid, FC_FID_FCTRL,
5692 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
5693 +}
5694 +
5695 +/**
5696 + * fc_ns_enter_reg_ft - Register FC4-types with the name server
5697 + * @lp: Fibre Channel host port instance
5698 + */
5699 +static void fc_ns_enter_reg_ft(struct fc_lport *lp)
5700 +{
5701 + struct fc_frame *fp;
5702 + struct req {
5703 + struct fc_ct_hdr ct;
5704 + struct fc_ns_fid fid; /* port ID object */
5705 + struct fc_ns_fts fts; /* FC4-types object */
5706 + } *req;
5707 + struct fc_ns_fts *lps;
5708 + int i;
5709 +
5710 + if (fc_ns_debug)
5711 + FC_DBG("Processing REG_FT state\n");
5712 +
5713 + fc_lport_state_enter(lp, LPORT_ST_REG_FT);
5714 +
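+	/*
+	 * Scan the FC-4 type map and only send RFT_ID when at least one
+	 * type bit is set; with nothing to register, skip straight to
+	 * SCR.
+	 */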
5715 + lps = &lp->fcts;
5716 + i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
5717 + while (--i >= 0)
5718 + if (ntohl(lps->ff_type_map[i]) != 0)
5719 + break;
5720 + if (i >= 0) {
5721 + fp = fc_frame_alloc(lp, sizeof(*req));
5722 + if (fp) {
5723 + req = fc_frame_payload_get(fp, sizeof(*req));
5724 + fc_lport_fill_dns_hdr(lp, &req->ct,
5725 + FC_NS_RFT_ID,
5726 + sizeof(*req) -
5727 + sizeof(struct fc_ct_hdr));
5728 + hton24(req->fid.fp_fid, lp->fid);
5729 + req->fts = *lps;
5730 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
5731 + if (!lp->tt.exch_seq_send(lp, fp,
5732 + fc_ns_resp, lp,
5733 + lp->e_d_tov,
5734 + lp->fid,
5735 + lp->dns_rp->port_id,
5736 + FC_FC_SEQ_INIT |
5737 + FC_FC_END_SEQ))
5738 + fc_ns_retry(lp);
5739 + } else {
5740 + fc_ns_retry(lp);
5741 + }
5742 + } else {
5743 + fc_ns_enter_scr(lp);
5744 + }
5745 +}
5746 +
5747 +/*
5748 + * enter next state for handling an exchange reject or retry exhaustion
5749 + * in the current state.
5750 + */
5751 +static void fc_ns_enter_reject(struct fc_lport *lp)
5752 +{
5753 + switch (lp->state) {
5754 + case LPORT_ST_NONE:
5755 + case LPORT_ST_READY:
5756 + case LPORT_ST_RESET:
5757 + case LPORT_ST_FLOGI:
5758 + case LPORT_ST_LOGO:
5759 + WARN_ON(1);
5760 + break;
5761 + case LPORT_ST_REG_PN:
5762 + fc_ns_enter_reg_ft(lp);
5763 + break;
5764 + case LPORT_ST_REG_FT:
5765 + fc_ns_enter_scr(lp);
5766 + break;
5767 + case LPORT_ST_SCR:
5768 + case LPORT_ST_DNS_STOP:
5769 + lp->tt.disc_stop(lp);
5770 + break;
5771 + case LPORT_ST_DNS:
5772 + lp->tt.lport_reset(lp);
5773 + break;
5774 + }
5775 +}
5776 +
5777 +static void fc_ns_enter_retry(struct fc_lport *lp)
5778 +{
5779 + switch (lp->state) {
5780 + case LPORT_ST_NONE:
5781 + case LPORT_ST_RESET:
5782 + case LPORT_ST_READY:
5783 + case LPORT_ST_FLOGI:
5784 + case LPORT_ST_LOGO:
5785 + WARN_ON(1);
5786 + break;
5787 + case LPORT_ST_DNS:
5788 + lp->tt.dns_register(lp);
5789 + break;
5790 + case LPORT_ST_DNS_STOP:
5791 + lp->tt.disc_stop(lp);
5792 + break;
5793 + case LPORT_ST_REG_PN:
5794 + fc_ns_enter_reg_pn(lp);
5795 + break;
5796 + case LPORT_ST_REG_FT:
5797 + fc_ns_enter_reg_ft(lp);
5798 + break;
5799 + case LPORT_ST_SCR:
5800 + fc_ns_enter_scr(lp);
5801 + break;
5802 + }
5803 +}
5804 +
5805 +/*
5806 + * Refresh target discovery, perhaps due to an RSCN.
5807 + * A configurable delay is introduced to collect any subsequent RSCNs.
5808 + */
5809 +static int fc_ns_restart(struct fc_lport *lp)
5810 +{
5811 + fc_lport_lock(lp);
5812 + if (!lp->ns_disc_requested && !lp->ns_disc_pending) {
5813 + schedule_delayed_work(&lp->ns_disc_work,
5814 + msecs_to_jiffies(lp->ns_disc_delay * 1000));
5815 + }
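+	/*
+	 * Set the request flag even when work is already scheduled so a
+	 * back-to-back RSCN triggers another pass once the current
+	 * discovery finishes (see fc_ns_disc_done()).
+	 */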
5816 + lp->ns_disc_requested = 1;
5817 + fc_lport_unlock(lp);
5818 + return 0;
5819 +}
5820 +
5821 +/* unlocked variant of scsi_target_block from scsi_lib.c */
5822 +#include "../scsi_priv.h"
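+
+/*
+ * The helpers below mirror scsi_target_block() but use
+ * __starget_for_each_device(), which does not take shost->host_lock,
+ * because fc_block_rports() already holds that lock while walking the
+ * rport list.
+ */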
5823 +
5824 +static void __device_block(struct scsi_device *sdev, void *data)
5825 +{
5826 + scsi_internal_device_block(sdev);
5827 +}
5828 +
5829 +static int __target_block(struct device *dev, void *data)
5830 +{
5831 + if (scsi_is_target_device(dev))
5832 + __starget_for_each_device(to_scsi_target(dev),
5833 + NULL, __device_block);
5834 + return 0;
5835 +}
5836 +
5837 +static void __scsi_target_block(struct device *dev)
5838 +{
5839 + if (scsi_is_target_device(dev))
5840 + __starget_for_each_device(to_scsi_target(dev),
5841 + NULL, __device_block);
5842 + else
5843 + device_for_each_child(dev, NULL, __target_block);
5844 +}
5845 +
5846 +static void fc_block_rports(struct fc_lport *lp)
5847 +{
5848 + struct Scsi_Host *shost = lp->host;
5849 + struct fc_rport *rport;
5850 + unsigned long flags;
5851 +
5852 + spin_lock_irqsave(shost->host_lock, flags);
5853 + list_for_each_entry(rport, &fc_host_rports(shost), peers) {
5854 + /* protect the name service remote port */
5855 + if (rport == lp->dns_rp)
5856 + continue;
5857 + if (rport->port_state != FC_PORTSTATE_ONLINE)
5858 + continue;
5859 + rport->port_state = FC_PORTSTATE_BLOCKED;
5860 + rport->flags |= FC_RPORT_DEVLOSS_PENDING;
5861 + __scsi_target_block(&rport->dev);
5862 + }
5863 + spin_unlock_irqrestore(shost->host_lock, flags);
5864 +}
5865 +
5866 +/*
5867 + * Fibre Channel Target discovery.
5868 + *
5869 + * Returns non-zero if discovery cannot be started.
5870 + *
5871 + * Callback is called for each target remote port found in discovery.
5872 + * When discovery is complete, the callback is called with a NULL remote port.
5873 + * Discovery may be restarted after an RSCN is received, causing the
5874 + * callback to be called after discovery complete is indicated.
5875 + */
5876 +int fc_ns_disc_start(struct fc_lport *lp)
5877 +{
5878 + struct fc_rport *rport;
5879 + int error;
5880 + struct fc_rport_identifiers ids;
5881 +
5882 + fc_lport_lock(lp);
5883 +
5884 + /*
5885 + * If not ready, or already running discovery, just set request flag.
5886 + */
5887 + if (!fc_lport_test_ready(lp) || lp->ns_disc_pending) {
5888 + lp->ns_disc_requested = 1;
5889 + fc_lport_unlock(lp);
5890 + return 0;
5891 + }
5892 + lp->ns_disc_pending = 1;
5893 + lp->ns_disc_requested = 0;
5894 + lp->ns_disc_retry_count = 0;
5895 +
5896 + /*
5897 + * Handle point-to-point mode as a simple discovery
5898 + * of the remote port.
5899 + */
5900 + rport = lp->ptp_rp;
5901 + if (rport) {
5902 + ids.port_id = rport->port_id;
5903 + ids.port_name = rport->port_name;
5904 + ids.node_name = rport->node_name;
5905 + ids.roles = FC_RPORT_ROLE_UNKNOWN;
5906 + get_device(&rport->dev);
5907 + fc_lport_unlock(lp);
5908 + error = fc_ns_new_target(lp, rport, &ids);
5909 + put_device(&rport->dev);
5910 + if (!error)
5911 + fc_ns_disc_done(lp);
5912 + } else {
5913 + fc_lport_unlock(lp);
5914 + fc_block_rports(lp);
5915 + fc_ns_gpn_ft_req(lp); /* get ports by FC-4 type */
5916 + error = 0;
5917 + }
5918 + return error;
5919 +}
5920 +
5921 +/*
5922 + * Handle resource allocation problem by retrying in a bit.
5923 + */
5924 +static void fc_ns_retry(struct fc_lport *lp)
5925 +{
5926 + if (lp->retry_count == 0)
5927 + FC_DBG("local port %6x alloc failure "
5928 + "- will retry\n", lp->fid);
5929 + if (lp->retry_count < lp->max_retry_count) {
5930 + lp->retry_count++;
5931 + mod_timer(&lp->state_timer,
5932 + jiffies + msecs_to_jiffies(lp->e_d_tov));
5933 + } else {
5934 + FC_DBG("local port %6x alloc failure "
5935 + "- retries exhausted\n", lp->fid);
5936 + fc_ns_enter_reject(lp);
5937 + }
5938 +}
5939 +
5940 +/*
5941 + * Handle errors on local port requests.
5942 + * Don't get locks if in RESET state.
5943 + * The only possible errors so far are exchange TIMEOUT and CLOSED (reset).
5944 + */
5945 +static void fc_ns_error(struct fc_lport *lp, struct fc_frame *fp)
5946 +{
5947 + if (lp->state == LPORT_ST_RESET)
5948 + return;
5949 +
5950 + fc_lport_lock(lp);
5951 + if (PTR_ERR(fp) == -FC_EX_TIMEOUT) {
5952 + if (lp->retry_count < lp->max_retry_count) {
5953 + lp->retry_count++;
5954 + fc_ns_enter_retry(lp);
5955 + } else {
5956 + fc_ns_enter_reject(lp);
5957 + }
5958 + }
5959 + if (fc_ns_debug)
5960 + FC_DBG("error %ld retries %d limit %d\n",
5961 + PTR_ERR(fp), lp->retry_count, lp->max_retry_count);
5962 + fc_lport_unlock(lp);
5963 +}
5964 +
5965 +/*
5966 + * Restart discovery after a delay due to resource shortages.
5967 + * If the error persists, the discovery will be abandoned.
5968 + */
5969 +static void fcdt_ns_retry(struct fc_lport *lp)
5970 +{
5971 + unsigned long delay = FC_NS_RETRY_DELAY;
5972 +
5973 + if (!lp->ns_disc_retry_count)
5974 + delay /= 4; /* timeout faster first time */
5975 + if (lp->ns_disc_retry_count++ < FC_NS_RETRY_LIMIT)
5976 + schedule_delayed_work(&lp->ns_disc_work,
5977 + msecs_to_jiffies(delay));
5978 + else
5979 + fc_ns_disc_done(lp);
5980 +}
5981 +
5982 +/*
5983 + * Test for dNS accept in response payload.
5984 + */
5985 +static int fc_lport_dns_acc(struct fc_frame *fp)
5986 +{
5987 + struct fc_frame_header *fh;
5988 + struct fc_ct_hdr *ct;
5989 + int rc = 0;
5990 +
5991 + fh = fc_frame_header_get(fp);
5992 + ct = fc_frame_payload_get(fp, sizeof(*ct));
5993 + if (fh && ct && fh->fh_type == FC_TYPE_CT &&
5994 + ct->ct_fs_type == FC_FST_DIR &&
5995 + ct->ct_fs_subtype == FC_NS_SUBTYPE &&
5996 + ntohs(ct->ct_cmd) == FC_FS_ACC) {
5997 + rc = 1;
5998 + }
5999 + return rc;
6000 +}
6001 +
6002 +/*
6003 + * Handle response from name server.
6004 + */
6005 +static void
6006 +fc_ns_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
6007 +{
6008 + struct fc_lport *lp = lp_arg;
6009 +
6010 + if (!IS_ERR(fp)) {
6011 + fc_lport_lock(lp);
6012 + del_timer(&lp->state_timer);
6013 + if (fc_lport_dns_acc(fp)) {
6014 + if (lp->state == LPORT_ST_REG_PN)
6015 + fc_ns_enter_reg_ft(lp);
6016 + else
6017 + fc_ns_enter_scr(lp);
6018 +
6019 + } else {
6020 + fc_ns_retry(lp);
6021 + }
6022 + fc_lport_unlock(lp);
6023 + fc_frame_free(fp);
6024 + } else
6025 + fc_ns_error(lp, fp);
6026 +}
6027 +
6028 +/*
6029 + * Handle new target found by discovery.
6030 + * Create remote port and session if needed.
6031 + * Ignore returns of our own FID & WWPN.
6032 + *
6033 + * If a non-NULL rport is passed in, it is held for the caller, but not for us.
6034 + *
6035 + * Events delivered are:
6036 + * FC_EV_READY, when remote port is rediscovered.
6037 + */
6038 +static int fc_ns_new_target(struct fc_lport *lp,
6039 + struct fc_rport *rport,
6040 + struct fc_rport_identifiers *ids)
6041 +{
6042 + struct fc_rport_libfc_priv *rp;
6043 + int error = 0;
6044 +
6045 + if (rport && ids->port_name) {
6046 + if (rport->port_name == -1) {
6047 + /*
6048 + * Set WWN and fall through to notify of create.
6049 + */
6050 + fc_rport_set_name(rport, ids->port_name,
6051 + rport->node_name);
6052 + } else if (rport->port_name != ids->port_name) {
6053 + /*
6054 + * This is a new port with the same FCID as
6055 + * a previously-discovered port. Presumably the old
6056 + * port logged out and a new port logged in and was
6057 + * assigned the same FCID. This should be rare.
6058 + * Delete the old one and fall thru to re-create.
6059 + */
6060 + fc_ns_del_target(lp, rport);
6061 + rport = NULL;
6062 + }
6063 + }
6064 + if (((ids->port_name != -1) || (ids->port_id != -1)) &&
6065 + ids->port_id != lp->fid && ids->port_name != lp->wwpn) {
6066 + if (!rport) {
6067 + rport = lp->tt.rport_lookup(lp, ids->port_id);
6068 + if (rport == NULL)
6069 + rport = lp->tt.rport_create(lp, ids);
6070 + if (!rport)
6071 +				error = -ENOMEM;
6072 + }
6073 + if (rport) {
6074 + rp = rport->dd_data;
6075 + rp->rp_state = RPORT_ST_INIT;
6076 + lp->tt.rport_login(rport);
6077 + }
6078 + }
6079 + return error;
6080 +}
6081 +
6082 +/*
6083 + * Delete the remote port.
6084 + */
6085 +static void fc_ns_del_target(struct fc_lport *lp, struct fc_rport *rport)
6086 +{
6087 + lp->tt.rport_reset(rport);
6088 + fc_remote_port_delete(rport); /* release hold from create */
6089 +}
6090 +
6091 +/*
6092 + * Done with discovery
6093 + */
6094 +static void fc_ns_disc_done(struct fc_lport *lp)
6095 +{
6096 + lp->ns_disc_done = 1;
6097 + lp->ns_disc_pending = 0;
6098 + if (lp->ns_disc_requested)
6099 + lp->tt.disc_start(lp);
6100 +}
6101 +
6102 +/**
6103 + * fc_ns_fill_dns_hdr - Fill in a name service request header
6104 + * @lp: Fibre Channel host port instance
6105 + * @ct: Common Transport (CT) header structure
6106 + * @op: Name Service request code
6107 + * @req_size: Full size of Name Service request
6108 + */
6109 +static void fc_ns_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
6110 + unsigned int op, unsigned int req_size)
6111 +{
6112 + memset(ct, 0, sizeof(*ct) + req_size);
6113 + ct->ct_rev = FC_CT_REV;
6114 + ct->ct_fs_type = FC_FST_DIR;
6115 + ct->ct_fs_subtype = FC_NS_SUBTYPE;
6116 + ct->ct_cmd = htons((u16) op);
6117 +}
6118 +
6119 +/**
6120 + * fc_ns_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
6121 + * @lp: Fibre Channel host port instance
6122 + */
6123 +static void fc_ns_gpn_ft_req(struct fc_lport *lp)
6124 +{
6125 + struct fc_frame *fp;
6126 + struct fc_seq *sp = NULL;
6127 + struct req {
6128 + struct fc_ct_hdr ct;
6129 + struct fc_ns_gid_ft gid;
6130 + } *rp;
6131 + int error = 0;
6132 +
6133 + lp->ns_disc_buf_len = 0;
6134 + lp->ns_disc_seq_count = 0;
6135 + fp = fc_frame_alloc(lp, sizeof(*rp));
6136 + if (fp == NULL) {
6137 +		error = -ENOMEM;
6138 + } else {
6139 + rp = fc_frame_payload_get(fp, sizeof(*rp));
6140 + fc_ns_fill_dns_hdr(lp, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
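+		/* restrict the query to ports that registered the FCP FC-4 type */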
6141 + rp->gid.fn_fc4_type = FC_TYPE_FCP;
6142 +
6143 + WARN_ON(!fc_lport_test_ready(lp));
6144 +
6145 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
6146 + sp = lp->tt.exch_seq_send(lp, fp,
6147 + fc_ns_gpn_ft_resp,
6148 + lp, lp->e_d_tov,
6149 + lp->fid,
6150 + lp->dns_rp->port_id,
6151 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
6152 + }
6153 + if (error || sp == NULL)
6154 + fcdt_ns_retry(lp);
6155 +}
6156 +
6157 +/*
6158 + * Handle error on dNS request.
6159 + */
6160 +static void fcdt_ns_error(struct fc_lport *lp, struct fc_frame *fp)
6161 +{
6162 + int err = PTR_ERR(fp);
6163 +
6164 + switch (err) {
6165 + case -FC_EX_TIMEOUT:
6166 + if (lp->ns_disc_retry_count++ < FC_NS_RETRY_LIMIT) {
6167 + fc_ns_gpn_ft_req(lp);
6168 + } else {
6169 + FC_DBG("err %d - ending\n", err);
6170 + fc_ns_disc_done(lp);
6171 + }
6172 + break;
6173 + default:
6174 + FC_DBG("err %d - ending\n", err);
6175 + fc_ns_disc_done(lp);
6176 + break;
6177 + }
6178 +}
6179 +
6180 +/**
6181 + * fc_ns_gpn_ft_parse - Parse the list of IDs and names resulting from a request
6182 + * @lp: Fibre Channel host port instance
6183 + * @buf: GPN_FT response buffer
6184 + * @len: size of response buffer
6185 + */
6186 +static int fc_ns_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
6187 +{
6188 + struct fc_gpn_ft_resp *np;
6189 + char *bp;
6190 + size_t plen;
6191 + size_t tlen;
6192 + int error = 0;
6193 + struct fc_ns_port *dp;
6194 +
6195 + /*
6196 + * Handle partial name record left over from previous call.
6197 + */
6198 + bp = buf;
6199 + plen = len;
6200 + np = (struct fc_gpn_ft_resp *)bp;
6201 + tlen = lp->ns_disc_buf_len;
6202 + if (tlen) {
6203 + WARN_ON(tlen >= sizeof(*np));
6204 + plen = sizeof(*np) - tlen;
6205 + WARN_ON(plen <= 0);
6206 + WARN_ON(plen >= sizeof(*np));
6207 + if (plen > len)
6208 + plen = len;
6209 + np = &lp->ns_disc_buf;
6210 + memcpy((char *)np + tlen, bp, plen);
6211 +
6212 + /*
6213 + * Set bp so that the loop below will advance it to the
6214 + * first valid full name element.
6215 + */
6216 + bp -= tlen;
6217 + len += tlen;
6218 + plen += tlen;
6219 + lp->ns_disc_buf_len = (unsigned char) plen;
6220 + if (plen == sizeof(*np))
6221 + lp->ns_disc_buf_len = 0;
6222 + }
6223 +
6224 + /*
6225 + * Handle full name records, including the one filled from above.
6226 + * Normally, np == bp and plen == len, but from the partial case above,
6227 + * bp, len describe the overall buffer, and np, plen describe the
6228 + * partial buffer, which would usually be full by now.
6229 + * After the first time through the loop, things return to "normal".
6230 + */
6231 + while (plen >= sizeof(*np)) {
6232 + dp = kzalloc(sizeof(*dp), GFP_KERNEL);
6233 + if (!dp)
6234 + break;
6235 + dp->lp = lp;
6236 + dp->ids.port_id = ntoh24(np->fp_fid);
6237 + dp->ids.port_name = ntohll(np->fp_wwpn);
6238 + dp->ids.node_name = -1;
6239 + dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
6240 + error = fc_ns_gnn_id_req(lp, dp);
6241 + if (error)
6242 + break;
6243 + if (np->fp_flags & FC_NS_FID_LAST) {
6244 + fc_ns_disc_done(lp);
6245 + len = 0;
6246 + break;
6247 + }
6248 + len -= sizeof(*np);
6249 + bp += sizeof(*np);
6250 + np = (struct fc_gpn_ft_resp *)bp;
6251 + plen = len;
6252 + }
6253 +
6254 + /*
6255 + * Save any partial record at the end of the buffer for next time.
6256 + */
6257 + if (error == 0 && len > 0 && len < sizeof(*np)) {
6258 + if (np != &lp->ns_disc_buf)
6259 + memcpy(&lp->ns_disc_buf, np, len);
6260 + lp->ns_disc_buf_len = (unsigned char) len;
6261 + } else {
6262 + lp->ns_disc_buf_len = 0;
6263 + }
6264 + return error;
6265 +}
6266 +
6267 +/*
6268 + * Handle retry of memory allocation for remote ports.
6269 + */
6270 +static void fc_ns_timeout(struct work_struct *work)
6271 +{
6272 + struct fc_lport *lp;
6273 +
6274 + lp = container_of(work, struct fc_lport, ns_disc_work.work);
6275 +
6276 + if (lp->ns_disc_pending)
6277 + fc_ns_gpn_ft_req(lp);
6278 + else
6279 + lp->tt.disc_start(lp);
6280 +}
6281 +
6282 +/**
6283 + * fc_ns_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
6284 + * @sp: Current sequence of GPN_FT exchange
6285 + * @fp: response frame
6286 + * @lp_arg: Fibre Channel host port instance
6287 + *
6288 + * The response may be in multiple frames
6289 + */
6290 +static void fc_ns_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
6291 + void *lp_arg)
6292 +{
6293 + struct fc_lport *lp = lp_arg;
6294 + struct fc_ct_hdr *cp;
6295 + struct fc_frame_header *fh;
6296 + unsigned int seq_cnt;
6297 + void *buf = NULL;
6298 + unsigned int len;
6299 + int error;
6300 +
6301 + if (IS_ERR(fp)) {
6302 + fcdt_ns_error(lp, fp);
6303 + return;
6304 + }
6305 +
6306 + WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
6307 + fh = fc_frame_header_get(fp);
6308 + len = fr_len(fp) - sizeof(*fh);
6309 + seq_cnt = ntohs(fh->fh_seq_cnt);
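+	/*
+	 * The first frame of the CT response starts the sequence (class 3
+	 * initiate SOF) and carries the CT header; continuation frames
+	 * (normal SOF) carry only further name records and are matched by
+	 * the running sequence count.
+	 */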
6310 + if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
6311 + lp->ns_disc_seq_count == 0) {
6312 + cp = fc_frame_payload_get(fp, sizeof(*cp));
6313 + if (cp == NULL) {
6314 + FC_DBG("GPN_FT response too short, len %d\n",
6315 + fr_len(fp));
6316 + } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
6317 +
6318 + /*
6319 + * Accepted. Parse response.
6320 + */
6321 + buf = cp + 1;
6322 + len -= sizeof(*cp);
6323 + } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
6324 + FC_DBG("GPN_FT rejected reason %x exp %x "
6325 + "(check zoning)\n", cp->ct_reason,
6326 + cp->ct_explan);
6327 + fc_ns_disc_done(lp);
6328 + } else {
6329 + FC_DBG("GPN_FT unexpected response code %x\n",
6330 + ntohs(cp->ct_cmd));
6331 + }
6332 + } else if (fr_sof(fp) == FC_SOF_N3 &&
6333 + seq_cnt == lp->ns_disc_seq_count) {
6334 + buf = fh + 1;
6335 + } else {
6336 + FC_DBG("GPN_FT unexpected frame - out of sequence? "
6337 + "seq_cnt %x expected %x sof %x eof %x\n",
6338 + seq_cnt, lp->ns_disc_seq_count, fr_sof(fp), fr_eof(fp));
6339 + }
6340 + if (buf) {
6341 + error = fc_ns_gpn_ft_parse(lp, buf, len);
6342 + if (error)
6343 + fcdt_ns_retry(lp);
6344 + else
6345 + lp->ns_disc_seq_count++;
6346 + }
6347 + fc_frame_free(fp);
6348 +}
6349 +
6350 +/*
6351 + * Discover the directory information for a single target.
6352 + * This could be from an RSCN that reported a change for the target.
6353 + */
6354 +static void fc_ns_single(struct fc_lport *lp, struct fc_ns_port *dp)
6355 +{
6356 + struct fc_rport *rport;
6357 +
6358 + if (dp->ids.port_id == lp->fid)
6359 + goto out;
6360 +
6361 + rport = lp->tt.rport_lookup(lp, dp->ids.port_id);
6362 + if (rport) {
6363 + fc_ns_del_target(lp, rport);
6364 + put_device(&rport->dev); /* hold from lookup */
6365 + }
6366 +
6367 + if (fc_ns_gpn_id_req(lp, dp) != 0)
6368 + goto error;
6369 + return;
6370 +error:
6371 + fc_ns_restart(lp);
6372 +out:
6373 + kfree(dp);
6374 +}
6375 +
6376 +/**
6377 + * fc_ns_gpn_id_req - Send Get Port Name by ID (GPN_ID) request
6378 + * @lp: Fibre Channel host port instance
6379 + * @dp: Temporary discovery port for holding IDs and world wide names
6380 + *
6381 + * The remote port is held by the caller for us.
6382 + */
6383 +static int fc_ns_gpn_id_req(struct fc_lport *lp, struct fc_ns_port *dp)
6384 +{
6385 + struct fc_frame *fp;
6386 + struct req {
6387 + struct fc_ct_hdr ct;
6388 + struct fc_ns_fid fid;
6389 + } *cp;
6390 + int error = 0;
6391 +
6392 + fp = fc_frame_alloc(lp, sizeof(*cp));
6393 + if (fp == NULL)
6394 + return -ENOMEM;
6395 +
6396 + cp = fc_frame_payload_get(fp, sizeof(*cp));
6397 + fc_ns_fill_dns_hdr(lp, &cp->ct, FC_NS_GPN_ID, sizeof(cp->fid));
6398 + hton24(cp->fid.fp_fid, dp->ids.port_id);
6399 +
6400 + WARN_ON(!fc_lport_test_ready(lp));
6401 +
6402 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
6403 + if (!lp->tt.exch_seq_send(lp, fp,
6404 + fc_ns_gpn_id_resp,
6405 + dp, lp->e_d_tov,
6406 + lp->fid,
6407 + lp->dns_rp->port_id,
6408 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
6409 + error = -ENOMEM;
6410 +
6411 + return error;
6412 +}
6413 +
6414 +/**
6415 + * fc_ns_gpn_id_resp - Handle response to GPN_ID
6416 + * @sp: Current sequence of GPN_ID exchange
6417 + * @fp: response frame
6418 + * @dp_arg: Temporary discovery port for holding IDs and world wide names
6419 + */
6420 +static void fc_ns_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
6421 + void *dp_arg)
6422 +{
6423 + struct fc_ns_port *dp = dp_arg;
6424 + struct fc_lport *lp;
6425 + struct resp {
6426 + struct fc_ct_hdr ct;
6427 + __be64 wwn;
6428 + } *cp;
6429 + unsigned int cmd;
6430 +
6431 + if (IS_ERR(fp)) {
6432 + fc_ns_gpn_id_error(dp, fp);
6433 + return;
6434 + }
6435 +
6436 + lp = dp->lp;
6437 + WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
6438 +
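+	/*
+	 * Pull just the CT header first to check the command code; the
+	 * full payload with the port name is fetched only on FC_FS_ACC.
+	 */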
6439 + cp = fc_frame_payload_get(fp, sizeof(cp->ct));
6440 +	if (cp == NULL) {
6441 +		FC_DBG("GPN_ID response too short, len %d\n", fr_len(fp));
6442 +		kfree(dp);
6443 +		fc_frame_free(fp);
6444 +		return;
6445 +	}
6444 + cmd = ntohs(cp->ct.ct_cmd);
6445 + switch (cmd) {
6446 + case FC_FS_ACC:
6447 + cp = fc_frame_payload_get(fp, sizeof(*cp));
6448 + if (cp == NULL) {
6449 + FC_DBG("GPN_ID response payload too short, len %d\n",
6450 + fr_len(fp));
6451 + break;
6452 + }
6453 + dp->ids.port_name = ntohll(cp->wwn);
6454 + fc_ns_gnn_id_req(lp, dp);
6455 + break;
6456 + case FC_FS_RJT:
6457 + fc_ns_restart(lp);
6458 + break;
6459 + default:
6460 + FC_DBG("GPN_ID unexpected CT response cmd %x\n", cmd);
6461 + break;
6462 + }
6463 + fc_frame_free(fp);
6464 +}
6465 +
6466 +/**
6467 + * fc_ns_gpn_id_error - Handle error from GPN_ID
6468 + * @dp: Temporary discovery port for holding IDs and world wide names
6469 + * @fp: response frame
6470 + */
6471 +static void fc_ns_gpn_id_error(struct fc_ns_port *dp, struct fc_frame *fp)
6472 +{
6473 + struct fc_lport *lp = dp->lp;
6474 +
6475 + switch (PTR_ERR(fp)) {
6476 + case -FC_EX_TIMEOUT:
6477 + fc_ns_restart(lp);
6478 + break;
6479 + case -FC_EX_CLOSED:
6480 + default:
6481 + break;
6482 + }
6483 + kfree(dp);
6484 +}
6485 +
6486 +/*
6487 + * Setup session to dNS if not already set up.
6488 + */
6489 +static void fc_ns_enter_dns(struct fc_lport *lp)
6490 +{
6491 + struct fc_rport *rport;
6492 + struct fc_rport_libfc_priv *rp;
6493 + struct fc_rport_identifiers ids = {
6494 + .port_id = FC_FID_DIR_SERV,
6495 + .port_name = -1,
6496 + .node_name = -1,
6497 + .roles = FC_RPORT_ROLE_UNKNOWN,
6498 + };
6499 +
6500 + if (fc_ns_debug)
6501 + FC_DBG("Processing DNS state\n");
6502 +
6503 + fc_lport_state_enter(lp, LPORT_ST_DNS);
6504 +
6505 + if (!lp->dns_rp) {
6506 + /*
6507 + * Set up remote port to directory server.
6508 + */
6509 +
6510 + /*
6511 + * we are called with the state_lock, but if rport_lookup_create
6512 + * needs to create a rport then it will sleep.
6513 + */
6514 + fc_lport_unlock(lp);
6515 + rport = lp->tt.rport_lookup(lp, ids.port_id);
6516 + if (rport == NULL)
6517 + rport = lp->tt.rport_create(lp, &ids);
6518 + fc_lport_lock(lp);
6519 + if (!rport)
6520 + goto err;
6521 + lp->dns_rp = rport;
6522 + }
6523 +
6524 + rport = lp->dns_rp;
6525 + rp = rport->dd_data;
6526 +
6527 + /*
6528 + * If dNS session isn't ready, start its logon.
6529 + */
6530 + if (rp->rp_state != RPORT_ST_READY) {
6531 + lp->tt.rport_login(rport);
6532 + } else {
6533 + del_timer(&lp->state_timer);
6534 + fc_ns_enter_reg_pn(lp);
6535 + }
6536 + return;
6537 +
6538 + /*
6539 +	 * Resource allocation problem (malloc).  Retry after a delay.
6540 + */
6541 +err:
6542 + fc_ns_retry(lp);
6543 +}
6544 +
6545 +/*
6546 + * Logoff DNS session.
6547 + * We should get an event call when the session has been logged out.
6548 + */
6549 +static void fc_ns_enter_dns_stop(struct fc_lport *lp)
6550 +{
6551 + struct fc_rport *rport = lp->dns_rp;
6552 +
6553 + if (fc_ns_debug)
6554 + FC_DBG("Processing DNS_STOP state\n");
6555 +
6556 + fc_lport_state_enter(lp, LPORT_ST_DNS_STOP);
6557 +
6558 + if (rport)
6559 + lp->tt.rport_logout(rport);
6560 + else
6561 + lp->tt.lport_logout(lp);
6562 +}
6563 +
6564 +/*
6565 + * Fill in dNS request header.
6566 + */
6567 +static void
6568 +fc_lport_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
6569 + unsigned int op, unsigned int req_size)
6570 +{
6571 + memset(ct, 0, sizeof(*ct) + req_size);
6572 + ct->ct_rev = FC_CT_REV;
6573 + ct->ct_fs_type = FC_FST_DIR;
6574 + ct->ct_fs_subtype = FC_NS_SUBTYPE;
6575 + ct->ct_cmd = htons(op);
6576 +}
6577 +
6578 +/*
6579 + * Register port name with name server.
6580 + */
6581 +static void fc_ns_enter_reg_pn(struct fc_lport *lp)
6582 +{
6583 + struct fc_frame *fp;
6584 + struct req {
6585 + struct fc_ct_hdr ct;
6586 + struct fc_ns_rn_id rn;
6587 + } *req;
6588 +
6589 + if (fc_ns_debug)
6590 + FC_DBG("Processing REG_PN state\n");
6591 +
6592 + fc_lport_state_enter(lp, LPORT_ST_REG_PN);
6593 + fp = fc_frame_alloc(lp, sizeof(*req));
6594 + if (!fp) {
6595 + fc_ns_retry(lp);
6596 + return;
6597 + }
6598 + req = fc_frame_payload_get(fp, sizeof(*req));
6599 + memset(req, 0, sizeof(*req));
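+	/* RPN_ID: bind our port name (WWPN) to the fabric-assigned FC_ID */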
6600 + fc_lport_fill_dns_hdr(lp, &req->ct, FC_NS_RPN_ID, sizeof(req->rn));
6601 + hton24(req->rn.fr_fid.fp_fid, lp->fid);
6602 + put_unaligned_be64(lp->wwpn, &req->rn.fr_wwn);
6603 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
6604 + if (!lp->tt.exch_seq_send(lp, fp,
6605 + fc_ns_resp, lp,
6606 + lp->e_d_tov,
6607 + lp->fid,
6608 + lp->dns_rp->port_id,
6609 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
6610 + fc_ns_retry(lp);
6611 +}
6612 +
6613 +int fc_ns_init(struct fc_lport *lp)
6614 +{
6615 + INIT_DELAYED_WORK(&lp->ns_disc_work, fc_ns_timeout);
6616 +
6617 + if (!lp->tt.disc_start)
6618 + lp->tt.disc_start = fc_ns_disc_start;
6619 +
6620 + if (!lp->tt.disc_recv_req)
6621 + lp->tt.disc_recv_req = fc_ns_recv_req;
6622 +
6623 + if (!lp->tt.dns_register)
6624 + lp->tt.dns_register = fc_ns_enter_dns;
6625 +
6626 + if (!lp->tt.disc_stop)
6627 + lp->tt.disc_stop = fc_ns_enter_dns_stop;
6628 +
6629 + return 0;
6630 +}
6631 +EXPORT_SYMBOL(fc_ns_init);
6632 +
6633 +/**
6634 + * fc_ns_gnn_id_req - Send Get Node Name by ID (GNN_ID) request
6635 + * @lp: Fibre Channel host port instance
6636 + * @dp: Temporary discovery port for holding IDs and world wide names
6637 + *
6638 + * The remote port is held by the caller for us.
6639 + */
6640 +static int fc_ns_gnn_id_req(struct fc_lport *lp, struct fc_ns_port *dp)
6641 +{
6642 + struct fc_frame *fp;
6643 + struct req {
6644 + struct fc_ct_hdr ct;
6645 + struct fc_ns_fid fid;
6646 + } *cp;
6647 + int error = 0;
6648 +
6649 + fp = fc_frame_alloc(lp, sizeof(*cp));
6650 + if (fp == NULL)
6651 + return -ENOMEM;
6652 +
6653 + cp = fc_frame_payload_get(fp, sizeof(*cp));
6654 + fc_ns_fill_dns_hdr(lp, &cp->ct, FC_NS_GNN_ID, sizeof(cp->fid));
6655 + hton24(cp->fid.fp_fid, dp->ids.port_id);
6656 +
6657 + WARN_ON(!fc_lport_test_ready(lp));
6658 +
6659 + fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
6660 + if (!lp->tt.exch_seq_send(lp, fp,
6661 + fc_ns_gnn_id_resp,
6662 + dp, lp->e_d_tov,
6663 + lp->fid,
6664 + lp->dns_rp->port_id,
6665 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
6666 + error = -ENOMEM;
6667 +
6668 + return error;
6669 +}
6670 +
6671 +/**
6672 + * fc_ns_gnn_id_resp - Handle response to GNN_ID
6673 + * @sp: Current sequence of GNN_ID exchange
6674 + * @fp: response frame
6675 + * @dp_arg: Temporary discovery port for holding IDs and world wide names
6676 + */
6677 +static void fc_ns_gnn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
6678 + void *dp_arg)
6679 +{
6680 + struct fc_ns_port *dp = dp_arg;
6681 + struct fc_lport *lp;
6682 + struct resp {
6683 + struct fc_ct_hdr ct;
6684 + __be64 wwn;
6685 + } *cp;
6686 + unsigned int cmd;
6687 +
6688 + if (IS_ERR(fp)) {
6689 + fc_ns_gnn_id_error(dp, fp);
6690 + return;
6691 + }
6692 +
6693 + lp = dp->lp;
6694 + WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
6695 +
6696 + cp = fc_frame_payload_get(fp, sizeof(cp->ct));
6697 +	if (cp == NULL) {
6698 +		FC_DBG("GNN_ID response too short, len %d\n", fr_len(fp));
6699 +		kfree(dp);
6700 +		fc_frame_free(fp);
6701 +		return;
6702 +	}
6701 + cmd = ntohs(cp->ct.ct_cmd);
6702 + switch (cmd) {
6703 + case FC_FS_ACC:
6704 + cp = fc_frame_payload_get(fp, sizeof(*cp));
6705 + if (cp == NULL) {
6706 + FC_DBG("GNN_ID response payload too short, len %d\n",
6707 + fr_len(fp));
6708 + break;
6709 + }
6710 + dp->ids.node_name = ntohll(cp->wwn);
6711 + fc_ns_new_target(lp, NULL, &dp->ids);
6712 + break;
6713 + case FC_FS_RJT:
6714 + fc_ns_restart(lp);
6715 + break;
6716 + default:
6717 + FC_DBG("GNN_ID unexpected CT response cmd %x\n", cmd);
6718 + break;
6719 + }
6720 + kfree(dp);
6721 + fc_frame_free(fp);
6722 +}
6723 +
6724 +/**
6725 + * fc_ns_gnn_id_error - Handle error from GNN_ID
6726 + * @dp: Temporary discovery port for holding IDs and world wide names
6727 + * @fp: response frame
6728 + */
6729 +static void fc_ns_gnn_id_error(struct fc_ns_port *dp, struct fc_frame *fp)
6730 +{
6731 + struct fc_lport *lp = dp->lp;
6732 +
6733 + switch (PTR_ERR(fp)) {
6734 + case -FC_EX_TIMEOUT:
6735 + fc_ns_restart(lp);
6736 + break;
6737 + case -FC_EX_CLOSED:
6738 + default:
6739 + break;
6740 + }
6741 + kfree(dp);
6742 +}
6743 +
6744 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
6745 new file mode 100644
6746 index 0000000..6d0c970
6747 --- /dev/null
6748 +++ b/drivers/scsi/libfc/fc_rport.c
6749 @@ -0,0 +1,1301 @@
6750 +/*
6751 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
6752 + *
6753 + * This program is free software; you can redistribute it and/or modify it
6754 + * under the terms and conditions of the GNU General Public License,
6755 + * version 2, as published by the Free Software Foundation.
6756 + *
6757 + * This program is distributed in the hope it will be useful, but WITHOUT
6758 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6759 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6760 + * more details.
6761 + *
6762 + * You should have received a copy of the GNU General Public License along with
6763 + * this program; if not, write to the Free Software Foundation, Inc.,
6764 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6765 + *
6766 + * Maintained at www.Open-FCoE.org
6767 + */
6768 +
6769 +/*
6770 + * Remote Port support.
6771 + *
6772 + * A remote port structure contains information about an N port to which we
6773 + * will create sessions.
6774 + */
6775 +
6776 +#include <linux/kernel.h>
6777 +#include <linux/spinlock.h>
6778 +#include <linux/interrupt.h>
6779 +#include <linux/rcupdate.h>
6780 +#include <linux/timer.h>
6781 +#include <linux/workqueue.h>
6782 +#include <asm/unaligned.h>
6783 +
6784 +#include <scsi/libfc/libfc.h>
6785 +
6786 +static int fc_rp_debug;
6787 +
6788 +/*
6789 + * static functions.
6790 + */
6791 +static void fc_rport_enter_start(struct fc_rport *);
6792 +static void fc_rport_enter_plogi(struct fc_rport *);
6793 +static void fc_rport_enter_prli(struct fc_rport *);
6794 +static void fc_rport_enter_rtv(struct fc_rport *);
6795 +static void fc_rport_enter_logo(struct fc_rport *);
6796 +static void fc_rport_recv_plogi_req(struct fc_rport *,
6797 + struct fc_seq *, struct fc_frame *);
6798 +static void fc_rport_recv_prli_req(struct fc_rport *,
6799 + struct fc_seq *, struct fc_frame *);
6800 +static void fc_rport_recv_prlo_req(struct fc_rport *,
6801 + struct fc_seq *, struct fc_frame *);
6802 +static void fc_rport_recv_logo_req(struct fc_rport *,
6803 + struct fc_seq *, struct fc_frame *);
6804 +static void fc_rport_timeout(struct work_struct *);
6805 +
6806 +static struct fc_rport *fc_remote_port_create(struct fc_lport *,
6807 + struct fc_rport_identifiers *);
6808 +
6809 +/**
6810 + * fc_rport_lookup - lookup a remote port by port_id
6811 + * @lp: Fibre Channel host port instance
6812 + * @fid: remote port port_id to match
6813 + */
6814 +struct fc_rport *fc_rport_lookup(const struct fc_lport *lp, u32 fid)
6815 +{
6816 + struct Scsi_Host *shost = lp->host;
6817 + struct fc_rport *rport, *found;
6818 + unsigned long flags;
6819 +
6820 + found = NULL;
6821 + spin_lock_irqsave(shost->host_lock, flags);
6822 + list_for_each_entry(rport, &fc_host_rports(shost), peers)
6823 + if (rport->port_id == fid &&
6824 + rport->port_state == FC_PORTSTATE_ONLINE) {
6825 + found = rport;
6826 + get_device(&found->dev);
6827 + break;
6828 + }
6829 + spin_unlock_irqrestore(shost->host_lock, flags);
6830 + return found;
6831 +}
6832 +
6833 +/**
6834 + * fc_remote_port_create - create a remote port
6835 + * @lp: Fibre Channel host port instance
6836 + * @ids: remote port identifiers (port_id, port_name, and node_name must be set)
6837 + */
6838 +static struct fc_rport *fc_remote_port_create(struct fc_lport *lp,
6839 + struct fc_rport_identifiers *ids)
6840 +{
6841 + struct fc_rport_libfc_priv *rp;
6842 + struct fc_rport *rport;
6843 +
6844 + rport = fc_remote_port_add(lp->host, 0, ids);
6845 + if (!rport)
6846 + return NULL;
6847 +
6848 + rp = rport->dd_data;
6849 + rp->local_port = lp;
6850 +
6851 + /* default value until service parameters are exchanged in PLOGI */
6852 + rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
6853 +
6854 + spin_lock_init(&rp->rp_lock);
6855 + rp->rp_state = RPORT_ST_INIT;
6856 + rp->local_port = lp;
6857 + rp->e_d_tov = lp->e_d_tov;
6858 + rp->r_a_tov = lp->r_a_tov;
6859 + rp->flags = FC_RP_FLAGS_REC_SUPPORTED;
6860 + INIT_DELAYED_WORK(&rp->retry_work, fc_rport_timeout);
6861 +
6862 + return rport;
6863 +}
6864 +
6865 +static inline void fc_rport_lock(struct fc_rport *rport)
6866 +{
6867 + struct fc_rport_libfc_priv *rp = rport->dd_data;
6868 + spin_lock_bh(&rp->rp_lock);
6869 +}
6870 +
6871 +static inline void fc_rport_unlock(struct fc_rport *rport)
6872 +{
6873 + struct fc_rport_libfc_priv *rp = rport->dd_data;
6874 + spin_unlock_bh(&rp->rp_lock);
6875 +}
6876 +
6877 +/**
6878 + * fc_plogi_get_maxframe - Get max payload from the common service parameters
6879 + * @flp: FLOGI payload structure
6880 + * @maxval: upper limit, may be less than what is in the service parameters
6881 + */
6882 +static unsigned int
6883 +fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
6884 +{
6885 + unsigned int mfs;
6886 +
6887 + /*
6888 + * Get max payload from the common service parameters and the
6889 + * class 3 receive data field size.
6890 + */
6891 + mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
6892 + if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
6893 + maxval = mfs;
6894 + mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
6895 + if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
6896 + maxval = mfs;
6897 + return maxval;
6898 +}
6899 +
6900 +/**
6901 + * fc_lport_plogi_fill - Fill in PLOGI command for request
6902 + * @lp: Fibre Channel host port instance
6903 + * @plogi: PLOGI command structure to fill (same structure as FLOGI)
6904 + * @op: either ELS_PLOGI for a locally generated request, or ELS_LS_ACC
6905 + */
6906 +static void
6907 +fc_lport_plogi_fill(struct fc_lport *lp,
6908 + struct fc_els_flogi *plogi, unsigned int op)
6909 +{
6910 + struct fc_els_csp *sp;
6911 + struct fc_els_cssp *cp;
6912 +
6913 + memset(plogi, 0, sizeof(*plogi));
6914 + plogi->fl_cmd = (u8) op;
6915 + put_unaligned_be64(lp->wwpn, &plogi->fl_wwpn);
6916 + put_unaligned_be64(lp->wwnn, &plogi->fl_wwnn);
6917 +
6918 + sp = &plogi->fl_csp;
6919 + sp->sp_hi_ver = 0x20;
6920 + sp->sp_lo_ver = 0x20;
6921 + sp->sp_bb_cred = htons(10); /* this gets set by gateway */
6922 + sp->sp_bb_data = htons((u16) lp->mfs);
6923 + cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
6924 + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
6925 + if (op != ELS_FLOGI) {
6926 + sp->sp_features = htons(FC_SP_FT_CIRO);
6927 + sp->sp_tot_seq = htons(255); /* seq. we accept */
6928 + sp->sp_rel_off = htons(0x1f);
6929 + sp->sp_e_d_tov = htonl(lp->e_d_tov);
6930 +
6931 + cp->cp_rdfs = htons((u16) lp->mfs);
6932 + cp->cp_con_seq = htons(255);
6933 + cp->cp_open_seq = 1;
6934 + }
6935 +}
6936 +
6937 +static void fc_rport_state_enter(struct fc_rport *rport,
6938 + enum fc_rport_state new)
6939 +{
6940 + struct fc_rport_libfc_priv *rp = rport->dd_data;
6941 + if (rp->rp_state != new)
6942 + rp->retries = 0;
6943 + rp->rp_state = new;
6944 +}
6945 +
6946 +/**
6947 + * fc_rport_login - Start the remote port login state machine
6948 + * @rport: Fibre Channel remote port
6949 + */
6950 +int fc_rport_login(struct fc_rport *rport)
6951 +{
6952 + struct fc_rport_libfc_priv *rp = rport->dd_data;
6953 + struct fc_lport *lp = rp->local_port;
6954 +
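+	/*
+	 * From INIT, kick off the login state machine.  A port found in
+	 * ERROR is put back to INIT; if it was the dNS remote port, the
+	 * local port state machine is handed its next login or logout
+	 * step.
+	 */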
6955 + fc_rport_lock(rport);
6956 + if (rp->rp_state == RPORT_ST_INIT) {
6957 + fc_rport_unlock(rport);
6958 + fc_rport_enter_start(rport);
6959 + } else if (rp->rp_state == RPORT_ST_ERROR) {
6960 + fc_rport_state_enter(rport, RPORT_ST_INIT);
6961 + fc_rport_unlock(rport);
6962 + if (fc_rp_debug)
6963 + FC_DBG("remote %6x closed\n", rport->port_id);
6964 +
6965 + if (rport == lp->dns_rp &&
6966 + lp->state != LPORT_ST_RESET) {
6967 + fc_lport_lock(lp);
6968 + del_timer(&lp->state_timer);
6969 + lp->dns_rp = NULL;
6970 +
6971 + if (lp->state == LPORT_ST_DNS_STOP) {
6972 + fc_lport_unlock(lp);
6973 + lp->tt.lport_logout(lp);
6974 + } else {
6975 + lp->tt.lport_login(lp);
6976 + fc_lport_unlock(lp);
6977 + }
6978 + fc_remote_port_delete(rport);
6979 + }
6980 + } else
6981 + fc_rport_unlock(rport);
6982 +
6983 + return 0;
6984 +}
6985 +
6986 +/*
6987 + * Stop the session - log it off.
6988 + */
6989 +int fc_rport_logout(struct fc_rport *rport)
6990 +{
6991 + struct fc_rport_libfc_priv *rp = rport->dd_data;
6992 + struct fc_lport *lp = rp->local_port;
6993 +
6994 + fc_rport_lock(rport);
6995 + switch (rp->rp_state) {
6996 + case RPORT_ST_PRLI:
6997 + case RPORT_ST_RTV:
6998 + case RPORT_ST_READY:
6999 + fc_rport_enter_logo(rport);
7000 + fc_rport_unlock(rport);
7001 + break;
7002 + default:
7003 + fc_rport_state_enter(rport, RPORT_ST_INIT);
7004 + fc_rport_unlock(rport);
7005 + if (fc_rp_debug)
7006 + FC_DBG("remote %6x closed\n", rport->port_id);
7007 + if (rport == lp->dns_rp &&
7008 + lp->state != LPORT_ST_RESET) {
7009 + fc_lport_lock(lp);
7010 + del_timer(&lp->state_timer);
7011 + lp->dns_rp = NULL;
7012 +
7013 + if (lp->state == LPORT_ST_DNS_STOP) {
7014 + fc_lport_unlock(lp);
7015 + lp->tt.lport_logout(lp);
7016 + } else {
7017 + lp->tt.lport_login(lp);
7018 + fc_lport_unlock(lp);
7019 + }
7020 +
7021 + fc_remote_port_delete(rport);
7022 + }
7023 + break;
7024 + }
7025 +
7026 + return 0;
7027 +}
7028 +
7029 +/*
7030 + * Reset the session - assume it is logged off. Used after fabric logoff.
7031 + * The local port code takes care of resetting the exchange manager.
7032 + */
7033 +void fc_rport_reset(struct fc_rport *rport)
7034 +{
7035 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7036 + struct fc_lport *lp;
7037 +
7038 + if (fc_rp_debug)
7039 + FC_DBG("sess to %6x reset\n", rport->port_id);
7040 + fc_rport_lock(rport);
7041 +
7042 + lp = rp->local_port;
7043 + fc_rport_state_enter(rport, RPORT_ST_INIT);
7044 + fc_rport_unlock(rport);
7045 +
7046 + if (fc_rp_debug)
7047 + FC_DBG("remote %6x closed\n", rport->port_id);
7048 + if (rport == lp->dns_rp &&
7049 + lp->state != LPORT_ST_RESET) {
7050 + fc_lport_lock(lp);
7051 + del_timer(&lp->state_timer);
7052 + lp->dns_rp = NULL;
7053 + if (lp->state == LPORT_ST_DNS_STOP) {
7054 + fc_lport_unlock(lp);
7055 + lp->tt.lport_logout(lp);
7056 + } else {
7057 + lp->tt.lport_login(lp);
7058 + fc_lport_unlock(lp);
7059 + }
7060 + fc_remote_port_delete(rport);
7061 + }
7062 +}
7063 +
7064 +/*
7065 + * Reset all sessions for a local port session list.
7066 + */
7067 +void fc_rport_reset_list(struct fc_lport *lp)
7068 +{
7069 + struct Scsi_Host *shost = lp->host;
7070 + struct fc_rport *rport;
7071 + struct fc_rport *next;
7072 + unsigned long flags;
7073 +
7074 + spin_lock_irqsave(shost->host_lock, flags);
7075 + list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
7076 + lp->tt.rport_reset(rport);
7077 + }
7078 + spin_unlock_irqrestore(shost->host_lock, flags);
7079 +}
7080 +
7081 +static void fc_rport_enter_start(struct fc_rport *rport)
7082 +{
7083 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7084 + struct fc_lport *lp = rp->local_port;
7085 +
7086 + /*
7087 + * If the local port is already logged on, advance to next state.
7088 + * Otherwise the local port will be logged on by fc_rport_unlock().
7089 + */
7090 + fc_rport_state_enter(rport, RPORT_ST_STARTED);
7091 +
7092 + if (rport == lp->dns_rp || fc_lport_test_ready(lp))
7093 + fc_rport_enter_plogi(rport);
7094 +}
7095 +
7096 +/*
7097 + * Handle exchange reject or retry exhaustion in various states.
7098 + */
7099 +static void fc_rport_reject(struct fc_rport *rport)
7100 +{
7101 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7102 + struct fc_lport *lp = rp->local_port;
7103 + switch (rp->rp_state) {
7104 + case RPORT_ST_PLOGI:
7105 + case RPORT_ST_PRLI:
7106 + fc_rport_state_enter(rport, RPORT_ST_ERROR);
7107 + if (rport == lp->dns_rp &&
7108 + lp->state != LPORT_ST_RESET) {
7109 + fc_lport_lock(lp);
7110 + del_timer(&lp->state_timer);
7111 + lp->dns_rp = NULL;
7112 + if (lp->state == LPORT_ST_DNS_STOP) {
7113 + fc_lport_unlock(lp);
7114 + lp->tt.lport_logout(lp);
7115 + } else {
7116 + lp->tt.lport_login(lp);
7117 + fc_lport_unlock(lp);
7118 + }
7119 + fc_remote_port_delete(rport);
7120 + }
7121 + break;
7122 + case RPORT_ST_RTV:
7123 + fc_rport_state_enter(rport, RPORT_ST_READY);
7124 + if (fc_rp_debug)
7125 + FC_DBG("remote %6x ready\n", rport->port_id);
7126 + if (rport == lp->dns_rp &&
7127 + lp->state == LPORT_ST_DNS) {
7128 + fc_lport_lock(lp);
7129 + del_timer(&lp->state_timer);
7130 + lp->tt.dns_register(lp);
7131 + fc_lport_unlock(lp);
7132 + }
7133 + break;
7134 + case RPORT_ST_LOGO:
7135 + fc_rport_state_enter(rport, RPORT_ST_INIT);
7136 + if (fc_rp_debug)
7137 + FC_DBG("remote %6x closed\n", rport->port_id);
7138 + if (rport == lp->dns_rp &&
7139 + lp->state != LPORT_ST_RESET) {
7140 + fc_lport_lock(lp);
7141 + del_timer(&lp->state_timer);
7142 + lp->dns_rp = NULL;
7143 + if (lp->state == LPORT_ST_DNS_STOP) {
7144 + fc_lport_unlock(lp);
7145 + lp->tt.lport_logout(lp);
7146 + } else {
7147 + lp->tt.lport_login(lp);
7148 + fc_lport_unlock(lp);
7149 + }
7150 + fc_remote_port_delete(rport);
7151 + }
7152 + break;
7153 + case RPORT_ST_NONE:
7154 + case RPORT_ST_READY:
7155 + case RPORT_ST_ERROR:
7156 + case RPORT_ST_PLOGI_RECV:
7157 + case RPORT_ST_STARTED:
7158 + case RPORT_ST_INIT:
7159 + BUG();
7160 + break;
7161 + }
7162 + return;
7163 +}
7164 +
7165 +/*
7166 + * Timeout handler for retrying after allocation failures or exchange timeout.
7167 + */
7168 +static void fc_rport_timeout(struct work_struct *work)
7169 +{
7170 + struct fc_rport_libfc_priv *rp =
7171 + container_of(work, struct fc_rport_libfc_priv, retry_work.work);
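+	/*
+	 * The libfc private data lives in rport->dd_data, placed directly
+	 * after struct fc_rport in the same allocation, so stepping back
+	 * by sizeof(struct fc_rport) recovers the rport.
+	 */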
7172 + struct fc_rport *rport = (((void *)rp) - sizeof(struct fc_rport));
7173 +
7174 + switch (rp->rp_state) {
7175 + case RPORT_ST_PLOGI:
7176 + fc_rport_enter_plogi(rport);
7177 + break;
7178 + case RPORT_ST_PRLI:
7179 + fc_rport_enter_prli(rport);
7180 + break;
7181 + case RPORT_ST_RTV:
7182 + fc_rport_enter_rtv(rport);
7183 + break;
7184 + case RPORT_ST_LOGO:
7185 + fc_rport_enter_logo(rport);
7186 + break;
7187 + case RPORT_ST_READY:
7188 + case RPORT_ST_ERROR:
7189 + case RPORT_ST_INIT:
7190 + break;
7191 + case RPORT_ST_NONE:
7192 + case RPORT_ST_PLOGI_RECV:
7193 + case RPORT_ST_STARTED:
7194 + BUG();
7195 + break;
7196 + }
7197 + put_device(&rport->dev);
7198 +}
7199 +
7200 +/*
7201 + * Handle retry for allocation failure via timeout.
7202 + */
7203 +static void fc_rport_retry(struct fc_rport *rport)
7204 +{
7205 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7206 + struct fc_lport *lp = rp->local_port;
7207 +
7208 + if (rp->retries < lp->max_retry_count) {
7209 + rp->retries++;
7210 + get_device(&rport->dev);
7211 + schedule_delayed_work(&rp->retry_work,
7212 + msecs_to_jiffies(rp->e_d_tov));
7213 + } else {
7214 + FC_DBG("sess %6x alloc failure in state %d, "
7215 + "retries exhausted\n",
7216 + rport->port_id, rp->rp_state);
7217 + fc_rport_reject(rport);
7218 + }
7219 +}
7220 +
7221 +/*
7222 + * Handle error from a sequence issued by the rport state machine.
7223 + */
7224 +static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
7225 +{
7226 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7227 + fc_rport_lock(rport);
7228 + if (fc_rp_debug)
7229 + FC_DBG("state %d error %ld retries %d\n",
7230 + rp->rp_state, PTR_ERR(fp), rp->retries);
7231 +
7232 + if (PTR_ERR(fp) == -FC_EX_TIMEOUT &&
7233 +	    rp->retries++ < rp->local_port->max_retry_count) {
7234 + get_device(&rport->dev);
7235 + schedule_delayed_work(&rp->retry_work, 0);
7236 + } else
7237 + fc_rport_reject(rport);
7238 +
7239 + fc_rport_unlock(rport);
7240 +}
7241 +
7242 +/**
7243 + * fc_rport_plogi_resp - Handle incoming ELS PLOGI response
7244 + * @sp: current sequence in the PLOGI exchange
7245 + * @fp: response frame
7246 + * @rp_arg: Fibre Channel remote port
7247 + */
7248 +static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
7249 + void *rp_arg)
7250 +{
7251 + struct fc_els_ls_rjt *rjp;
7252 + struct fc_els_flogi *plp;
7253 + u64 wwpn, wwnn;
7254 + unsigned int tov;
7255 + u16 csp_seq;
7256 + u16 cssp_seq;
7257 + u8 op;
7258 + struct fc_rport *rport = rp_arg;
7259 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7260 +
7261 + if (!IS_ERR(fp)) {
7262 + op = fc_frame_payload_op(fp);
7263 + fc_rport_lock(rport);
7264 + if (op == ELS_LS_ACC &&
7265 + (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
7266 + wwpn = get_unaligned_be64(&plp->fl_wwpn);
7267 + wwnn = get_unaligned_be64(&plp->fl_wwnn);
7268 +
7269 + fc_rport_set_name(rport, wwpn, wwnn);
7270 + tov = ntohl(plp->fl_csp.sp_e_d_tov);
7271 + if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
7272 + tov /= 1000;
7273 + if (tov > rp->e_d_tov)
7274 + rp->e_d_tov = tov;
7275 + csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
7276 + cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
7277 + if (cssp_seq < csp_seq)
7278 + csp_seq = cssp_seq;
7279 + rp->max_seq = csp_seq;
7280 + rport->maxframe_size =
7281 + fc_plogi_get_maxframe(plp, rp->local_port->mfs);
7282 + if (rp->rp_state == RPORT_ST_PLOGI)
7283 + fc_rport_enter_prli(rport);
7284 + } else {
7285 + if (fc_rp_debug)
7286 + FC_DBG("bad PLOGI response\n");
7287 +
7288 + rjp = fc_frame_payload_get(fp, sizeof(*rjp));
7289 + if (op == ELS_LS_RJT && rjp != NULL &&
7290 + rjp->er_reason == ELS_RJT_INPROG)
7291 + fc_rport_retry(rport); /* try again */
7292 + else
7293 + fc_rport_reject(rport); /* error */
7294 + }
7295 + fc_rport_unlock(rport);
7296 + fc_frame_free(fp);
7297 + } else {
7298 + fc_rport_error(rport, fp);
7299 + }
7300 +}
7301 +
7302 +/**
7303 + * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
7304 + * @rport: Fibre Channel remote port to send PLOGI to
7305 + */
7306 +static void fc_rport_enter_plogi(struct fc_rport *rport)
7307 +{
7308 + struct fc_frame *fp;
7309 + struct fc_els_flogi *plogi;
7310 + struct fc_lport *lp;
7311 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7312 +
7313 + lp = rp->local_port;
7314 + fc_rport_state_enter(rport, RPORT_ST_PLOGI);
7315 + rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
7316 + fp = fc_frame_alloc(lp, sizeof(*plogi));
7317 + if (!fp)
7318 + return fc_rport_retry(rport);
7319 + plogi = fc_frame_payload_get(fp, sizeof(*plogi));
7320 + WARN_ON(!plogi);
7321 + fc_lport_plogi_fill(rp->local_port, plogi, ELS_PLOGI);
7322 + rp->e_d_tov = lp->e_d_tov;
7323 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
7324 + if (!lp->tt.exch_seq_send(lp, fp,
7325 + fc_rport_plogi_resp,
7326 + rport, lp->e_d_tov,
7327 + rp->local_port->fid,
7328 + rport->port_id,
7329 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
7330 + fc_rport_retry(rport);
7331 +}
7332 +
7333 +/**
7334 + * fc_rport_prli_resp - Process Login (PRLI) response handler
7335 + * @sp: current sequence in the PRLI exchange
7336 + * @fp: response frame
7337 + * @rp_arg: Fibre Channel remote port
7338 + */
7339 +static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
7340 + void *rp_arg)
7341 +{
7342 + struct fc_rport *rport = rp_arg;
7343 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7344 + struct fc_lport *lp = rp->local_port;
7345 + struct {
7346 + struct fc_els_prli prli;
7347 + struct fc_els_spp spp;
7348 + } *pp;
7349 + u32 roles = FC_RPORT_ROLE_UNKNOWN;
7350 + u32 fcp_parm = 0;
7351 + u8 op;
7352 +
7353 + if (IS_ERR(fp)) {
7354 + fc_rport_error(rport, fp);
7355 + return;
7356 + }
7357 +
7358 + fc_rport_lock(rport);
7359 + op = fc_frame_payload_op(fp);
7360 + if (op == ELS_LS_ACC) {
7361 + pp = fc_frame_payload_get(fp, sizeof(*pp));
7362 + if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
7363 + fcp_parm = ntohl(pp->spp.spp_params);
7364 + if (fcp_parm & FCP_SPPF_RETRY)
7365 + rp->flags |= FC_RP_FLAGS_RETRY;
7366 + }
7367 +
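+		/*
+		 * Translate the FCP service-parameter page into transport
+		 * roles; fc_remote_port_rolechg() reports them to the SCSI
+		 * transport once the rport lock is dropped.
+		 */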
7368 + rport->supported_classes = FC_COS_CLASS3;
7369 + if (fcp_parm & FCP_SPPF_INIT_FCN)
7370 + roles |= FC_RPORT_ROLE_FCP_INITIATOR;
7371 + if (fcp_parm & FCP_SPPF_TARG_FCN)
7372 + roles |= FC_RPORT_ROLE_FCP_TARGET;
7373 +
7374 + fc_rport_enter_rtv(rport);
7375 + fc_rport_unlock(rport);
7376 + fc_remote_port_rolechg(rport, roles);
7377 + } else {
7378 + FC_DBG("bad ELS response\n");
7379 + fc_rport_state_enter(rport, RPORT_ST_ERROR);
7380 + fc_rport_unlock(rport);
7381 + if (rport == lp->dns_rp && lp->state != LPORT_ST_RESET) {
7382 + fc_lport_lock(lp);
7383 + del_timer(&lp->state_timer);
7384 + lp->dns_rp = NULL;
7385 + if (lp->state == LPORT_ST_DNS_STOP) {
7386 + fc_lport_unlock(lp);
7387 + lp->tt.lport_logout(lp);
7388 + } else {
7389 + lp->tt.lport_login(lp);
7390 + fc_lport_unlock(lp);
7391 + }
7392 + fc_remote_port_delete(rport);
7393 + }
7394 + }
7395 +
7396 + fc_frame_free(fp);
7397 +}
7398 +
7399 +/**
7400 + * fc_rport_logo_resp - Logout (LOGO) response handler
7401 + * @sp: current sequence in the LOGO exchange
7402 + * @fp: response frame
7403 + * @rp_arg: Fibre Channel remote port
7404 + */
7405 +static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
7406 + void *rp_arg)
7407 +{
7408 + struct fc_rport *rport = rp_arg;
7409 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7410 + struct fc_lport *lp = rp->local_port;
7411 + u8 op;
7412 +
7413 + if (IS_ERR(fp)) {
7414 + fc_rport_error(rport, fp);
7415 + return;
7416 + }
7417 +
7418 + fc_rport_lock(rport);
7419 + op = fc_frame_payload_op(fp);
7420 + if (op == ELS_LS_ACC) {
7421 + fc_rport_enter_rtv(rport);
7422 + fc_rport_unlock(rport);
7423 + } else {
7424 + FC_DBG("bad ELS response\n");
7425 + fc_rport_state_enter(rport, RPORT_ST_ERROR);
7426 + fc_rport_unlock(rport);
7427 + if (rport == lp->dns_rp && lp->state != LPORT_ST_RESET) {
7428 + fc_lport_lock(lp);
7429 + del_timer(&lp->state_timer);
7430 + lp->dns_rp = NULL;
7431 + if (lp->state == LPORT_ST_DNS_STOP) {
7432 + fc_lport_unlock(lp);
7433 + lp->tt.lport_logout(lp);
7434 + } else {
7435 + lp->tt.lport_login(lp);
7436 + fc_lport_unlock(lp);
7437 + }
7438 + fc_remote_port_delete(rport);
7439 + }
7440 + }
7441 +
7442 + fc_frame_free(fp);
7443 +}
7444 +
7445 +/**
7446 + * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
7447 + * @rport: Fibre Channel remote port to send PRLI to
7448 + */
7449 +static void fc_rport_enter_prli(struct fc_rport *rport)
7450 +{
7451 + struct {
7452 + struct fc_els_prli prli;
7453 + struct fc_els_spp spp;
7454 + } *pp;
7455 + struct fc_frame *fp;
7456 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7457 + struct fc_lport *lp = rp->local_port;
7458 +
7459 + fc_rport_state_enter(rport, RPORT_ST_PRLI);
7460 +
7461 + /*
7462 + * Special case if session is for name server or any other
7463 + * well-known address: Skip the PRLI step.
7464 + * This should be made more general, possibly moved to the FCP layer.
7465 + */
7466 + if (rport->port_id >= FC_FID_DOM_MGR) {
7467 + fc_rport_state_enter(rport, RPORT_ST_READY);
7468 + if (fc_rp_debug)
7469 + FC_DBG("remote %6x ready\n", rport->port_id);
7470 + if (rport == lp->dns_rp &&
7471 + lp->state == LPORT_ST_DNS) {
7472 + fc_lport_lock(lp);
7473 + del_timer(&lp->state_timer);
7474 + lp->tt.dns_register(lp);
7475 + fc_lport_unlock(lp);
7476 + }
7477 + return;
7478 + }
7479 + fp = fc_frame_alloc(lp, sizeof(*pp));
7480 + if (!fp)
7481 + return fc_rport_retry(rport);
7482 + pp = fc_frame_payload_get(fp, sizeof(*pp));
7483 + WARN_ON(!pp);
7484 + memset(pp, 0, sizeof(*pp));
7485 + pp->prli.prli_cmd = ELS_PRLI;
7486 + pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
7487 + pp->prli.prli_len = htons(sizeof(*pp));
7488 + pp->spp.spp_type = FC_TYPE_FCP;
7489 + pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
7490 + pp->spp.spp_params = htonl(rp->local_port->service_params);
7491 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
7492 + if (!lp->tt.exch_seq_send(lp, fp,
7493 + fc_rport_prli_resp,
7494 + rport, lp->e_d_tov,
7495 + rp->local_port->fid,
7496 + rport->port_id,
7497 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
7498 + fc_rport_retry(rport);
7499 +}
7500 +
7501 +/**
7502 + * fc_rport_els_rtv_resp - Request Timeout Value response handler
7503 + * @sp: current sequence in the RTV exchange
7504 + * @fp: response frame
7505 + * @rp_arg: Fibre Channel remote port
7506 + *
7507 + * Many targets don't seem to support this.
7508 + */
7509 +static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
7510 + void *rp_arg)
7511 +{
7512 + struct fc_rport *rport = rp_arg;
7513 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7514 + struct fc_lport *lp = rp->local_port;
7515 + u8 op;
7516 +
7517 + if (IS_ERR(fp)) {
7518 + fc_rport_error(rport, fp);
7519 + return;
7520 + }
7521 +
7522 + fc_rport_lock(rport);
7523 + op = fc_frame_payload_op(fp);
7524 + if (op == ELS_LS_ACC) {
7525 + struct fc_els_rtv_acc *rtv;
7526 + u32 toq;
7527 + u32 tov;
7528 +
7529 + rtv = fc_frame_payload_get(fp, sizeof(*rtv));
7530 + if (rtv) {
7531 + toq = ntohl(rtv->rtv_toq);
7532 + tov = ntohl(rtv->rtv_r_a_tov);
7533 + if (tov == 0)
7534 + tov = 1;
7535 + rp->r_a_tov = tov;
7536 + tov = ntohl(rtv->rtv_e_d_tov);
7537 + if (toq & FC_ELS_RTV_EDRES)
7538 + tov /= 1000000;
7539 + if (tov == 0)
7540 + tov = 1;
7541 + rp->e_d_tov = tov;
7542 + }
7543 + }
7544 + fc_rport_state_enter(rport, RPORT_ST_READY);
7545 + fc_rport_unlock(rport);
7546 + if (fc_rp_debug)
7547 + FC_DBG("remote %6x ready\n", rport->port_id);
7548 + if (rport == lp->dns_rp &&
7549 + lp->state == LPORT_ST_DNS) {
7550 + fc_lport_lock(lp);
7551 + del_timer(&lp->state_timer);
7552 + lp->tt.dns_register(lp);
7553 + fc_lport_unlock(lp);
7554 + }
7555 + fc_frame_free(fp);
7556 +}
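
The subtle part of the accept path is unit handling: when FC_ELS_RTV_EDRES is set in the timeout qualifier, the peer reports E_D_TOV with nanosecond resolution instead of milliseconds, hence the divide by one million; clamping both timeouts to a minimum of 1 keeps a zero value from disabling timeout-driven recovery. A worked sketch with hypothetical values:

	u32 toq = FC_ELS_RTV_EDRES;	/* resolution bit set by the peer */
	u32 tov = 2000000000;		/* 2 seconds, in nanoseconds */

	if (toq & FC_ELS_RTV_EDRES)
		tov /= 1000000;		/* normalize to milliseconds: 2000 */
	if (tov == 0)
		tov = 1;		/* never store a zero timeout */
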
7557 +
7558 +/**
7559 + * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
7560 + * @rport: Fibre Channel remote port to send RTV to
7561 + */
7562 +static void fc_rport_enter_rtv(struct fc_rport *rport)
7563 +{
7564 + struct fc_els_rtv *rtv;
7565 + struct fc_frame *fp;
7566 + struct fc_lport *lp;
7567 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7568 +
7569 + lp = rp->local_port;
7570 + fc_rport_state_enter(rport, RPORT_ST_RTV);
7571 +
7572 + fp = fc_frame_alloc(lp, sizeof(*rtv));
7573 + if (!fp)
7574 + return fc_rport_retry(rport);
7575 + rtv = fc_frame_payload_get(fp, sizeof(*rtv));
7576 + WARN_ON(!rtv);
7577 + memset(rtv, 0, sizeof(*rtv));
7578 + rtv->rtv_cmd = ELS_RTV;
7579 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
7580 + if (!lp->tt.exch_seq_send(lp, fp,
7581 + fc_rport_rtv_resp,
7582 + rport, lp->e_d_tov,
7583 + rp->local_port->fid,
7584 + rport->port_id,
7585 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
7586 + fc_rport_retry(rport);
7587 +}
7588 +
7589 +/**
7590 + * fc_rport_enter_logo - Send Logout (LOGO) request to peer
7591 + * @rport: Fibre Channel remote port to send LOGO to
7592 + */
7593 +static void fc_rport_enter_logo(struct fc_rport *rport)
7594 +{
7595 + struct fc_frame *fp;
7596 + struct fc_els_logo *logo;
7597 + struct fc_lport *lp;
7598 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7599 +
7600 + fc_rport_state_enter(rport, RPORT_ST_LOGO);
7601 +
7602 + lp = rp->local_port;
7603 + fp = fc_frame_alloc(lp, sizeof(*logo));
7604 + if (!fp)
7605 + return fc_rport_retry(rport);
7606 + logo = fc_frame_payload_get(fp, sizeof(*logo));
7607 + memset(logo, 0, sizeof(*logo));
7608 + logo->fl_cmd = ELS_LOGO;
7609 + hton24(logo->fl_n_port_id, lp->fid);
7610 + logo->fl_n_port_wwn = htonll(lp->wwpn);
7611 +
7612 + fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
7613 + if (!lp->tt.exch_seq_send(lp, fp,
7614 + fc_rport_logo_resp,
7615 + rport, lp->e_d_tov,
7616 + rp->local_port->fid,
7617 + rport->port_id,
7618 + FC_FC_SEQ_INIT | FC_FC_END_SEQ))
7619 + fc_rport_retry(rport);
7620 +}
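
All three request paths above (PRLI, RTV, LOGO) follow the same shape: allocate a frame sized for the ELS payload, fill it in, mark it as an ELS request, and hand it to the exchange manager; a failure at either step funnels into fc_rport_retry(), which re-arms the retry timer instead of failing the session outright. A condensed sketch of the pattern, where resp_handler stands in for the per-request callback:

	fp = fc_frame_alloc(lp, payload_len);
	if (!fp)
		return fc_rport_retry(rport);	/* no memory: retry later */
	/* ... fill the ELS payload ... */
	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
	if (!lp->tt.exch_seq_send(lp, fp, resp_handler, rport, lp->e_d_tov,
				  lp->fid, rport->port_id,
				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
		fc_rport_retry(rport);		/* no exchange: retry later */
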
7621 +
7622 +/*
7623 + * Handle a request received by the exchange manager for the session.
7624 + * This may be an entirely new session, or a PLOGI or LOGO for an existing one.
7625 + * This will free the frame.
7626 + */
7627 +void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
7628 + struct fc_rport *rport)
7629 +{
7630 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7631 + struct fc_frame_header *fh;
7632 + struct fc_lport *lp = rp->local_port;
7633 + struct fc_seq_els_data els_data;
7634 + u8 op;
7635 +
7636 + els_data.fp = NULL;
7637 + els_data.explan = ELS_EXPL_NONE;
7638 + els_data.reason = ELS_RJT_NONE;
7639 +
7640 + fh = fc_frame_header_get(fp);
7641 +
7642 + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
7643 + op = fc_frame_payload_op(fp);
7644 + switch (op) {
7645 + case ELS_PLOGI:
7646 + fc_rport_recv_plogi_req(rport, sp, fp);
7647 + break;
7648 + case ELS_PRLI:
7649 + fc_rport_recv_prli_req(rport, sp, fp);
7650 + break;
7651 + case ELS_PRLO:
7652 + fc_rport_recv_prlo_req(rport, sp, fp);
7653 + break;
7654 + case ELS_LOGO:
7655 + fc_rport_recv_logo_req(rport, sp, fp);
7656 + break;
7657 + case ELS_RRQ:
7658 + els_data.fp = fp;
7659 + lp->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
7660 + break;
7661 + case ELS_REC:
7662 + els_data.fp = fp;
7663 + lp->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
7664 + break;
7665 + default:
7666 + els_data.reason = ELS_RJT_UNSUP;
7667 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
7668 + fc_frame_free(fp);
7669 + break;
7670 + }
7671 + } else {
7672 + fc_frame_free(fp);
7673 + }
7674 +}
7675 +
7676 +/**
7677 + * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
7678 + * @rport: Fibre Channel remote port that initiated PLOGI
7679 + * @sp: current sequence in the PLOGI exchange
7680 + * @rx_fp: PLOGI request frame
7681 + */
7682 +static void fc_rport_recv_plogi_req(struct fc_rport *rport,
7683 + struct fc_seq *sp, struct fc_frame *rx_fp)
7684 +{
7685 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7686 + struct fc_frame *fp = rx_fp;
7687 + struct fc_frame_header *fh;
7688 + struct fc_lport *lp;
7689 + struct fc_els_flogi *pl;
7690 + struct fc_seq_els_data rjt_data;
7691 + u32 sid;
7692 + u64 wwpn;
7693 + u64 wwnn;
7694 + enum fc_els_rjt_reason reject = 0;
7695 + u32 f_ctl;
7696 +
7697 + rjt_data.fp = NULL;
7698 + fh = fc_frame_header_get(fp);
7699 + sid = ntoh24(fh->fh_s_id);
7700 + pl = fc_frame_payload_get(fp, sizeof(*pl));
7701 + if (!pl) {
7702 + FC_DBG("incoming PLOGI from %x too short\n", sid);
7703 + WARN_ON(1);
7704 + /* XXX TBD: send reject? */
7705 + fc_frame_free(fp);
7706 + return;
7707 + }
7708 + wwpn = get_unaligned_be64(&pl->fl_wwpn);
7709 + wwnn = get_unaligned_be64(&pl->fl_wwnn);
7710 + fc_rport_lock(rport);
7711 + lp = rp->local_port;
7712 +
7713 + /*
7714 + * If the session was just created, possibly due to the incoming PLOGI,
7715 + * set the state appropriately and accept the PLOGI.
7716 + *
7717 + * If we had also sent a PLOGI, and if the received PLOGI is from a
7718 + * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
7719 + * "command already in progress".
7720 + *
7721 + * XXX TBD: If the session was ready before, the PLOGI should result in
7722 + * all outstanding exchanges being reset.
7723 + */
7724 + switch (rp->rp_state) {
7725 + case RPORT_ST_INIT:
7726 + if (fc_rp_debug)
7727 + FC_DBG("incoming PLOGI from %6x wwpn %llx state INIT "
7728 + "- reject\n", sid, wwpn);
7729 + reject = ELS_RJT_UNSUP;
7730 + break;
7731 + case RPORT_ST_STARTED:
7732 + /*
7733 +		 * Accept the login only if the port name
7734 +		 * matches or was previously unknown.
7735 + */
7736 + if (rport->port_name != -1 &&
7737 + rport->port_name != wwpn) {
7738 + FC_DBG("incoming PLOGI from name %llx expected %llx\n",
7739 + wwpn, rport->port_name);
7740 + reject = ELS_RJT_UNAB;
7741 + }
7742 + break;
7743 + case RPORT_ST_PLOGI:
7744 + if (fc_rp_debug)
7745 + FC_DBG("incoming PLOGI from %x in PLOGI state %d\n",
7746 + sid, rp->rp_state);
7747 + if (wwpn < lp->wwpn)
7748 + reject = ELS_RJT_INPROG;
7749 + break;
7750 + case RPORT_ST_PRLI:
7751 + case RPORT_ST_ERROR:
7752 + case RPORT_ST_READY:
7753 + if (fc_rp_debug)
7754 + FC_DBG("incoming PLOGI from %x in logged-in state %d "
7755 + "- ignored for now\n", sid, rp->rp_state);
7756 + /* XXX TBD - should reset */
7757 + break;
7758 + case RPORT_ST_NONE:
7759 + default:
7760 + if (fc_rp_debug)
7761 + FC_DBG("incoming PLOGI from %x in unexpected "
7762 + "state %d\n", sid, rp->rp_state);
7763 + break;
7764 + }
7765 +
7766 + if (reject) {
7767 + rjt_data.reason = reject;
7768 + rjt_data.explan = ELS_EXPL_NONE;
7769 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
7770 + fc_frame_free(fp);
7771 + } else {
7772 + fp = fc_frame_alloc(lp, sizeof(*pl));
7773 + if (fp == NULL) {
7774 + fp = rx_fp;
7775 + rjt_data.reason = ELS_RJT_UNAB;
7776 + rjt_data.explan = ELS_EXPL_NONE;
7777 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
7778 + fc_frame_free(fp);
7779 + } else {
7780 + sp = lp->tt.seq_start_next(sp);
7781 + WARN_ON(!sp);
7782 + fc_rport_set_name(rport, wwpn, wwnn);
7783 +
7784 + /*
7785 + * Get session payload size from incoming PLOGI.
7786 + */
7787 + rport->maxframe_size =
7788 + fc_plogi_get_maxframe(pl, lp->mfs);
7789 + fc_frame_free(rx_fp);
7790 + pl = fc_frame_payload_get(fp, sizeof(*pl));
7791 + WARN_ON(!pl);
7792 + fc_lport_plogi_fill(lp, pl, ELS_LS_ACC);
7793 +
7794 + /*
7795 + * Send LS_ACC. If this fails,
7796 + * the originator should retry.
7797 + */
7798 + f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
7799 + fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
7800 + lp->tt.seq_send(lp, sp, fp, f_ctl);
7801 + if (rp->rp_state == RPORT_ST_PLOGI)
7802 + fc_rport_enter_prli(rport);
7803 + else
7804 + fc_rport_state_enter(rport,
7805 + RPORT_ST_PLOGI_RECV);
7806 + }
7807 + }
7808 + fc_rport_unlock(rport);
7809 +}
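
The RPORT_ST_PLOGI arm implements the standard tie-break for simultaneous logins: when both ends have a PLOGI outstanding, the port with the numerically higher WWPN wins, and the other side is answered with LS_RJT reason "command already in progress" (ELS_RJT_INPROG), so exactly one login completes. A sketch of the rule, matching the (wwpn < lp->wwpn) test above:

	/* Simultaneous PLOGI: the higher WWPN's request is accepted. */
	static inline int fc_plogi_collision_accept(u64 local_wwpn, u64 remote_wwpn)
	{
		return remote_wwpn >= local_wwpn; /* else LS_RJT / ELS_RJT_INPROG */
	}
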
7810 +
7811 +/**
7812 + * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
7813 + * @rport: Fibre Channel remote port that initiated PRLI
7814 + * @sp: current sequence in the PRLI exchange
7815 + * @rx_fp: PRLI request frame
7816 + */
7817 +static void fc_rport_recv_prli_req(struct fc_rport *rport,
7818 + struct fc_seq *sp, struct fc_frame *rx_fp)
7819 +{
7820 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7821 + struct fc_frame *fp;
7822 + struct fc_frame_header *fh;
7823 + struct fc_lport *lp;
7824 + struct {
7825 + struct fc_els_prli prli;
7826 + struct fc_els_spp spp;
7827 + } *pp;
7828 + struct fc_els_spp *rspp; /* request service param page */
7829 + struct fc_els_spp *spp; /* response spp */
7830 + unsigned int len;
7831 + unsigned int plen;
7832 + enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
7833 + enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
7834 + enum fc_els_spp_resp resp;
7835 + struct fc_seq_els_data rjt_data;
7836 + u32 f_ctl;
7837 + u32 fcp_parm;
7838 + u32 roles = FC_RPORT_ROLE_UNKNOWN;
7839 +
7840 + rjt_data.fp = NULL;
7841 + fh = fc_frame_header_get(rx_fp);
7842 + lp = rp->local_port;
7843 + switch (rp->rp_state) {
7844 + case RPORT_ST_PLOGI_RECV:
7845 + case RPORT_ST_PRLI:
7846 + case RPORT_ST_READY:
7847 + reason = ELS_RJT_NONE;
7848 + break;
7849 + default:
7850 + break;
7851 + }
7852 + len = fr_len(rx_fp) - sizeof(*fh);
7853 + pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
7854 + if (pp == NULL) {
7855 + reason = ELS_RJT_PROT;
7856 + explan = ELS_EXPL_INV_LEN;
7857 + } else {
7858 + plen = ntohs(pp->prli.prli_len);
7859 + if ((plen % 4) != 0 || plen > len) {
7860 + reason = ELS_RJT_PROT;
7861 + explan = ELS_EXPL_INV_LEN;
7862 + } else if (plen < len) {
7863 + len = plen;
7864 + }
7865 + plen = pp->prli.prli_spp_len;
7866 + if ((plen % 4) != 0 || plen < sizeof(*spp) ||
7867 + plen > len || len < sizeof(*pp)) {
7868 + reason = ELS_RJT_PROT;
7869 + explan = ELS_EXPL_INV_LEN;
7870 + }
7871 + rspp = &pp->spp;
7872 + }
7873 + if (reason != ELS_RJT_NONE ||
7874 + (fp = fc_frame_alloc(lp, len)) == NULL) {
7875 + rjt_data.reason = reason;
7876 + rjt_data.explan = explan;
7877 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
7878 + } else {
7879 + sp = lp->tt.seq_start_next(sp);
7880 + WARN_ON(!sp);
7881 + pp = fc_frame_payload_get(fp, len);
7882 + WARN_ON(!pp);
7883 + memset(pp, 0, len);
7884 + pp->prli.prli_cmd = ELS_LS_ACC;
7885 + pp->prli.prli_spp_len = plen;
7886 + pp->prli.prli_len = htons(len);
7887 + len -= sizeof(struct fc_els_prli);
7888 +
7889 + /*
7890 + * Go through all the service parameter pages and build
7891 + * response. If plen indicates longer SPP than standard,
7892 + * use that. The entire response has been pre-cleared above.
7893 + */
7894 + spp = &pp->spp;
7895 + while (len >= plen) {
7896 + spp->spp_type = rspp->spp_type;
7897 + spp->spp_type_ext = rspp->spp_type_ext;
7898 + spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
7899 + resp = FC_SPP_RESP_ACK;
7900 + if (rspp->spp_flags & FC_SPP_RPA_VAL)
7901 + resp = FC_SPP_RESP_NO_PA;
7902 + switch (rspp->spp_type) {
7903 + case 0: /* common to all FC-4 types */
7904 + break;
7905 + case FC_TYPE_FCP:
7906 + fcp_parm = ntohl(rspp->spp_params);
7907 +			if (fcp_parm & FCP_SPPF_RETRY)
7908 + rp->flags |= FC_RP_FLAGS_RETRY;
7909 + rport->supported_classes = FC_COS_CLASS3;
7910 + if (fcp_parm & FCP_SPPF_INIT_FCN)
7911 + roles |= FC_RPORT_ROLE_FCP_INITIATOR;
7912 + if (fcp_parm & FCP_SPPF_TARG_FCN)
7913 + roles |= FC_RPORT_ROLE_FCP_TARGET;
7914 + fc_remote_port_rolechg(rport, roles);
7915 + spp->spp_params =
7916 + htonl(rp->local_port->service_params);
7917 + break;
7918 + default:
7919 + resp = FC_SPP_RESP_INVL;
7920 + break;
7921 + }
7922 + spp->spp_flags |= resp;
7923 + len -= plen;
7924 + rspp = (struct fc_els_spp *)((char *)rspp + plen);
7925 + spp = (struct fc_els_spp *)((char *)spp + plen);
7926 + }
7927 +
7928 + /*
7929 + * Send LS_ACC. If this fails, the originator should retry.
7930 + */
7931 + f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
7932 + fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
7933 + lp->tt.seq_send(lp, sp, fp, f_ctl);
7934 +
7935 + /*
7936 + * Get lock and re-check state.
7937 + */
7938 + fc_rport_lock(rport);
7939 + switch (rp->rp_state) {
7940 + case RPORT_ST_PLOGI_RECV:
7941 + case RPORT_ST_PRLI:
7942 + fc_rport_state_enter(rport, RPORT_ST_READY);
7943 + if (fc_rp_debug)
7944 + FC_DBG("remote %6x ready\n", rport->port_id);
7945 + if (rport == lp->dns_rp &&
7946 + lp->state == LPORT_ST_DNS) {
7947 + fc_lport_lock(lp);
7948 + del_timer(&lp->state_timer);
7949 + lp->tt.dns_register(lp);
7950 + fc_lport_unlock(lp);
7951 + }
7952 + break;
7953 + case RPORT_ST_READY:
7954 + break;
7955 + default:
7956 + break;
7957 + }
7958 + fc_rport_unlock(rport);
7959 + }
7960 + fc_frame_free(rx_fp);
7961 +}
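
The length validation above exists because PRLI is variable-sized: prli_len covers the whole payload and prli_spp_len gives the stride of each service parameter page; both must be multiples of four, and the stride may legitimately exceed sizeof(struct fc_els_spp) for extended pages. Once validated, the response is built by stepping through the pages with that stride; a stripped-down sketch, where process_spp() is a hypothetical per-page handler:

	struct fc_els_spp *rspp = &pp->spp;
	unsigned int remaining = len - sizeof(struct fc_els_prli);

	while (remaining >= plen) {		/* plen == prli_spp_len */
		process_spp(rspp);		/* build the matching response SPP */
		remaining -= plen;
		rspp = (struct fc_els_spp *)((char *)rspp + plen);
	}
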
7962 +
7963 +/**
7964 + * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
7965 + * @rport: Fibre Channel remote port that initiated PRLO
7966 + * @sp: current sequence in the PRLO exchange
7967 + * @fp: PRLO request frame
7968 + */
7969 +static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
7970 + struct fc_frame *fp)
7971 +{
7972 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7973 + struct fc_frame_header *fh;
7974 + struct fc_lport *lp = rp->local_port;
7975 + struct fc_seq_els_data rjt_data;
7976 +
7977 + fh = fc_frame_header_get(fp);
7978 + FC_DBG("incoming PRLO from %x state %d\n",
7979 + ntoh24(fh->fh_s_id), rp->rp_state);
7980 + rjt_data.fp = NULL;
7981 + rjt_data.reason = ELS_RJT_UNAB;
7982 + rjt_data.explan = ELS_EXPL_NONE;
7983 + lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
7984 + fc_frame_free(fp);
7985 +}
7986 +
7987 +/**
7988 + * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
7989 + * @rport: Fibre Channel remote port that initiated LOGO
7990 + * @sp: current sequence in the LOGO exchange
7991 + * @fp: LOGO request frame
7992 + */
7993 +static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
7994 + struct fc_frame *fp)
7995 +{
7996 + struct fc_frame_header *fh;
7997 + struct fc_rport_libfc_priv *rp = rport->dd_data;
7998 + struct fc_lport *lp = rp->local_port;
7999 +
8000 + fh = fc_frame_header_get(fp);
8001 + fc_rport_lock(rport);
8002 + fc_rport_state_enter(rport, RPORT_ST_INIT);
8003 + fc_rport_unlock(rport);
8004 + if (fc_rp_debug)
8005 + FC_DBG("remote %6x closed\n", rport->port_id);
8006 + if (rport == lp->dns_rp &&
8007 + lp->state != LPORT_ST_RESET) {
8008 + fc_lport_lock(lp);
8009 + del_timer(&lp->state_timer);
8010 + lp->dns_rp = NULL;
8011 + if (lp->state == LPORT_ST_DNS_STOP) {
8012 + fc_lport_unlock(lp);
8013 + lp->tt.lport_logout(lp);
8014 + } else {
8015 + lp->tt.lport_login(lp);
8016 + fc_lport_unlock(lp);
8017 + }
8018 + fc_remote_port_delete(rport);
8019 + }
8020 + lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
8021 + fc_frame_free(fp);
8022 +}
8023 +
8024 +int fc_rport_init(struct fc_lport *lp)
8025 +{
8026 + if (!lp->tt.rport_login)
8027 + lp->tt.rport_login = fc_rport_login;
8028 +
8029 + if (!lp->tt.rport_logout)
8030 + lp->tt.rport_logout = fc_rport_logout;
8031 +
8032 + if (!lp->tt.rport_recv_req)
8033 + lp->tt.rport_recv_req = fc_rport_recv_req;
8034 +
8035 + if (!lp->tt.rport_create)
8036 + lp->tt.rport_create = fc_remote_port_create;
8037 +
8038 + if (!lp->tt.rport_lookup)
8039 + lp->tt.rport_lookup = fc_rport_lookup;
8040 +
8041 + if (!lp->tt.rport_reset)
8042 + lp->tt.rport_reset = fc_rport_reset;
8043 +
8044 + if (!lp->tt.rport_reset_list)
8045 + lp->tt.rport_reset_list = fc_rport_reset_list;
8046 +
8047 + return 0;
8048 +}
8049 +EXPORT_SYMBOL(fc_rport_init);
8050 +
8051 --
8052 1.5.2.4
8053