Subject: Open-FCoE: Update for Beta5
From: John Fastabend <john.r.fastabend@intel.com>
Date: Fri Nov 7 15:38:25 2008 +0100
Git: 4692e3314fc9ffdb33996bbff7b4aa8916d58f1c
References: bnc#438954

Incremental Open-FCoE update for Beta5.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Hannes Reinecke <hare@suse.de>

12diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
13index f382eea..6f38b13 100644
14--- a/drivers/scsi/Kconfig
15+++ b/drivers/scsi/Kconfig
16@@ -336,7 +336,8 @@ config LIBFC
17
18 config FCOE
19 tristate "FCoE module"
20- depends on LIBFC
21+ depends on SCSI && SCSI_FC_ATTRS
22+ select LIBFC
23 ---help---
24 Fibre Channel over Ethernet module
25
26diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
27index e11d36b..ff207b2 100644
28--- a/drivers/scsi/fcoe/fc_transport_fcoe.c
29+++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
30@@ -38,6 +38,7 @@
31 MODULE_AUTHOR("Open-FCoE.org");
32 MODULE_DESCRIPTION("FCoE");
33 MODULE_LICENSE("GPL");
34+MODULE_VERSION("1.0.3");
35
36 /*
37 * Static functions and variables definations
38@@ -71,16 +72,13 @@ static void fcoe_create_percpu_data(int cpu)
39 {
40 struct fc_lport *lp;
41 struct fcoe_softc *fc;
42- struct fcoe_dev_stats *p;
43
44 write_lock_bh(&fcoe_hostlist_lock);
45 list_for_each_entry(fc, &fcoe_hostlist, list) {
46 lp = fc->lp;
47- if (lp->dev_stats[cpu] == NULL) {
48- p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
49- if (p)
50- lp->dev_stats[cpu] = p;
51- }
52+ if (lp->dev_stats[cpu] == NULL)
53+ lp->dev_stats[cpu] = kzalloc(sizeof(struct fcoe_dev_stats),
54+ GFP_KERNEL);
55 }
56 write_unlock_bh(&fcoe_hostlist_lock);
57 }
58@@ -91,18 +89,14 @@ static void fcoe_create_percpu_data(int cpu)
59 */
60 static void fcoe_destroy_percpu_data(int cpu)
61 {
62- struct fcoe_dev_stats *p;
63 struct fc_lport *lp;
64 struct fcoe_softc *fc;
65
66 write_lock_bh(&fcoe_hostlist_lock);
67 list_for_each_entry(fc, &fcoe_hostlist, list) {
68 lp = fc->lp;
69- p = lp->dev_stats[cpu];
70- if (p != NULL) {
71- lp->dev_stats[cpu] = NULL;
72- kfree(p);
73- }
74+ kfree(lp->dev_stats[cpu]);
75+ lp->dev_stats[cpu] = NULL;
76 }
77 write_unlock_bh(&fcoe_hostlist_lock);
78 }
79@@ -211,7 +205,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
80 fc_linkup(lp);
81 else {
82 stats = lp->dev_stats[smp_processor_id()];
83- stats->LinkFailureCount++;
84+ if (stats)
85+ stats->LinkFailureCount++;
86 fc_linkdown(lp);
87 fcoe_clean_pending_queue(lp);
88 }
89@@ -227,42 +222,44 @@ static void trimstr(char *str, int len)
90 *cp = '\0';
91 }
92
93-static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr,
94- const char *buffer, size_t size)
95+static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
96 {
97 struct net_device *netdev;
98 char ifname[IFNAMSIZ + 2];
99+ int rc = -ENODEV;
100
101 strlcpy(ifname, buffer, IFNAMSIZ);
102 trimstr(ifname, strlen(ifname));
103 netdev = dev_get_by_name(&init_net, ifname);
104 if (netdev) {
105- fcoe_destroy_interface(netdev);
106+ rc = fcoe_destroy_interface(netdev);
107 dev_put(netdev);
108 }
109- return size;
110+ return rc;
111 }
112
113-static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr,
114- const char *buffer, size_t size)
115+static int fcoe_create(const char *buffer, struct kernel_param *kp)
116 {
117 struct net_device *netdev;
118 char ifname[IFNAMSIZ + 2];
119+ int rc = -ENODEV;
120
121 strlcpy(ifname, buffer, IFNAMSIZ);
122 trimstr(ifname, strlen(ifname));
123 netdev = dev_get_by_name(&init_net, ifname);
124 if (netdev) {
125- fcoe_create_interface(netdev);
126+ rc = fcoe_create_interface(netdev);
127 dev_put(netdev);
128 }
129- return size;
130+ return rc;
131 }
132
133-static const struct kobj_attribute fcoe_destroyattr = \
134- __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy);
135-static const struct kobj_attribute fcoe_createattr = \
136- __ATTR(create, S_IWUSR, NULL, fcoe_create);
137+module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
138+__MODULE_PARM_TYPE(create, "string");
139+MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
140+module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
141+__MODULE_PARM_TYPE(destroy, "string");
142+MODULE_PARM_DESC(destroy, "Destroy fcoe port");
143
144 /*
145 * Initialization routine
146@@ -271,19 +268,9 @@ static const struct kobj_attribute fcoe_createattr = \
147 */
148 static int __init fcoe_init(void)
149 {
150- int rc = 0;
151 int cpu;
152 struct fcoe_percpu_s *p;
153
154- rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
155- &fcoe_destroyattr.attr);
156- if (!rc)
157- rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
158- &fcoe_createattr.attr);
159-
160- if (rc)
161- return rc;
162-
163 rwlock_init(&fcoe_hostlist_lock);
164
165 #ifdef CONFIG_HOTPLUG_CPU
166@@ -317,11 +304,6 @@ static int __init fcoe_init(void)
167 }
168 }
169 }
170- if (rc < 0) {
171- FC_DBG("failed to initialize proc intrerface\n");
172- rc = -ENODEV;
173- goto out_chrdev;
174- }
175
176 /*
177 * setup link change notification
178@@ -340,12 +322,6 @@ static int __init fcoe_init(void)
179 }
180
181 return 0;
182-
183-out_chrdev:
184-#ifdef CONFIG_HOTPLUG_CPU
185- unregister_cpu_notifier(&fcoe_cpu_notifier);
186-#endif /* CONFIG_HOTPLUG_CPU */
187- return rc;
188 }
189 module_init(fcoe_init);
190
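The fc_transport_fcoe.c hunks above drop the sysfs kobject attributes and expose create/destroy as writable module parameters, so requests now arrive through module_param_call() setters. As a stand-alone illustration of that mechanism (the module and callback names below are made up for the sketch; only the API shape matches the patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/*
 * Sketch only: with a writable module_param_call() parameter, writing a
 * string to /sys/module/<module>/parameters/create invokes the set
 * callback with that string.  Returning 0 reports success and a negative
 * errno is passed back to the writer, which is why fcoe_create() and
 * fcoe_destroy() above now return an int instead of the byte count a
 * sysfs store method returns.
 */
static int example_create(const char *buffer, struct kernel_param *kp)
{
	printk(KERN_INFO "example: create requested for \"%s\"\n", buffer);
	return 0;
}

module_param_call(create, example_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Illustrative create hook");
MODULE_LICENSE("GPL");
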
191diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
192index 3cf5ad6..d7ceb1b 100644
193--- a/drivers/scsi/fcoe/fcoe_sw.c
194+++ b/drivers/scsi/fcoe/fcoe_sw.c
195@@ -46,13 +46,13 @@
196
197 #define FCOE_VERSION "0.1"
198
199-#define FCOE_MAX_LUN 255
200-#define FCOE_MAX_FCP_TARGET 256
201+#define FCOE_MAX_LUN 255
202+#define FCOE_MAX_FCP_TARGET 256
203
204-#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
205+#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
206
207-#define FCOE_MIN_XID 0x0004
208-#define FCOE_MAX_XID 0x07ef
209+#define FCOE_MIN_XID 0x0004
210+#define FCOE_MAX_XID 0x07ef
211
212 LIST_HEAD(fcoe_hostlist);
213 DEFINE_RWLOCK(fcoe_hostlist_lock);
214@@ -173,7 +173,6 @@ static struct scsi_host_template fcoe_driver_template = {
215 int fcoe_destroy_interface(struct net_device *netdev)
216 {
217 int cpu, idx;
218- struct fcoe_dev_stats *p;
219 struct fcoe_percpu_s *pp;
220 struct fcoe_softc *fc;
221 struct fcoe_rcv_info *fr;
222@@ -239,13 +238,8 @@ int fcoe_destroy_interface(struct net_device *netdev)
223 fcoe_clean_pending_queue(lp);
224
225 /* Free memory used by statistical counters */
226- for_each_online_cpu(cpu) {
227- p = lp->dev_stats[cpu];
228- if (p) {
229- lp->dev_stats[cpu] = NULL;
230- kfree(p);
231- }
232- }
233+ for_each_online_cpu(cpu)
234+ kfree(lp->dev_stats[cpu]);
235
236 /* Release the net_device and Scsi_Host */
237 dev_put(fc->real_dev);
238@@ -299,7 +293,6 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
239 static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
240 {
241 int i = 0;
242- struct fcoe_dev_stats *p;
243
244 lp->host = shost;
245 lp->drv_priv = (void *)(lp + 1);
246@@ -319,11 +312,9 @@ static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
247 /*
248 * allocate per cpu stats block
249 */
250- for_each_online_cpu(i) {
251- p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
252- if (p)
253- lp->dev_stats[i] = p;
254- }
255+ for_each_online_cpu(i)
256+ lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
257+ GFP_KERNEL);
258
259 /* Finish fc_lport configuration */
260 fc_lport_config(lp);
261@@ -341,11 +332,8 @@ static int net_config(struct fc_lport *lp)
262
263 /* Require support for get_pauseparam ethtool op. */
264 net_dev = fc->real_dev;
265- if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN))
266+ if (net_dev->priv_flags & IFF_802_1Q_VLAN)
267 net_dev = vlan_dev_real_dev(net_dev);
268- if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam)
269- return -EOPNOTSUPP;
270-
271 fc->phys_dev = net_dev;
272
273 /* Do not support for bonding device */
274diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
275index 93c47aa..45a7d6f 100644
276--- a/drivers/scsi/fcoe/libfcoe.c
277+++ b/drivers/scsi/fcoe/libfcoe.c
278@@ -135,7 +135,8 @@ err:
279 #else
280 stats = lp->dev_stats[0];
281 #endif
282- stats->ErrorFrames++;
283+ if (stats)
284+ stats->ErrorFrames++;
285
286 err2:
287 kfree_skb(skb);
288@@ -333,8 +334,10 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
289 hp->fcoe_sof = sof;
290
291 stats = lp->dev_stats[smp_processor_id()];
292- stats->TxFrames++;
293- stats->TxWords += wlen;
294+ if (stats) {
295+ stats->TxFrames++;
296+ stats->TxWords += wlen;
297+ }
298 skb->dev = fc->real_dev;
299
300 fr_dev(fp) = lp;
301@@ -422,10 +425,12 @@ int fcoe_percpu_receive_thread(void *arg)
302
303 hp = (struct fcoe_hdr *)skb->data;
304 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
305- if (stats->ErrorFrames < 5)
306- FC_DBG("unknown FCoE version %x",
307- FC_FCOE_DECAPS_VER(hp));
308- stats->ErrorFrames++;
309+ if (stats) {
310+ if (stats->ErrorFrames < 5)
311+ FC_DBG("unknown FCoE version %x",
312+ FC_FCOE_DECAPS_VER(hp));
313+ stats->ErrorFrames++;
314+ }
315 kfree_skb(skb);
316 continue;
317 }
318@@ -436,15 +441,20 @@ int fcoe_percpu_receive_thread(void *arg)
319 tlen = sizeof(struct fcoe_crc_eof);
320
321 if (unlikely(fr_len > skb->len)) {
322- if (stats->ErrorFrames < 5)
323- FC_DBG("length error fr_len 0x%x skb->len 0x%x",
324- fr_len, skb->len);
325- stats->ErrorFrames++;
326+ if (stats) {
327+ if (stats->ErrorFrames < 5)
328+ FC_DBG("length error fr_len 0x%x "
329+ "skb->len 0x%x", fr_len,
330+ skb->len);
331+ stats->ErrorFrames++;
332+ }
333 kfree_skb(skb);
334 continue;
335 }
336- stats->RxFrames++;
337- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
338+ if (stats) {
339+ stats->RxFrames++;
340+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
341+ }
342
343 fp = (struct fc_frame *) skb;
344 fc_frame_init(fp);
345@@ -469,12 +479,15 @@ int fcoe_percpu_receive_thread(void *arg)
346 fcoe_recv_flogi(fc, fp, mac);
347 fc_exch_recv(lp, lp->emp, fp);
348 } else {
349- if (debug_fcoe || stats->InvalidCRCCount < 5) {
350+ if (debug_fcoe ||
351+ (stats && stats->InvalidCRCCount < 5)) {
352 printk(KERN_WARNING \
353 "fcoe: dropping frame with CRC error");
354 }
355- stats->InvalidCRCCount++;
356- stats->ErrorFrames++;
357+ if (stats) {
358+ stats->InvalidCRCCount++;
359+ stats->ErrorFrames++;
360+ }
361 fc_frame_free(fp);
362 }
363 }
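
A recurring pattern in the libfcoe.c hunks above: lp->dev_stats[cpu] is populated by kzalloc() and may legitimately be NULL when that allocation failed, so the transmit and receive paths now test the pointer before bumping any counter. The same guard, condensed into a stand-alone helper for illustration (the helper name is invented; only the fields come from the patch):

/*
 * Illustrative helper, not part of the patch: bump the per-CPU error
 * counter only if the stats block was successfully allocated.
 */
static inline void example_count_error(struct fc_lport *lp)
{
	struct fcoe_dev_stats *stats = lp->dev_stats[smp_processor_id()];

	if (stats)
		stats->ErrorFrames++;
}
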
364diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
365index 80dc1ef..67c5bad 100644
366--- a/drivers/scsi/libfc/fc_exch.c
367+++ b/drivers/scsi/libfc/fc_exch.c
368@@ -371,7 +371,7 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
369 FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
370 ep->xid);
371 if (schedule_delayed_work(&ep->timeout_work,
372- jiffies + msecs_to_jiffies(timer_msec)))
373+ msecs_to_jiffies(timer_msec)))
374 fc_exch_hold(ep); /* hold for timer */
375 }
376
377@@ -1831,17 +1831,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
378 len += sizeof(struct fc_exch_mgr);
379
380 mp = kzalloc(len, GFP_ATOMIC);
381- if (mp) {
382- mp->class = class;
383- mp->total_exches = 0;
384- mp->exches = (struct fc_exch **)(mp + 1);
385- mp->last_xid = min_xid - 1;
386- mp->min_xid = min_xid;
387- mp->max_xid = max_xid;
388- mp->lp = lp;
389- INIT_LIST_HEAD(&mp->ex_list);
390- spin_lock_init(&mp->em_lock);
391- }
392+ if (!mp)
393+ return NULL;
394+
395+ mp->class = class;
396+ mp->total_exches = 0;
397+ mp->exches = (struct fc_exch **)(mp + 1);
398+ mp->last_xid = min_xid - 1;
399+ mp->min_xid = min_xid;
400+ mp->max_xid = max_xid;
401+ mp->lp = lp;
402+ INIT_LIST_HEAD(&mp->ex_list);
403+ spin_lock_init(&mp->em_lock);
404
405 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
406 if (!mp->ep_pool)
407@@ -1932,6 +1933,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
408 fh = fc_frame_header_get(fp);
409 hton24(fh->fh_f_ctl, f_ctl | fill);
410 fh->fh_seq_cnt = htons(sp->cnt++);
411+ ep->fh_type = fh->fh_type; /* save for possbile timeout handling */
412
413 if (unlikely(lp->tt.frame_send(lp, fp)))
414 goto err;
415@@ -1940,7 +1942,6 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
416 fc_exch_timer_set_locked(ep, timer_msec);
417 sp->f_ctl = f_ctl; /* save for possible abort */
418 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
419- ep->fh_type = fh->fh_type; /* save for possbile timeout handling */
420
421 if (f_ctl & FC_FC_SEQ_INIT)
422 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
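
One detail worth calling out in the first fc_exch.c hunk above: schedule_delayed_work() expects a relative delay in jiffies, not an absolute expiry time, so passing jiffies + msecs_to_jiffies(timer_msec) armed the exchange timer far later than intended. The corrected call, repeated here with an explanatory comment (variables as in the hunk):

	/* The delay argument is relative, so convert the millisecond timeout
	 * directly; on success take a reference for the pending timer. */
	if (schedule_delayed_work(&ep->timeout_work,
				  msecs_to_jiffies(timer_msec)))
		fc_exch_hold(ep);	/* hold for timer */
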
423diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
424index bf8202f..01e84dc 100644
425--- a/drivers/scsi/libfc/fc_fcp.c
426+++ b/drivers/scsi/libfc/fc_fcp.c
427@@ -41,6 +41,7 @@
428 MODULE_AUTHOR("Open-FCoE.org");
429 MODULE_DESCRIPTION("libfc");
430 MODULE_LICENSE("GPL");
431+MODULE_VERSION("1.0.3");
432
433 static int fc_fcp_debug;
434
435@@ -53,80 +54,18 @@ static int fc_fcp_debug;
436 static struct kmem_cache *scsi_pkt_cachep;
437
438 /* SRB state definitions */
439-#define FC_SRB_FREE 0 /* cmd is free */
440-#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
441-#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
442-#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
443-#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */
444-#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
445-#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
446+#define FC_SRB_FREE 0 /* cmd is free */
447+#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
448+#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
449+#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
450+#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */
451+#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
452+#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
453 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
454-#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
455+#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
456
457-#define FC_SRB_READ (1 << 1)
458-#define FC_SRB_WRITE (1 << 0)
459-
460-/*
461- * scsi request structure, one for each scsi request
462- */
463-struct fc_fcp_pkt {
464- /*
465- * housekeeping stuff
466- */
467- struct fc_lport *lp; /* handle to hba struct */
468- u16 state; /* scsi_pkt state state */
469- u16 tgt_flags; /* target flags */
470- atomic_t ref_cnt; /* only used byr REC ELS */
471- spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
472- * if both are held at the same time */
473- /*
474- * SCSI I/O related stuff
475- */
476- struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
477- * under host lock */
478- struct list_head list; /* tracks queued commands. access under
479- * host lock */
480- /*
481- * timeout related stuff
482- */
483- struct timer_list timer; /* command timer */
484- struct completion tm_done;
485- int wait_for_comp;
486- unsigned long start_time; /* start jiffie */
487- unsigned long end_time; /* end jiffie */
488- unsigned long last_pkt_time; /* jiffies of last frame received */
489-
490- /*
491- * scsi cmd and data transfer information
492- */
493- u32 data_len;
494- /*
495- * transport related veriables
496- */
497- struct fcp_cmnd cdb_cmd;
498- size_t xfer_len;
499- u32 xfer_contig_end; /* offset of end of contiguous xfer */
500- u16 max_payload; /* max payload size in bytes */
501-
502- /*
503- * scsi/fcp return status
504- */
505- u32 io_status; /* SCSI result upper 24 bits */
506- u8 cdb_status;
507- u8 status_code; /* FCP I/O status */
508- /* bit 3 Underrun bit 2: overrun */
509- u8 scsi_comp_flags;
510- u32 req_flags; /* bit 0: read bit:1 write */
511- u32 scsi_resid; /* residule length */
512-
513- struct fc_rport *rport; /* remote port pointer */
514- struct fc_seq *seq_ptr; /* current sequence pointer */
515- /*
516- * Error Processing
517- */
518- u8 recov_retry; /* count of recovery retries */
519- struct fc_seq *recov_seq; /* sequence for REC or SRR */
520-};
521+#define FC_SRB_READ (1 << 1)
522+#define FC_SRB_WRITE (1 << 0)
523
524 /*
525 * The SCp.ptr should be tested and set under the host lock. NULL indicates
526@@ -153,11 +92,10 @@ struct fc_fcp_internal {
527 static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
528 static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
529 static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
530-static void fc_fcp_complete(struct fc_fcp_pkt *);
531+static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
532 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
533 static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
534 static void fc_timeout_error(struct fc_fcp_pkt *);
535-static int fc_fcp_send_cmd(struct fc_fcp_pkt *);
536 static void fc_fcp_timeout(unsigned long data);
537 static void fc_fcp_rec(struct fc_fcp_pkt *);
538 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
539@@ -171,17 +109,17 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
540 /*
541 * command status codes
542 */
543-#define FC_COMPLETE 0
544-#define FC_CMD_ABORTED 1
545-#define FC_CMD_RESET 2
546-#define FC_CMD_PLOGO 3
547-#define FC_SNS_RCV 4
548-#define FC_TRANS_ERR 5
549-#define FC_DATA_OVRRUN 6
550-#define FC_DATA_UNDRUN 7
551-#define FC_ERROR 8
552-#define FC_HRD_ERROR 9
553-#define FC_CMD_TIME_OUT 10
554+#define FC_COMPLETE 0
555+#define FC_CMD_ABORTED 1
556+#define FC_CMD_RESET 2
557+#define FC_CMD_PLOGO 3
558+#define FC_SNS_RCV 4
559+#define FC_TRANS_ERR 5
560+#define FC_DATA_OVRRUN 6
561+#define FC_DATA_UNDRUN 7
562+#define FC_ERROR 8
563+#define FC_HRD_ERROR 9
564+#define FC_CMD_TIME_OUT 10
565
566 /*
567 * Error recovery timeout values.
568@@ -191,8 +129,8 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
569 #define FC_SCSI_REC_TOV (2 * HZ)
570 #define FC_HOST_RESET_TIMEOUT (30 * HZ)
571
572-#define FC_MAX_ERROR_CNT 5
573-#define FC_MAX_RECOV_RETRY 3
574+#define FC_MAX_ERROR_CNT 5
575+#define FC_MAX_RECOV_RETRY 3
576
577 #define FC_FCP_DFLT_QUEUE_DEPTH 32
578
579@@ -208,46 +146,46 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
580 static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
581 {
582 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
583- struct fc_fcp_pkt *sp;
584-
585- sp = mempool_alloc(si->scsi_pkt_pool, gfp);
586- if (sp) {
587- memset(sp, 0, sizeof(*sp));
588- sp->lp = lp;
589- atomic_set(&sp->ref_cnt, 1);
590- init_timer(&sp->timer);
591- INIT_LIST_HEAD(&sp->list);
592- spin_lock_init(&sp->scsi_pkt_lock);
593+ struct fc_fcp_pkt *fsp;
594+
595+ fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
596+ if (fsp) {
597+ memset(fsp, 0, sizeof(*fsp));
598+ fsp->lp = lp;
599+ atomic_set(&fsp->ref_cnt, 1);
600+ init_timer(&fsp->timer);
601+ INIT_LIST_HEAD(&fsp->list);
602+ spin_lock_init(&fsp->scsi_pkt_lock);
603 }
604- return sp;
605+ return fsp;
606 }
607
608 /**
609 * fc_fcp_pkt_release - release hold on scsi_pkt packet
610- * @sp: fcp packet struct
611+ * @fsp: fcp packet struct
612 *
613 * This is used by upper layer scsi driver.
614 * Context : call from process and interrupt context.
615 * no locking required
616 */
617-static void fc_fcp_pkt_release(struct fc_fcp_pkt *sp)
618+static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
619 {
620- if (atomic_dec_and_test(&sp->ref_cnt)) {
621- struct fc_fcp_internal *si = fc_get_scsi_internal(sp->lp);
622+ if (atomic_dec_and_test(&fsp->ref_cnt)) {
623+ struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
624
625- mempool_free(sp, si->scsi_pkt_pool);
626+ mempool_free(fsp, si->scsi_pkt_pool);
627 }
628 }
629
630-static void fc_fcp_pkt_hold(struct fc_fcp_pkt *sp)
631+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
632 {
633- atomic_inc(&sp->ref_cnt);
634+ atomic_inc(&fsp->ref_cnt);
635 }
636
637 /**
638 * fc_fcp_pkt_destory - release hold on scsi_pkt packet
639 *
640- * @sp: exchange sequence
641+ * @seq: exchange sequence
642 * @fsp: fcp packet struct
643 *
644 * Release hold on scsi_pkt packet set to keep scsi_pkt
645@@ -255,9 +193,9 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *sp)
646 * Context : called from from EM layer.
647 * no locking required
648 */
649-static void fc_fcp_pkt_destroy(struct fc_seq *sp, void *arg)
650+static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
651 {
652- fc_fcp_pkt_release(arg);
653+ fc_fcp_pkt_release(fsp);
654 }
655
656 /**
657@@ -280,10 +218,9 @@ static void fc_fcp_pkt_destroy(struct fc_seq *sp, void *arg)
658 static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
659 {
660 spin_lock_bh(&fsp->scsi_pkt_lock);
661- if (!fsp->cmd) {
662+ if (fsp->state & FC_SRB_COMPL) {
663 spin_unlock_bh(&fsp->scsi_pkt_lock);
664- FC_DBG("Invalid scsi cmd pointer on fcp packet.\n");
665- return -EINVAL;
666+ return -EPERM;
667 }
668
669 fc_fcp_pkt_hold(fsp);
670@@ -325,7 +262,7 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
671 fsp->state &= ~FC_SRB_ABORT_PENDING;
672 fsp->io_status = SUGGEST_RETRY << 24;
673 fsp->status_code = FC_ERROR;
674- fc_fcp_complete(fsp);
675+ fc_fcp_complete_locked(fsp);
676 }
677
678 /*
679@@ -336,7 +273,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
680 {
681 struct scsi_cmnd *sc = fsp->cmd;
682 struct fc_lport *lp = fsp->lp;
683- struct fcoe_dev_stats *sp;
684+ struct fcoe_dev_stats *stats;
685 struct fc_frame_header *fh;
686 size_t start_offset;
687 size_t offset;
688@@ -420,9 +357,9 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
689
690 if (~crc != le32_to_cpu(*(__le32 *)(buf + len))) {
691 crc_err:
692- sp = lp->dev_stats[smp_processor_id()];
693- sp->ErrorFrames++;
694- if (sp->InvalidCRCCount++ < 5)
695+ stats = lp->dev_stats[smp_processor_id()];
696+ stats->ErrorFrames++;
697+ if (stats->InvalidCRCCount++ < 5)
698 FC_DBG("CRC error on data frame\n");
699 /*
700 * Assume the frame is total garbage.
701@@ -447,7 +384,7 @@ crc_err:
702 */
703 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
704 fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
705- fc_fcp_complete(fsp);
706+ fc_fcp_complete_locked(fsp);
707 }
708
709 /*
710@@ -457,7 +394,7 @@ crc_err:
711 * size of data in single frame, otherwise send multiple FC
712 * frames of max FC frame payload supported by target port.
713 */
714-static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
715+static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
716 size_t offset, size_t seq_blen)
717 {
718 struct scsi_cmnd *sc;
719@@ -503,9 +440,9 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
720 remaining = seq_blen;
721 frame_offset = offset;
722 tlen = 0;
723- sp = lp->tt.seq_start_next(sp);
724+ seq = lp->tt.seq_start_next(seq);
725 f_ctl = FC_FC_REL_OFF;
726- WARN_ON(!sp);
727+ WARN_ON(!seq);
728
729 /*
730 * If a get_page()/put_page() will fail, don't use sg lists
731@@ -608,12 +545,12 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
732 * transfer sequence initiative.
733 */
734 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
735- error = lp->tt.seq_send(lp, sp, fp, f_ctl);
736+ error = lp->tt.seq_send(lp, seq, fp, f_ctl);
737 } else if (tlen == 0) {
738 /*
739 * send fragment using for a sequence.
740 */
741- error = lp->tt.seq_send(lp, sp, fp, f_ctl);
742+ error = lp->tt.seq_send(lp, seq, fp, f_ctl);
743 } else {
744 continue;
745 }
746@@ -660,7 +597,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
747 if (fsp->wait_for_comp)
748 complete(&fsp->tm_done);
749 else
750- fc_fcp_complete(fsp);
751+ fc_fcp_complete_locked(fsp);
752 }
753 }
754
755@@ -704,7 +641,7 @@ done:
756 * Context : called from Soft IRQ context
757 * can not called holding list lock
758 */
759-static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
760+static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
761 {
762 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
763 struct fc_lport *lp;
764@@ -743,11 +680,11 @@ static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
765 dd = fc_frame_payload_get(fp, sizeof(*dd));
766 WARN_ON(!dd);
767
768- rc = fc_fcp_send_data(fsp, sp,
769+ rc = fc_fcp_send_data(fsp, seq,
770 (size_t) ntohl(dd->ft_data_ro),
771 (size_t) ntohl(dd->ft_burst_len));
772 if (!rc)
773- lp->tt.seq_set_rec_data(sp, fsp->xfer_len);
774+ lp->tt.seq_set_rec_data(seq, fsp->xfer_len);
775 else if (rc == -ENOMEM)
776 fsp->state |= FC_SRB_NOMEM;
777 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
778@@ -757,7 +694,7 @@ static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
779 */
780 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
781 fc_fcp_recv_data(fsp, fp);
782- lp->tt.seq_set_rec_data(sp, fsp->xfer_contig_end);
783+ lp->tt.seq_set_rec_data(seq, fsp->xfer_contig_end);
784 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
785 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
786
787@@ -874,7 +811,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
788 fsp->rport->port_id,
789 fsp->xfer_len, expected_len, fsp->data_len);
790 }
791- fc_fcp_complete(fsp);
792+ fc_fcp_complete_locked(fsp);
793 return;
794
795 len_err:
796@@ -882,20 +819,20 @@ len_err:
797 flags, fr_len(fp), respl, snsl);
798 err:
799 fsp->status_code = FC_ERROR;
800- fc_fcp_complete(fsp);
801+ fc_fcp_complete_locked(fsp);
802 }
803
804 /**
805- * fc_fcp_complete - complete processing of a fcp packet
806+ * fc_fcp_complete_locked - complete processing of a fcp packet
807 * @fsp: fcp packet
808 *
809 * This function may sleep if a timer is pending. The packet lock must be
810 * held, and the host lock must not be held.
811 */
812-static void fc_fcp_complete(struct fc_fcp_pkt *fsp)
813+static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
814 {
815 struct fc_lport *lp = fsp->lp;
816- struct fc_seq *sp;
817+ struct fc_seq *seq;
818 u32 f_ctl;
819
820 if (fsp->state & FC_SRB_ABORT_PENDING)
821@@ -917,14 +854,14 @@ static void fc_fcp_complete(struct fc_fcp_pkt *fsp)
822 }
823 }
824
825- sp = fsp->seq_ptr;
826- if (sp) {
827+ seq = fsp->seq_ptr;
828+ if (seq) {
829 fsp->seq_ptr = NULL;
830 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
831 struct fc_frame *conf_frame;
832 struct fc_seq *csp;
833
834- csp = lp->tt.seq_start_next(sp);
835+ csp = lp->tt.seq_start_next(seq);
836 conf_frame = fc_frame_alloc(fsp->lp, 0);
837 if (conf_frame) {
838 fc_frame_setup(conf_frame,
839@@ -934,7 +871,7 @@ static void fc_fcp_complete(struct fc_fcp_pkt *fsp)
840 lp->tt.seq_send(lp, csp, conf_frame, f_ctl);
841 }
842 }
843- lp->tt.exch_done(sp);
844+ lp->tt.exch_done(seq);
845 }
846 fc_io_compl(fsp);
847 }
848@@ -1028,7 +965,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
849 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
850
851 spin_unlock_irq(lp->host->host_lock);
852- rc = fc_fcp_send_cmd(fsp);
853+ rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
854 spin_lock_irq(lp->host->host_lock);
855 if (rc)
856 list_del(&fsp->list);
857@@ -1036,49 +973,47 @@ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
858 return rc;
859 }
860
861-static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp)
862+static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
863+ void (*resp)(struct fc_seq *,
864+ struct fc_frame *fp,
865+ void *arg))
866 {
867- struct fc_lport *lp;
868 struct fc_frame *fp;
869- struct fc_seq *sp;
870+ struct fc_seq *seq;
871 struct fc_rport *rport;
872 struct fc_rport_libfc_priv *rp;
873+ const size_t len = sizeof(fsp->cdb_cmd);
874 int rc = 0;
875
876 if (fc_fcp_lock_pkt(fsp))
877 return 0;
878
879- if (fsp->state & FC_SRB_COMPL)
880- goto unlock;
881-
882- lp = fsp->lp;
883 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
884 if (!fp) {
885 rc = -1;
886 goto unlock;
887 }
888
889- memcpy(fc_frame_payload_get(fp, sizeof(fsp->cdb_cmd)),
890- &fsp->cdb_cmd, sizeof(fsp->cdb_cmd));
891+ memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
892 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
893 fc_frame_set_offset(fp, 0);
894 rport = fsp->rport;
895 fsp->max_payload = rport->maxframe_size;
896 rp = rport->dd_data;
897- sp = lp->tt.exch_seq_send(lp, fp,
898- fc_fcp_recv,
899- fc_fcp_pkt_destroy,
900- fsp, 0,
901- fc_host_port_id(rp->local_port->host),
902- rport->port_id,
903- FC_FC_SEQ_INIT | FC_FC_END_SEQ);
904- if (!sp) {
905+ seq = lp->tt.exch_seq_send(lp, fp,
906+ resp,
907+ fc_fcp_pkt_destroy,
908+ fsp, 0,
909+ fc_host_port_id(rp->local_port->host),
910+ rport->port_id,
911+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
912+ if (!seq) {
913 fc_frame_free(fp);
914 rc = -1;
915 goto unlock;
916 }
917 fsp->last_pkt_time = jiffies;
918- fsp->seq_ptr = sp;
919+ fsp->seq_ptr = seq;
920 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
921
922 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
923@@ -1113,7 +1048,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
924 */
925 fsp->state &= ~FC_SRB_ABORT_PENDING;
926 fsp->status_code = FC_CMD_PLOGO;
927- fc_fcp_complete(fsp);
928+ fc_fcp_complete_locked(fsp);
929 unlock:
930 fc_fcp_unlock_pkt(fsp);
931 }
932@@ -1143,7 +1078,7 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
933 } else if (fsp->state & FC_SRB_ABORTED) {
934 FC_DBG("target abort cmd passed\n");
935 rc = SUCCESS;
936- fc_fcp_complete(fsp);
937+ fc_fcp_complete_locked(fsp);
938 }
939
940 return rc;
941@@ -1155,47 +1090,16 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
942 static void fc_lun_reset_send(unsigned long data)
943 {
944 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
945- const size_t len = sizeof(fsp->cdb_cmd);
946 struct fc_lport *lp = fsp->lp;
947- struct fc_frame *fp;
948- struct fc_seq *sp;
949- struct fc_rport *rport;
950- struct fc_rport_libfc_priv *rp;
951-
952- spin_lock_bh(&fsp->scsi_pkt_lock);
953- if (fsp->state & FC_SRB_COMPL)
954- goto unlock;
955-
956- fp = fc_frame_alloc(lp, len);
957- if (!fp)
958- goto retry;
959- memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
960- fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
961- fc_frame_set_offset(fp, 0);
962- rport = fsp->rport;
963- rp = rport->dd_data;
964- sp = lp->tt.exch_seq_send(lp, fp,
965- fc_tm_done,
966- fc_fcp_pkt_destroy,
967- fsp, 0,
968- fc_host_port_id(rp->local_port->host),
969- rport->port_id,
970- FC_FC_SEQ_INIT | FC_FC_END_SEQ);
971-
972- if (sp) {
973- fsp->seq_ptr = sp;
974- fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
975- goto unlock;
976+ if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
977+ if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
978+ return;
979+ if (fc_fcp_lock_pkt(fsp))
980+ return;
981+ setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
982+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
983+ fc_fcp_unlock_pkt(fsp);
984 }
985- /*
986- * Exchange or frame allocation failed. Set timer and retry.
987- */
988- fc_frame_free(fp);
989-retry:
990- setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
991- fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
992-unlock:
993- spin_unlock_bh(&fsp->scsi_pkt_lock);
994 }
995
996 /*
997@@ -1253,12 +1157,11 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
998 /*
999 * Task Managment response handler
1000 */
1001-static void fc_tm_done(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1002+static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1003 {
1004 struct fc_fcp_pkt *fsp = arg;
1005 struct fc_frame_header *fh;
1006
1007- spin_lock_bh(&fsp->scsi_pkt_lock);
1008 if (IS_ERR(fp)) {
1009 /*
1010 * If there is an error just let it timeout or wait
1011@@ -1266,15 +1169,16 @@ static void fc_tm_done(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1012 *
1013 * scsi-eh will escalate for when either happens.
1014 */
1015- spin_unlock_bh(&fsp->scsi_pkt_lock);
1016 return;
1017 }
1018
1019+ if (fc_fcp_lock_pkt(fsp))
1020+ return;
1021+
1022 /*
1023 * raced with eh timeout handler.
1024 */
1025- if ((fsp->state & FC_SRB_COMPL) || !fsp->seq_ptr ||
1026- !fsp->wait_for_comp) {
1027+ if (!fsp->seq_ptr || !fsp->wait_for_comp) {
1028 spin_unlock_bh(&fsp->scsi_pkt_lock);
1029 return;
1030 }
1031@@ -1283,9 +1187,9 @@ static void fc_tm_done(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1032 if (fh->fh_type != FC_TYPE_BLS)
1033 fc_fcp_resp(fsp, fp);
1034 fsp->seq_ptr = NULL;
1035- fsp->lp->tt.exch_done(sp);
1036+ fsp->lp->tt.exch_done(seq);
1037 fc_frame_free(fp);
1038- spin_unlock_bh(&fsp->scsi_pkt_lock);
1039+ fc_fcp_unlock_pkt(fsp);
1040 }
1041
1042 static void fc_fcp_cleanup(struct fc_lport *lp)
1043@@ -1320,8 +1224,9 @@ static void fc_fcp_timeout(unsigned long data)
1044 if (fc_fcp_lock_pkt(fsp))
1045 return;
1046
1047- if (fsp->state & FC_SRB_COMPL)
1048+ if (fsp->cdb_cmd.fc_tm_flags)
1049 goto unlock;
1050+
1051 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1052
1053 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
1054@@ -1330,7 +1235,7 @@ static void fc_fcp_timeout(unsigned long data)
1055 jiffies))
1056 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1057 else if (fsp->state & FC_SRB_RCV_STATUS)
1058- fc_fcp_complete(fsp);
1059+ fc_fcp_complete_locked(fsp);
1060 else
1061 fc_timeout_error(fsp);
1062 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1063@@ -1344,7 +1249,7 @@ unlock:
1064 static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1065 {
1066 struct fc_lport *lp;
1067- struct fc_seq *sp;
1068+ struct fc_seq *seq;
1069 struct fc_frame *fp;
1070 struct fc_els_rec *rec;
1071 struct fc_rport *rport;
1072@@ -1355,14 +1260,14 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1073 lp = fsp->lp;
1074 rport = fsp->rport;
1075 rp = rport->dd_data;
1076- sp = fsp->seq_ptr;
1077- if (!sp || rp->rp_state != RPORT_ST_READY) {
1078+ seq = fsp->seq_ptr;
1079+ if (!seq || rp->rp_state != RPORT_ST_READY) {
1080 fsp->status_code = FC_HRD_ERROR;
1081 fsp->io_status = SUGGEST_RETRY << 24;
1082- fc_fcp_complete(fsp);
1083+ fc_fcp_complete_locked(fsp);
1084 return;
1085 }
1086- lp->tt.seq_get_xids(sp, &ox_id, &rx_id);
1087+ lp->tt.seq_get_xids(seq, &ox_id, &rx_id);
1088 fp = fc_frame_alloc(lp, sizeof(*rec));
1089 if (!fp)
1090 goto retry;
1091@@ -1376,14 +1281,14 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1092
1093 fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
1094 fc_frame_set_offset(fp, 0);
1095- sp = lp->tt.exch_seq_send(lp, fp,
1096- fc_fcp_rec_resp, NULL,
1097- fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
1098- fc_host_port_id(rp->local_port->host),
1099- rport->port_id,
1100- FC_FC_SEQ_INIT | FC_FC_END_SEQ);
1101-
1102- if (sp) {
1103+ seq = lp->tt.exch_seq_send(lp, fp,
1104+ fc_fcp_rec_resp, NULL,
1105+ fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
1106+ fc_host_port_id(rp->local_port->host),
1107+ rport->port_id,
1108+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
1109+
1110+ if (seq) {
1111 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1112 return;
1113 } else
1114@@ -1402,7 +1307,7 @@ retry:
1115 * then set the timeout and return otherwise complete the exchange
1116 * and tell the scsi layer to restart the I/O.
1117 */
1118-static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1119+static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1120 {
1121 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
1122 struct fc_els_rec_acc *recp;
1123@@ -1605,7 +1510,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1124 struct fc_lport *lp = fsp->lp;
1125 struct fc_rport *rport;
1126 struct fc_rport_libfc_priv *rp;
1127- struct fc_seq *sp;
1128+ struct fc_seq *seq;
1129 struct fcp_srr *srr;
1130 struct fc_frame *fp;
1131 u8 cdb_op;
1132@@ -1633,17 +1538,17 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1133
1134 fc_frame_setup(fp, FC_RCTL_ELS4_REQ, FC_TYPE_FCP);
1135 fc_frame_set_offset(fp, 0);
1136- sp = lp->tt.exch_seq_send(lp, fp,
1137- fc_fcp_srr_resp, NULL,
1138- fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
1139- fc_host_port_id(rp->local_port->host),
1140- rport->port_id,
1141- FC_FC_SEQ_INIT | FC_FC_END_SEQ);
1142- if (!sp) {
1143+ seq = lp->tt.exch_seq_send(lp, fp,
1144+ fc_fcp_srr_resp, NULL,
1145+ fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
1146+ fc_host_port_id(rp->local_port->host),
1147+ rport->port_id,
1148+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
1149+ if (!seq) {
1150 fc_frame_free(fp);
1151 goto retry;
1152 }
1153- fsp->recov_seq = sp;
1154+ fsp->recov_seq = seq;
1155 fsp->xfer_len = offset;
1156 fsp->xfer_contig_end = offset;
1157 fsp->state &= ~FC_SRB_RCV_STATUS;
1158@@ -1656,7 +1561,7 @@ retry:
1159 /*
1160 * Handle response from SRR.
1161 */
1162-static void fc_fcp_srr_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1163+static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1164 {
1165 struct fc_fcp_pkt *fsp = arg;
1166 struct fc_frame_header *fh;
1167@@ -1698,7 +1603,7 @@ static void fc_fcp_srr_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1168 break;
1169 }
1170 fc_fcp_unlock_pkt(fsp);
1171- fsp->lp->tt.exch_done(sp);
1172+ fsp->lp->tt.exch_done(seq);
1173 out:
1174 fc_frame_free(fp);
1175 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1176@@ -1746,7 +1651,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1177 {
1178 struct fc_lport *lp;
1179 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1180- struct fc_fcp_pkt *sp;
1181+ struct fc_fcp_pkt *fsp;
1182 struct fc_rport_libfc_priv *rp;
1183 int rval;
1184 int rc = 0;
1185@@ -1778,8 +1683,8 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1186 goto out;
1187 }
1188
1189- sp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
1190- if (sp == NULL) {
1191+ fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
1192+ if (fsp == NULL) {
1193 rc = SCSI_MLQUEUE_HOST_BUSY;
1194 goto out;
1195 }
1196@@ -1787,48 +1692,48 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1197 /*
1198 * build the libfc request pkt
1199 */
1200- sp->cmd = sc_cmd; /* save the cmd */
1201- sp->lp = lp; /* save the softc ptr */
1202- sp->rport = rport; /* set the remote port ptr */
1203+ fsp->cmd = sc_cmd; /* save the cmd */
1204+ fsp->lp = lp; /* save the softc ptr */
1205+ fsp->rport = rport; /* set the remote port ptr */
1206 sc_cmd->scsi_done = done;
1207
1208 /*
1209 * set up the transfer length
1210 */
1211- sp->data_len = scsi_bufflen(sc_cmd);
1212- sp->xfer_len = 0;
1213+ fsp->data_len = scsi_bufflen(sc_cmd);
1214+ fsp->xfer_len = 0;
1215
1216 /*
1217 * setup the data direction
1218 */
1219 stats = lp->dev_stats[smp_processor_id()];
1220 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1221- sp->req_flags = FC_SRB_READ;
1222+ fsp->req_flags = FC_SRB_READ;
1223 stats->InputRequests++;
1224- stats->InputMegabytes = sp->data_len;
1225+ stats->InputMegabytes = fsp->data_len;
1226 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1227- sp->req_flags = FC_SRB_WRITE;
1228+ fsp->req_flags = FC_SRB_WRITE;
1229 stats->OutputRequests++;
1230- stats->OutputMegabytes = sp->data_len;
1231+ stats->OutputMegabytes = fsp->data_len;
1232 } else {
1233- sp->req_flags = 0;
1234+ fsp->req_flags = 0;
1235 stats->ControlRequests++;
1236 }
1237
1238- sp->tgt_flags = rp->flags;
1239+ fsp->tgt_flags = rp->flags;
1240
1241- init_timer(&sp->timer);
1242- sp->timer.data = (unsigned long)sp;
1243+ init_timer(&fsp->timer);
1244+ fsp->timer.data = (unsigned long)fsp;
1245
1246 /*
1247 * send it to the lower layer
1248 * if we get -1 return then put the request in the pending
1249 * queue.
1250 */
1251- rval = fc_fcp_pkt_send(lp, sp);
1252+ rval = fc_fcp_pkt_send(lp, fsp);
1253 if (rval != 0) {
1254- sp->state = FC_SRB_FREE;
1255- fc_fcp_pkt_release(sp);
1256+ fsp->state = FC_SRB_FREE;
1257+ fc_fcp_pkt_release(fsp);
1258 rc = SCSI_MLQUEUE_HOST_BUSY;
1259 }
1260 out:
1261@@ -1838,30 +1743,30 @@ EXPORT_SYMBOL(fc_queuecommand);
1262
1263 /**
1264 * fc_io_compl - Handle responses for completed commands
1265- * @sp: scsi packet
1266+ * @fsp: scsi packet
1267 *
1268 * Translates a error to a Linux SCSI error.
1269 *
1270 * The fcp packet lock must be held when calling.
1271 */
1272-static void fc_io_compl(struct fc_fcp_pkt *sp)
1273+static void fc_io_compl(struct fc_fcp_pkt *fsp)
1274 {
1275 struct fc_fcp_internal *si;
1276 struct scsi_cmnd *sc_cmd;
1277 struct fc_lport *lp;
1278 unsigned long flags;
1279
1280- sp->state |= FC_SRB_COMPL;
1281- if (!(sp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1282- spin_unlock_bh(&sp->scsi_pkt_lock);
1283- del_timer_sync(&sp->timer);
1284- spin_lock_bh(&sp->scsi_pkt_lock);
1285+ fsp->state |= FC_SRB_COMPL;
1286+ if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1287+ spin_unlock_bh(&fsp->scsi_pkt_lock);
1288+ del_timer_sync(&fsp->timer);
1289+ spin_lock_bh(&fsp->scsi_pkt_lock);
1290 }
1291
1292- lp = sp->lp;
1293+ lp = fsp->lp;
1294 si = fc_get_scsi_internal(lp);
1295 spin_lock_irqsave(lp->host->host_lock, flags);
1296- if (!sp->cmd) {
1297+ if (!fsp->cmd) {
1298 spin_unlock_irqrestore(lp->host->host_lock, flags);
1299 return;
1300 }
1301@@ -1872,28 +1777,28 @@ static void fc_io_compl(struct fc_fcp_pkt *sp)
1302 * try again so clear the throttled flag incase we get more
1303 * time outs.
1304 */
1305- if (si->throttled && sp->state & FC_SRB_NOMEM)
1306+ if (si->throttled && fsp->state & FC_SRB_NOMEM)
1307 si->throttled = 0;
1308
1309- sc_cmd = sp->cmd;
1310- sp->cmd = NULL;
1311+ sc_cmd = fsp->cmd;
1312+ fsp->cmd = NULL;
1313
1314 if (!sc_cmd->SCp.ptr) {
1315 spin_unlock_irqrestore(lp->host->host_lock, flags);
1316 return;
1317 }
1318
1319- CMD_SCSI_STATUS(sc_cmd) = sp->cdb_status;
1320- switch (sp->status_code) {
1321+ CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1322+ switch (fsp->status_code) {
1323 case FC_COMPLETE:
1324- if (sp->cdb_status == 0) {
1325+ if (fsp->cdb_status == 0) {
1326 /*
1327 * good I/O status
1328 */
1329 sc_cmd->result = DID_OK << 16;
1330- if (sp->scsi_resid)
1331- CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
1332- } else if (sp->cdb_status == QUEUE_FULL) {
1333+ if (fsp->scsi_resid)
1334+ CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1335+ } else if (fsp->cdb_status == QUEUE_FULL) {
1336 struct scsi_device *tmp_sdev;
1337 struct scsi_device *sdev = sc_cmd->device;
1338
1339@@ -1907,47 +1812,44 @@ static void fc_io_compl(struct fc_fcp_pkt *sp)
1340 queue_depth - 1);
1341 }
1342 }
1343- sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
1344+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1345 } else {
1346 /*
1347 * transport level I/O was ok but scsi
1348 * has non zero status
1349 */
1350- sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
1351+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1352 }
1353 break;
1354 case FC_ERROR:
1355- if (sp->io_status & (SUGGEST_RETRY << 24))
1356- sc_cmd->result = DID_IMM_RETRY << 16;
1357- else
1358- sc_cmd->result = (DID_ERROR << 16) | sp->io_status;
1359+ sc_cmd->result = DID_ERROR << 16;
1360 break;
1361 case FC_DATA_UNDRUN:
1362- if (sp->cdb_status == 0) {
1363+ if (fsp->cdb_status == 0) {
1364 /*
1365 * scsi status is good but transport level
1366 * underrun. for read it should be an error??
1367 */
1368- sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
1369+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1370 } else {
1371 /*
1372 * scsi got underrun, this is an error
1373 */
1374- CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
1375- sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
1376+ CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1377+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1378 }
1379 break;
1380 case FC_DATA_OVRRUN:
1381 /*
1382 * overrun is an error
1383 */
1384- sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
1385+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1386 break;
1387 case FC_CMD_ABORTED:
1388- sc_cmd->result = (DID_ABORT << 16) | sp->io_status;
1389+ sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
1390 break;
1391 case FC_CMD_TIME_OUT:
1392- sc_cmd->result = (DID_BUS_BUSY << 16) | sp->io_status;
1393+ sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1394 break;
1395 case FC_CMD_RESET:
1396 sc_cmd->result = (DID_RESET << 16);
1397@@ -1960,16 +1862,33 @@ static void fc_io_compl(struct fc_fcp_pkt *sp)
1398 break;
1399 }
1400
1401- list_del(&sp->list);
1402+ list_del(&fsp->list);
1403 sc_cmd->SCp.ptr = NULL;
1404 sc_cmd->scsi_done(sc_cmd);
1405 spin_unlock_irqrestore(lp->host->host_lock, flags);
1406
1407 /* release ref from initial allocation in queue command */
1408- fc_fcp_pkt_release(sp);
1409+ fc_fcp_pkt_release(fsp);
1410 }
1411
1412 /**
1413+ * fc_fcp_complete - complete processing of a fcp packet
1414+ * @fsp: fcp packet
1415+ *
1416+ * This function may sleep if a fsp timer is pending.
1417+ * The host lock must not be held by caller.
1418+ */
1419+void fc_fcp_complete(struct fc_fcp_pkt *fsp)
1420+{
1421+ if (fc_fcp_lock_pkt(fsp))
1422+ return;
1423+
1424+ fc_fcp_complete_locked(fsp);
1425+ fc_fcp_unlock_pkt(fsp);
1426+}
1427+EXPORT_SYMBOL(fc_fcp_complete);
1428+
1429+/**
1430 * fc_eh_abort - Abort a command...from scsi host template
1431 * @sc_cmd: scsi command to abort
1432 *
1433@@ -1978,7 +1897,7 @@ static void fc_io_compl(struct fc_fcp_pkt *sp)
1434 */
1435 int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1436 {
1437- struct fc_fcp_pkt *sp;
1438+ struct fc_fcp_pkt *fsp;
1439 struct fc_lport *lp;
1440 int rc = FAILED;
1441 unsigned long flags;
1442@@ -1990,27 +1909,27 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1443 return rc;
1444
1445 spin_lock_irqsave(lp->host->host_lock, flags);
1446- sp = CMD_SP(sc_cmd);
1447- if (!sp) {
1448+ fsp = CMD_SP(sc_cmd);
1449+ if (!fsp) {
1450 /* command completed while scsi eh was setting up */
1451 spin_unlock_irqrestore(lp->host->host_lock, flags);
1452 return SUCCESS;
1453 }
1454- /* grab a ref so the sp and sc_cmd cannot be relased from under us */
1455- fc_fcp_pkt_hold(sp);
1456+ /* grab a ref so the fsp and sc_cmd cannot be relased from under us */
1457+ fc_fcp_pkt_hold(fsp);
1458 spin_unlock_irqrestore(lp->host->host_lock, flags);
1459
1460- if (fc_fcp_lock_pkt(sp)) {
1461+ if (fc_fcp_lock_pkt(fsp)) {
1462 /* completed while we were waiting for timer to be deleted */
1463 rc = SUCCESS;
1464 goto release_pkt;
1465 }
1466
1467- rc = fc_fcp_pkt_abort(lp, sp);
1468- fc_fcp_unlock_pkt(sp);
1469+ rc = fc_fcp_pkt_abort(lp, fsp);
1470+ fc_fcp_unlock_pkt(fsp);
1471
1472 release_pkt:
1473- fc_fcp_pkt_release(sp);
1474+ fc_fcp_pkt_release(fsp);
1475 return rc;
1476 }
1477 EXPORT_SYMBOL(fc_eh_abort);
1478@@ -2025,7 +1944,7 @@ EXPORT_SYMBOL(fc_eh_abort);
1479 int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1480 {
1481 struct fc_lport *lp;
1482- struct fc_fcp_pkt *sp;
1483+ struct fc_fcp_pkt *fsp;
1484 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1485 int rc = FAILED;
1486 struct fc_rport_libfc_priv *rp;
1487@@ -2041,8 +1960,8 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1488 if (lp->state != LPORT_ST_READY)
1489 return rc;
1490
1491- sp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
1492- if (sp == NULL) {
1493+ fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
1494+ if (fsp == NULL) {
1495 FC_DBG("could not allocate scsi_pkt\n");
1496 sc_cmd->result = DID_NO_CONNECT << 16;
1497 goto out;
1498@@ -2053,15 +1972,15 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1499 * the sc passed in is not setup for execution like when sent
1500 * through the queuecommand callout.
1501 */
1502- sp->lp = lp; /* save the softc ptr */
1503- sp->rport = rport; /* set the remote port ptr */
1504+ fsp->lp = lp; /* save the softc ptr */
1505+ fsp->rport = rport; /* set the remote port ptr */
1506
1507 /*
1508 * flush outstanding commands
1509 */
1510- rc = fc_lun_reset(lp, sp, scmd_id(sc_cmd), sc_cmd->device->lun);
1511- sp->state = FC_SRB_FREE;
1512- fc_fcp_pkt_release(sp);
1513+ rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
1514+ fsp->state = FC_SRB_FREE;
1515+ fc_fcp_pkt_release(fsp);
1516
1517 out:
1518 return rc;
1519@@ -2160,11 +2079,14 @@ int fc_fcp_init(struct fc_lport *lp)
1520 int rc;
1521 struct fc_fcp_internal *si;
1522
1523- if (!lp->tt.scsi_cleanup)
1524- lp->tt.scsi_cleanup = fc_fcp_cleanup;
1525+ if (!lp->tt.fcp_cmd_send)
1526+ lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
1527+
1528+ if (!lp->tt.fcp_cleanup)
1529+ lp->tt.fcp_cleanup = fc_fcp_cleanup;
1530
1531- if (!lp->tt.scsi_abort_io)
1532- lp->tt.scsi_abort_io = fc_fcp_abort_io;
1533+ if (!lp->tt.fcp_abort_io)
1534+ lp->tt.fcp_abort_io = fc_fcp_abort_io;
1535
1536 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
1537 if (!si)
1538diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
1539index 7ba241e..388dc6c 100644
1540--- a/drivers/scsi/libfc/fc_frame.c
1541+++ b/drivers/scsi/libfc/fc_frame.c
1542@@ -82,7 +82,7 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
1543 if (fp) {
1544 memset((char *) fr_hdr(fp) + payload_len, 0, fill);
1545 /* trim is OK, we just allocated it so there are no fragments */
1546- skb_trim(fp_skb(fp), payload_len);
1547+ skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header));
1548 }
1549 return fp;
1550 }
1551diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
1552index bfbc7d4..7e7c060 100644
1553--- a/drivers/scsi/libfc/fc_lport.c
1554+++ b/drivers/scsi/libfc/fc_lport.c
1555@@ -28,7 +28,7 @@
1556 * the lport to be reset before we fill out the frame header's port_id. The
1557 * problem is that a reset would cause the lport's port_id to reset to 0.
1558 * If we don't protect the lport we'd spew incorrect frames.
1559- *
1560+ *
1561 * At the time of this writing there are two primary mutexes, one for the
1562 * lport and one for the rport. Since the lport uses the rport and makes
1563 * calls into that block the rport should never make calls that would cause
1564@@ -537,7 +537,7 @@ void fc_linkdown(struct fc_lport *lport)
1565 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
1566 lport->link_status &= ~(FC_LINK_UP);
1567 fc_lport_enter_reset(lport);
1568- lport->tt.scsi_cleanup(lport);
1569+ lport->tt.fcp_cleanup(lport);
1570 }
1571
1572 mutex_unlock(&lport->lp_mutex);
1573@@ -579,7 +579,7 @@ int fc_fabric_logoff(struct fc_lport *lport)
1574 {
1575 mutex_lock(&lport->lp_mutex);
1576 fc_lport_enter_logo(lport);
1577- lport->tt.scsi_cleanup(lport);
1578+ lport->tt.fcp_cleanup(lport);
1579 mutex_unlock(&lport->lp_mutex);
1580 return 0;
1581 }
1582@@ -600,7 +600,7 @@ EXPORT_SYMBOL(fc_fabric_logoff);
1583 int fc_lport_destroy(struct fc_lport *lport)
1584 {
1585 cancel_delayed_work_sync(&lport->disc_work);
1586- lport->tt.scsi_abort_io(lport);
1587+ lport->tt.fcp_abort_io(lport);
1588 lport->tt.frame_send = fc_frame_drop;
1589 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
1590 return 0;
1591@@ -929,8 +929,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1592 if (!fp)
1593 delay = msecs_to_jiffies(500);
1594 else
1595- delay = jiffies +
1596- msecs_to_jiffies(lport->e_d_tov);
1597+ delay = msecs_to_jiffies(lport->e_d_tov);
1598
1599 schedule_delayed_work(&lport->retry_work, delay);
1600 } else {
1601@@ -969,6 +968,9 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1602 struct fc_frame_header *fh;
1603 struct fc_ct_hdr *ct;
1604
1605+ if (fp == ERR_PTR(-FC_EX_CLOSED))
1606+ return;
1607+
1608 mutex_lock(&lport->lp_mutex);
1609
1610 FC_DEBUG_LPORT("Received a RFT_ID response\n");
1611@@ -1018,6 +1020,9 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1612 struct fc_frame_header *fh;
1613 struct fc_ct_hdr *ct;
1614
1615+ if (fp == ERR_PTR(-FC_EX_CLOSED))
1616+ return;
1617+
1618 mutex_lock(&lport->lp_mutex);
1619
1620 FC_DEBUG_LPORT("Received a RPN_ID response\n");
1621@@ -1065,6 +1070,9 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1622 struct fc_lport *lport = lp_arg;
1623 u8 op;
1624
1625+ if (fp == ERR_PTR(-FC_EX_CLOSED))
1626+ return;
1627+
1628 mutex_lock(&lport->lp_mutex);
1629
1630 FC_DEBUG_LPORT("Received a SCR response\n");
1631@@ -1332,6 +1340,9 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1632 struct fc_lport *lport = lp_arg;
1633 u8 op;
1634
1635+ if (fp == ERR_PTR(-FC_EX_CLOSED))
1636+ return;
1637+
1638 mutex_lock(&lport->lp_mutex);
1639
1640 FC_DEBUG_LPORT("Received a LOGO response\n");
1641@@ -1426,6 +1437,9 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1642 unsigned int e_d_tov;
1643 u16 mfs;
1644
1645+ if (fp == ERR_PTR(-FC_EX_CLOSED))
1646+ return;
1647+
1648 mutex_lock(&lport->lp_mutex);
1649
1650 FC_DEBUG_LPORT("Received a FLOGI response\n");
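
The fc_lport.c response handlers touched above all gain the same early return: when the exchange layer invokes the callback with fp set to ERR_PTR(-FC_EX_CLOSED), the exchange has already been torn down (for example during reset or logoff), so there is nothing to parse and no reason to take lp_mutex. A minimal sketch of the shape these handlers now share (the function name is illustrative only):

static void example_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
{
	struct fc_lport *lport = lp_arg;

	/* Exchange already closed: no frame to decode, no state to update. */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);
	/* ... decode the response and advance the lport state machine ... */
	mutex_unlock(&lport->lp_mutex);
}
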
1651diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
1652index 651a3ed..42da6ed 100644
1653--- a/drivers/scsi/libfc/fc_rport.c
1654+++ b/drivers/scsi/libfc/fc_rport.c
1655@@ -106,6 +106,17 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
1656 rport->node_name = dp->ids.node_name;
1657 rport->roles = dp->ids.roles;
1658 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
1659+ /*
1660+ * init the device, so other code can manipulate the rport as if
1661+ * it came from the fc class. We also do an extra get because
1662+ * libfc will free this rport instead of relying on the normal
1663+ * refcounting.
1664+ *
1665+ * Note: all this libfc rogue rport code will be removed for
1666+ * upstream so it fine that this is really ugly and hacky right now.
1667+ */
1668+ device_initialize(&rport->dev);
1669+ get_device(&rport->dev);
1670
1671 mutex_init(&rdata->rp_mutex);
1672 rdata->local_port = dp->lp;
1673diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h
1674index 7e5e6be..237abd3 100644
1675--- a/include/scsi/libfc/libfc.h
1676+++ b/include/scsi/libfc/libfc.h
1677@@ -157,9 +157,9 @@ struct fc_rport_libfc_priv {
1678 };
1679
1680 #define PRIV_TO_RPORT(x) \
1681- (struct fc_rport*)((void *)x - sizeof(struct fc_rport));
1682+ (struct fc_rport *)((void *)x - sizeof(struct fc_rport));
1683 #define RPORT_TO_PRIV(x) \
1684- (struct fc_rport_libfc_priv*)((void *)x + sizeof(struct fc_rport));
1685+ (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport));
1686
1687 struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
1688 void fc_rport_rogue_destroy(struct fc_rport *);
1689@@ -203,6 +203,68 @@ struct fc_seq_els_data {
1690 enum fc_els_rjt_explan explan;
1691 };
1692
1693+/*
1694+ * FCP request structure, one for each scsi cmd request
1695+ */
1696+struct fc_fcp_pkt {
1697+ /*
1698+ * housekeeping stuff
1699+ */
1700+ struct fc_lport *lp; /* handle to hba struct */
1701+ u16 state; /* scsi_pkt state state */
1702+ u16 tgt_flags; /* target flags */
1703+ atomic_t ref_cnt; /* fcp pkt ref count */
1704+ spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
1705+ * if both are held at the same time */
1706+ /*
1707+ * SCSI I/O related stuff
1708+ */
1709+ struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
1710+ * under host lock */
1711+ struct list_head list; /* tracks queued commands. access under
1712+ * host lock */
1713+ /*
1714+ * timeout related stuff
1715+ */
1716+ struct timer_list timer; /* command timer */
1717+ struct completion tm_done;
1718+ int wait_for_comp;
1719+ unsigned long start_time; /* start jiffie */
1720+ unsigned long end_time; /* end jiffie */
1721+ unsigned long last_pkt_time; /* jiffies of last frame received */
1722+
1723+ /*
1724+ * scsi cmd and data transfer information
1725+ */
1726+ u32 data_len;
1727+ /*
1728+ * transport related veriables
1729+ */
1730+ struct fcp_cmnd cdb_cmd;
1731+ size_t xfer_len;
1732+ u32 xfer_contig_end; /* offset of end of contiguous xfer */
1733+ u16 max_payload; /* max payload size in bytes */
1734+
1735+ /*
1736+ * scsi/fcp return status
1737+ */
1738+ u32 io_status; /* SCSI result upper 24 bits */
1739+ u8 cdb_status;
1740+ u8 status_code; /* FCP I/O status */
1741+ /* bit 3 Underrun bit 2: overrun */
1742+ u8 scsi_comp_flags;
1743+ u32 req_flags; /* bit 0: read bit:1 write */
1744+ u32 scsi_resid; /* residule length */
1745+
1746+ struct fc_rport *rport; /* remote port pointer */
1747+ struct fc_seq *seq_ptr; /* current sequence pointer */
1748+ /*
1749+ * Error Processing
1750+ */
1751+ u8 recov_retry; /* count of recovery retries */
1752+ struct fc_seq *recov_seq; /* sequence for REC or SRR */
1753+};
1754+
1755 struct libfc_function_template {
1756
1757 /**
1758@@ -372,7 +434,7 @@ struct libfc_function_template {
1759 */
1760 int (*rport_logout)(struct fc_rport *rport);
1761
1762- /*
1763+ /*
1764 * Delete the rport and remove it from the transport if
1765 * it had been added. This will not send a LOGO, use
1766 * rport_logout for a gracefull logout.
1767@@ -388,18 +450,29 @@ struct libfc_function_template {
1768 struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
1769
1770 /**
1771- * SCSI interfaces
1772+ * FCP interfaces
1773 */
1774
1775 /*
1776+ * Send a fcp cmd from fsp pkt.
1777+ * Called with the SCSI host lock unlocked and irqs disabled.
1778+ *
1779+ * The resp handler is called when FCP_RSP received.
1780+ *
1781+ */
1782+ int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1783+ void (*resp)(struct fc_seq *, struct fc_frame *fp,
1784+ void *arg));
1785+
1786+ /*
1787 * Used at least durring linkdown and reset
1788 */
1789- void (*scsi_cleanup)(struct fc_lport *);
1790+ void (*fcp_cleanup)(struct fc_lport *lp);
1791
1792 /*
1793 * Abort all I/O on a local port
1794 */
1795- void (*scsi_abort_io)(struct fc_lport *);
1796+ void (*fcp_abort_io)(struct fc_lport *lp);
1797
1798 /**
1799 * Discovery interfaces
1800@@ -600,6 +673,14 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd,
1801 void (*done)(struct scsi_cmnd *));
1802
1803 /*
1804+ * complete processing of a fcp packet
1805+ *
1806+ * This function may sleep if a fsp timer is pending.
1807+ * The host lock must not be held by caller.
1808+ */
1809+void fc_fcp_complete(struct fc_fcp_pkt *fsp);
1810+
1811+/*
1812 * Send an ABTS frame to the target device. The sc_cmd argument
1813 * is a pointer to the SCSI command to be aborted.
1814 */