1 Subject: Open-FCoE: Update for Beta4
2 From: John Fastabend <john.r.fastabend@intel.com>
3 Date: Thu Nov 6 13:08:49 2008 +0100
4 Git: c66b456a7eb389e5f19d5bf23170b47a3e01d755
5 References: bnc#438954
6
7 Incremental Open-FCoE update for Beta4.
8
9 Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
10 Acked-by: Hannes Reinecke <hare@suse.de>
11
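This update also changes the create/destroy entry points to take a
struct net_device and drives them through two new module-level sysfs
attributes (fcoe_create()/fcoe_destroy() below). A minimal userspace
sketch of the new control path, assuming the module is built as "fcoe"
(so the attributes appear as /sys/module/fcoe/create and
/sys/module/fcoe/destroy) and that "eth3" is only a placeholder
interface name:

#include <stdio.h>

/* Write an interface name to one of the new module attributes.
 * The store handlers (fcoe_create/fcoe_destroy) strip a trailing
 * newline themselves, so a plain fputs() is enough.  Both attributes
 * are S_IWUSR, i.e. writable by root only. */
static int fcoe_ctl(const char *attr, const char *ifname)
{
        char path[64];
        FILE *f;
        int ret = 0;

        snprintf(path, sizeof(path), "/sys/module/fcoe/%s", attr);
        f = fopen(path, "w");
        if (!f)
                return -1;
        if (fputs(ifname, f) < 0)
                ret = -1;
        if (fclose(f) != 0)
                ret = -1;
        return ret;
}

int main(void)
{
        return fcoe_ctl("create", "eth3") ? 1 : 0;  /* start FCoE on eth3 */
}
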
12 diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
13 index 342e2ad..b78da06 100644
14 --- a/drivers/scsi/fcoe/Makefile
15 +++ b/drivers/scsi/fcoe/Makefile
16 @@ -3,6 +3,6 @@
17 obj-$(CONFIG_FCOE) += fcoe.o
18
19 fcoe-y := \
20 - fcoe_dev.o \
21 - fcoe_if.o \
22 - fcoeinit.o
23 + libfcoe.o \
24 + fcoe_sw.o \
25 + fc_transport_fcoe.o
26 diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
27 new file mode 100644
28 index 0000000..e11d36b
29 --- /dev/null
30 +++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
31 @@ -0,0 +1,396 @@
32 +/*
33 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
34 + *
35 + * This program is free software; you can redistribute it and/or modify it
36 + * under the terms and conditions of the GNU General Public License,
37 + * version 2, as published by the Free Software Foundation.
38 + *
39 + * This program is distributed in the hope it will be useful, but WITHOUT
40 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
41 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
42 + * more details.
43 + *
44 + * You should have received a copy of the GNU General Public License along with
45 + * this program; if not, write to the Free Software Foundation, Inc.,
46 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
47 + *
48 + * Maintained at www.Open-FCoE.org
49 + */
50 +
51 +#include <linux/module.h>
52 +#include <linux/version.h>
53 +#include <linux/kernel.h>
54 +#include <linux/kthread.h>
55 +#include <linux/spinlock.h>
56 +#include <linux/cpu.h>
57 +#include <linux/netdevice.h>
58 +#include <linux/etherdevice.h>
59 +#include <linux/ethtool.h>
60 +#include <linux/if_ether.h>
61 +#include <linux/fs.h>
62 +#include <linux/sysfs.h>
63 +#include <linux/ctype.h>
64 +
65 +#include <scsi/libfc/libfc.h>
66 +
67 +#include "fcoe_def.h"
68 +
69 +MODULE_AUTHOR("Open-FCoE.org");
70 +MODULE_DESCRIPTION("FCoE");
71 +MODULE_LICENSE("GPL");
72 +
73 +/*
74 + * Static functions and variable definitions
75 + */
76 +#ifdef CONFIG_HOTPLUG_CPU
77 +static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
78 +#endif /* CONFIG_HOTPLUG_CPU */
79 +static int fcoe_device_notification(struct notifier_block *, ulong, void *);
80 +static void fcoe_dev_setup(void);
81 +static void fcoe_dev_cleanup(void);
82 +
83 +#ifdef CONFIG_HOTPLUG_CPU
84 +static struct notifier_block fcoe_cpu_notifier = {
85 + .notifier_call = fcoe_cpu_callback,
86 +};
87 +#endif /* CONFIG_HOTPLUG_CPU */
88 +
89 +/*
90 + * notification function from net device
91 + */
92 +static struct notifier_block fcoe_notifier = {
93 + .notifier_call = fcoe_device_notification,
94 +};
95 +
96 +#ifdef CONFIG_HOTPLUG_CPU
97 +/*
98 + * create percpu stats block
99 + * called by cpu add/remove notifier
100 + */
101 +static void fcoe_create_percpu_data(int cpu)
102 +{
103 + struct fc_lport *lp;
104 + struct fcoe_softc *fc;
105 + struct fcoe_dev_stats *p;
106 +
107 + write_lock_bh(&fcoe_hostlist_lock);
108 + list_for_each_entry(fc, &fcoe_hostlist, list) {
109 + lp = fc->lp;
110 + if (lp->dev_stats[cpu] == NULL) {
111 + p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
112 + if (p)
113 + lp->dev_stats[cpu] = p;
114 + }
115 + }
116 + write_unlock_bh(&fcoe_hostlist_lock);
117 +}
118 +
119 +/*
120 + * destroy percpu stats block
121 + * called by cpu add/remove notifier
122 + */
123 +static void fcoe_destroy_percpu_data(int cpu)
124 +{
125 + struct fcoe_dev_stats *p;
126 + struct fc_lport *lp;
127 + struct fcoe_softc *fc;
128 +
129 + write_lock_bh(&fcoe_hostlist_lock);
130 + list_for_each_entry(fc, &fcoe_hostlist, list) {
131 + lp = fc->lp;
132 + p = lp->dev_stats[cpu];
133 + if (p != NULL) {
134 + lp->dev_stats[cpu] = NULL;
135 + kfree(p);
136 + }
137 + }
138 + write_unlock_bh(&fcoe_hostlist_lock);
139 +}
140 +
141 +/*
142 + * Get notified when a cpu comes on/off. Be hotplug friendly.
143 + */
144 +static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
145 + void *hcpu)
146 +{
147 + unsigned int cpu = (unsigned long)hcpu;
148 +
149 + switch (action) {
150 + case CPU_ONLINE:
151 + fcoe_create_percpu_data(cpu);
152 + break;
153 + case CPU_DEAD:
154 + fcoe_destroy_percpu_data(cpu);
155 + break;
156 + default:
157 + break;
158 + }
159 + return NOTIFY_OK;
160 +}
161 +#endif /* CONFIG_HOTPLUG_CPU */
162 +
163 +/*
164 + * function to set up the link change notification interface
165 + */
166 +static void fcoe_dev_setup(void)
167 +{
168 + /*
169 + * here we set up an interface-specific watchdog to
170 + * monitor the link state
171 + */
172 + register_netdevice_notifier(&fcoe_notifier);
173 +}
174 +
175 +/*
176 + * function to clean up the link change notification interface
177 + */
178 +static void fcoe_dev_cleanup(void)
179 +{
180 + unregister_netdevice_notifier(&fcoe_notifier);
181 +}
182 +
183 +/*
184 + * This function is called by the Ethernet driver
185 + * in case of a link change event.
186 + */
187 +static int fcoe_device_notification(struct notifier_block *notifier,
188 + ulong event, void *ptr)
189 +{
190 + struct fc_lport *lp = NULL;
191 + struct net_device *real_dev = ptr;
192 + struct fcoe_softc *fc;
193 + struct fcoe_dev_stats *stats;
194 + u16 new_status;
195 + u32 mfs;
196 + int rc = NOTIFY_OK;
197 +
198 + read_lock(&fcoe_hostlist_lock);
199 + list_for_each_entry(fc, &fcoe_hostlist, list) {
200 + if (fc->real_dev == real_dev) {
201 + lp = fc->lp;
202 + break;
203 + }
204 + }
205 + read_unlock(&fcoe_hostlist_lock);
206 + if (lp == NULL) {
207 + rc = NOTIFY_DONE;
208 + goto out;
209 + }
210 +
211 + new_status = lp->link_status;
212 + switch (event) {
213 + case NETDEV_DOWN:
214 + case NETDEV_GOING_DOWN:
215 + new_status &= ~FC_LINK_UP;
216 + break;
217 + case NETDEV_UP:
218 + case NETDEV_CHANGE:
219 + new_status &= ~FC_LINK_UP;
220 + if (!fcoe_link_ok(lp))
221 + new_status |= FC_LINK_UP;
222 + break;
223 + case NETDEV_CHANGEMTU:
224 + mfs = fc->real_dev->mtu -
225 + (sizeof(struct fcoe_hdr) +
226 + sizeof(struct fcoe_crc_eof));
227 + if (fc->user_mfs && fc->user_mfs < mfs)
228 + mfs = fc->user_mfs;
229 + if (mfs >= FC_MIN_MAX_FRAME)
230 + fc_set_mfs(lp, mfs);
231 + new_status &= ~FC_LINK_UP;
232 + if (!fcoe_link_ok(lp))
233 + new_status |= FC_LINK_UP;
234 + break;
235 + case NETDEV_REGISTER:
236 + break;
237 + default:
238 + FC_DBG("unknown event %ld call", event);
239 + }
240 + if (lp->link_status != new_status) {
241 + if ((new_status & FC_LINK_UP) == FC_LINK_UP)
242 + fc_linkup(lp);
243 + else {
244 + stats = lp->dev_stats[smp_processor_id()];
245 + stats->LinkFailureCount++;
246 + fc_linkdown(lp);
247 + fcoe_clean_pending_queue(lp);
248 + }
249 + }
250 +out:
251 + return rc;
252 +}
253 +
254 +static void trimstr(char *str, int len)
255 +{
256 + char *cp = str + len;
257 + while (--cp >= str && *cp == '\n')
258 + *cp = '\0';
259 +}
260 +
261 +static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr,
262 + const char *buffer, size_t size)
263 +{
264 + struct net_device *netdev;
265 + char ifname[IFNAMSIZ + 2];
266 +
267 + strlcpy(ifname, buffer, IFNAMSIZ);
268 + trimstr(ifname, strlen(ifname));
269 + netdev = dev_get_by_name(&init_net, ifname);
270 + if (netdev) {
271 + fcoe_destroy_interface(netdev);
272 + dev_put(netdev);
273 + }
274 + return size;
275 +}
276 +
277 +static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr,
278 + const char *buffer, size_t size)
279 +{
280 + struct net_device *netdev;
281 + char ifname[IFNAMSIZ + 2];
282 +
283 + strlcpy(ifname, buffer, IFNAMSIZ);
284 + trimstr(ifname, strlen(ifname));
285 + netdev = dev_get_by_name(&init_net, ifname);
286 + if (netdev) {
287 + fcoe_create_interface(netdev);
288 + dev_put(netdev);
289 + }
290 + return size;
291 +}
292 +
293 +static const struct kobj_attribute fcoe_destroyattr = \
294 + __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy);
295 +static const struct kobj_attribute fcoe_createattr = \
296 + __ATTR(create, S_IWUSR, NULL, fcoe_create);
297 +
298 +/*
299 + * Initialization routine
300 + * 1. Create the fc transport software structure
301 + * 2. Initialize the linked list of port information structures
302 + */
303 +static int __init fcoe_init(void)
304 +{
305 + int rc = 0;
306 + int cpu;
307 + struct fcoe_percpu_s *p;
308 +
309 + rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
310 + &fcoe_destroyattr.attr);
311 + if (!rc)
312 + rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
313 + &fcoe_createattr.attr);
314 +
315 + if (rc)
316 + return rc;
317 +
318 + rwlock_init(&fcoe_hostlist_lock);
319 +
320 +#ifdef CONFIG_HOTPLUG_CPU
321 + register_cpu_notifier(&fcoe_cpu_notifier);
322 +#endif /* CONFIG_HOTPLUG_CPU */
323 +
324 + /*
325 + * initialize per CPU interrupt thread
326 + */
327 + for_each_online_cpu(cpu) {
328 + p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
329 + if (p) {
330 + p->thread = kthread_create(fcoe_percpu_receive_thread,
331 + (void *)p,
332 + "fcoethread/%d", cpu);
333 +
334 + /*
335 + * if there is no error then bind the thread to the cpu
336 + * initialize the semaphore and skb queue head
337 + */
338 + if (likely(!IS_ERR(p->thread))) {
339 + p->cpu = cpu;
340 + fcoe_percpu[cpu] = p;
341 + skb_queue_head_init(&p->fcoe_rx_list);
342 + kthread_bind(p->thread, cpu);
343 + wake_up_process(p->thread);
344 + } else {
345 + fcoe_percpu[cpu] = NULL;
346 + kfree(p);
347 +
348 + }
349 + }
350 + }
351 + if (rc < 0) {
352 + FC_DBG("failed to initialize proc interface\n");
353 + rc = -ENODEV;
354 + goto out_chrdev;
355 + }
356 +
357 + /*
358 + * setup link change notification
359 + */
360 + fcoe_dev_setup();
361 +
362 + init_timer(&fcoe_timer);
363 + fcoe_timer.data = 0;
364 + fcoe_timer.function = fcoe_watchdog;
365 + fcoe_timer.expires = (jiffies + (10 * HZ));
366 + add_timer(&fcoe_timer);
367 +
368 + if (fcoe_sw_init() != 0) {
369 + FC_DBG("failed to attach fc transport");
370 + return -1;
371 + }
372 +
373 + return 0;
374 +
375 +out_chrdev:
376 +#ifdef CONFIG_HOTPLUG_CPU
377 + unregister_cpu_notifier(&fcoe_cpu_notifier);
378 +#endif /* CONFIG_HOTPLUG_CPU */
379 + return rc;
380 +}
381 +module_init(fcoe_init);
382 +
383 +static void __exit fcoe_exit(void)
384 +{
385 + u32 idx;
386 + struct fcoe_softc *fc, *tmp;
387 + struct fcoe_percpu_s *p;
388 + struct sk_buff *skb;
389 +
390 + /*
391 + * Stop all callback interfaces
392 + */
393 +#ifdef CONFIG_HOTPLUG_CPU
394 + unregister_cpu_notifier(&fcoe_cpu_notifier);
395 +#endif /* CONFIG_HOTPLUG_CPU */
396 + fcoe_dev_cleanup();
397 +
398 + /*
399 + * stop timer
400 + */
401 + del_timer_sync(&fcoe_timer);
402 +
403 + /*
404 + * assuming that at this time there will be no
405 + * ioctl in progress, therefore we do not need to lock the
406 + * list.
407 + */
408 + list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
409 + fcoe_destroy_interface(fc->real_dev);
410 +
411 + for (idx = 0; idx < NR_CPUS; idx++) {
412 + if (fcoe_percpu[idx]) {
413 + kthread_stop(fcoe_percpu[idx]->thread);
414 + p = fcoe_percpu[idx];
415 + spin_lock_bh(&p->fcoe_rx_list.lock);
416 + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
417 + kfree_skb(skb);
418 + spin_unlock_bh(&p->fcoe_rx_list.lock);
419 + if (fcoe_percpu[idx]->crc_eof_page)
420 + put_page(fcoe_percpu[idx]->crc_eof_page);
421 + kfree(fcoe_percpu[idx]);
422 + }
423 + }
424 +
425 + fcoe_sw_exit();
426 +}
427 +module_exit(fcoe_exit);
428 diff --git a/drivers/scsi/fcoe/fcoe_def.h b/drivers/scsi/fcoe/fcoe_def.h
429 index 12bf69c..b00e14b 100644
430 --- a/drivers/scsi/fcoe/fcoe_def.h
431 +++ b/drivers/scsi/fcoe/fcoe_def.h
432 @@ -1,5 +1,5 @@
433 /*
434 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
435 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
436 *
437 * This program is free software; you can redistribute it and/or modify it
438 * under the terms and conditions of the GNU General Public License,
439 @@ -48,16 +48,10 @@ struct fcoe_percpu_s {
440 int crc_eof_offset;
441 };
442
443 -struct fcoe_info {
444 - struct timer_list timer;
445 - /*
446 - * fcoe host list is protected by the following read/write lock
447 - */
448 - rwlock_t fcoe_hostlist_lock;
449 - struct list_head fcoe_hostlist;
450 -
451 - struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
452 -};
453 +extern struct timer_list fcoe_timer;
454 +extern rwlock_t fcoe_hostlist_lock;
455 +extern struct list_head fcoe_hostlist;
456 +extern struct fcoe_percpu_s *fcoe_percpu[];
457
458 struct fcoe_softc {
459 struct list_head list;
460 @@ -79,22 +73,20 @@ struct fcoe_softc {
461 u8 address_mode;
462 };
463
464 -extern int debug_fcoe;
465 -extern struct fcoe_percpu_s *fcoe_percpu[];
466 -extern struct scsi_transport_template *fcoe_transport_template;
467 int fcoe_percpu_receive_thread(void *arg);
468
469 /*
470 * HBA transport ops prototypes
471 */
472 -extern struct fcoe_info fcoei;
473 -
474 void fcoe_clean_pending_queue(struct fc_lport *fd);
475 void fcoe_watchdog(ulong vp);
476 -int fcoe_destroy_interface(const char *ifname);
477 -int fcoe_create_interface(const char *ifname);
478 +int fcoe_destroy_interface(struct net_device *);
479 +int fcoe_create_interface(struct net_device *);
480 int fcoe_xmit(struct fc_lport *, struct fc_frame *);
481 int fcoe_rcv(struct sk_buff *, struct net_device *,
482 struct packet_type *, struct net_device *);
483 int fcoe_link_ok(struct fc_lport *);
484 +
485 +int __init fcoe_sw_init(void);
486 +void __exit fcoe_sw_exit(void);
487 #endif /* _FCOE_DEF_H_ */
488 diff --git a/drivers/scsi/fcoe/fcoe_dev.c b/drivers/scsi/fcoe/fcoe_dev.c
489 deleted file mode 100644
490 index d5a354f..0000000
491 --- a/drivers/scsi/fcoe/fcoe_dev.c
492 +++ /dev/null
493 @@ -1,633 +0,0 @@
494 -/*
495 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
496 - *
497 - * This program is free software; you can redistribute it and/or modify it
498 - * under the terms and conditions of the GNU General Public License,
499 - * version 2, as published by the Free Software Foundation.
500 - *
501 - * This program is distributed in the hope it will be useful, but WITHOUT
502 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
503 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
504 - * more details.
505 - *
506 - * You should have received a copy of the GNU General Public License along with
507 - * this program; if not, write to the Free Software Foundation, Inc.,
508 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
509 - *
510 - * Maintained at www.Open-FCoE.org
511 - */
512 -
513 -/*
514 - * FCOE protocol file
515 - */
516 -
517 -#include <linux/module.h>
518 -#include <linux/version.h>
519 -#include <linux/kernel.h>
520 -#include <linux/spinlock.h>
521 -#include <linux/skbuff.h>
522 -#include <linux/netdevice.h>
523 -#include <linux/etherdevice.h>
524 -#include <linux/if_ether.h>
525 -#include <linux/kthread.h>
526 -#include <linux/crc32.h>
527 -#include <scsi/scsi_tcq.h>
528 -#include <scsi/scsicam.h>
529 -#include <scsi/scsi_transport.h>
530 -#include <scsi/scsi_transport_fc.h>
531 -#include <net/rtnetlink.h>
532 -
533 -#include <scsi/fc/fc_encaps.h>
534 -
535 -#include <scsi/libfc/libfc.h>
536 -#include <scsi/libfc/fc_frame.h>
537 -
538 -#include <scsi/fc/fc_fcoe.h>
539 -#include "fcoe_def.h"
540 -
541 -#define FCOE_MAX_QUEUE_DEPTH 256
542 -
543 -/* destination address mode */
544 -#define FCOE_GW_ADDR_MODE 0x00
545 -#define FCOE_FCOUI_ADDR_MODE 0x01
546 -
547 -/* Function Prototyes */
548 -static int fcoe_check_wait_queue(struct fc_lport *);
549 -static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
550 -static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
551 -static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
552 -
553 -/*
554 - * this is the fcoe receive function
555 - * called by NET_RX_SOFTIRQ
556 - * this function will receive the packet and
557 - * build fc frame and pass it up
558 - */
559 -int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
560 - struct packet_type *ptype, struct net_device *olddev)
561 -{
562 - struct fc_lport *lp;
563 - struct fcoe_rcv_info *fr;
564 - struct fcoe_softc *fc;
565 - struct fcoe_dev_stats *stats;
566 - u8 *data;
567 - struct fc_frame_header *fh;
568 - unsigned short oxid;
569 - int cpu_idx;
570 - struct fcoe_percpu_s *fps;
571 - struct fcoe_info *fci = &fcoei;
572 -
573 - fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
574 - lp = fc->lp;
575 - if (unlikely(lp == NULL)) {
576 - FC_DBG("cannot find hba structure");
577 - goto err2;
578 - }
579 -
580 - if (unlikely(debug_fcoe)) {
581 - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
582 - "end:%p sum:%d dev:%s", skb->len, skb->data_len,
583 - skb->head, skb->data, skb_tail_pointer(skb),
584 - skb_end_pointer(skb), skb->csum,
585 - skb->dev ? skb->dev->name : "<NULL>");
586 -
587 - }
588 -
589 - /* check for FCOE packet type */
590 - if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
591 - FC_DBG("wrong FC type frame");
592 - goto err;
593 - }
594 - data = skb->data;
595 - data += sizeof(struct fcoe_hdr);
596 - fh = (struct fc_frame_header *)data;
597 - oxid = ntohs(fh->fh_ox_id);
598 -
599 - fr = fcoe_dev_from_skb(skb);
600 - fr->fr_dev = lp;
601 - fr->ptype = ptype;
602 - cpu_idx = 0;
603 -#ifdef CONFIG_SMP
604 - /*
605 - * The exchange ID are ANDed with num of online CPUs,
606 - * so that will have the least lock contention in
607 - * handling the exchange. if there is no thread
608 - * for a given idx then use first online cpu.
609 - */
610 - cpu_idx = oxid & (num_online_cpus() >> 1);
611 - if (fci->fcoe_percpu[cpu_idx] == NULL)
612 - cpu_idx = first_cpu(cpu_online_map);
613 -#endif
614 - fps = fci->fcoe_percpu[cpu_idx];
615 -
616 - spin_lock_bh(&fps->fcoe_rx_list.lock);
617 - __skb_queue_tail(&fps->fcoe_rx_list, skb);
618 - if (fps->fcoe_rx_list.qlen == 1)
619 - wake_up_process(fps->thread);
620 -
621 - spin_unlock_bh(&fps->fcoe_rx_list.lock);
622 -
623 - return 0;
624 -err:
625 -#ifdef CONFIG_SMP
626 - stats = lp->dev_stats[smp_processor_id()];
627 -#else
628 - stats = lp->dev_stats[0];
629 -#endif
630 - stats->ErrorFrames++;
631 -
632 -err2:
633 - kfree_skb(skb);
634 - return -1;
635 -}
636 -
637 -static inline int fcoe_start_io(struct sk_buff *skb)
638 -{
639 - int rc;
640 -
641 - skb_get(skb);
642 - rc = dev_queue_xmit(skb);
643 - if (rc != 0)
644 - return rc;
645 - kfree_skb(skb);
646 - return 0;
647 -}
648 -
649 -static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
650 -{
651 - struct fcoe_info *fci = &fcoei;
652 - struct fcoe_percpu_s *fps;
653 - struct page *page;
654 - int cpu_idx;
655 -
656 - cpu_idx = get_cpu();
657 - fps = fci->fcoe_percpu[cpu_idx];
658 - page = fps->crc_eof_page;
659 - if (!page) {
660 - page = alloc_page(GFP_ATOMIC);
661 - if (!page) {
662 - put_cpu();
663 - return -ENOMEM;
664 - }
665 - fps->crc_eof_page = page;
666 - WARN_ON(fps->crc_eof_offset != 0);
667 - }
668 -
669 - get_page(page);
670 - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
671 - fps->crc_eof_offset, tlen);
672 - skb->len += tlen;
673 - skb->data_len += tlen;
674 - skb->truesize += tlen;
675 - fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
676 -
677 - if (fps->crc_eof_offset >= PAGE_SIZE) {
678 - fps->crc_eof_page = NULL;
679 - fps->crc_eof_offset = 0;
680 - put_page(page);
681 - }
682 - put_cpu();
683 - return 0;
684 -}
685 -
686 -/*
687 - * this is the frame xmit routine
688 - */
689 -int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
690 -{
691 - int indx;
692 - int wlen, rc = 0;
693 - u32 crc;
694 - struct ethhdr *eh;
695 - struct fcoe_crc_eof *cp;
696 - struct sk_buff *skb;
697 - struct fcoe_dev_stats *stats;
698 - struct fc_frame_header *fh;
699 - unsigned int hlen; /* header length implies the version */
700 - unsigned int tlen; /* trailer length */
701 - int flogi_in_progress = 0;
702 - struct fcoe_softc *fc;
703 - void *data;
704 - u8 sof, eof;
705 - struct fcoe_hdr *hp;
706 -
707 - WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
708 -
709 - fc = (struct fcoe_softc *)lp->drv_priv;
710 - /*
711 - * if it is a flogi then we need to learn gw-addr
712 - * and my own fcid
713 - */
714 - fh = fc_frame_header_get(fp);
715 - if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
716 - if (fc_frame_payload_op(fp) == ELS_FLOGI) {
717 - fc->flogi_oxid = ntohs(fh->fh_ox_id);
718 - fc->address_mode = FCOE_FCOUI_ADDR_MODE;
719 - fc->flogi_progress = 1;
720 - flogi_in_progress = 1;
721 - } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
722 - /*
723 - * Here we must've gotten an SID by accepting an FLOGI
724 - * from a point-to-point connection. Switch to using
725 - * the source mac based on the SID. The destination
726 - * MAC in this case would have been set by receving the
727 - * FLOGI.
728 - */
729 - fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
730 - fc->flogi_progress = 0;
731 - }
732 - }
733 -
734 - skb = fp_skb(fp);
735 - sof = fr_sof(fp);
736 - eof = fr_eof(fp);
737 -
738 - crc = ~0;
739 - crc = crc32(crc, skb->data, skb_headlen(skb));
740 -
741 - for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) {
742 - skb_frag_t *frag = &skb_shinfo(skb)->frags[indx];
743 - unsigned long off = frag->page_offset;
744 - unsigned long len = frag->size;
745 -
746 - while (len > 0) {
747 - unsigned long clen;
748 -
749 - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
750 - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
751 - KM_SKB_DATA_SOFTIRQ);
752 - crc = crc32(crc, data + (off & ~PAGE_MASK),
753 - clen);
754 - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
755 - off += clen;
756 - len -= clen;
757 - }
758 - }
759 -
760 - /*
761 - * Get header and trailer lengths.
762 - * This is temporary code until we get rid of the old protocol.
763 - * Both versions have essentially the same trailer layout but T11
764 - * has padding afterwards.
765 - */
766 - hlen = sizeof(struct fcoe_hdr);
767 - tlen = sizeof(struct fcoe_crc_eof);
768 -
769 - /*
770 - * copy fc crc and eof to the skb buff
771 - * Use utility buffer in the fc_frame part of the sk_buff for the
772 - * trailer.
773 - * We don't do a get_page for this frag, since that page may not be
774 - * managed that way. So that skb_free() doesn't do that either, we
775 - * setup the destructor to remove this frag.
776 - */
777 - if (skb_is_nonlinear(skb)) {
778 - skb_frag_t *frag;
779 - if (fcoe_get_paged_crc_eof(skb, tlen)) {
780 - kfree(skb);
781 - return -ENOMEM;
782 - }
783 - frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
784 - cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
785 - + frag->page_offset;
786 - } else {
787 - cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
788 - }
789 -
790 - cp->fcoe_eof = eof;
791 - cp->fcoe_crc32 = cpu_to_le32(~crc);
792 - if (tlen == sizeof(*cp))
793 - memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
794 - wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
795 -
796 - if (skb_is_nonlinear(skb)) {
797 - kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
798 - cp = NULL;
799 - }
800 -
801 - /*
802 - * Fill in the control structures
803 - */
804 - skb->ip_summed = CHECKSUM_NONE;
805 - eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr));
806 - if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
807 - fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
808 - else
809 - /* insert GW address */
810 - memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
811 -
812 - if (unlikely(flogi_in_progress))
813 - memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
814 - else
815 - memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
816 -
817 - eh->h_proto = htons(ETH_P_FCOE);
818 - skb->protocol = htons(ETH_P_802_3);
819 - skb_reset_mac_header(skb);
820 - skb_reset_network_header(skb);
821 -
822 - hp = (struct fcoe_hdr *)(eh + 1);
823 - memset(hp, 0, sizeof(*hp));
824 - if (FC_FCOE_VER)
825 - FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
826 - hp->fcoe_sof = sof;
827 -
828 - stats = lp->dev_stats[smp_processor_id()];
829 - stats->TxFrames++;
830 - stats->TxWords += wlen;
831 - skb->dev = fc->real_dev;
832 -
833 - fr_dev(fp) = lp;
834 - if (fc->fcoe_pending_queue.qlen)
835 - rc = fcoe_check_wait_queue(lp);
836 -
837 - if (rc == 0)
838 - rc = fcoe_start_io(skb);
839 -
840 - if (rc) {
841 - fcoe_insert_wait_queue(lp, skb);
842 - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
843 - fc_pause(lp);
844 - }
845 -
846 - return 0;
847 -}
848 -
849 -int fcoe_percpu_receive_thread(void *arg)
850 -{
851 - struct fcoe_percpu_s *p = arg;
852 - u32 fr_len;
853 - unsigned int hlen;
854 - unsigned int tlen;
855 - struct fc_lport *lp;
856 - struct fcoe_rcv_info *fr;
857 - struct fcoe_dev_stats *stats;
858 - struct fc_frame_header *fh;
859 - struct sk_buff *skb;
860 - struct fcoe_crc_eof *cp;
861 - enum fc_sof sof;
862 - struct fc_frame *fp;
863 - u8 *mac = NULL;
864 - struct fcoe_softc *fc;
865 - struct fcoe_hdr *hp;
866 -
867 - set_user_nice(current, 19);
868 -
869 - while (!kthread_should_stop()) {
870 -
871 - spin_lock_bh(&p->fcoe_rx_list.lock);
872 - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
873 - set_current_state(TASK_INTERRUPTIBLE);
874 - spin_unlock_bh(&p->fcoe_rx_list.lock);
875 - schedule();
876 - set_current_state(TASK_RUNNING);
877 - if (kthread_should_stop())
878 - return 0;
879 - spin_lock_bh(&p->fcoe_rx_list.lock);
880 - }
881 - spin_unlock_bh(&p->fcoe_rx_list.lock);
882 - fr = fcoe_dev_from_skb(skb);
883 - lp = fr->fr_dev;
884 - if (unlikely(lp == NULL)) {
885 - FC_DBG("invalid HBA Structure");
886 - kfree_skb(skb);
887 - continue;
888 - }
889 -
890 - stats = lp->dev_stats[smp_processor_id()];
891 -
892 - if (unlikely(debug_fcoe)) {
893 - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
894 - "tail:%p end:%p sum:%d dev:%s",
895 - skb->len, skb->data_len,
896 - skb->head, skb->data, skb_tail_pointer(skb),
897 - skb_end_pointer(skb), skb->csum,
898 - skb->dev ? skb->dev->name : "<NULL>");
899 - }
900 -
901 - /*
902 - * Save source MAC address before discarding header.
903 - */
904 - fc = lp->drv_priv;
905 - if (unlikely(fc->flogi_progress))
906 - mac = eth_hdr(skb)->h_source;
907 -
908 - if (skb_is_nonlinear(skb))
909 - skb_linearize(skb); /* not ideal */
910 -
911 - /*
912 - * Check the header and pull it off.
913 - */
914 - hlen = sizeof(struct fcoe_hdr);
915 -
916 - hp = (struct fcoe_hdr *)skb->data;
917 - if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
918 - if (stats->ErrorFrames < 5)
919 - FC_DBG("unknown FCoE version %x",
920 - FC_FCOE_DECAPS_VER(hp));
921 - stats->ErrorFrames++;
922 - kfree_skb(skb);
923 - continue;
924 - }
925 - sof = hp->fcoe_sof;
926 - skb_pull(skb, sizeof(*hp));
927 - fr_len = skb->len - sizeof(struct fcoe_crc_eof);
928 - skb_trim(skb, fr_len);
929 - tlen = sizeof(struct fcoe_crc_eof);
930 -
931 - if (unlikely(fr_len > skb->len)) {
932 - if (stats->ErrorFrames < 5)
933 - FC_DBG("length error fr_len 0x%x skb->len 0x%x",
934 - fr_len, skb->len);
935 - stats->ErrorFrames++;
936 - kfree_skb(skb);
937 - continue;
938 - }
939 - stats->RxFrames++;
940 - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
941 -
942 - fp = (struct fc_frame *) skb;
943 - fc_frame_init(fp);
944 - cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
945 - fr_eof(fp) = cp->fcoe_eof;
946 - fr_sof(fp) = sof;
947 - fr_dev(fp) = lp;
948 -
949 - /*
950 - * Check the CRC here, unless it's solicited data for SCSI.
951 - * In that case, the SCSI layer can check it during the copy,
952 - * and it'll be more cache-efficient.
953 - */
954 - fh = fc_frame_header_get(fp);
955 - if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
956 - fh->fh_type == FC_TYPE_FCP) {
957 - fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
958 - fc_exch_recv(lp, lp->emp, fp);
959 - } else if (le32_to_cpu(cp->fcoe_crc32) ==
960 - ~crc32(~0, skb->data, fr_len)) {
961 - if (unlikely(fc->flogi_progress))
962 - fcoe_recv_flogi(fc, fp, mac);
963 - fc_exch_recv(lp, lp->emp, fp);
964 - } else {
965 - if (debug_fcoe || stats->InvalidCRCCount < 5) {
966 - printk(KERN_WARNING \
967 - "fcoe: dropping frame with CRC error");
968 - }
969 - stats->InvalidCRCCount++;
970 - stats->ErrorFrames++;
971 - fc_frame_free(fp);
972 - }
973 - }
974 - return 0;
975 -}
976 -
977 -/*
978 - * Snoop potential response to FLOGI or even incoming FLOGI.
979 - */
980 -static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
981 -{
982 - struct fc_frame_header *fh;
983 - u8 op;
984 -
985 - fh = fc_frame_header_get(fp);
986 - if (fh->fh_type != FC_TYPE_ELS)
987 - return;
988 - op = fc_frame_payload_op(fp);
989 - if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
990 - fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
991 - /*
992 - * FLOGI accepted.
993 - * If the src mac addr is FC_OUI-based, then we mark the
994 - * address_mode flag to use FC_OUI-based Ethernet DA.
995 - * Otherwise we use the FCoE gateway addr
996 - */
997 - if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
998 - fc->address_mode = FCOE_FCOUI_ADDR_MODE;
999 - } else {
1000 - memcpy(fc->dest_addr, sa, ETH_ALEN);
1001 - fc->address_mode = FCOE_GW_ADDR_MODE;
1002 - }
1003 -
1004 - /*
1005 - * Remove any previously-set unicast MAC filter.
1006 - * Add secondary FCoE MAC address filter for our OUI.
1007 - */
1008 - rtnl_lock();
1009 - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1010 - dev_unicast_delete(fc->real_dev, fc->data_src_addr,
1011 - ETH_ALEN);
1012 - fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
1013 - dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1014 - rtnl_unlock();
1015 -
1016 - fc->flogi_progress = 0;
1017 - } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
1018 - /*
1019 - * Save source MAC for point-to-point responses.
1020 - */
1021 - memcpy(fc->dest_addr, sa, ETH_ALEN);
1022 - fc->address_mode = FCOE_GW_ADDR_MODE;
1023 - }
1024 -}
1025 -
1026 -void fcoe_watchdog(ulong vp)
1027 -{
1028 - struct fc_lport *lp;
1029 - struct fcoe_softc *fc;
1030 - struct fcoe_info *fci = &fcoei;
1031 - int paused = 0;
1032 -
1033 - read_lock(&fci->fcoe_hostlist_lock);
1034 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
1035 - lp = fc->lp;
1036 - if (lp) {
1037 - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1038 - paused = 1;
1039 - if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
1040 - if (paused)
1041 - fc_unpause(lp);
1042 - }
1043 - }
1044 - }
1045 - read_unlock(&fci->fcoe_hostlist_lock);
1046 -
1047 - fci->timer.expires = jiffies + (1 * HZ);
1048 - add_timer(&fci->timer);
1049 -}
1050 -
1051 -/*
1052 - * the wait_queue is used when the skb transmit fails. skb will go
1053 - * in the wait_queue which will be emptied by the time function OR
1054 - * by the next skb transmit.
1055 - *
1056 - */
1057 -
1058 -/*
1059 - * Function name : fcoe_check_wait_queue()
1060 - *
1061 - * Return Values : 0 or error
1062 - *
1063 - * Description : empties the wait_queue
1064 - * dequeue the head of the wait_queue queue and
1065 - * calls fcoe_start_io() for each packet
1066 - * if all skb have been transmitted, return 0
1067 - * if a error occurs, then restore wait_queue and try again
1068 - * later
1069 - *
1070 - */
1071 -
1072 -static int fcoe_check_wait_queue(struct fc_lport *lp)
1073 -{
1074 - int rc, unpause = 0;
1075 - int paused = 0;
1076 - struct sk_buff *skb;
1077 - struct fcoe_softc *fc;
1078 -
1079 - fc = (struct fcoe_softc *)lp->drv_priv;
1080 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1081 -
1082 - /*
1083 - * is this interface paused?
1084 - */
1085 - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1086 - paused = 1;
1087 - if (fc->fcoe_pending_queue.qlen) {
1088 - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1089 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1090 - rc = fcoe_start_io(skb);
1091 - if (rc) {
1092 - fcoe_insert_wait_queue_head(lp, skb);
1093 - return rc;
1094 - }
1095 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1096 - }
1097 - if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
1098 - unpause = 1;
1099 - }
1100 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1101 - if ((unpause) && (paused))
1102 - fc_unpause(lp);
1103 - return fc->fcoe_pending_queue.qlen;
1104 -}
1105 -
1106 -static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
1107 - struct sk_buff *skb)
1108 -{
1109 - struct fcoe_softc *fc;
1110 -
1111 - fc = (struct fcoe_softc *)lp->drv_priv;
1112 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1113 - __skb_queue_head(&fc->fcoe_pending_queue, skb);
1114 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1115 -}
1116 -
1117 -static void fcoe_insert_wait_queue(struct fc_lport *lp,
1118 - struct sk_buff *skb)
1119 -{
1120 - struct fcoe_softc *fc;
1121 -
1122 - fc = (struct fcoe_softc *)lp->drv_priv;
1123 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1124 - __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1125 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1126 -}
1127 diff --git a/drivers/scsi/fcoe/fcoe_if.c b/drivers/scsi/fcoe/fcoe_if.c
1128 deleted file mode 100644
1129 index 73b83ce..0000000
1130 --- a/drivers/scsi/fcoe/fcoe_if.c
1131 +++ /dev/null
1132 @@ -1,496 +0,0 @@
1133 -/*
1134 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
1135 - *
1136 - * This program is free software; you can redistribute it and/or modify it
1137 - * under the terms and conditions of the GNU General Public License,
1138 - * version 2, as published by the Free Software Foundation.
1139 - *
1140 - * This program is distributed in the hope it will be useful, but WITHOUT
1141 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1142 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1143 - * more details.
1144 - *
1145 - * You should have received a copy of the GNU General Public License along with
1146 - * this program; if not, write to the Free Software Foundation, Inc.,
1147 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1148 - *
1149 - * Maintained at www.Open-FCoE.org
1150 - */
1151 -
1152 -/*
1153 - * FCOE protocol file
1154 - */
1155 -
1156 -#include <linux/module.h>
1157 -#include <linux/version.h>
1158 -#include <linux/kernel.h>
1159 -#include <linux/init.h>
1160 -#include <linux/spinlock.h>
1161 -#include <linux/netdevice.h>
1162 -#include <linux/etherdevice.h>
1163 -#include <linux/ethtool.h>
1164 -#include <linux/if_ether.h>
1165 -#include <linux/if_vlan.h>
1166 -#include <net/rtnetlink.h>
1167 -
1168 -#include <scsi/fc/fc_els.h>
1169 -#include <scsi/fc/fc_encaps.h>
1170 -#include <scsi/fc/fc_fs.h>
1171 -#include <scsi/scsi_transport.h>
1172 -#include <scsi/scsi_transport_fc.h>
1173 -
1174 -#include <scsi/libfc/libfc.h>
1175 -
1176 -#include <scsi/fc/fc_fcoe.h>
1177 -#include "fcoe_def.h"
1178 -
1179 -#define FCOE_VERSION "0.1"
1180 -
1181 -#define FCOE_MAX_LUN 255
1182 -#define FCOE_MAX_FCP_TARGET 256
1183 -
1184 -#define FCOE_MIN_XID 0x0004
1185 -#define FCOE_MAX_XID 0x07ef
1186 -
1187 -int debug_fcoe;
1188 -
1189 -struct fcoe_info fcoei = {
1190 - .fcoe_hostlist = LIST_HEAD_INIT(fcoei.fcoe_hostlist),
1191 -};
1192 -
1193 -static struct fcoe_softc *fcoe_find_fc_lport(const char *name)
1194 -{
1195 - struct fcoe_softc *fc;
1196 - struct fc_lport *lp;
1197 - struct fcoe_info *fci = &fcoei;
1198 -
1199 - read_lock(&fci->fcoe_hostlist_lock);
1200 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
1201 - lp = fc->lp;
1202 - if (!strncmp(name, lp->ifname, IFNAMSIZ)) {
1203 - read_unlock(&fci->fcoe_hostlist_lock);
1204 - return fc;
1205 - }
1206 - }
1207 - read_unlock(&fci->fcoe_hostlist_lock);
1208 - return NULL;
1209 -}
1210 -
1211 -/*
1212 - * Convert 48-bit IEEE MAC address to 64-bit FC WWN.
1213 - */
1214 -static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1215 - unsigned int scheme, unsigned int port)
1216 -{
1217 - u64 wwn;
1218 - u64 host_mac;
1219 -
1220 - /* The MAC is in NO, so flip only the low 48 bits */
1221 - host_mac = ((u64) mac[0] << 40) |
1222 - ((u64) mac[1] << 32) |
1223 - ((u64) mac[2] << 24) |
1224 - ((u64) mac[3] << 16) |
1225 - ((u64) mac[4] << 8) |
1226 - (u64) mac[5];
1227 -
1228 - WARN_ON(host_mac >= (1ULL << 48));
1229 - wwn = host_mac | ((u64) scheme << 60);
1230 - switch (scheme) {
1231 - case 1:
1232 - WARN_ON(port != 0);
1233 - break;
1234 - case 2:
1235 - WARN_ON(port >= 0xfff);
1236 - wwn |= (u64) port << 48;
1237 - break;
1238 - default:
1239 - WARN_ON(1);
1240 - break;
1241 - }
1242 -
1243 - return wwn;
1244 -}
1245 -
1246 -static struct scsi_host_template fcoe_driver_template = {
1247 - .module = THIS_MODULE,
1248 - .name = "FCoE Driver",
1249 - .proc_name = FCOE_DRIVER_NAME,
1250 - .queuecommand = fc_queuecommand,
1251 - .eh_abort_handler = fc_eh_abort,
1252 - .eh_device_reset_handler = fc_eh_device_reset,
1253 - .eh_host_reset_handler = fc_eh_host_reset,
1254 - .slave_alloc = fc_slave_alloc,
1255 - .change_queue_depth = fc_change_queue_depth,
1256 - .change_queue_type = fc_change_queue_type,
1257 - .this_id = -1,
1258 - .cmd_per_lun = 32,
1259 - .can_queue = FC_MAX_OUTSTANDING_COMMANDS,
1260 - .use_clustering = ENABLE_CLUSTERING,
1261 - .sg_tablesize = 4,
1262 - .max_sectors = 0xffff,
1263 -};
1264 -
1265 -int fcoe_destroy_interface(const char *ifname)
1266 -{
1267 - int cpu, idx;
1268 - struct fcoe_dev_stats *p;
1269 - struct fcoe_percpu_s *pp;
1270 - struct fcoe_softc *fc;
1271 - struct fcoe_rcv_info *fr;
1272 - struct fcoe_info *fci = &fcoei;
1273 - struct sk_buff_head *list;
1274 - struct sk_buff *skb, *next;
1275 - struct sk_buff *head;
1276 - struct fc_lport *lp;
1277 - u8 flogi_maddr[ETH_ALEN];
1278 -
1279 - fc = fcoe_find_fc_lport(ifname);
1280 - if (!fc)
1281 - return -ENODEV;
1282 -
1283 - lp = fc->lp;
1284 -
1285 - /* Remove the instance from fcoe's list */
1286 - write_lock_bh(&fci->fcoe_hostlist_lock);
1287 - list_del(&fc->list);
1288 - write_unlock_bh(&fci->fcoe_hostlist_lock);
1289 -
1290 - /* Don't listen for Ethernet packets anymore */
1291 - dev_remove_pack(&fc->fcoe_packet_type);
1292 -
1293 - /* Detach from the scsi-ml */
1294 - fc_remove_host(lp->host);
1295 - scsi_remove_host(lp->host);
1296 -
1297 - /* Cleanup the fc_lport */
1298 - fc_lport_destroy(lp);
1299 - fc_fcp_destroy(lp);
1300 - if (lp->emp)
1301 - fc_exch_mgr_free(lp->emp);
1302 -
1303 - /* Delete secondary MAC addresses */
1304 - rtnl_lock();
1305 - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1306 - dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
1307 - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1308 - dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1309 - rtnl_unlock();
1310 -
1311 - /* Free the per-CPU revieve threads */
1312 - for (idx = 0; idx < NR_CPUS; idx++) {
1313 - if (fci->fcoe_percpu[idx]) {
1314 - pp = fci->fcoe_percpu[idx];
1315 - spin_lock_bh(&pp->fcoe_rx_list.lock);
1316 - list = &pp->fcoe_rx_list;
1317 - head = list->next;
1318 - for (skb = head; skb != (struct sk_buff *)list;
1319 - skb = next) {
1320 - next = skb->next;
1321 - fr = fcoe_dev_from_skb(skb);
1322 - if (fr->fr_dev == fc->lp) {
1323 - __skb_unlink(skb, list);
1324 - kfree_skb(skb);
1325 - }
1326 - }
1327 - spin_unlock_bh(&pp->fcoe_rx_list.lock);
1328 - }
1329 - }
1330 -
1331 - /* Free existing skbs */
1332 - fcoe_clean_pending_queue(lp);
1333 -
1334 - /* Free memory used by statistical counters */
1335 - for_each_online_cpu(cpu) {
1336 - p = lp->dev_stats[cpu];
1337 - if (p) {
1338 - lp->dev_stats[cpu] = NULL;
1339 - kfree(p);
1340 - }
1341 - }
1342 -
1343 - /* Release the net_device and Scsi_Host */
1344 - dev_put(fc->real_dev);
1345 - scsi_host_put(lp->host);
1346 - return 0;
1347 -}
1348 -
1349 -/*
1350 - * Return zero if link is OK for use by FCoE.
1351 - * Any permanently-disqualifying conditions have been previously checked.
1352 - * This also updates the speed setting, which may change with link for 100/1000.
1353 - *
1354 - * This function should probably be checking for PAUSE support at some point
1355 - * in the future. Currently Per-priority-pause is not determinable using
1356 - * ethtool, so we shouldn't be restrictive until that problem is resolved.
1357 - */
1358 -int fcoe_link_ok(struct fc_lport *lp)
1359 -{
1360 - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1361 - struct net_device *dev = fc->real_dev;
1362 - struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1363 - int rc = 0;
1364 -
1365 - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1366 - dev = fc->phys_dev;
1367 - if (dev->ethtool_ops->get_settings) {
1368 - dev->ethtool_ops->get_settings(dev, &ecmd);
1369 - lp->link_supported_speeds &=
1370 - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1371 - if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1372 - SUPPORTED_1000baseT_Full))
1373 - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1374 - if (ecmd.supported & SUPPORTED_10000baseT_Full)
1375 - lp->link_supported_speeds |=
1376 - FC_PORTSPEED_10GBIT;
1377 - if (ecmd.speed == SPEED_1000)
1378 - lp->link_speed = FC_PORTSPEED_1GBIT;
1379 - if (ecmd.speed == SPEED_10000)
1380 - lp->link_speed = FC_PORTSPEED_10GBIT;
1381 - }
1382 - } else
1383 - rc = -1;
1384 -
1385 - return rc;
1386 -}
1387 -
1388 -static struct libfc_function_template fcoe_libfc_fcn_templ = {
1389 - .frame_send = fcoe_xmit,
1390 -};
1391 -
1392 -static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
1393 -{
1394 - int i = 0;
1395 - struct fcoe_dev_stats *p;
1396 -
1397 - lp->host = shost;
1398 - lp->drv_priv = (void *)(lp + 1);
1399 -
1400 - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
1401 - FCOE_MIN_XID, FCOE_MAX_XID);
1402 - if (!lp->emp)
1403 - return -ENOMEM;
1404 -
1405 - lp->link_status = 0;
1406 - lp->max_retry_count = 3;
1407 - lp->e_d_tov = 2 * 1000; /* FC-FS default */
1408 - lp->r_a_tov = 2 * 2 * 1000;
1409 - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1410 - FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1411 -
1412 - /*
1413 - * allocate per cpu stats block
1414 - */
1415 - for_each_online_cpu(i) {
1416 - p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
1417 - if (p)
1418 - lp->dev_stats[i] = p;
1419 - }
1420 -
1421 - /* Finish fc_lport configuration */
1422 - fc_lport_config(lp);
1423 -
1424 - return 0;
1425 -}
1426 -
1427 -static int net_config(struct fc_lport *lp)
1428 -{
1429 - u32 mfs;
1430 - u64 wwnn, wwpn;
1431 - struct net_device *net_dev;
1432 - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1433 - u8 flogi_maddr[ETH_ALEN];
1434 -
1435 - /* Require support for get_pauseparam ethtool op. */
1436 - net_dev = fc->real_dev;
1437 - if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN))
1438 - net_dev = vlan_dev_real_dev(net_dev);
1439 - if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam)
1440 - return -EOPNOTSUPP;
1441 -
1442 - fc->phys_dev = net_dev;
1443 -
1444 - /* Do not support for bonding device */
1445 - if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
1446 - (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
1447 - (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
1448 - return -EOPNOTSUPP;
1449 - }
1450 -
1451 - /*
1452 - * Determine max frame size based on underlying device and optional
1453 - * user-configured limit. If the MFS is too low, fcoe_link_ok()
1454 - * will return 0, so do this first.
1455 - */
1456 - mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
1457 - sizeof(struct fcoe_crc_eof));
1458 - fc_set_mfs(lp, mfs);
1459 -
1460 - lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
1461 - if (!fcoe_link_ok(lp))
1462 - lp->link_status |= FC_LINK_UP;
1463 -
1464 - if (fc->real_dev->features & NETIF_F_SG)
1465 - lp->capabilities = TRANS_C_SG;
1466 -
1467 -
1468 - skb_queue_head_init(&fc->fcoe_pending_queue);
1469 -
1470 - memcpy(lp->ifname, fc->real_dev->name, IFNAMSIZ);
1471 -
1472 - /* setup Source Mac Address */
1473 - memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
1474 - fc->real_dev->addr_len);
1475 -
1476 - wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
1477 - fc_set_wwnn(lp, wwnn);
1478 - /* XXX - 3rd arg needs to be vlan id */
1479 - wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
1480 - fc_set_wwpn(lp, wwpn);
1481 -
1482 - /*
1483 - * Add FCoE MAC address as second unicast MAC address
1484 - * or enter promiscuous mode if not capable of listening
1485 - * for multiple unicast MACs.
1486 - */
1487 - rtnl_lock();
1488 - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1489 - dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
1490 - rtnl_unlock();
1491 -
1492 - /*
1493 - * setup the receive function from ethernet driver
1494 - * on the ethertype for the given device
1495 - */
1496 - fc->fcoe_packet_type.func = fcoe_rcv;
1497 - fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
1498 - fc->fcoe_packet_type.dev = fc->real_dev;
1499 - dev_add_pack(&fc->fcoe_packet_type);
1500 -
1501 - return 0;
1502 -}
1503 -
1504 -static void shost_config(struct fc_lport *lp)
1505 -{
1506 - lp->host->max_lun = FCOE_MAX_LUN;
1507 - lp->host->max_id = FCOE_MAX_FCP_TARGET;
1508 - lp->host->max_channel = 0;
1509 - lp->host->transportt = fcoe_transport_template;
1510 -}
1511 -
1512 -static int libfc_config(struct fc_lport *lp)
1513 -{
1514 - /* Set the function pointers set by the LLDD */
1515 - memcpy(&lp->tt, &fcoe_libfc_fcn_templ,
1516 - sizeof(struct libfc_function_template));
1517 -
1518 - if (fc_fcp_init(lp))
1519 - return -ENOMEM;
1520 - fc_exch_init(lp);
1521 - fc_lport_init(lp);
1522 - fc_rport_init(lp);
1523 - fc_disc_init(lp);
1524 -
1525 - return 0;
1526 -}
1527 -
1528 -/*
1529 - * This function creates the fcoe interface
1530 - * create struct fcdev which is a shared structure between opefc
1531 - * and transport level protocol.
1532 - */
1533 -int fcoe_create_interface(const char *ifname)
1534 -{
1535 - struct fc_lport *lp = NULL;
1536 - struct fcoe_softc *fc;
1537 - struct net_device *net_dev;
1538 - struct Scsi_Host *shost;
1539 - struct fcoe_info *fci = &fcoei;
1540 - int rc = 0;
1541 -
1542 - net_dev = dev_get_by_name(&init_net, ifname);
1543 - if (net_dev == NULL) {
1544 - FC_DBG("could not get network device for %s",
1545 - ifname);
1546 - return -ENODEV;
1547 - }
1548 -
1549 - if (fcoe_find_fc_lport(net_dev->name) != NULL) {
1550 - rc = -EEXIST;
1551 - goto out_put_dev;
1552 - }
1553 -
1554 - shost = scsi_host_alloc(&fcoe_driver_template,
1555 - sizeof(struct fc_lport) +
1556 - sizeof(struct fcoe_softc));
1557 -
1558 - if (!shost) {
1559 - FC_DBG("Could not allocate host structure\n");
1560 - rc = -ENOMEM;
1561 - goto out_put_dev;
1562 - }
1563 -
1564 - lp = shost_priv(shost);
1565 - rc = lport_config(lp, shost);
1566 - if (rc)
1567 - goto out_host_put;
1568 -
1569 - /* Configure the fcoe_softc */
1570 - fc = (struct fcoe_softc *)lp->drv_priv;
1571 - fc->lp = lp;
1572 - fc->real_dev = net_dev;
1573 - shost_config(lp);
1574 -
1575 -
1576 - /* Add the new host to the SCSI-ml */
1577 - rc = scsi_add_host(lp->host, NULL);
1578 - if (rc) {
1579 - FC_DBG("error on scsi_add_host\n");
1580 - goto out_lp_destroy;
1581 - }
1582 -
1583 - sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
1584 - FCOE_DRIVER_NAME, FCOE_VERSION,
1585 - ifname);
1586 -
1587 - /* Configure netdev and networking properties of the lp */
1588 - rc = net_config(lp);
1589 - if (rc)
1590 - goto out_lp_destroy;
1591 -
1592 - /* Initialize the library */
1593 - rc = libfc_config(lp);
1594 - if (rc)
1595 - goto out_lp_destroy;
1596 -
1597 - write_lock_bh(&fci->fcoe_hostlist_lock);
1598 - list_add_tail(&fc->list, &fci->fcoe_hostlist);
1599 - write_unlock_bh(&fci->fcoe_hostlist_lock);
1600 -
1601 - lp->boot_time = jiffies;
1602 -
1603 - fc_fabric_login(lp);
1604 -
1605 - return rc;
1606 -
1607 -out_lp_destroy:
1608 - fc_exch_mgr_free(lp->emp); /* Free the EM */
1609 -out_host_put:
1610 - scsi_host_put(lp->host);
1611 -out_put_dev:
1612 - dev_put(net_dev);
1613 - return rc;
1614 -}
1615 -
1616 -void fcoe_clean_pending_queue(struct fc_lport *lp)
1617 -{
1618 - struct fcoe_softc *fc = lp->drv_priv;
1619 - struct sk_buff *skb;
1620 -
1621 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1622 - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1623 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1624 - kfree_skb(skb);
1625 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1626 - }
1627 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1628 -}
1629 diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
1630 new file mode 100644
1631 index 0000000..3cf5ad6
1632 --- /dev/null
1633 +++ b/drivers/scsi/fcoe/fcoe_sw.c
1634 @@ -0,0 +1,532 @@
1635 +/*
1636 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
1637 + *
1638 + * This program is free software; you can redistribute it and/or modify it
1639 + * under the terms and conditions of the GNU General Public License,
1640 + * version 2, as published by the Free Software Foundation.
1641 + *
1642 + * This program is distributed in the hope it will be useful, but WITHOUT
1643 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1644 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1645 + * more details.
1646 + *
1647 + * You should have received a copy of the GNU General Public License along with
1648 + * this program; if not, write to the Free Software Foundation, Inc.,
1649 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1650 + *
1651 + * Maintained at www.Open-FCoE.org
1652 + */
1653 +
1654 +/*
1655 + * FCOE protocol file
1656 + */
1657 +
1658 +#include <linux/module.h>
1659 +#include <linux/version.h>
1660 +#include <linux/kernel.h>
1661 +#include <linux/init.h>
1662 +#include <linux/spinlock.h>
1663 +#include <linux/netdevice.h>
1664 +#include <linux/etherdevice.h>
1665 +#include <linux/ethtool.h>
1666 +#include <linux/if_ether.h>
1667 +#include <linux/if_vlan.h>
1668 +#include <net/rtnetlink.h>
1669 +
1670 +#include <scsi/fc/fc_els.h>
1671 +#include <scsi/fc/fc_encaps.h>
1672 +#include <scsi/fc/fc_fs.h>
1673 +#include <scsi/scsi_transport.h>
1674 +#include <scsi/scsi_transport_fc.h>
1675 +
1676 +#include <scsi/libfc/libfc.h>
1677 +
1678 +#include <scsi/fc/fc_fcoe.h>
1679 +#include "fcoe_def.h"
1680 +
1681 +#define FCOE_VERSION "0.1"
1682 +
1683 +#define FCOE_MAX_LUN 255
1684 +#define FCOE_MAX_FCP_TARGET 256
1685 +
1686 +#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
1687 +
1688 +#define FCOE_MIN_XID 0x0004
1689 +#define FCOE_MAX_XID 0x07ef
1690 +
1691 +LIST_HEAD(fcoe_hostlist);
1692 +DEFINE_RWLOCK(fcoe_hostlist_lock);
1693 +DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
1694 +struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
1695 +
1696 +static struct scsi_transport_template *fcoe_transport_template;
1697 +
1698 +static int fcoe_reset(struct Scsi_Host *shost)
1699 +{
1700 + struct fc_lport *lport = shost_priv(shost);
1701 + fc_lport_reset(lport);
1702 + return 0;
1703 +}
1704 +
1705 +struct fc_function_template fcoe_transport_function = {
1706 + .show_host_node_name = 1,
1707 + .show_host_port_name = 1,
1708 + .show_host_supported_classes = 1,
1709 + .show_host_supported_fc4s = 1,
1710 + .show_host_active_fc4s = 1,
1711 + .show_host_maxframe_size = 1,
1712 +
1713 + .show_host_port_id = 1,
1714 + .show_host_supported_speeds = 1,
1715 + .get_host_speed = fc_get_host_speed,
1716 + .show_host_speed = 1,
1717 + .show_host_port_type = 1,
1718 + .get_host_port_state = fc_get_host_port_state,
1719 + .show_host_port_state = 1,
1720 + .show_host_symbolic_name = 1,
1721 +
1722 + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
1723 + .show_rport_maxframe_size = 1,
1724 + .show_rport_supported_classes = 1,
1725 +
1726 + .show_host_fabric_name = 1,
1727 + .show_starget_node_name = 1,
1728 + .show_starget_port_name = 1,
1729 + .show_starget_port_id = 1,
1730 + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
1731 + .show_rport_dev_loss_tmo = 1,
1732 + .get_fc_host_stats = fc_get_host_stats,
1733 + .issue_fc_host_lip = fcoe_reset,
1734 +
1735 + .terminate_rport_io = fc_rport_terminate_io,
1736 +};
1737 +
1738 +static struct fcoe_softc *fcoe_find_fc_lport(const struct net_device *netdev)
1739 +{
1740 + struct fcoe_softc *fc;
1741 +
1742 + read_lock(&fcoe_hostlist_lock);
1743 + list_for_each_entry(fc, &fcoe_hostlist, list) {
1744 + if (fc->real_dev == netdev) {
1745 + read_unlock(&fcoe_hostlist_lock);
1746 + return fc;
1747 + }
1748 + }
1749 + read_unlock(&fcoe_hostlist_lock);
1750 + return NULL;
1751 +}
1752 +
1753 +/*
1754 + * Convert 48-bit IEEE MAC address to 64-bit FC WWN.
1755 + */
1756 +static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1757 + unsigned int scheme, unsigned int port)
1758 +{
1759 + u64 wwn;
1760 + u64 host_mac;
1761 +
1762 + /* The MAC is in network order (NO), so flip only the low 48 bits */
1763 + host_mac = ((u64) mac[0] << 40) |
1764 + ((u64) mac[1] << 32) |
1765 + ((u64) mac[2] << 24) |
1766 + ((u64) mac[3] << 16) |
1767 + ((u64) mac[4] << 8) |
1768 + (u64) mac[5];
1769 +
1770 + WARN_ON(host_mac >= (1ULL << 48));
1771 + wwn = host_mac | ((u64) scheme << 60);
1772 + switch (scheme) {
1773 + case 1:
1774 + WARN_ON(port != 0);
1775 + break;
1776 + case 2:
1777 + WARN_ON(port >= 0xfff);
1778 + wwn |= (u64) port << 48;
1779 + break;
1780 + default:
1781 + WARN_ON(1);
1782 + break;
1783 + }
1784 +
1785 + return wwn;
1786 +}
1787 +
1788 +static struct scsi_host_template fcoe_driver_template = {
1789 + .module = THIS_MODULE,
1790 + .name = "FCoE Driver",
1791 + .proc_name = FCOE_DRIVER_NAME,
1792 + .queuecommand = fc_queuecommand,
1793 + .eh_abort_handler = fc_eh_abort,
1794 + .eh_device_reset_handler = fc_eh_device_reset,
1795 + .eh_host_reset_handler = fc_eh_host_reset,
1796 + .slave_alloc = fc_slave_alloc,
1797 + .change_queue_depth = fc_change_queue_depth,
1798 + .change_queue_type = fc_change_queue_type,
1799 + .this_id = -1,
1800 + .cmd_per_lun = 32,
1801 + .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
1802 + .use_clustering = ENABLE_CLUSTERING,
1803 + .sg_tablesize = 4,
1804 + .max_sectors = 0xffff,
1805 +};
1806 +
1807 +int fcoe_destroy_interface(struct net_device *netdev)
1808 +{
1809 + int cpu, idx;
1810 + struct fcoe_dev_stats *p;
1811 + struct fcoe_percpu_s *pp;
1812 + struct fcoe_softc *fc;
1813 + struct fcoe_rcv_info *fr;
1814 + struct sk_buff_head *list;
1815 + struct sk_buff *skb, *next;
1816 + struct sk_buff *head;
1817 + struct fc_lport *lp;
1818 + u8 flogi_maddr[ETH_ALEN];
1819 +
1820 + fc = fcoe_find_fc_lport(netdev);
1821 + if (!fc)
1822 + return -ENODEV;
1823 +
1824 + lp = fc->lp;
1825 +
1826 + /* Remove the instance from fcoe's list */
1827 + write_lock_bh(&fcoe_hostlist_lock);
1828 + list_del(&fc->list);
1829 + write_unlock_bh(&fcoe_hostlist_lock);
1830 +
1831 + /* Don't listen for Ethernet packets anymore */
1832 + dev_remove_pack(&fc->fcoe_packet_type);
1833 +
1834 + /* Detach from the scsi-ml */
1835 + fc_remove_host(lp->host);
1836 + scsi_remove_host(lp->host);
1837 +
1838 + /* Cleanup the fc_lport */
1839 + fc_lport_destroy(lp);
1840 + fc_fcp_destroy(lp);
1841 + if (lp->emp)
1842 + fc_exch_mgr_free(lp->emp);
1843 +
1844 + /* Delete secondary MAC addresses */
1845 + rtnl_lock();
1846 + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1847 + dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
1848 + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1849 + dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1850 + rtnl_unlock();
1851 +
1852 + /* Free the per-CPU receive threads */
1853 + for (idx = 0; idx < NR_CPUS; idx++) {
1854 + if (fcoe_percpu[idx]) {
1855 + pp = fcoe_percpu[idx];
1856 + spin_lock_bh(&pp->fcoe_rx_list.lock);
1857 + list = &pp->fcoe_rx_list;
1858 + head = list->next;
1859 + for (skb = head; skb != (struct sk_buff *)list;
1860 + skb = next) {
1861 + next = skb->next;
1862 + fr = fcoe_dev_from_skb(skb);
1863 + if (fr->fr_dev == fc->lp) {
1864 + __skb_unlink(skb, list);
1865 + kfree_skb(skb);
1866 + }
1867 + }
1868 + spin_unlock_bh(&pp->fcoe_rx_list.lock);
1869 + }
1870 + }
1871 +
1872 + /* Free existing skbs */
1873 + fcoe_clean_pending_queue(lp);
1874 +
1875 + /* Free memory used by statistical counters */
1876 + for_each_online_cpu(cpu) {
1877 + p = lp->dev_stats[cpu];
1878 + if (p) {
1879 + lp->dev_stats[cpu] = NULL;
1880 + kfree(p);
1881 + }
1882 + }
1883 +
1884 + /* Release the net_device and Scsi_Host */
1885 + dev_put(fc->real_dev);
1886 + scsi_host_put(lp->host);
1887 + return 0;
1888 +}
1889 +
1890 +/*
1891 + * Return zero if link is OK for use by FCoE.
1892 + * Any permanently-disqualifying conditions have been previously checked.
1893 + * This also updates the speed setting, which may change with link for 100/1000.
1894 + *
1895 + * This function should probably be checking for PAUSE support at some point
1896 + * in the future. Currently Per-priority-pause is not determinable using
1897 + * ethtool, so we shouldn't be restrictive until that problem is resolved.
1898 + */
1899 +int fcoe_link_ok(struct fc_lport *lp)
1900 +{
1901 + struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1902 + struct net_device *dev = fc->real_dev;
1903 + struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1904 + int rc = 0;
1905 +
1906 + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1907 + dev = fc->phys_dev;
1908 + if (dev->ethtool_ops->get_settings) {
1909 + dev->ethtool_ops->get_settings(dev, &ecmd);
1910 + lp->link_supported_speeds &=
1911 + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1912 + if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1913 + SUPPORTED_1000baseT_Full))
1914 + lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1915 + if (ecmd.supported & SUPPORTED_10000baseT_Full)
1916 + lp->link_supported_speeds |=
1917 + FC_PORTSPEED_10GBIT;
1918 + if (ecmd.speed == SPEED_1000)
1919 + lp->link_speed = FC_PORTSPEED_1GBIT;
1920 + if (ecmd.speed == SPEED_10000)
1921 + lp->link_speed = FC_PORTSPEED_10GBIT;
1922 + }
1923 + } else
1924 + rc = -1;
1925 +
1926 + return rc;
1927 +}
1928 +
1929 +static struct libfc_function_template fcoe_libfc_fcn_templ = {
1930 + .frame_send = fcoe_xmit,
1931 +};
1932 +
1933 +static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
1934 +{
1935 + int i = 0;
1936 + struct fcoe_dev_stats *p;
1937 +
1938 + lp->host = shost;
1939 + lp->drv_priv = (void *)(lp + 1);
1940 +
1941 + lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
1942 + FCOE_MIN_XID, FCOE_MAX_XID);
1943 + if (!lp->emp)
1944 + return -ENOMEM;
1945 +
1946 + lp->link_status = 0;
1947 + lp->max_retry_count = 3;
1948 + lp->e_d_tov = 2 * 1000; /* FC-FS default */
1949 + lp->r_a_tov = 2 * 2 * 1000;
1950 + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1951 + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1952 +
1953 + /*
1954 + * allocate per cpu stats block
1955 + */
1956 + for_each_online_cpu(i) {
1957 + p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
1958 + if (p)
1959 + lp->dev_stats[i] = p;
1960 + }
1961 +
1962 + /* Finish fc_lport configuration */
1963 + fc_lport_config(lp);
1964 +
1965 + return 0;
1966 +}
1967 +
1968 +static int net_config(struct fc_lport *lp)
1969 +{
1970 + u32 mfs;
1971 + u64 wwnn, wwpn;
1972 + struct net_device *net_dev;
1973 + struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1974 + u8 flogi_maddr[ETH_ALEN];
1975 +
1976 + /* Require support for get_pauseparam ethtool op. */
1977 + net_dev = fc->real_dev;
1978 + if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN))
1979 + net_dev = vlan_dev_real_dev(net_dev);
1980 + if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam)
1981 + return -EOPNOTSUPP;
1982 +
1983 + fc->phys_dev = net_dev;
1984 +
1985 + /* Bonding devices are not supported */
1986 + if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
1987 + (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
1988 + (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
1989 + return -EOPNOTSUPP;
1990 + }
1991 +
1992 + /*
1993 + * Determine max frame size based on underlying device and optional
1994 + * user-configured limit. If the MFS is too low, fcoe_link_ok()
1995 + * will return 0, so do this first.
1996 + */
1997 + mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
1998 + sizeof(struct fcoe_crc_eof));
1999 + fc_set_mfs(lp, mfs);
2000 +
2001 + lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
2002 + if (!fcoe_link_ok(lp))
2003 + lp->link_status |= FC_LINK_UP;
2004 +
2005 + if (fc->real_dev->features & NETIF_F_SG)
2006 + lp->sg_supp = 1;
2007 +
2008 +
2009 + skb_queue_head_init(&fc->fcoe_pending_queue);
2010 +
2011 + /* Set up the source MAC address */
2012 + memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
2013 + fc->real_dev->addr_len);
2014 +
2015 + wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
2016 + fc_set_wwnn(lp, wwnn);
2017 + /* XXX - 3rd arg needs to be vlan id */
2018 + wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
2019 + fc_set_wwpn(lp, wwpn);
2020 +
2021 + /*
2022 + * Add FCoE MAC address as second unicast MAC address
2023 + * or enter promiscuous mode if not capable of listening
2024 + * for multiple unicast MACs.
2025 + */
2026 + rtnl_lock();
2027 + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
2028 + dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
2029 + rtnl_unlock();
2030 +
2031 + /*
2032 + * Set up the receive function from the Ethernet driver
2033 + * for the FCoE ethertype on the given device.
2034 + */
2035 + fc->fcoe_packet_type.func = fcoe_rcv;
2036 + fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
2037 + fc->fcoe_packet_type.dev = fc->real_dev;
2038 + dev_add_pack(&fc->fcoe_packet_type);
2039 +
2040 + return 0;
2041 +}
2042 +
2043 +static void shost_config(struct fc_lport *lp)
2044 +{
2045 + lp->host->max_lun = FCOE_MAX_LUN;
2046 + lp->host->max_id = FCOE_MAX_FCP_TARGET;
2047 + lp->host->max_channel = 0;
2048 + lp->host->transportt = fcoe_transport_template;
2049 +}
2050 +
2051 +static int libfc_config(struct fc_lport *lp)
2052 +{
2053 + /* Set the function pointers set by the LLDD */
2054 + memcpy(&lp->tt, &fcoe_libfc_fcn_templ,
2055 + sizeof(struct libfc_function_template));
2056 +
2057 + if (fc_fcp_init(lp))
2058 + return -ENOMEM;
2059 + fc_exch_init(lp);
2060 + fc_lport_init(lp);
2061 + fc_rport_init(lp);
2062 + fc_disc_init(lp);
2063 +
2064 + return 0;
2065 +}
2066 +
2067 +/*
2068 + * This function creates the fcoe interface. It creates the
2069 + * struct fcdev, which is a structure shared between openfc
2070 + * and the transport-level protocol.
2071 + */
2072 +int fcoe_create_interface(struct net_device *netdev)
2073 +{
2074 + struct fc_lport *lp = NULL;
2075 + struct fcoe_softc *fc;
2076 + struct Scsi_Host *shost;
2077 + int rc = 0;
2078 +
2079 + if (fcoe_find_fc_lport(netdev) != NULL)
2080 + return -EEXIST;
2081 +
2082 + shost = scsi_host_alloc(&fcoe_driver_template,
2083 + sizeof(struct fc_lport) +
2084 + sizeof(struct fcoe_softc));
2085 +
2086 + if (!shost) {
2087 + FC_DBG("Could not allocate host structure\n");
2088 + return -ENOMEM;
2089 + }
2090 +
2091 + lp = shost_priv(shost);
2092 + rc = lport_config(lp, shost);
2093 + if (rc)
2094 + goto out_host_put;
2095 +
2096 + /* Configure the fcoe_softc */
2097 + fc = (struct fcoe_softc *)lp->drv_priv;
2098 + fc->lp = lp;
2099 + fc->real_dev = netdev;
2100 + shost_config(lp);
2101 +
2102 +
2103 + /* Add the new host to the SCSI-ml */
2104 + rc = scsi_add_host(lp->host, NULL);
2105 + if (rc) {
2106 + FC_DBG("error on scsi_add_host\n");
2107 + goto out_lp_destroy;
2108 + }
2109 +
2110 + sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
2111 + FCOE_DRIVER_NAME, FCOE_VERSION,
2112 + netdev->name);
2113 +
2114 + /* Configure netdev and networking properties of the lp */
2115 + rc = net_config(lp);
2116 + if (rc)
2117 + goto out_lp_destroy;
2118 +
2119 + /* Initialize the library */
2120 + rc = libfc_config(lp);
2121 + if (rc)
2122 + goto out_lp_destroy;
2123 +
2124 + write_lock_bh(&fcoe_hostlist_lock);
2125 + list_add_tail(&fc->list, &fcoe_hostlist);
2126 + write_unlock_bh(&fcoe_hostlist_lock);
2127 +
2128 + lp->boot_time = jiffies;
2129 +
2130 + fc_fabric_login(lp);
2131 +
2132 + dev_hold(netdev);
2133 + return rc;
2134 +
2135 +out_lp_destroy:
2136 + fc_exch_mgr_free(lp->emp); /* Free the EM */
2137 +out_host_put:
2138 + scsi_host_put(lp->host);
2139 + return rc;
2140 +}
2141 +
2142 +void fcoe_clean_pending_queue(struct fc_lport *lp)
2143 +{
2144 + struct fcoe_softc *fc = lp->drv_priv;
2145 + struct sk_buff *skb;
2146 +
2147 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
2148 + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
2149 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2150 + kfree_skb(skb);
2151 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
2152 + }
2153 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2154 +}
2155 +
2156 +int __init fcoe_sw_init(void)
2157 +{
2158 + fcoe_transport_template =
2159 + fc_attach_transport(&fcoe_transport_function);
2160 + return fcoe_transport_template ? 0 : -1;
2161 +}
2162 +
2163 +void __exit fcoe_sw_exit(void)
2164 +{
2165 + fc_release_transport(fcoe_transport_template);
2166 +}
2167 diff --git a/drivers/scsi/fcoe/fcoeinit.c b/drivers/scsi/fcoe/fcoeinit.c
2168 deleted file mode 100644
2169 index 7d52ed5..0000000
2170 --- a/drivers/scsi/fcoe/fcoeinit.c
2171 +++ /dev/null
2172 @@ -1,440 +0,0 @@
2173 -/*
2174 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
2175 - *
2176 - * This program is free software; you can redistribute it and/or modify it
2177 - * under the terms and conditions of the GNU General Public License,
2178 - * version 2, as published by the Free Software Foundation.
2179 - *
2180 - * This program is distributed in the hope it will be useful, but WITHOUT
2181 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2182 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2183 - * more details.
2184 - *
2185 - * You should have received a copy of the GNU General Public License along with
2186 - * this program; if not, write to the Free Software Foundation, Inc.,
2187 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2188 - *
2189 - * Maintained at www.Open-FCoE.org
2190 - */
2191 -
2192 -#include <linux/module.h>
2193 -#include <linux/version.h>
2194 -#include <linux/kernel.h>
2195 -#include <linux/kthread.h>
2196 -#include <linux/spinlock.h>
2197 -#include <linux/cpu.h>
2198 -#include <linux/netdevice.h>
2199 -#include <linux/etherdevice.h>
2200 -#include <linux/ethtool.h>
2201 -#include <linux/if_ether.h>
2202 -#include <linux/fs.h>
2203 -#include <linux/sysfs.h>
2204 -#include <linux/ctype.h>
2205 -
2206 -#include <scsi/libfc/libfc.h>
2207 -
2208 -#include "fcoe_def.h"
2209 -
2210 -MODULE_AUTHOR("Open-FCoE.org");
2211 -MODULE_DESCRIPTION("FCoE");
2212 -MODULE_LICENSE("GPL");
2213 -
2214 -/*
2215 - * Static functions and variables definations
2216 - */
2217 -#ifdef CONFIG_HOTPLUG_CPU
2218 -static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
2219 -#endif /* CONFIG_HOTPLUG_CPU */
2220 -static int fcoe_device_notification(struct notifier_block *, ulong, void *);
2221 -static void fcoe_dev_setup(void);
2222 -static void fcoe_dev_cleanup(void);
2223 -
2224 -struct scsi_transport_template *fcoe_transport_template;
2225 -
2226 -static int fcoe_reset(struct Scsi_Host *shost)
2227 -{
2228 - struct fc_lport *lport = shost_priv(shost);
2229 - fc_lport_reset(lport);
2230 - return 0;
2231 -}
2232 -
2233 -struct fc_function_template fcoe_transport_function = {
2234 - .show_host_node_name = 1,
2235 - .show_host_port_name = 1,
2236 - .show_host_supported_classes = 1,
2237 - .show_host_supported_fc4s = 1,
2238 - .show_host_active_fc4s = 1,
2239 - .show_host_maxframe_size = 1,
2240 -
2241 - .show_host_port_id = 1,
2242 - .show_host_supported_speeds = 1,
2243 - .get_host_speed = fc_get_host_speed,
2244 - .show_host_speed = 1,
2245 - .show_host_port_type = 1,
2246 - .get_host_port_state = fc_get_host_port_state,
2247 - .show_host_port_state = 1,
2248 - .show_host_symbolic_name = 1,
2249 -
2250 - .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
2251 - .show_rport_maxframe_size = 1,
2252 - .show_rport_supported_classes = 1,
2253 -
2254 - .show_host_fabric_name = 1,
2255 - .show_starget_node_name = 1,
2256 - .show_starget_port_name = 1,
2257 - .show_starget_port_id = 1,
2258 - .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2259 - .show_rport_dev_loss_tmo = 1,
2260 - .get_fc_host_stats = fc_get_host_stats,
2261 - .issue_fc_host_lip = fcoe_reset,
2262 -
2263 - .terminate_rport_io = fc_rport_terminate_io,
2264 -};
2265 -
2266 -struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
2267 -
2268 -#ifdef CONFIG_HOTPLUG_CPU
2269 -static struct notifier_block fcoe_cpu_notifier = {
2270 - .notifier_call = fcoe_cpu_callback,
2271 -};
2272 -#endif /* CONFIG_HOTPLUG_CPU */
2273 -
2274 -/*
2275 - * notification function from net device
2276 - */
2277 -static struct notifier_block fcoe_notifier = {
2278 - .notifier_call = fcoe_device_notification,
2279 -};
2280 -
2281 -#ifdef CONFIG_HOTPLUG_CPU
2282 -/*
2283 - * create percpu stats block
2284 - * called by cpu add/remove notifier
2285 - */
2286 -static void fcoe_create_percpu_data(int cpu)
2287 -{
2288 - struct fc_lport *lp;
2289 - struct fcoe_softc *fc;
2290 - struct fcoe_dev_stats *p;
2291 - struct fcoe_info *fci = &fcoei;
2292 -
2293 - write_lock_bh(&fci->fcoe_hostlist_lock);
2294 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
2295 - lp = fc->lp;
2296 - if (lp->dev_stats[cpu] == NULL) {
2297 - p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
2298 - if (p)
2299 - lp->dev_stats[cpu] = p;
2300 - }
2301 - }
2302 - write_unlock_bh(&fci->fcoe_hostlist_lock);
2303 -}
2304 -
2305 -/*
2306 - * destroy percpu stats block
2307 - * called by cpu add/remove notifier
2308 - */
2309 -static void fcoe_destroy_percpu_data(int cpu)
2310 -{
2311 - struct fcoe_dev_stats *p;
2312 - struct fc_lport *lp;
2313 - struct fcoe_softc *fc;
2314 - struct fcoe_info *fci = &fcoei;
2315 -
2316 - write_lock_bh(&fci->fcoe_hostlist_lock);
2317 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
2318 - lp = fc->lp;
2319 - p = lp->dev_stats[cpu];
2320 - if (p != NULL) {
2321 - lp->dev_stats[cpu] = NULL;
2322 - kfree(p);
2323 - }
2324 - }
2325 - write_unlock_bh(&fci->fcoe_hostlist_lock);
2326 -}
2327 -
2328 -/*
2329 - * Get notified when a cpu comes on/off. Be hotplug friendly.
2330 - */
2331 -static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
2332 - void *hcpu)
2333 -{
2334 - unsigned int cpu = (unsigned long)hcpu;
2335 -
2336 - switch (action) {
2337 - case CPU_ONLINE:
2338 - fcoe_create_percpu_data(cpu);
2339 - break;
2340 - case CPU_DEAD:
2341 - fcoe_destroy_percpu_data(cpu);
2342 - break;
2343 - default:
2344 - break;
2345 - }
2346 - return NOTIFY_OK;
2347 -}
2348 -#endif /* CONFIG_HOTPLUG_CPU */
2349 -
2350 -/*
2351 - * function to setup link change notification interface
2352 - */
2353 -static void fcoe_dev_setup(void)
2354 -{
2355 - /*
2356 - * here setup a interface specific wd time to
2357 - * monitor the link state
2358 - */
2359 - register_netdevice_notifier(&fcoe_notifier);
2360 -}
2361 -
2362 -/*
2363 - * function to cleanup link change notification interface
2364 - */
2365 -static void fcoe_dev_cleanup(void)
2366 -{
2367 - unregister_netdevice_notifier(&fcoe_notifier);
2368 -}
2369 -
2370 -/*
2371 - * This function is called by the ethernet driver
2372 - * this is called in case of link change event
2373 - */
2374 -static int fcoe_device_notification(struct notifier_block *notifier,
2375 - ulong event, void *ptr)
2376 -{
2377 - struct fc_lport *lp = NULL;
2378 - struct net_device *real_dev = ptr;
2379 - struct fcoe_softc *fc;
2380 - struct fcoe_dev_stats *stats;
2381 - struct fcoe_info *fci = &fcoei;
2382 - u16 new_status;
2383 - u32 mfs;
2384 - int rc = NOTIFY_OK;
2385 -
2386 - read_lock(&fci->fcoe_hostlist_lock);
2387 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
2388 - if (fc->real_dev == real_dev) {
2389 - lp = fc->lp;
2390 - break;
2391 - }
2392 - }
2393 - read_unlock(&fci->fcoe_hostlist_lock);
2394 - if (lp == NULL) {
2395 - rc = NOTIFY_DONE;
2396 - goto out;
2397 - }
2398 -
2399 - new_status = lp->link_status;
2400 - switch (event) {
2401 - case NETDEV_DOWN:
2402 - case NETDEV_GOING_DOWN:
2403 - new_status &= ~FC_LINK_UP;
2404 - break;
2405 - case NETDEV_UP:
2406 - case NETDEV_CHANGE:
2407 - new_status &= ~FC_LINK_UP;
2408 - if (!fcoe_link_ok(lp))
2409 - new_status |= FC_LINK_UP;
2410 - break;
2411 - case NETDEV_CHANGEMTU:
2412 - mfs = fc->real_dev->mtu -
2413 - (sizeof(struct fcoe_hdr) +
2414 - sizeof(struct fcoe_crc_eof));
2415 - if (fc->user_mfs && fc->user_mfs < mfs)
2416 - mfs = fc->user_mfs;
2417 - if (mfs >= FC_MIN_MAX_FRAME)
2418 - fc_set_mfs(lp, mfs);
2419 - new_status &= ~FC_LINK_UP;
2420 - if (!fcoe_link_ok(lp))
2421 - new_status |= FC_LINK_UP;
2422 - break;
2423 - case NETDEV_REGISTER:
2424 - break;
2425 - default:
2426 - FC_DBG("unknown event %ld call", event);
2427 - }
2428 - if (lp->link_status != new_status) {
2429 - if ((new_status & FC_LINK_UP) == FC_LINK_UP)
2430 - fc_linkup(lp);
2431 - else {
2432 - stats = lp->dev_stats[smp_processor_id()];
2433 - stats->LinkFailureCount++;
2434 - fc_linkdown(lp);
2435 - fcoe_clean_pending_queue(lp);
2436 - }
2437 - }
2438 -out:
2439 - return rc;
2440 -}
2441 -
2442 -static void trimstr(char *str, int len)
2443 -{
2444 - char *cp = str + len;
2445 - while (--cp >= str && *cp == '\n')
2446 - *cp = '\0';
2447 -}
2448 -
2449 -static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr,
2450 - const char *buffer, size_t size)
2451 -{
2452 - char ifname[40];
2453 - strcpy(ifname, buffer);
2454 - trimstr(ifname, strlen(ifname));
2455 - fcoe_destroy_interface(ifname);
2456 - return size;
2457 -}
2458 -
2459 -static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr,
2460 - const char *buffer, size_t size)
2461 -{
2462 - char ifname[40];
2463 - strcpy(ifname, buffer);
2464 - trimstr(ifname, strlen(ifname));
2465 - fcoe_create_interface(ifname);
2466 - return size;
2467 -}
2468 -
2469 -static const struct kobj_attribute fcoe_destroyattr = \
2470 - __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy);
2471 -static const struct kobj_attribute fcoe_createattr = \
2472 - __ATTR(create, S_IWUSR, NULL, fcoe_create);
2473 -
2474 -/*
2475 - * Initialization routine
2476 - * 1. Will create fc transport software structure
2477 - * 2. initialize the link list of port information structure
2478 - */
2479 -static int __init fcoeinit(void)
2480 -{
2481 - int rc = 0;
2482 - int cpu;
2483 - struct fcoe_percpu_s *p;
2484 - struct fcoe_info *fci = &fcoei;
2485 -
2486 - rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
2487 - &fcoe_destroyattr.attr);
2488 - if (!rc)
2489 - rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
2490 - &fcoe_createattr.attr);
2491 -
2492 - if (rc)
2493 - return rc;
2494 -
2495 - rwlock_init(&fci->fcoe_hostlist_lock);
2496 -
2497 -#ifdef CONFIG_HOTPLUG_CPU
2498 - register_cpu_notifier(&fcoe_cpu_notifier);
2499 -#endif /* CONFIG_HOTPLUG_CPU */
2500 -
2501 - /*
2502 - * initialize per CPU interrupt thread
2503 - */
2504 - for_each_online_cpu(cpu) {
2505 - p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
2506 - if (p) {
2507 - p->thread = kthread_create(fcoe_percpu_receive_thread,
2508 - (void *)p,
2509 - "fcoethread/%d", cpu);
2510 -
2511 - /*
2512 - * if there is no error then bind the thread to the cpu
2513 - * initialize the semaphore and skb queue head
2514 - */
2515 - if (likely(!IS_ERR(p->thread))) {
2516 - p->cpu = cpu;
2517 - fci->fcoe_percpu[cpu] = p;
2518 - skb_queue_head_init(&p->fcoe_rx_list);
2519 - kthread_bind(p->thread, cpu);
2520 - wake_up_process(p->thread);
2521 - } else {
2522 - fci->fcoe_percpu[cpu] = NULL;
2523 - kfree(p);
2524 -
2525 - }
2526 - }
2527 - }
2528 - if (rc < 0) {
2529 - FC_DBG("failed to initialize proc intrerface\n");
2530 - rc = -ENODEV;
2531 - goto out_chrdev;
2532 - }
2533 -
2534 - /*
2535 - * setup link change notification
2536 - */
2537 - fcoe_dev_setup();
2538 -
2539 - init_timer(&fci->timer);
2540 - fci->timer.data = (ulong) fci;
2541 - fci->timer.function = fcoe_watchdog;
2542 - fci->timer.expires = (jiffies + (10 * HZ));
2543 - add_timer(&fci->timer);
2544 -
2545 - fcoe_transport_template =
2546 - fc_attach_transport(&fcoe_transport_function);
2547 -
2548 - if (fcoe_transport_template == NULL) {
2549 - FC_DBG("fail to attach fc transport");
2550 - return -1;
2551 - }
2552 -
2553 - return 0;
2554 -
2555 -out_chrdev:
2556 -#ifdef CONFIG_HOTPLUG_CPU
2557 - unregister_cpu_notifier(&fcoe_cpu_notifier);
2558 -#endif /* CONFIG_HOTPLUG_CPU */
2559 - return rc;
2560 -}
2561 -
2562 -static void __exit fcoe_exit(void)
2563 -{
2564 - u32 idx;
2565 - struct fcoe_softc *fc, *tmp;
2566 - struct fc_lport *lp;
2567 - struct fcoe_info *fci = &fcoei;
2568 - struct fcoe_percpu_s *p;
2569 - struct sk_buff *skb;
2570 -
2571 - /*
2572 - * Stop all call back interfaces
2573 - */
2574 -#ifdef CONFIG_HOTPLUG_CPU
2575 - unregister_cpu_notifier(&fcoe_cpu_notifier);
2576 -#endif /* CONFIG_HOTPLUG_CPU */
2577 - fcoe_dev_cleanup();
2578 -
2579 - /*
2580 - * stop timer
2581 - */
2582 - del_timer_sync(&fci->timer);
2583 -
2584 - /*
2585 - * assuming that at this time there will be no
2586 - * ioctl in prograss, therefore we do not need to lock the
2587 - * list.
2588 - */
2589 - list_for_each_entry_safe(fc, tmp, &fci->fcoe_hostlist, list) {
2590 - lp = fc->lp;
2591 - fcoe_destroy_interface(lp->ifname);
2592 - }
2593 -
2594 - for (idx = 0; idx < NR_CPUS; idx++) {
2595 - if (fci->fcoe_percpu[idx]) {
2596 - kthread_stop(fci->fcoe_percpu[idx]->thread);
2597 - p = fci->fcoe_percpu[idx];
2598 - spin_lock_bh(&p->fcoe_rx_list.lock);
2599 - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
2600 - kfree_skb(skb);
2601 - spin_unlock_bh(&p->fcoe_rx_list.lock);
2602 - if (fci->fcoe_percpu[idx]->crc_eof_page)
2603 - put_page(fci->fcoe_percpu[idx]->crc_eof_page);
2604 - kfree(fci->fcoe_percpu[idx]);
2605 - }
2606 - }
2607 -
2608 - fc_release_transport(fcoe_transport_template);
2609 -}
2610 -
2611 -module_init(fcoeinit);
2612 -module_exit(fcoe_exit);
2613 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
2614 new file mode 100644
2615 index 0000000..93c47aa
2616 --- /dev/null
2617 +++ b/drivers/scsi/fcoe/libfcoe.c
2618 @@ -0,0 +1,632 @@
2619 +/*
2620 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
2621 + *
2622 + * This program is free software; you can redistribute it and/or modify it
2623 + * under the terms and conditions of the GNU General Public License,
2624 + * version 2, as published by the Free Software Foundation.
2625 + *
2626 + * This program is distributed in the hope it will be useful, but WITHOUT
2627 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2628 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2629 + * more details.
2630 + *
2631 + * You should have received a copy of the GNU General Public License along with
2632 + * this program; if not, write to the Free Software Foundation, Inc.,
2633 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2634 + *
2635 + * Maintained at www.Open-FCoE.org
2636 + */
2637 +
2638 +/*
2639 + * FCOE protocol file
2640 + */
2641 +
2642 +#include <linux/module.h>
2643 +#include <linux/version.h>
2644 +#include <linux/kernel.h>
2645 +#include <linux/spinlock.h>
2646 +#include <linux/skbuff.h>
2647 +#include <linux/netdevice.h>
2648 +#include <linux/etherdevice.h>
2649 +#include <linux/if_ether.h>
2650 +#include <linux/kthread.h>
2651 +#include <linux/crc32.h>
2652 +#include <scsi/scsi_tcq.h>
2653 +#include <scsi/scsicam.h>
2654 +#include <scsi/scsi_transport.h>
2655 +#include <scsi/scsi_transport_fc.h>
2656 +#include <net/rtnetlink.h>
2657 +
2658 +#include <scsi/fc/fc_encaps.h>
2659 +
2660 +#include <scsi/libfc/libfc.h>
2661 +#include <scsi/libfc/fc_frame.h>
2662 +
2663 +#include <scsi/fc/fc_fcoe.h>
2664 +#include "fcoe_def.h"
2665 +
2666 +static int debug_fcoe;
2667 +
2668 +#define FCOE_MAX_QUEUE_DEPTH 256
2669 +
2670 +/* destination address mode */
2671 +#define FCOE_GW_ADDR_MODE 0x00
2672 +#define FCOE_FCOUI_ADDR_MODE 0x01
2673 +
2674 +/* Function Prototypes */
2675 +static int fcoe_check_wait_queue(struct fc_lport *);
2676 +static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
2677 +static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
2678 +static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
2679 +
2680 +/*
2681 + * This is the FCoE receive function, called from
2682 + * NET_RX_SOFTIRQ context. It queues the received packet
2683 + * to a per-CPU receive thread, which builds the FC frame
2684 + * and passes it up the stack.
2685 + */
2686 +int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
2687 + struct packet_type *ptype, struct net_device *olddev)
2688 +{
2689 + struct fc_lport *lp;
2690 + struct fcoe_rcv_info *fr;
2691 + struct fcoe_softc *fc;
2692 + struct fcoe_dev_stats *stats;
2693 + u8 *data;
2694 + struct fc_frame_header *fh;
2695 + unsigned short oxid;
2696 + int cpu_idx;
2697 + struct fcoe_percpu_s *fps;
2698 +
2699 + fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
2700 + lp = fc->lp;
2701 + if (unlikely(lp == NULL)) {
2702 + FC_DBG("cannot find hba structure");
2703 + goto err2;
2704 + }
2705 +
2706 + if (unlikely(debug_fcoe)) {
2707 + FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
2708 + "end:%p sum:%d dev:%s", skb->len, skb->data_len,
2709 + skb->head, skb->data, skb_tail_pointer(skb),
2710 + skb_end_pointer(skb), skb->csum,
2711 + skb->dev ? skb->dev->name : "<NULL>");
2712 +
2713 + }
2714 +
2715 + /* check for FCOE packet type */
2716 + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
2717 + FC_DBG("wrong FC type frame");
2718 + goto err;
2719 + }
2720 + data = skb->data;
2721 + data += sizeof(struct fcoe_hdr);
2722 + fh = (struct fc_frame_header *)data;
2723 + oxid = ntohs(fh->fh_ox_id);
2724 +
2725 + fr = fcoe_dev_from_skb(skb);
2726 + fr->fr_dev = lp;
2727 + fr->ptype = ptype;
2728 + cpu_idx = 0;
2729 +#ifdef CONFIG_SMP
2730 + /*
2731 + * The exchange ID is ANDed with the number of online CPUs,
2732 + * which gives the least lock contention when handling the
2733 + * exchange. If there is no thread for the resulting index,
2734 + * use the first online CPU.
2735 + */
2736 + cpu_idx = oxid & (num_online_cpus() >> 1);
2737 + if (fcoe_percpu[cpu_idx] == NULL)
2738 + cpu_idx = first_cpu(cpu_online_map);
2739 +#endif
2740 + fps = fcoe_percpu[cpu_idx];
2741 +
2742 + spin_lock_bh(&fps->fcoe_rx_list.lock);
2743 + __skb_queue_tail(&fps->fcoe_rx_list, skb);
2744 + if (fps->fcoe_rx_list.qlen == 1)
2745 + wake_up_process(fps->thread);
2746 +
2747 + spin_unlock_bh(&fps->fcoe_rx_list.lock);
2748 +
2749 + return 0;
2750 +err:
2751 +#ifdef CONFIG_SMP
2752 + stats = lp->dev_stats[smp_processor_id()];
2753 +#else
2754 + stats = lp->dev_stats[0];
2755 +#endif
2756 + stats->ErrorFrames++;
2757 +
2758 +err2:
2759 + kfree_skb(skb);
2760 + return -1;
2761 +}
2762 +
2763 +static inline int fcoe_start_io(struct sk_buff *skb)
2764 +{
2765 + int rc;
2766 +
2767 + skb_get(skb);
2768 + rc = dev_queue_xmit(skb);
2769 + if (rc != 0)
2770 + return rc;
2771 + kfree_skb(skb);
2772 + return 0;
2773 +}
2774 +
2775 +static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
2776 +{
2777 + struct fcoe_percpu_s *fps;
2778 + struct page *page;
2779 + int cpu_idx;
2780 +
2781 + cpu_idx = get_cpu();
2782 + fps = fcoe_percpu[cpu_idx];
2783 + page = fps->crc_eof_page;
2784 + if (!page) {
2785 + page = alloc_page(GFP_ATOMIC);
2786 + if (!page) {
2787 + put_cpu();
2788 + return -ENOMEM;
2789 + }
2790 + fps->crc_eof_page = page;
2791 + WARN_ON(fps->crc_eof_offset != 0);
2792 + }
2793 +
2794 + get_page(page);
2795 + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
2796 + fps->crc_eof_offset, tlen);
2797 + skb->len += tlen;
2798 + skb->data_len += tlen;
2799 + skb->truesize += tlen;
2800 + fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
2801 +
2802 + if (fps->crc_eof_offset >= PAGE_SIZE) {
2803 + fps->crc_eof_page = NULL;
2804 + fps->crc_eof_offset = 0;
2805 + put_page(page);
2806 + }
2807 + put_cpu();
2808 + return 0;
2809 +}
2810 +
2811 +/*
2812 + * This is the frame transmit (xmit) routine.
2813 + */
2814 +int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
2815 +{
2816 + int indx;
2817 + int wlen, rc = 0;
2818 + u32 crc;
2819 + struct ethhdr *eh;
2820 + struct fcoe_crc_eof *cp;
2821 + struct sk_buff *skb;
2822 + struct fcoe_dev_stats *stats;
2823 + struct fc_frame_header *fh;
2824 + unsigned int hlen; /* header length implies the version */
2825 + unsigned int tlen; /* trailer length */
2826 + int flogi_in_progress = 0;
2827 + struct fcoe_softc *fc;
2828 + void *data;
2829 + u8 sof, eof;
2830 + struct fcoe_hdr *hp;
2831 +
2832 + WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
2833 +
2834 + fc = (struct fcoe_softc *)lp->drv_priv;
2835 + /*
2836 + * If this is a FLOGI, we need to learn the gateway address
2837 + * and our own FCID.
2838 + */
2839 + fh = fc_frame_header_get(fp);
2840 + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
2841 + if (fc_frame_payload_op(fp) == ELS_FLOGI) {
2842 + fc->flogi_oxid = ntohs(fh->fh_ox_id);
2843 + fc->address_mode = FCOE_FCOUI_ADDR_MODE;
2844 + fc->flogi_progress = 1;
2845 + flogi_in_progress = 1;
2846 + } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
2847 + /*
2848 + * Here we must have gotten an SID by accepting a FLOGI
2849 + * from a point-to-point connection. Switch to using
2850 + * the source MAC based on the SID. The destination
2851 + * MAC in this case would have been set by receiving the
2852 + * FLOGI.
2853 + */
2854 + fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
2855 + fc->flogi_progress = 0;
2856 + }
2857 + }
2858 +
2859 + skb = fp_skb(fp);
2860 + sof = fr_sof(fp);
2861 + eof = fr_eof(fp);
2862 +
2863 + crc = ~0;
2864 + crc = crc32(crc, skb->data, skb_headlen(skb));
2865 +
2866 + for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) {
2867 + skb_frag_t *frag = &skb_shinfo(skb)->frags[indx];
2868 + unsigned long off = frag->page_offset;
2869 + unsigned long len = frag->size;
2870 +
2871 + while (len > 0) {
2872 + unsigned long clen;
2873 +
2874 + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
2875 + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
2876 + KM_SKB_DATA_SOFTIRQ);
2877 + crc = crc32(crc, data + (off & ~PAGE_MASK),
2878 + clen);
2879 + kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
2880 + off += clen;
2881 + len -= clen;
2882 + }
2883 + }
2884 +
2885 + /*
2886 + * Get header and trailer lengths.
2887 + * This is temporary code until we get rid of the old protocol.
2888 + * Both versions have essentially the same trailer layout but T11
2889 + * has padding afterwards.
2890 + */
2891 + hlen = sizeof(struct fcoe_hdr);
2892 + tlen = sizeof(struct fcoe_crc_eof);
2893 +
2894 + /*
2895 + * Copy the FC CRC and EOF to the skb buffer.
2896 + * Use utility buffer in the fc_frame part of the sk_buff for the
2897 + * trailer.
2898 + * We don't do a get_page for this frag, since that page may not be
2899 + * managed that way. So that skb_free() doesn't do that either, we
2900 + * setup the destructor to remove this frag.
2901 + */
2902 + if (skb_is_nonlinear(skb)) {
2903 + skb_frag_t *frag;
2904 + if (fcoe_get_paged_crc_eof(skb, tlen)) {
2905 + kfree_skb(skb);
2906 + return -ENOMEM;
2907 + }
2908 + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2909 + cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
2910 + + frag->page_offset;
2911 + } else {
2912 + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
2913 + }
2914 +
2915 + cp->fcoe_eof = eof;
2916 + cp->fcoe_crc32 = cpu_to_le32(~crc);
2917 + if (tlen == sizeof(*cp))
2918 + memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
2919 + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
2920 +
2921 + if (skb_is_nonlinear(skb)) {
2922 + kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
2923 + cp = NULL;
2924 + }
2925 +
2926 + /*
2927 + * Fill in the control structures
2928 + */
2929 + skb->ip_summed = CHECKSUM_NONE;
2930 + eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr));
2931 + if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
2932 + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
2933 + else
2934 + /* insert GW address */
2935 + memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
2936 +
2937 + if (unlikely(flogi_in_progress))
2938 + memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
2939 + else
2940 + memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
2941 +
2942 + eh->h_proto = htons(ETH_P_FCOE);
2943 + skb->protocol = htons(ETH_P_802_3);
2944 + skb_reset_mac_header(skb);
2945 + skb_reset_network_header(skb);
2946 +
2947 + hp = (struct fcoe_hdr *)(eh + 1);
2948 + memset(hp, 0, sizeof(*hp));
2949 + if (FC_FCOE_VER)
2950 + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
2951 + hp->fcoe_sof = sof;
2952 +
2953 + stats = lp->dev_stats[smp_processor_id()];
2954 + stats->TxFrames++;
2955 + stats->TxWords += wlen;
2956 + skb->dev = fc->real_dev;
2957 +
2958 + fr_dev(fp) = lp;
2959 + if (fc->fcoe_pending_queue.qlen)
2960 + rc = fcoe_check_wait_queue(lp);
2961 +
2962 + if (rc == 0)
2963 + rc = fcoe_start_io(skb);
2964 +
2965 + if (rc) {
2966 + fcoe_insert_wait_queue(lp, skb);
2967 + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
2968 + fc_pause(lp);
2969 + }
2970 +
2971 + return 0;
2972 +}
2973 +
2974 +int fcoe_percpu_receive_thread(void *arg)
2975 +{
2976 + struct fcoe_percpu_s *p = arg;
2977 + u32 fr_len;
2978 + unsigned int hlen;
2979 + unsigned int tlen;
2980 + struct fc_lport *lp;
2981 + struct fcoe_rcv_info *fr;
2982 + struct fcoe_dev_stats *stats;
2983 + struct fc_frame_header *fh;
2984 + struct sk_buff *skb;
2985 + struct fcoe_crc_eof *cp;
2986 + enum fc_sof sof;
2987 + struct fc_frame *fp;
2988 + u8 *mac = NULL;
2989 + struct fcoe_softc *fc;
2990 + struct fcoe_hdr *hp;
2991 +
2992 + set_user_nice(current, 19);
2993 +
2994 + while (!kthread_should_stop()) {
2995 +
2996 + spin_lock_bh(&p->fcoe_rx_list.lock);
2997 + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
2998 + set_current_state(TASK_INTERRUPTIBLE);
2999 + spin_unlock_bh(&p->fcoe_rx_list.lock);
3000 + schedule();
3001 + set_current_state(TASK_RUNNING);
3002 + if (kthread_should_stop())
3003 + return 0;
3004 + spin_lock_bh(&p->fcoe_rx_list.lock);
3005 + }
3006 + spin_unlock_bh(&p->fcoe_rx_list.lock);
3007 + fr = fcoe_dev_from_skb(skb);
3008 + lp = fr->fr_dev;
3009 + if (unlikely(lp == NULL)) {
3010 + FC_DBG("invalid HBA Structure");
3011 + kfree_skb(skb);
3012 + continue;
3013 + }
3014 +
3015 + stats = lp->dev_stats[smp_processor_id()];
3016 +
3017 + if (unlikely(debug_fcoe)) {
3018 + FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
3019 + "tail:%p end:%p sum:%d dev:%s",
3020 + skb->len, skb->data_len,
3021 + skb->head, skb->data, skb_tail_pointer(skb),
3022 + skb_end_pointer(skb), skb->csum,
3023 + skb->dev ? skb->dev->name : "<NULL>");
3024 + }
3025 +
3026 + /*
3027 + * Save source MAC address before discarding header.
3028 + */
3029 + fc = lp->drv_priv;
3030 + if (unlikely(fc->flogi_progress))
3031 + mac = eth_hdr(skb)->h_source;
3032 +
3033 + if (skb_is_nonlinear(skb))
3034 + skb_linearize(skb); /* not ideal */
3035 +
3036 + /*
3037 + * Check the header and pull it off.
3038 + */
3039 + hlen = sizeof(struct fcoe_hdr);
3040 +
3041 + hp = (struct fcoe_hdr *)skb->data;
3042 + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
3043 + if (stats->ErrorFrames < 5)
3044 + FC_DBG("unknown FCoE version %x",
3045 + FC_FCOE_DECAPS_VER(hp));
3046 + stats->ErrorFrames++;
3047 + kfree_skb(skb);
3048 + continue;
3049 + }
3050 + sof = hp->fcoe_sof;
3051 + skb_pull(skb, sizeof(*hp));
3052 + fr_len = skb->len - sizeof(struct fcoe_crc_eof);
3053 + skb_trim(skb, fr_len);
3054 + tlen = sizeof(struct fcoe_crc_eof);
3055 +
3056 + if (unlikely(fr_len > skb->len)) {
3057 + if (stats->ErrorFrames < 5)
3058 + FC_DBG("length error fr_len 0x%x skb->len 0x%x",
3059 + fr_len, skb->len);
3060 + stats->ErrorFrames++;
3061 + kfree_skb(skb);
3062 + continue;
3063 + }
3064 + stats->RxFrames++;
3065 + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
3066 +
3067 + fp = (struct fc_frame *) skb;
3068 + fc_frame_init(fp);
3069 + cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
3070 + fr_eof(fp) = cp->fcoe_eof;
3071 + fr_sof(fp) = sof;
3072 + fr_dev(fp) = lp;
3073 +
3074 + /*
3075 + * Check the CRC here, unless it's solicited data for SCSI.
3076 + * In that case, the SCSI layer can check it during the copy,
3077 + * and it'll be more cache-efficient.
3078 + */
3079 + fh = fc_frame_header_get(fp);
3080 + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
3081 + fh->fh_type == FC_TYPE_FCP) {
3082 + fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
3083 + fc_exch_recv(lp, lp->emp, fp);
3084 + } else if (le32_to_cpu(cp->fcoe_crc32) ==
3085 + ~crc32(~0, skb->data, fr_len)) {
3086 + if (unlikely(fc->flogi_progress))
3087 + fcoe_recv_flogi(fc, fp, mac);
3088 + fc_exch_recv(lp, lp->emp, fp);
3089 + } else {
3090 + if (debug_fcoe || stats->InvalidCRCCount < 5) {
3091 + printk(KERN_WARNING \
3092 + "fcoe: dropping frame with CRC error");
3093 + }
3094 + stats->InvalidCRCCount++;
3095 + stats->ErrorFrames++;
3096 + fc_frame_free(fp);
3097 + }
3098 + }
3099 + return 0;
3100 +}
3101 +
3102 +/*
3103 + * Snoop potential response to FLOGI or even incoming FLOGI.
3104 + */
3105 +static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
3106 +{
3107 + struct fc_frame_header *fh;
3108 + u8 op;
3109 +
3110 + fh = fc_frame_header_get(fp);
3111 + if (fh->fh_type != FC_TYPE_ELS)
3112 + return;
3113 + op = fc_frame_payload_op(fp);
3114 + if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
3115 + fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
3116 + /*
3117 + * FLOGI accepted.
3118 + * If the source MAC address is FC_OUI-based, then we mark
3119 + * the address_mode flag to use the FC_OUI-based Ethernet DA.
3120 + * Otherwise we use the FCoE gateway address.
3121 + */
3122 + if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
3123 + fc->address_mode = FCOE_FCOUI_ADDR_MODE;
3124 + } else {
3125 + memcpy(fc->dest_addr, sa, ETH_ALEN);
3126 + fc->address_mode = FCOE_GW_ADDR_MODE;
3127 + }
3128 +
3129 + /*
3130 + * Remove any previously-set unicast MAC filter.
3131 + * Add secondary FCoE MAC address filter for our OUI.
3132 + */
3133 + rtnl_lock();
3134 + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
3135 + dev_unicast_delete(fc->real_dev, fc->data_src_addr,
3136 + ETH_ALEN);
3137 + fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
3138 + dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
3139 + rtnl_unlock();
3140 +
3141 + fc->flogi_progress = 0;
3142 + } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
3143 + /*
3144 + * Save source MAC for point-to-point responses.
3145 + */
3146 + memcpy(fc->dest_addr, sa, ETH_ALEN);
3147 + fc->address_mode = FCOE_GW_ADDR_MODE;
3148 + }
3149 +}
3150 +
3151 +void fcoe_watchdog(ulong vp)
3152 +{
3153 + struct fc_lport *lp;
3154 + struct fcoe_softc *fc;
3155 + int paused = 0;
3156 +
3157 + read_lock(&fcoe_hostlist_lock);
3158 + list_for_each_entry(fc, &fcoe_hostlist, list) {
3159 + lp = fc->lp;
3160 + if (lp) {
3161 + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
3162 + paused = 1;
3163 + if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
3164 + if (paused)
3165 + fc_unpause(lp);
3166 + }
3167 + }
3168 + }
3169 + read_unlock(&fcoe_hostlist_lock);
3170 +
3171 + fcoe_timer.expires = jiffies + (1 * HZ);
3172 + add_timer(&fcoe_timer);
3173 +}
3174 +
3175 +/*
3176 + * The wait_queue is used when an skb transmit fails. The skb is
3177 + * placed on the wait_queue, which is emptied either by the timer
3178 + * function or by the next skb transmit.
3179 + *
3180 + */
3181 +
3182 +/*
3183 + * Function name : fcoe_check_wait_queue()
3184 + *
3185 + * Return Values : 0 or error
3186 + *
3187 + * Description : empties the wait_queue
3188 + * dequeues the head of the wait_queue and calls
3189 + * fcoe_start_io() for each packet;
3190 + * if all skbs have been transmitted, returns 0;
3191 + * if an error occurs, the wait_queue is restored and
3192 + * retried later
3193 + *
3194 + */
3195 +
3196 +static int fcoe_check_wait_queue(struct fc_lport *lp)
3197 +{
3198 + int rc, unpause = 0;
3199 + int paused = 0;
3200 + struct sk_buff *skb;
3201 + struct fcoe_softc *fc;
3202 +
3203 + fc = (struct fcoe_softc *)lp->drv_priv;
3204 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3205 +
3206 + /*
3207 + * is this interface paused?
3208 + */
3209 + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
3210 + paused = 1;
3211 + if (fc->fcoe_pending_queue.qlen) {
3212 + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
3213 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3214 + rc = fcoe_start_io(skb);
3215 + if (rc) {
3216 + fcoe_insert_wait_queue_head(lp, skb);
3217 + return rc;
3218 + }
3219 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3220 + }
3221 + if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
3222 + unpause = 1;
3223 + }
3224 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3225 + if ((unpause) && (paused))
3226 + fc_unpause(lp);
3227 + return fc->fcoe_pending_queue.qlen;
3228 +}
3229 +
3230 +static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
3231 + struct sk_buff *skb)
3232 +{
3233 + struct fcoe_softc *fc;
3234 +
3235 + fc = (struct fcoe_softc *)lp->drv_priv;
3236 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3237 + __skb_queue_head(&fc->fcoe_pending_queue, skb);
3238 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3239 +}
3240 +
3241 +static void fcoe_insert_wait_queue(struct fc_lport *lp,
3242 + struct sk_buff *skb)
3243 +{
3244 + struct fcoe_softc *fc;
3245 +
3246 + fc = (struct fcoe_softc *)lp->drv_priv;
3247 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3248 + __skb_queue_tail(&fc->fcoe_pending_queue, skb);
3249 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3250 +}
3251 diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
3252 index 30403aa..f724dd2 100644
3253 --- a/drivers/scsi/libfc/fc_disc.c
3254 +++ b/drivers/scsi/libfc/fc_disc.c
3255 @@ -19,7 +19,9 @@
3256
3257 /*
3258 * Target Discovery
3259 - * Actually, this discovers all FC-4 remote ports, including FCP initiators.
3260 + *
3261 + * This block discovers all FC-4 remote ports, including FCP initiators. It
3262 + * also handles RSCN events and re-discovery if necessary.
3263 */
3264
3265 #include <linux/timer.h>
3266 @@ -33,12 +35,18 @@
3267 #define FC_DISC_RETRY_LIMIT 3 /* max retries */
3268 #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
3269
3270 -int fc_disc_debug;
3271 +static int fc_disc_debug;
3272 +
3273 +#define FC_DEBUG_DISC(fmt...) \
3274 + do { \
3275 + if (fc_disc_debug) \
3276 + FC_DBG(fmt); \
3277 + } while (0)
3278
3279 static void fc_disc_gpn_ft_req(struct fc_lport *);
3280 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
3281 static int fc_disc_new_target(struct fc_lport *, struct fc_rport *,
3282 - struct fc_rport_identifiers *);
3283 + struct fc_rport_identifiers *);
3284 static void fc_disc_del_target(struct fc_lport *, struct fc_rport *);
3285 static void fc_disc_done(struct fc_lport *);
3286 static void fc_disc_error(struct fc_lport *, struct fc_frame *);
3287 @@ -47,13 +55,13 @@ static void fc_disc_single(struct fc_lport *, struct fc_disc_port *);
3288 static int fc_disc_restart(struct fc_lport *);
3289
3290 /**
3291 - * fc_disc_rscn_req - Handle Registered State Change Notification (RSCN)
3292 + * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
3293 * @sp: Current sequence of the RSCN exchange
3294 * @fp: RSCN Frame
3295 - * @lp: Fibre Channel host port instance
3296 + * @lport: Fibre Channel host port instance
3297 */
3298 -static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3299 - struct fc_lport *lp)
3300 +static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3301 + struct fc_lport *lport)
3302 {
3303 struct fc_els_rscn *rp;
3304 struct fc_els_rscn_page *pp;
3305 @@ -86,12 +94,14 @@ static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3306 */
3307 switch (fmt) {
3308 case ELS_ADDR_FMT_PORT:
3309 + FC_DEBUG_DISC("Port address format for port (%6x)\n",
3310 + ntoh24(pp->rscn_fid));
3311 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
3312 if (!dp) {
3313 redisc = 1;
3314 break;
3315 }
3316 - dp->lp = lp;
3317 + dp->lp = lport;
3318 dp->ids.port_id = ntoh24(pp->rscn_fid);
3319 dp->ids.port_name = -1;
3320 dp->ids.node_name = -1;
3321 @@ -102,27 +112,26 @@ static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3322 case ELS_ADDR_FMT_DOM:
3323 case ELS_ADDR_FMT_FAB:
3324 default:
3325 + FC_DEBUG_DISC("Address format is (%d)\n", fmt);
3326 redisc = 1;
3327 break;
3328 }
3329 }
3330 - lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
3331 + lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
3332 if (redisc) {
3333 - if (fc_disc_debug)
3334 - FC_DBG("RSCN received: rediscovering\n");
3335 + FC_DEBUG_DISC("RSCN received: rediscovering\n");
3336 list_for_each_entry_safe(dp, next, &disc_list, peers) {
3337 list_del(&dp->peers);
3338 kfree(dp);
3339 }
3340 - fc_disc_restart(lp);
3341 + fc_disc_restart(lport);
3342 } else {
3343 - if (fc_disc_debug)
3344 - FC_DBG("RSCN received: not rediscovering. "
3345 - "redisc %d state %d in_prog %d\n",
3346 - redisc, lp->state, lp->disc_pending);
3347 + FC_DEBUG_DISC("RSCN received: not rediscovering. "
3348 + "redisc %d state %d in_prog %d\n",
3349 + redisc, lport->state, lport->disc_pending);
3350 list_for_each_entry_safe(dp, next, &disc_list, peers) {
3351 list_del(&dp->peers);
3352 - fc_disc_single(lp, dp);
3353 + fc_disc_single(lport, dp);
3354 }
3355 }
3356 fc_frame_free(fp);
3357 @@ -131,48 +140,53 @@ reject:
3358 rjt_data.fp = NULL;
3359 rjt_data.reason = ELS_RJT_LOGIC;
3360 rjt_data.explan = ELS_EXPL_NONE;
3361 - lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
3362 + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
3363 fc_frame_free(fp);
3364 }
3365
3366 +/**
3367 + * fc_disc_recv_req - Handle incoming requests
3368 + * @sp: Current sequence of the request exchange
3369 + * @fp: The frame
3370 + * @lport: The FC local port
3371 + */
3372 static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
3373 - struct fc_lport *lp)
3374 + struct fc_lport *lport)
3375 {
3376 - switch (fc_frame_payload_op(fp)) {
3377 + u8 op;
3378 +
3379 + op = fc_frame_payload_op(fp);
3380 + switch (op) {
3381 case ELS_RSCN:
3382 - fc_disc_rscn_req(sp, fp, lp);
3383 + fc_disc_recv_rscn_req(sp, fp, lport);
3384 break;
3385 default:
3386 - FC_DBG("fc_disc recieved an unexpected request\n");
3387 + FC_DBG("Received an unsupported request. opcode (%x)\n", op);
3388 break;
3389 }
3390 }
3391
3392 -/*
3393 - * Refresh target discovery, perhaps due to an RSCN.
3394 - * A configurable delay is introduced to collect any subsequent RSCNs.
3395 +/**
3396 + * fc_disc_restart - Restart discovery
3397 + * @lport: FC local port
3398 */
3399 -static int fc_disc_restart(struct fc_lport *lp)
3400 +static int fc_disc_restart(struct fc_lport *lport)
3401 {
3402 - if (!lp->disc_requested && !lp->disc_pending) {
3403 - schedule_delayed_work(&lp->disc_work,
3404 - msecs_to_jiffies(lp->disc_delay * 1000));
3405 + if (!lport->disc_requested && !lport->disc_pending) {
3406 + schedule_delayed_work(&lport->disc_work,
3407 + msecs_to_jiffies(lport->disc_delay * 1000));
3408 }
3409 - lp->disc_requested = 1;
3410 + lport->disc_requested = 1;
3411 return 0;
3412 }
3413
3414 -/*
3415 - * Fibre Channel Target discovery.
3416 +/**
3417 + * fc_disc_start - Fibre Channel Target discovery
3418 + * @lport: FC local port
3419 *
3420 * Returns non-zero if discovery cannot be started.
3421 - *
3422 - * Callback is called for each target remote port found in discovery.
3423 - * When discovery is complete, the callback is called with a NULL remote port.
3424 - * Discovery may be restarted after an RSCN is received, causing the
3425 - * callback to be called after discovery complete is indicated.
3426 */
3427 -int fc_disc_start(struct fc_lport *lp)
3428 +static int fc_disc_start(struct fc_lport *lport)
3429 {
3430 struct fc_rport *rport;
3431 int error;
3432 @@ -181,20 +195,20 @@ int fc_disc_start(struct fc_lport *lp)
3433 /*
3434 * If not ready, or already running discovery, just set request flag.
3435 */
3436 - if (!fc_lport_test_ready(lp) || lp->disc_pending) {
3437 - lp->disc_requested = 1;
3438 + if (!fc_lport_test_ready(lport) || lport->disc_pending) {
3439 + lport->disc_requested = 1;
3440
3441 return 0;
3442 }
3443 - lp->disc_pending = 1;
3444 - lp->disc_requested = 0;
3445 - lp->disc_retry_count = 0;
3446 + lport->disc_pending = 1;
3447 + lport->disc_requested = 0;
3448 + lport->disc_retry_count = 0;
3449
3450 /*
3451 * Handle point-to-point mode as a simple discovery
3452 * of the remote port.
3453 */
3454 - rport = lp->ptp_rp;
3455 + rport = lport->ptp_rp;
3456 if (rport) {
3457 ids.port_id = rport->port_id;
3458 ids.port_name = rport->port_name;
3459 @@ -202,46 +216,41 @@ int fc_disc_start(struct fc_lport *lp)
3460 ids.roles = FC_RPORT_ROLE_UNKNOWN;
3461 get_device(&rport->dev);
3462
3463 - error = fc_disc_new_target(lp, rport, &ids);
3464 + error = fc_disc_new_target(lport, rport, &ids);
3465 put_device(&rport->dev);
3466 if (!error)
3467 - fc_disc_done(lp);
3468 + fc_disc_done(lport);
3469 } else {
3470 - fc_block_rports(lp);
3471 - fc_disc_gpn_ft_req(lp); /* get ports by FC-4 type */
3472 + fc_disc_gpn_ft_req(lport); /* get ports by FC-4 type */
3473 error = 0;
3474 }
3475 return error;
3476 }
3477
3478 -/*
3479 - * Restart discovery after a delay due to resource shortages.
3480 - * If the error persists, the discovery will be abandoned.
3481 +/**
3482 + * fc_disc_retry - Retry discovery
3483 + * @lport: FC local port
3484 */
3485 -static void fc_disc_retry(struct fc_lport *lp)
3486 +static void fc_disc_retry(struct fc_lport *lport)
3487 {
3488 unsigned long delay = FC_DISC_RETRY_DELAY;
3489
3490 - if (!lp->disc_retry_count)
3491 + if (!lport->disc_retry_count)
3492 delay /= 4; /* timeout faster first time */
3493 - if (lp->disc_retry_count++ < FC_DISC_RETRY_LIMIT)
3494 - schedule_delayed_work(&lp->disc_work,
3495 + if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT)
3496 + schedule_delayed_work(&lport->disc_work,
3497 msecs_to_jiffies(delay));
3498 else
3499 - fc_disc_done(lp);
3500 + fc_disc_done(lport);
3501 }
3502
3503 -/*
3504 - * Handle new target found by discovery.
3505 - * Create remote port and session if needed.
3506 - * Ignore returns of our own FID & WWPN.
3507 - *
3508 - * If a non-NULL rp is passed in, it is held for the caller, but not for us.
3509 - *
3510 - * Events delivered are:
3511 - * FC_EV_READY, when remote port is rediscovered.
3512 +/**
3513 + * fc_disc_new_target - Handle new target found by discovery
3514 + * @lport: FC local port
3515 + * @rport: The previous FC remote port (NULL if new remote port)
3516 + * @ids: Identifiers for the new FC remote port
3517 */
3518 -static int fc_disc_new_target(struct fc_lport *lp,
3519 +static int fc_disc_new_target(struct fc_lport *lport,
3520 struct fc_rport *rport,
3521 struct fc_rport_identifiers *ids)
3522 {
3523 @@ -263,61 +272,64 @@ static int fc_disc_new_target(struct fc_lport *lp,
3524 * assigned the same FCID. This should be rare.
3525 * Delete the old one and fall thru to re-create.
3526 */
3527 - fc_disc_del_target(lp, rport);
3528 + fc_disc_del_target(lport, rport);
3529 rport = NULL;
3530 }
3531 }
3532 if (((ids->port_name != -1) || (ids->port_id != -1)) &&
3533 - ids->port_id != lp->fid && ids->port_name != lp->wwpn) {
3534 + ids->port_id != fc_host_port_id(lport->host) &&
3535 + ids->port_name != lport->wwpn) {
3536 if (!rport) {
3537 - rport = lp->tt.rport_lookup(lp, ids->port_id);
3538 + rport = lport->tt.rport_lookup(lport, ids->port_id);
3539 if (!rport) {
3540 struct fc_disc_port dp;
3541 - dp.lp = lp;
3542 + dp.lp = lport;
3543 dp.ids.port_id = ids->port_id;
3544 dp.ids.port_name = ids->port_name;
3545 dp.ids.node_name = ids->node_name;
3546 dp.ids.roles = ids->roles;
3547 - rport = fc_rport_dummy_create(&dp);
3548 + rport = fc_rport_rogue_create(&dp);
3549 }
3550 if (!rport)
3551 error = ENOMEM;
3552 }
3553 if (rport) {
3554 rp = rport->dd_data;
3555 - rp->event_callback = lp->tt.event_callback;
3556 + rp->event_callback = lport->tt.event_callback;
3557 rp->rp_state = RPORT_ST_INIT;
3558 - lp->tt.rport_login(rport);
3559 + lport->tt.rport_login(rport);
3560 }
3561 }
3562 return error;
3563 }
3564
3565 -/*
3566 - * Delete the remote port.
3567 +/**
3568 + * fc_disc_del_target - Delete a target
3569 + * @lport: FC local port
3570 + * @rport: The remote port to be removed
3571 */
3572 -static void fc_disc_del_target(struct fc_lport *lp, struct fc_rport *rport)
3573 +static void fc_disc_del_target(struct fc_lport *lport, struct fc_rport *rport)
3574 {
3575 - lp->tt.rport_reset(rport);
3576 - fc_remote_port_delete(rport); /* release hold from create */
3577 + lport->tt.rport_stop(rport);
3578 }
3579
3580 -/*
3581 - * Done with discovery
3582 +/**
3583 + * fc_disc_done - Discovery has been completed
3584 + * @lport: FC local port
3585 */
3586 -static void fc_disc_done(struct fc_lport *lp)
3587 +static void fc_disc_done(struct fc_lport *lport)
3588 {
3589 - lp->disc_done = 1;
3590 - lp->disc_pending = 0;
3591 - if (lp->disc_requested)
3592 - lp->tt.disc_start(lp);
3593 + lport->disc_done = 1;
3594 + lport->disc_pending = 0;
3595 + if (lport->disc_requested)
3596 + lport->tt.disc_start(lport);
3597 }
3598
3599 /**
3600 * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
3601 - * @lp: Fibre Channel host port instance
3602 + * @lport: FC local port
3603 */
3604 -static void fc_disc_gpn_ft_req(struct fc_lport *lp)
3605 +static void fc_disc_gpn_ft_req(struct fc_lport *lport)
3606 {
3607 struct fc_frame *fp;
3608 struct fc_seq *sp = NULL;
3609 @@ -327,60 +339,64 @@ static void fc_disc_gpn_ft_req(struct fc_lport *lp)
3610 } *rp;
3611 int error = 0;
3612
3613 - lp->disc_buf_len = 0;
3614 - lp->disc_seq_count = 0;
3615 - fp = fc_frame_alloc(lp, sizeof(*rp));
3616 - if (fp == NULL) {
3617 + lport->disc_buf_len = 0;
3618 + lport->disc_seq_count = 0;
3619 + fp = fc_frame_alloc(lport, sizeof(*rp));
3620 + if (!fp) {
3621 error = ENOMEM;
3622 } else {
3623 rp = fc_frame_payload_get(fp, sizeof(*rp));
3624 - fc_fill_dns_hdr(lp, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
3625 + fc_fill_dns_hdr(lport, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
3626 rp->gid.fn_fc4_type = FC_TYPE_FCP;
3627
3628 - WARN_ON(!fc_lport_test_ready(lp));
3629 + WARN_ON(!fc_lport_test_ready(lport));
3630
3631 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
3632 - sp = lp->tt.exch_seq_send(lp, fp,
3633 - fc_disc_gpn_ft_resp, NULL,
3634 - lp, lp->e_d_tov,
3635 - lp->fid,
3636 - FC_FID_DIR_SERV,
3637 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3638 + sp = lport->tt.exch_seq_send(lport, fp,
3639 + fc_disc_gpn_ft_resp, NULL,
3640 + lport, lport->e_d_tov,
3641 + fc_host_port_id(lport->host),
3642 + FC_FID_DIR_SERV,
3643 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3644 }
3645 - if (error || sp == NULL)
3646 - fc_disc_retry(lp);
3647 + if (error || !sp)
3648 + fc_disc_retry(lport);
3649 }
3650
3651 -/*
3652 - * Handle error on dNS request.
3653 +/**
3654 + * fc_disc_error - Handle error on dNS request
3655 + * @lport: FC local port
3656 + * @fp: The frame pointer
3657 */
3658 -static void fc_disc_error(struct fc_lport *lp, struct fc_frame *fp)
3659 +static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp)
3660 {
3661 - int err = PTR_ERR(fp);
3662 + long err = PTR_ERR(fp);
3663
3664 + FC_DEBUG_DISC("Error %ld, retries %d/%d\n", PTR_ERR(fp),
3665 + lport->retry_count, FC_DISC_RETRY_LIMIT);
3666 +
3667 switch (err) {
3668 case -FC_EX_TIMEOUT:
3669 - if (lp->disc_retry_count++ < FC_DISC_RETRY_LIMIT) {
3670 - fc_disc_gpn_ft_req(lp);
3671 + if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) {
3672 + fc_disc_gpn_ft_req(lport);
3673 } else {
3674 - FC_DBG("err %d - ending\n", err);
3675 - fc_disc_done(lp);
3676 + fc_disc_done(lport);
3677 }
3678 break;
3679 default:
3680 - FC_DBG("err %d - ending\n", err);
3681 - fc_disc_done(lp);
3682 + FC_DBG("Error code %ld not supported\n", err);
3683 + fc_disc_done(lport);
3684 break;
3685 }
3686 }
3687
3688 /**
3689 * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
3690 - * @lp: Fibre Channel host port instance
3691 + * @lport: Fibre Channel host port instance
3692 * @buf: GPN_FT response buffer
3693 * @len: size of response buffer
3694 */
3695 -static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3696 +static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3697 {
3698 struct fc_gpn_ft_resp *np;
3699 char *bp;
3700 @@ -388,8 +404,8 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3701 size_t tlen;
3702 int error = 0;
3703 struct fc_disc_port dp;
3704 - struct fc_rport *rp;
3705 - struct fc_rport_libfc_priv *rpp;
3706 + struct fc_rport *rport;
3707 + struct fc_rport_libfc_priv *rdata;
3708
3709 /*
3710 * Handle partial name record left over from previous call.
3711 @@ -397,7 +413,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3712 bp = buf;
3713 plen = len;
3714 np = (struct fc_gpn_ft_resp *)bp;
3715 - tlen = lp->disc_buf_len;
3716 + tlen = lport->disc_buf_len;
3717 if (tlen) {
3718 WARN_ON(tlen >= sizeof(*np));
3719 plen = sizeof(*np) - tlen;
3720 @@ -405,7 +421,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3721 WARN_ON(plen >= sizeof(*np));
3722 if (plen > len)
3723 plen = len;
3724 - np = &lp->disc_buf;
3725 + np = &lport->disc_buf;
3726 memcpy((char *)np + tlen, bp, plen);
3727
3728 /*
3729 @@ -415,9 +431,9 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3730 bp -= tlen;
3731 len += tlen;
3732 plen += tlen;
3733 - lp->disc_buf_len = (unsigned char) plen;
3734 + lport->disc_buf_len = (unsigned char) plen;
3735 if (plen == sizeof(*np))
3736 - lp->disc_buf_len = 0;
3737 + lport->disc_buf_len = 0;
3738 }
3739
3740 /*
3741 @@ -428,19 +444,20 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3742 * After the first time through the loop, things return to "normal".
3743 */
3744 while (plen >= sizeof(*np)) {
3745 - dp.lp = lp;
3746 + dp.lp = lport;
3747 dp.ids.port_id = ntoh24(np->fp_fid);
3748 dp.ids.port_name = ntohll(np->fp_wwpn);
3749 dp.ids.node_name = -1;
3750 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
3751
3752 - if ((dp.ids.port_id != lp->fid) &&
3753 - (dp.ids.port_name != lp->wwpn)) {
3754 - rp = fc_rport_dummy_create(&dp);
3755 - if (rp) {
3756 - rpp = rp->dd_data;
3757 - rpp->local_port = lp;
3758 - lp->tt.rport_login(rp);
3759 + if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
3760 + (dp.ids.port_name != lport->wwpn)) {
3761 + rport = fc_rport_rogue_create(&dp);
3762 + if (rport) {
3763 + rdata = rport->dd_data;
3764 + rdata->event_callback = lport->tt.event_callback;
3765 + rdata->local_port = lport;
3766 + lport->tt.rport_login(rport);
3767 } else
3768 FC_DBG("Failed to allocate memory for "
3769 "the newly discovered port (%6x)\n",
3770 @@ -448,7 +465,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3771 }
3772
3773 if (np->fp_flags & FC_NS_FID_LAST) {
3774 - fc_disc_done(lp);
3775 + fc_disc_done(lport);
3776 len = 0;
3777 break;
3778 }
3779 @@ -462,11 +479,11 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3780 * Save any partial record at the end of the buffer for next time.
3781 */
3782 if (error == 0 && len > 0 && len < sizeof(*np)) {
3783 - if (np != &lp->disc_buf)
3784 - memcpy(&lp->disc_buf, np, len);
3785 - lp->disc_buf_len = (unsigned char) len;
3786 + if (np != &lport->disc_buf)
3787 + memcpy(&lport->disc_buf, np, len);
3788 + lport->disc_buf_len = (unsigned char) len;
3789 } else {
3790 - lp->disc_buf_len = 0;
3791 + lport->disc_buf_len = 0;
3792 }
3793 return error;
3794 }
3795 @@ -476,14 +493,14 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3796 */
3797 static void fc_disc_timeout(struct work_struct *work)
3798 {
3799 - struct fc_lport *lp;
3800 + struct fc_lport *lport;
3801
3802 - lp = container_of(work, struct fc_lport, disc_work.work);
3803 + lport = container_of(work, struct fc_lport, disc_work.work);
3804
3805 - if (lp->disc_pending)
3806 - fc_disc_gpn_ft_req(lp);
3807 + if (lport->disc_pending)
3808 + fc_disc_gpn_ft_req(lport);
3809 else
3810 - lp->tt.disc_start(lp);
3811 + lport->tt.disc_start(lport);
3812 }
3813
3814 /**
3815 @@ -495,9 +512,9 @@ static void fc_disc_timeout(struct work_struct *work)
3816 * The response may be in multiple frames
3817 */
3818 static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3819 - void *lp_arg)
3820 + void *lp_arg)
3821 {
3822 - struct fc_lport *lp = lp_arg;
3823 + struct fc_lport *lport = lp_arg;
3824 struct fc_ct_hdr *cp;
3825 struct fc_frame_header *fh;
3826 unsigned int seq_cnt;
3827 @@ -506,7 +523,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3828 int error;
3829
3830 if (IS_ERR(fp)) {
3831 - fc_disc_error(lp, fp);
3832 + fc_disc_error(lport, fp);
3833 return;
3834 }
3835
3836 @@ -515,7 +532,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3837 len = fr_len(fp) - sizeof(*fh);
3838 seq_cnt = ntohs(fh->fh_seq_cnt);
3839 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
3840 - lp->disc_seq_count == 0) {
3841 + lport->disc_seq_count == 0) {
3842 cp = fc_frame_payload_get(fp, sizeof(*cp));
3843 if (cp == NULL) {
3844 FC_DBG("GPN_FT response too short, len %d\n",
3845 @@ -531,68 +548,76 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3846 FC_DBG("GPN_FT rejected reason %x exp %x "
3847 "(check zoning)\n", cp->ct_reason,
3848 cp->ct_explan);
3849 - fc_disc_done(lp);
3850 + fc_disc_done(lport);
3851 } else {
3852 FC_DBG("GPN_FT unexpected response code %x\n",
3853 ntohs(cp->ct_cmd));
3854 }
3855 } else if (fr_sof(fp) == FC_SOF_N3 &&
3856 - seq_cnt == lp->disc_seq_count) {
3857 + seq_cnt == lport->disc_seq_count) {
3858 buf = fh + 1;
3859 } else {
3860 FC_DBG("GPN_FT unexpected frame - out of sequence? "
3861 "seq_cnt %x expected %x sof %x eof %x\n",
3862 - seq_cnt, lp->disc_seq_count, fr_sof(fp), fr_eof(fp));
3863 + seq_cnt, lport->disc_seq_count, fr_sof(fp), fr_eof(fp));
3864 }
3865 if (buf) {
3866 - error = fc_disc_gpn_ft_parse(lp, buf, len);
3867 + error = fc_disc_gpn_ft_parse(lport, buf, len);
3868 if (error)
3869 - fc_disc_retry(lp);
3870 + fc_disc_retry(lport);
3871 else
3872 - lp->disc_seq_count++;
3873 + lport->disc_seq_count++;
3874 }
3875 fc_frame_free(fp);
3876 }
3877
3878 -/*
3879 - * Discover the directory information for a single target.
3880 +/**
3881 + * fc_disc_single - Discover the directory information for a single target
3882 + * @lport: FC local port
3883 + * @dp: The port to rediscover
3884 + *
3885 * This could be from an RSCN that reported a change for the target.
3886 */
3887 -static void fc_disc_single(struct fc_lport *lp, struct fc_disc_port *dp)
3888 +static void fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
3889 {
3890 struct fc_rport *rport;
3891 - struct fc_rport *rp;
3892 - struct fc_rport_libfc_priv *rpp;
3893 + struct fc_rport *new_rport;
3894 + struct fc_rport_libfc_priv *rdata;
3895
3896 - if (dp->ids.port_id == lp->fid)
3897 + if (dp->ids.port_id == fc_host_port_id(lport->host))
3898 goto out;
3899
3900 - rport = lp->tt.rport_lookup(lp, dp->ids.port_id);
3901 + rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
3902 if (rport) {
3903 - fc_disc_del_target(lp, rport);
3904 + fc_disc_del_target(lport, rport);
3905 put_device(&rport->dev); /* hold from lookup */
3906 }
3907
3908 - rp = fc_rport_dummy_create(dp);
3909 - if (rp) {
3910 - rpp = rp->dd_data;
3911 + new_rport = fc_rport_rogue_create(dp);
3912 + if (new_rport) {
3913 + rdata = new_rport->dd_data;
3914 + rdata->event_callback = lport->tt.event_callback;
3915 kfree(dp);
3916 - lp->tt.rport_login(rp);
3917 + lport->tt.rport_login(new_rport);
3918 }
3919 return;
3920 out:
3921 kfree(dp);
3922 }
3923
3924 -int fc_disc_init(struct fc_lport *lp)
3925 +/**
3926 + * fc_disc_init - Initialize the discovery block
3927 + * @lport: FC local port
3928 + */
3929 +int fc_disc_init(struct fc_lport *lport)
3930 {
3931 - INIT_DELAYED_WORK(&lp->disc_work, fc_disc_timeout);
3932 + INIT_DELAYED_WORK(&lport->disc_work, fc_disc_timeout);
3933
3934 - if (!lp->tt.disc_start)
3935 - lp->tt.disc_start = fc_disc_start;
3936 + if (!lport->tt.disc_start)
3937 + lport->tt.disc_start = fc_disc_start;
3938
3939 - if (!lp->tt.disc_recv_req)
3940 - lp->tt.disc_recv_req = fc_disc_recv_req;
3941 + if (!lport->tt.disc_recv_req)
3942 + lport->tt.disc_recv_req = fc_disc_recv_req;
3943
3944 return 0;
3945 }
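
The GPN_FT parsing rework above carries a partial name record between response frames in lport->disc_buf / disc_buf_len. Below is a minimal userspace sketch of that carry-over scheme, not part of the patch; the 8-byte record is hypothetical (real GPN_FT entries are larger and carry a FID and WWPN).

/*
 * Sketch only: finish a record split across two buffers, then process the
 * whole records, then save any trailing partial record for the next call.
 */
#include <stdio.h>
#include <string.h>

#define REC_SIZE 8

static unsigned char partial[REC_SIZE];	/* plays the role of disc_buf */
static size_t partial_len;		/* plays the role of disc_buf_len */

static void handle_record(const unsigned char *rec)
{
	printf("record: %.*s\n", REC_SIZE, (const char *)rec);
}

/* Consume one frame's payload, remembering any trailing partial record. */
static void parse_chunk(const unsigned char *buf, size_t len)
{
	/* First finish a record left over from the previous frame. */
	if (partial_len) {
		size_t need = REC_SIZE - partial_len;

		if (need > len)
			need = len;
		memcpy(partial + partial_len, buf, need);
		partial_len += need;
		buf += need;
		len -= need;
		if (partial_len == REC_SIZE) {
			handle_record(partial);
			partial_len = 0;
		}
	}

	/* Then the whole records in the middle of this frame. */
	while (len >= REC_SIZE) {
		handle_record(buf);
		buf += REC_SIZE;
		len -= REC_SIZE;
	}

	/* Save any tail for the next frame. */
	memcpy(partial, buf, len);
	partial_len = len;
}

int main(void)
{
	/* Three 8-byte records arriving as two uneven "frames". */
	parse_chunk((const unsigned char *)"AAAAAAAABBB", 11);
	parse_chunk((const unsigned char *)"BBBBBCCCCCCCC", 13);
	return 0;
}
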
3946 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
3947 index ed74d95..80dc1ef 100644
3948 --- a/drivers/scsi/libfc/fc_exch.c
3949 +++ b/drivers/scsi/libfc/fc_exch.c
3950 @@ -37,6 +37,13 @@
3951 * fc_exch_debug can be set in debugger or at compile time to get more logs.
3952 */
3953 static int fc_exch_debug;
3954 +
3955 +#define FC_DEBUG_EXCH(fmt...) \
3956 + do { \
3957 + if (fc_exch_debug) \
3958 + FC_DBG(fmt); \
3959 + } while (0)
3960 +
3961 static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
3962
3963 /*
3964 @@ -86,7 +93,7 @@ struct fc_exch {
3965 struct list_head ex_list; /* free or busy list linkage */
3966 spinlock_t ex_lock; /* lock covering exchange state */
3967 atomic_t ex_refcnt; /* reference counter */
3968 - struct timer_list ex_timer; /* timer for upper level protocols */
3969 + struct delayed_work timeout_work; /* timer for upper level protocols */
3970 struct fc_lport *lp; /* fc device instance */
3971 u16 oxid; /* originator's exchange ID */
3972 u16 rxid; /* responder's exchange ID */
3973 @@ -310,7 +317,6 @@ static void fc_exch_release(struct fc_exch *ep)
3974 if (ep->lp->tt.exch_put)
3975 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
3976 WARN_ON(!ep->esb_stat & ESB_ST_COMPLETE);
3977 - WARN_ON(timer_pending(&ep->ex_timer));
3978 mempool_free(ep, mp->ep_pool);
3979 }
3980 }
3981 @@ -332,7 +338,7 @@ static int fc_exch_done_locked(struct fc_exch *ep)
3982
3983 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
3984 ep->state |= FC_EX_DONE;
3985 - if (del_timer(&ep->ex_timer))
3986 + if (cancel_delayed_work(&ep->timeout_work))
3987 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
3988 rc = 0;
3989 }
3990 @@ -362,7 +368,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
3991 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
3992 return;
3993
3994 - if (!mod_timer(&ep->ex_timer, jiffies + msecs_to_jiffies(timer_msec)))
3995 + FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
3996 + ep->xid);
3997 + if (schedule_delayed_work(&ep->timeout_work,
3998 + jiffies + msecs_to_jiffies(timer_msec)))
3999 fc_exch_hold(ep); /* hold for timer */
4000 }
4001
4002 @@ -435,9 +444,10 @@ EXPORT_SYMBOL(fc_seq_exch_abort);
4003 * Exchange timeout - handle exchange timer expiration.
4004 * The timer will have been cancelled before this is called.
4005 */
4006 -static void fc_exch_timeout(unsigned long ep_arg)
4007 +static void fc_exch_timeout(struct work_struct *work)
4008 {
4009 - struct fc_exch *ep = (struct fc_exch *)ep_arg;
4010 + struct fc_exch *ep = container_of(work, struct fc_exch,
4011 + timeout_work.work);
4012 struct fc_seq *sp = &ep->seq;
4013 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
4014 void *arg;
4015 @@ -584,7 +594,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
4016 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
4017 ep->rxid = FC_XID_UNKNOWN;
4018 ep->class = mp->class;
4019 - setup_timer(&ep->ex_timer, fc_exch_timeout, (unsigned long)ep);
4020 + INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
4021 out:
4022 return ep;
4023 err:
4024 @@ -843,9 +853,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
4025 struct fc_exch *ep = fc_seq_exch(sp);
4026
4027 sp = fc_seq_alloc(ep, ep->seq_id++);
4028 - if (fc_exch_debug)
4029 - FC_DBG("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
4030 - ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
4031 + FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
4032 + ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
4033 return sp;
4034 }
4035 /*
4036 @@ -913,7 +922,18 @@ int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
4037 }
4038
4039 hton24(fh->fh_f_ctl, f_ctl | fill);
4040 - fh->fh_seq_cnt = htons(sp->cnt++);
4041 + fh->fh_seq_cnt = htons(sp->cnt);
4042 +
4043 + /*
4044 + * Update the sequence count when this frame carries multiple
4045 + * FC frames, i.e. when sequence offload is enabled by the
4046 + * LLD.
4047 + */
4048 + if (fr_max_payload(fp))
4049 + sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
4050 + fr_max_payload(fp));
4051 + else
4052 + sp->cnt++;
4053
4054 /*
4055 * Send the frame.
4056 @@ -1185,8 +1205,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
4057 lp->tt.lport_recv(lp, sp, fp);
4058 fc_exch_release(ep); /* release from lookup */
4059 } else {
4060 - if (fc_exch_debug)
4061 - FC_DBG("exch/seq lookup failed: reject %x\n", reject);
4062 + FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
4063 fc_frame_free(fp);
4064 }
4065 }
4066 @@ -1290,12 +1309,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
4067 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
4068 if (!sp) {
4069 atomic_inc(&mp->stats.xid_not_found);
4070 - if (fc_exch_debug)
4071 - FC_DBG("seq lookup failed\n");
4072 + FC_DEBUG_EXCH("seq lookup failed\n");
4073 } else {
4074 atomic_inc(&mp->stats.non_bls_resp);
4075 - if (fc_exch_debug)
4076 - FC_DBG("non-BLS response to sequence");
4077 + FC_DEBUG_EXCH("non-BLS response to sequence");
4078 }
4079 fc_frame_free(fp);
4080 }
4081 @@ -1316,11 +1333,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
4082 int rc = 1, has_rec = 0;
4083
4084 fh = fc_frame_header_get(fp);
4085 - if (fc_exch_debug)
4086 - FC_DBG("exch: BLS rctl %x - %s\n",
4087 - fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
4088 + FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
4089 + fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
4090
4091 - if (del_timer_sync(&ep->ex_timer))
4092 + if (cancel_delayed_work_sync(&ep->timeout_work))
4093 fc_exch_release(ep); /* release from pending timer hold */
4094
4095 spin_lock_bh(&ep->ex_lock);
4096 @@ -1410,10 +1426,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
4097 case FC_RCTL_ACK_0:
4098 break;
4099 default:
4100 - if (fc_exch_debug)
4101 - FC_DBG("BLS rctl %x - %s received",
4102 - fh->fh_r_ctl,
4103 - fc_exch_rctl_name(fh->fh_r_ctl));
4104 + FC_DEBUG_EXCH("BLS rctl %x - %s received",
4105 + fh->fh_r_ctl,
4106 + fc_exch_rctl_name(fh->fh_r_ctl));
4107 break;
4108 }
4109 fc_frame_free(fp);
4110 @@ -1498,7 +1513,7 @@ static void fc_exch_reset(struct fc_exch *ep)
4111 * functions can also grab the lport lock which could cause
4112 * a deadlock).
4113 */
4114 - if (del_timer(&ep->ex_timer))
4115 + if (cancel_delayed_work(&ep->timeout_work))
4116 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4117 resp = ep->resp;
4118 ep->resp = NULL;
4119 @@ -1720,7 +1735,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
4120 if (ep->esb_stat & ESB_ST_RESP)
4121 did = ep->sid;
4122 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
4123 - lp->e_d_tov, lp->fid, did,
4124 + lp->e_d_tov, fc_host_port_id(lp->host), did,
4125 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4126 if (!rrq_sp) {
4127 ep->esb_stat |= ESB_ST_REC_QUAL;
4128 @@ -1774,8 +1789,10 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
4129 ep->esb_stat &= ~ESB_ST_REC_QUAL;
4130 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
4131 }
4132 - if ((ep->esb_stat & ESB_ST_COMPLETE) && (del_timer(&ep->ex_timer)))
4133 - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4134 + if (ep->esb_stat & ESB_ST_COMPLETE) {
4135 + if (cancel_delayed_work(&ep->timeout_work))
4136 + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4137 + }
4138
4139 spin_unlock_bh(&ep->ex_lock);
4140
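
The fc_seq_send() hunk above advances the sequence count by the number of frames a sequence-offload capable LLD will cut the burst into. Below is a small standalone sketch of that arithmetic, not part of the patch; DIV_ROUND_UP is open-coded and a 24-byte FC frame header is assumed.

#include <stdio.h>

#define FC_FRAME_HEADER_LEN 24

static unsigned int seq_cnt_advance(unsigned int cnt, size_t frame_len,
				    size_t max_payload)
{
	if (max_payload)	/* LLD will split the burst into this many frames */
		cnt += (unsigned int)((frame_len - FC_FRAME_HEADER_LEN +
				       max_payload - 1) / max_payload);
	else			/* no offload: one frame, one count */
		cnt++;
	return cnt;
}

int main(void)
{
	/* A 16 KB burst over a 2112-byte payload advances the count by 8. */
	printf("%u\n", seq_cnt_advance(0, 16384 + FC_FRAME_HEADER_LEN, 2112));
	/* Without offload the count simply increments. */
	printf("%u\n", seq_cnt_advance(0, 2112 + FC_FRAME_HEADER_LEN, 0));
	return 0;
}
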
4141 diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
4142 index 2566eed..bf8202f 100644
4143 --- a/drivers/scsi/libfc/fc_fcp.c
4144 +++ b/drivers/scsi/libfc/fc_fcp.c
4145 @@ -42,22 +42,29 @@ MODULE_AUTHOR("Open-FCoE.org");
4146 MODULE_DESCRIPTION("libfc");
4147 MODULE_LICENSE("GPL");
4148
4149 -int fc_fcp_debug;
4150 +static int fc_fcp_debug;
4151 +
4152 +#define FC_DEBUG_FCP(fmt...) \
4153 + do { \
4154 + if (fc_fcp_debug) \
4155 + FC_DBG(fmt); \
4156 + } while (0)
4157 +
4158 static struct kmem_cache *scsi_pkt_cachep;
4159
4160 /* SRB state definitions */
4161 -#define FC_SRB_FREE 0 /* cmd is free */
4162 -#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
4163 -#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
4164 -#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
4165 -#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */
4166 -#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
4167 -#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
4168 +#define FC_SRB_FREE 0 /* cmd is free */
4169 +#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
4170 +#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
4171 +#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
4172 +#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
4173 +#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
4174 +#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
4175 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
4176 -#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
4177 +#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
4178
4179 -#define FC_SRB_READ (1 << 1)
4180 -#define FC_SRB_WRITE (1 << 0)
4181 +#define FC_SRB_READ (1 << 1)
4182 +#define FC_SRB_WRITE (1 << 0)
4183
4184 /*
4185 * scsi request structure, one for each scsi request
4186 @@ -184,8 +191,8 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
4187 #define FC_SCSI_REC_TOV (2 * HZ)
4188 #define FC_HOST_RESET_TIMEOUT (30 * HZ)
4189
4190 -#define FC_MAX_ERROR_CNT 5
4191 -#define FC_MAX_RECOV_RETRY 3
4192 +#define FC_MAX_ERROR_CNT 5
4193 +#define FC_MAX_RECOV_RETRY 3
4194
4195 #define FC_FCP_DFLT_QUEUE_DEPTH 32
4196
4197 @@ -353,11 +360,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
4198 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
4199 fc_frame_crc_check(fp))
4200 goto crc_err;
4201 - if (fc_fcp_debug) {
4202 - FC_DBG("data received past end. "
4203 - "len %zx offset %zx "
4204 - "data_len %x\n", len, offset, fsp->data_len);
4205 - }
4206 + FC_DEBUG_FCP("data received past end. len %zx offset %zx "
4207 + "data_len %x\n", len, offset, fsp->data_len);
4208 fc_fcp_retry_cmd(fsp);
4209 return;
4210 }
4211 @@ -449,55 +453,54 @@ crc_err:
4212 /*
4213 * Send SCSI data to target.
4214 * Called after receiving a Transfer Ready data descriptor.
4215 + * If the LLD is capable of sequence offload, send down seq_blen
4216 + * bytes of data in a single frame; otherwise send multiple FC
4217 + * frames of the max FC frame payload supported by the target port.
4218 */
4219 static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4220 - size_t offset, size_t len,
4221 - struct fc_frame *oldfp, int sg_supp)
4222 + size_t offset, size_t seq_blen)
4223 {
4224 struct scsi_cmnd *sc;
4225 struct scatterlist *sg;
4226 struct fc_frame *fp = NULL;
4227 struct fc_lport *lp = fsp->lp;
4228 size_t remaining;
4229 - size_t mfs;
4230 + size_t t_blen;
4231 size_t tlen;
4232 size_t sg_bytes;
4233 size_t frame_offset;
4234 int error;
4235 void *data = NULL;
4236 void *page_addr;
4237 - int using_sg = sg_supp;
4238 + int using_sg = lp->sg_supp;
4239 u32 f_ctl;
4240
4241 - if (unlikely(offset + len > fsp->data_len)) {
4242 - /*
4243 - * this should never happen
4244 - */
4245 - if (fc_fcp_debug) {
4246 - FC_DBG("xfer-ready past end. len %zx offset %zx\n",
4247 - len, offset);
4248 - }
4249 + WARN_ON(seq_blen <= 0);
4250 + if (unlikely(offset + seq_blen > fsp->data_len)) {
4251 + /* this should never happen */
4252 + FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
4253 + seq_blen, offset);
4254 fc_fcp_send_abort(fsp);
4255 return 0;
4256 } else if (offset != fsp->xfer_len) {
4257 - /*
4258 - * Out of Order Data Request - no problem, but unexpected.
4259 - */
4260 - if (fc_fcp_debug) {
4261 - FC_DBG("xfer-ready non-contiguous. "
4262 - "len %zx offset %zx\n", len, offset);
4263 - }
4264 + /* Out of Order Data Request - no problem, but unexpected. */
4265 + FC_DEBUG_FCP("xfer-ready non-contiguous. "
4266 + "seq_blen %zx offset %zx\n", seq_blen, offset);
4267 }
4268 - mfs = fsp->max_payload;
4269 - WARN_ON(mfs > FC_MAX_PAYLOAD);
4270 - WARN_ON(mfs < FC_MIN_MAX_PAYLOAD);
4271 - if (mfs > 512)
4272 - mfs &= ~(512 - 1); /* round down to block size */
4273 - WARN_ON(mfs < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
4274 - WARN_ON(len <= 0);
4275 +
4276 + /*
4277 + * If the LLD is capable of seq_offload, set the transport
4278 + * burst length (t_blen) to seq_blen; otherwise set t_blen
4279 + * to the max FC frame payload previously set in fsp->max_payload.
4280 + */
4281 + t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
4282 + WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
4283 + if (t_blen > 512)
4284 + t_blen &= ~(512 - 1); /* round down to block size */
4285 + WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
4286 sc = fsp->cmd;
4287
4288 - remaining = len;
4289 + remaining = seq_blen;
4290 frame_offset = offset;
4291 tlen = 0;
4292 sp = lp->tt.seq_start_next(sp);
4293 @@ -540,7 +543,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4294 continue;
4295 }
4296 if (!fp) {
4297 - tlen = min(mfs, remaining);
4298 + tlen = min(t_blen, remaining);
4299
4300 /*
4301 * TODO. Temporary workaround. fc_seq_send() can't
4302 @@ -563,6 +566,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4303 }
4304 fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP);
4305 fc_frame_set_offset(fp, frame_offset);
4306 + fr_max_payload(fp) = fsp->max_payload;
4307 }
4308 sg_bytes = min(tlen, sg->length - offset);
4309 if (using_sg) {
4310 @@ -621,7 +625,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4311 return 0;
4312 }
4313 }
4314 - fsp->xfer_len += len; /* premature count? */
4315 + fsp->xfer_len += seq_blen; /* premature count? */
4316 return 0;
4317 }
4318
4319 @@ -741,8 +745,7 @@ static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
4320
4321 rc = fc_fcp_send_data(fsp, sp,
4322 (size_t) ntohl(dd->ft_data_ro),
4323 - (size_t) ntohl(dd->ft_burst_len), fp,
4324 - lp->capabilities & TRANS_C_SG);
4325 + (size_t) ntohl(dd->ft_burst_len));
4326 if (!rc)
4327 lp->tt.seq_set_rec_data(sp, fsp->xfer_len);
4328 else if (rc == -ENOMEM)
4329 @@ -1066,7 +1069,7 @@ static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp)
4330 fc_fcp_recv,
4331 fc_fcp_pkt_destroy,
4332 fsp, 0,
4333 - rp->local_port->fid,
4334 + fc_host_port_id(rp->local_port->host),
4335 rport->port_id,
4336 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4337 if (!sp) {
4338 @@ -1175,7 +1178,7 @@ static void fc_lun_reset_send(unsigned long data)
4339 fc_tm_done,
4340 fc_fcp_pkt_destroy,
4341 fsp, 0,
4342 - rp->local_port->fid,
4343 + fc_host_port_id(rp->local_port->host),
4344 rport->port_id,
4345 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4346
4347 @@ -1367,7 +1370,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
4348 rec = fc_frame_payload_get(fp, sizeof(*rec));
4349 memset(rec, 0, sizeof(*rec));
4350 rec->rec_cmd = ELS_REC;
4351 - hton24(rec->rec_s_id, lp->fid);
4352 + hton24(rec->rec_s_id, fc_host_port_id(lp->host));
4353 rec->rec_ox_id = htons(ox_id);
4354 rec->rec_rx_id = htons(rx_id);
4355
4356 @@ -1376,7 +1379,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
4357 sp = lp->tt.exch_seq_send(lp, fp,
4358 fc_fcp_rec_resp, NULL,
4359 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
4360 - rp->local_port->fid,
4361 + fc_host_port_id(rp->local_port->host),
4362 rport->port_id,
4363 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4364
4365 @@ -1425,16 +1428,13 @@ static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
4366 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
4367 switch (rjt->er_reason) {
4368 default:
4369 - if (fc_fcp_debug)
4370 - FC_DBG("device %x unexpected REC reject "
4371 - "reason %d expl %d\n",
4372 - fsp->rport->port_id, rjt->er_reason,
4373 - rjt->er_explan);
4374 + FC_DEBUG_FCP("device %x unexpected REC reject "
4375 + "reason %d expl %d\n",
4376 + fsp->rport->port_id, rjt->er_reason,
4377 + rjt->er_explan);
4378 /* fall through */
4379 -
4380 case ELS_RJT_UNSUP:
4381 - if (fc_fcp_debug)
4382 - FC_DBG("device does not support REC\n");
4383 + FC_DEBUG_FCP("device does not support REC\n");
4384 rp = fsp->rport->dd_data;
4385 /*
4386 * if we do not spport RECs or got some bogus
4387 @@ -1636,7 +1636,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
4388 sp = lp->tt.exch_seq_send(lp, fp,
4389 fc_fcp_srr_resp, NULL,
4390 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
4391 - rp->local_port->fid,
4392 + fc_host_port_id(rp->local_port->host),
4393 rport->port_id,
4394 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4395 if (!sp) {
4396 @@ -2199,7 +2199,17 @@ static int __init libfc_init(void)
4397
4398 rc = fc_setup_exch_mgr();
4399 if (rc)
4400 - kmem_cache_destroy(scsi_pkt_cachep);
4401 + goto destroy_pkt_cache;
4402 +
4403 + rc = fc_setup_rport();
4404 + if (rc)
4405 + goto destroy_em;
4406 +
4407 + return rc;
4408 +destroy_em:
4409 + fc_destroy_exch_mgr();
4410 +destroy_pkt_cache:
4411 + kmem_cache_destroy(scsi_pkt_cachep);
4412 return rc;
4413 }
4414
4415 @@ -2207,6 +2217,7 @@ static void __exit libfc_exit(void)
4416 {
4417 kmem_cache_destroy(scsi_pkt_cachep);
4418 fc_destroy_exch_mgr();
4419 + fc_destroy_rport();
4420 }
4421
4422 module_init(libfc_init);
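
fc_fcp_send_data() above now sizes its transfer burst from either seq_blen (sequence offload) or fsp->max_payload, rounded down to the 512-byte block size. Below is a minimal sketch of that selection, not part of the patch; seq_offload and max_payload stand in for lp->seq_offload and fsp->max_payload.

#include <assert.h>
#include <stdio.h>

#define FC_MIN_MAX_PAYLOAD 256

static size_t pick_t_blen(int seq_offload, size_t seq_blen, size_t max_payload)
{
	/* Offload-capable LLDs get the whole burst in one "frame". */
	size_t t_blen = seq_offload ? seq_blen : max_payload;

	assert(t_blen >= FC_MIN_MAX_PAYLOAD);
	if (t_blen > 512)
		t_blen &= ~(size_t)(512 - 1);	/* round down to block size */
	return t_blen;
}

int main(void)
{
	printf("%zu\n", pick_t_blen(1, 65536, 2048));	/* 65536: full burst */
	printf("%zu\n", pick_t_blen(0, 65536, 2112));	/* 2048: rounded payload */
	return 0;
}
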
4423 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
4424 index b1854b9..bfbc7d4 100644
4425 --- a/drivers/scsi/libfc/fc_lport.c
4426 +++ b/drivers/scsi/libfc/fc_lport.c
4427 @@ -18,7 +18,59 @@
4428 */
4429
4430 /*
4431 - * Logical interface support.
4432 + * General locking notes:
4433 + *
4434 + * The lport and rport blocks both have mutexes that are used to protect
4435 + * the port objects states. The main motivation for this protection is that
4436 + * we don't want to be preparing a request/response in one context while
4437 + * another thread "resets" the port in question. For example, if the lport
4438 + * block is sending a SCR request to the directory server, we don't want
4439 + * the lport to be reset before we fill out the frame header's port_id. The
4440 + * problem is that a reset would cause the lport's port_id to reset to 0.
4441 + * If we don't protect the lport we'd spew incorrect frames.
4442 + *
4443 + * At the time of this writing there are two primary mutexes, one for the
4444 + * lport and one for the rport. Since the lport uses the rport and makes
4445 + * calls into that block the rport should never make calls that would cause
4446 + * the lport's mutex to be locked. In other words, the lport's mutex is
4447 + * considered the outer lock and the rport's lock is considered the inner
4448 + * lock. The bottom line is that you can hold a lport's mutex and then
4449 + * hold the rport's mutex, but not the other way around.
4450 + *
4451 + * The only complication to this rule is the callbacks from the rport to
4452 + * the lport's event_callback function. When rports become READY they make
4453 + * a callback to the lport so that it can track them. In the case of the
4454 + * directory server that callback might cause the lport to change its
4455 + * state, implying that the lport mutex would need to be held. This problem
4456 + * was solved by serializing the rport notifications to the lport and making
4457 + * the callback without holding the rport's lock.
4458 + *
4459 + * lport locking notes:
4460 + *
4461 + * The critical sections protected by the lport's mutex are quite broad and
4462 + * may be improved upon in the future. The lport code and its locking don't
4463 + * influence the I/O path, so excessive locking doesn't penalize I/O
4464 + * performance.
4465 + *
4466 + * The strategy is to lock whenever processing a request or response. Note
4467 + * that every _enter_* function corresponds to a state change. They generally
4468 + * change the lport's state and then send a request out on the wire. We lock
4469 + * before calling any of these functions to protect that state change. This
4470 + * means that the entry points into the lport block manage the locks while
4471 + * the state machine transitions between states (i.e. _enter_* functions),
4472 + * always staying protected.
4473 + *
4474 + * When handling responses we also hold the lport mutex broadly. When the
4475 + * lport receives the response frame it locks the mutex and then calls the
4476 + * appropriate handler for the particular response. Generally a response will
4477 + * trigger a state change and so the lock must already be held.
4478 + *
4479 + * Retries also have to consider the locking. The retries occur from a work
4480 + * context and the work function will lock the lport and then retry the state
4481 + * (i.e. _enter_* function).
4482 + *
4483 + * The implication of all this is that each lport can only process one
4484 + * state at a time.
4485 */
4486
4487 #include <linux/timer.h>
4488 @@ -36,6 +88,12 @@
4489
4490 static int fc_lport_debug;
4491
4492 +#define FC_DEBUG_LPORT(fmt...) \
4493 + do { \
4494 + if (fc_lport_debug) \
4495 + FC_DBG(fmt); \
4496 + } while (0)
4497 +
4498 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
4499
4500 static void fc_lport_enter_reset(struct fc_lport *);
4501 @@ -66,41 +124,71 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
4502 }
4503
4504 /**
4505 + * fc_lport_lookup_rport - lookup a remote port by port_id
4506 + * @lport: Fibre Channel host port instance
4507 + * @port_id: remote port port_id to match
4508 + */
4509 +struct fc_rport *fc_lport_lookup_rport(const struct fc_lport *lport,
4510 + u32 port_id)
4511 +{
4512 + struct fc_rport *rport, *found;
4513 + struct fc_rport_libfc_priv *rdata;
4514 +
4515 + found = NULL;
4516 +
4517 + list_for_each_entry(rdata, &lport->rports, peers) {
4518 + rport = PRIV_TO_RPORT(rdata);
4519 + if (rport->port_id == port_id) {
4520 + found = rport;
4521 + get_device(&found->dev);
4522 + break;
4523 + }
4524 + }
4525 + return found;
4526 +}
4527 +
4528 +
4529 +
4530 +/**
4531 * fc_lport_rport_event - Event handler for rport events
4532 * @lport: The lport which is receiving the event
4533 - * @port_id: The FID of the rport which the event has occured on
4534 + * @rport: The rport which the event has occurred on
4535 * @event: The event that occured
4536 *
4537 * Locking Note: The rport lock should not be held when calling
4538 * this function.
4539 */
4540 -static void fc_lport_rport_event(struct fc_lport *lport, u32 port_id,
4541 +static void fc_lport_rport_event(struct fc_lport *lport,
4542 + struct fc_rport *rport,
4543 enum fc_lport_event event)
4544 {
4545 - struct fc_rport *rport = lport->tt.rport_lookup(lport, port_id);
4546 + struct fc_rport_libfc_priv *rdata = rport->dd_data;
4547
4548 - if (fc_lport_debug)
4549 - FC_DBG("Received a %d event for port (%6x)\n", event, port_id);
4550 + FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
4551 + rport->port_id);
4552
4553 - if (port_id == FC_FID_DIR_SERV) {
4554 - mutex_lock(&lport->lp_mutex);
4555 - switch (event) {
4556 - case LPORT_EV_RPORT_CREATED:
4557 - if (rport) {
4558 - lport->dns_rp = rport;
4559 - fc_lport_enter_rpn_id(lport);
4560 - }
4561 - break;
4562 - case LPORT_EV_RPORT_LOGO:
4563 - case LPORT_EV_RPORT_FAILED:
4564 - lport->dns_rp = NULL;
4565 - fc_lport_enter_dns(lport);
4566 - break;
4567 - case LPORT_EV_RPORT_NONE:
4568 - break;
4569 + mutex_lock(&lport->lp_mutex);
4570 + switch (event) {
4571 + case LPORT_EV_RPORT_CREATED:
4572 + if (rport->port_id == FC_FID_DIR_SERV) {
4573 + lport->dns_rp = rport;
4574 + fc_lport_enter_rpn_id(lport);
4575 + } else {
4576 + list_add_tail(&rdata->peers, &lport->rports);
4577 }
4578 - mutex_unlock(&lport->lp_mutex);
4579 + break;
4580 + case LPORT_EV_RPORT_LOGO:
4581 + case LPORT_EV_RPORT_FAILED:
4582 + case LPORT_EV_RPORT_STOP:
4583 + if (rport->port_id == FC_FID_DIR_SERV)
4584 + lport->dns_rp = NULL;
4585 + else
4586 + list_del(&rdata->peers);
4587 + break;
4588 + case LPORT_EV_RPORT_NONE:
4589 + break;
4590 }
4591 + mutex_unlock(&lport->lp_mutex);
4592 }
4593
4594 /**
4595 @@ -118,18 +206,6 @@ static const char *fc_lport_state(struct fc_lport *lport)
4596 }
4597
4598 /**
4599 - * fc_lport_ptp_clear - Delete the ptp rport
4600 - * @lport: The lport whose ptp rport should be removed
4601 - */
4602 -static void fc_lport_ptp_clear(struct fc_lport *lport)
4603 -{
4604 - if (lport->ptp_rp) {
4605 - fc_remote_port_delete(lport->ptp_rp);
4606 - lport->ptp_rp = NULL;
4607 - }
4608 -}
4609 -
4610 -/**
4611 * fc_lport_ptp_setup - Create an rport for point-to-point mode
4612 * @lport: The lport to attach the ptp rport to
4613 * @fid: The FID of the ptp rport
4614 @@ -148,19 +224,25 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
4615 dp.ids.node_name = remote_wwnn;
4616 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
4617
4618 - fc_lport_ptp_clear(lport);
4619 + if (lport->ptp_rp) {
4620 + lport->tt.rport_stop(lport->ptp_rp);
4621 + lport->ptp_rp = NULL;
4622 + }
4623
4624 - lport->ptp_rp = fc_rport_dummy_create(&dp);
4625 + lport->ptp_rp = fc_rport_rogue_create(&dp);
4626
4627 lport->tt.rport_login(lport->ptp_rp);
4628
4629 fc_lport_enter_ready(lport);
4630 }
4631
4632 -/**
4633 - * fc_get_host_port_state - supports fc_function_template
4634 - * @shost: The host whose port state should be returned
4635 - */
4636 +void fc_get_host_port_type(struct Scsi_Host *shost)
4637 +{
4638 + /* TODO - currently just NPORT */
4639 + fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
4640 +}
4641 +EXPORT_SYMBOL(fc_get_host_port_type);
4642 +
4643 void fc_get_host_port_state(struct Scsi_Host *shost)
4644 {
4645 struct fc_lport *lp = shost_priv(shost);
4646 @@ -277,8 +359,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
4647 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
4648 struct fc_lport *lport)
4649 {
4650 - if (fc_lport_debug)
4651 - FC_DBG("Received RLIR request while in state %s\n",
4652 + FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
4653 fc_lport_state(lport));
4654
4655 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
4656 @@ -303,8 +384,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
4657 void *dp;
4658 u32 f_ctl;
4659
4660 - if (fc_lport_debug)
4661 - FC_DBG("Received RLIR request while in state %s\n",
4662 + FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
4663 fc_lport_state(lport));
4664
4665 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
4666 @@ -350,8 +430,7 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
4667 size_t len;
4668 u32 f_ctl;
4669
4670 - if (fc_lport_debug)
4671 - FC_DBG("Received RNID request while in state %s\n",
4672 + FC_DEBUG_LPORT("Received RNID request while in state %s\n",
4673 fc_lport_state(lport));
4674
4675 req = fc_frame_payload_get(in_fp, sizeof(*req));
4676 @@ -520,12 +599,10 @@ EXPORT_SYMBOL(fc_fabric_logoff);
4677 **/
4678 int fc_lport_destroy(struct fc_lport *lport)
4679 {
4680 - mutex_lock(&lport->lp_mutex);
4681 cancel_delayed_work_sync(&lport->disc_work);
4682 lport->tt.scsi_abort_io(lport);
4683 lport->tt.frame_send = fc_frame_drop;
4684 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
4685 - mutex_unlock(&lport->lp_mutex);
4686 return 0;
4687 }
4688 EXPORT_SYMBOL(fc_lport_destroy);
4689 @@ -569,9 +646,8 @@ EXPORT_SYMBOL(fc_set_mfs);
4690 */
4691 static void fc_lport_enter_ready(struct fc_lport *lport)
4692 {
4693 - if (fc_lport_debug)
4694 - FC_DBG("Port (%6x) entered Ready from state %s\n",
4695 - lport->fid, fc_lport_state(lport));
4696 + FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
4697 + fc_host_port_id(lport->host), fc_lport_state(lport));
4698
4699 fc_lport_state_enter(lport, LPORT_ST_READY);
4700
4701 @@ -605,8 +681,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
4702 u32 local_fid;
4703 u32 f_ctl;
4704
4705 - if (fc_lport_debug)
4706 - FC_DBG("Received FLOGI request while in state %s\n",
4707 + FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
4708 fc_lport_state(lport));
4709
4710 fh = fc_frame_header_get(rx_fp);
4711 @@ -636,7 +711,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
4712 remote_fid = FC_LOCAL_PTP_FID_HI;
4713 }
4714
4715 - lport->fid = local_fid;
4716 + fc_host_port_id(lport->host) = local_fid;
4717
4718 fp = fc_frame_alloc(lport, sizeof(*flp));
4719 if (fp) {
4720 @@ -733,7 +808,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
4721 s_id = ntoh24(fh->fh_s_id);
4722 d_id = ntoh24(fh->fh_d_id);
4723
4724 - rport = lport->tt.rport_lookup(lport, s_id);
4725 + rport = fc_lport_lookup_rport(lport, s_id);
4726 if (rport) {
4727 lport->tt.rport_recv_req(sp, fp, rport);
4728 put_device(&rport->dev); /* hold from lookup */
4729 @@ -752,6 +827,12 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
4730 fc_frame_free(fp);
4731 }
4732 mutex_unlock(&lport->lp_mutex);
4733 +
4734 + /*
4735 + * The common exch_done for all requests may not be good
4736 + * if any request requires a longer hold on the exchange. XXX
4737 + */
4738 + lport->tt.exch_done(sp);
4739 }
4740
4741 /**
4742 @@ -771,6 +852,24 @@ int fc_lport_reset(struct fc_lport *lport)
4743 EXPORT_SYMBOL(fc_lport_reset);
4744
4745 /**
4746 + * fc_lport_stop_rports - delete all the remote ports associated with the lport
4747 + * @lport: libfc local port instance
4748 + *
4749 + * Locking Note: This function expects that the lport mutex is locked before
4750 + * calling it.
4751 + */
4752 +void fc_lport_stop_rports(struct fc_lport *lport)
4753 +{
4754 + struct fc_rport *rport;
4755 + struct fc_rport_libfc_priv *rdata;
4756 +
4757 + list_for_each_entry(rdata, &lport->rports, peers) {
4758 + rport = PRIV_TO_RPORT(rdata);
4759 + lport->tt.rport_stop(rport);
4760 + }
4761 +}
4762 +
4763 +/**
4764 * fc_rport_enter_reset - Reset the local port
4765 * @lport: Fibre Channel local port to be reset
4766 *
4767 @@ -779,24 +878,26 @@ EXPORT_SYMBOL(fc_lport_reset);
4768 */
4769 static void fc_lport_enter_reset(struct fc_lport *lport)
4770 {
4771 - if (fc_lport_debug)
4772 - FC_DBG("Port (%6x) entered RESET state from %s state\n",
4773 - lport->fid, fc_lport_state(lport));
4774 + FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
4775 + fc_host_port_id(lport->host), fc_lport_state(lport));
4776
4777 fc_lport_state_enter(lport, LPORT_ST_RESET);
4778
4779 if (lport->dns_rp) {
4780 - fc_remote_port_delete(lport->dns_rp);
4781 + lport->tt.rport_stop(lport->dns_rp);
4782 lport->dns_rp = NULL;
4783 }
4784 - fc_lport_ptp_clear(lport);
4785
4786 - fc_block_rports(lport);
4787 + if (lport->ptp_rp) {
4788 + lport->tt.rport_stop(lport->ptp_rp);
4789 + lport->ptp_rp = NULL;
4790 + }
4791 +
4792 + fc_lport_stop_rports(lport);
4793
4794 - lport->tt.rport_reset_list(lport);
4795 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
4796 fc_host_fabric_name(lport->host) = 0;
4797 - lport->fid = 0;
4798 + fc_host_port_id(lport->host) = 0;
4799
4800 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
4801 fc_lport_enter_flogi(lport);
4802 @@ -814,33 +915,38 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
4803 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
4804 {
4805 unsigned long delay = 0;
4806 - if (fc_lport_debug)
4807 - FC_DBG("Error %ld in state %s, retries %d\n",
4808 + FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
4809 PTR_ERR(fp), fc_lport_state(lport),
4810 lport->retry_count);
4811
4812 - if (lport->retry_count < lport->max_retry_count) {
4813 - lport->retry_count++;
4814 - if (!fp)
4815 - delay = msecs_to_jiffies(500);
4816 - else
4817 - delay = jiffies +
4818 - msecs_to_jiffies(lport->e_d_tov);
4819 -
4820 - schedule_delayed_work(&lport->retry_work, delay);
4821 - } else {
4822 - switch (lport->state) {
4823 - case LPORT_ST_NONE:
4824 - case LPORT_ST_READY:
4825 - case LPORT_ST_RESET:
4826 - case LPORT_ST_RPN_ID:
4827 - case LPORT_ST_RFT_ID:
4828 - case LPORT_ST_SCR:
4829 - case LPORT_ST_DNS:
4830 - case LPORT_ST_FLOGI:
4831 - case LPORT_ST_LOGO:
4832 - fc_lport_enter_reset(lport);
4833 - break;
4834 + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
4835 + /*
4836 + * Memory allocation failure, or the exchange timed out.
4837 + * Retry after delay
4838 + */
4839 + if (lport->retry_count < lport->max_retry_count) {
4840 + lport->retry_count++;
4841 + if (!fp)
4842 + delay = msecs_to_jiffies(500);
4843 + else
4844 + delay = jiffies +
4845 + msecs_to_jiffies(lport->e_d_tov);
4846 +
4847 + schedule_delayed_work(&lport->retry_work, delay);
4848 + } else {
4849 + switch (lport->state) {
4850 + case LPORT_ST_NONE:
4851 + case LPORT_ST_READY:
4852 + case LPORT_ST_RESET:
4853 + case LPORT_ST_RPN_ID:
4854 + case LPORT_ST_RFT_ID:
4855 + case LPORT_ST_SCR:
4856 + case LPORT_ST_DNS:
4857 + case LPORT_ST_FLOGI:
4858 + case LPORT_ST_LOGO:
4859 + fc_lport_enter_reset(lport);
4860 + break;
4861 + }
4862 }
4863 }
4864 }
4865 @@ -865,8 +971,7 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4866
4867 mutex_lock(&lport->lp_mutex);
4868
4869 - if (fc_lport_debug)
4870 - FC_DBG("Received a RFT_ID response\n");
4871 + FC_DEBUG_LPORT("Received a RFT_ID response\n");
4872
4873 if (lport->state != LPORT_ST_RFT_ID) {
4874 FC_DBG("Received a RFT_ID response, but in state %s\n",
4875 @@ -876,7 +981,7 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4876
4877 if (IS_ERR(fp)) {
4878 fc_lport_error(lport, fp);
4879 - goto out;
4880 + goto err;
4881 }
4882
4883 fh = fc_frame_header_get(fp);
4884 @@ -890,8 +995,9 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4885 else
4886 fc_lport_error(lport, fp);
4887 out:
4888 - mutex_unlock(&lport->lp_mutex);
4889 fc_frame_free(fp);
4890 +err:
4891 + mutex_unlock(&lport->lp_mutex);
4892 }
4893
4894 /**
4895 @@ -914,8 +1020,7 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4896
4897 mutex_lock(&lport->lp_mutex);
4898
4899 - if (fc_lport_debug)
4900 - FC_DBG("Received a RPN_ID response\n");
4901 + FC_DEBUG_LPORT("Received a RPN_ID response\n");
4902
4903 if (lport->state != LPORT_ST_RPN_ID) {
4904 FC_DBG("Received a RPN_ID response, but in state %s\n",
4905 @@ -925,7 +1030,7 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4906
4907 if (IS_ERR(fp)) {
4908 fc_lport_error(lport, fp);
4909 - goto out;
4910 + goto err;
4911 }
4912
4913 fh = fc_frame_header_get(fp);
4914 @@ -939,8 +1044,9 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4915 fc_lport_error(lport, fp);
4916
4917 out:
4918 - mutex_unlock(&lport->lp_mutex);
4919 fc_frame_free(fp);
4920 +err:
4921 + mutex_unlock(&lport->lp_mutex);
4922 }
4923
4924 /**
4925 @@ -961,8 +1067,7 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
4926
4927 mutex_lock(&lport->lp_mutex);
4928
4929 - if (fc_lport_debug)
4930 - FC_DBG("Received a SCR response\n");
4931 + FC_DEBUG_LPORT("Received a SCR response\n");
4932
4933 if (lport->state != LPORT_ST_SCR) {
4934 FC_DBG("Received a SCR response, but in state %s\n",
4935 @@ -972,7 +1077,7 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
4936
4937 if (IS_ERR(fp)) {
4938 fc_lport_error(lport, fp);
4939 - goto out;
4940 + goto err;
4941 }
4942
4943 op = fc_frame_payload_op(fp);
4944 @@ -982,8 +1087,9 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
4945 fc_lport_error(lport, fp);
4946
4947 out:
4948 - mutex_unlock(&lport->lp_mutex);
4949 fc_frame_free(fp);
4950 +err:
4951 + mutex_unlock(&lport->lp_mutex);
4952 }
4953
4954 /**
4955 @@ -998,9 +1104,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
4956 struct fc_frame *fp;
4957 struct fc_els_scr *scr;
4958
4959 - if (fc_lport_debug)
4960 - FC_DBG("Port (%6x) entered SCR state from %s state\n",
4961 - lport->fid, fc_lport_state(lport));
4962 + FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
4963 + fc_host_port_id(lport->host), fc_lport_state(lport));
4964
4965 fc_lport_state_enter(lport, LPORT_ST_SCR);
4966
4967 @@ -1020,7 +1125,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
4968 if (!lport->tt.exch_seq_send(lport, fp,
4969 fc_lport_scr_resp, NULL,
4970 lport, lport->e_d_tov,
4971 - lport->fid, FC_FID_FCTRL,
4972 + fc_host_port_id(lport->host),
4973 + FC_FID_FCTRL,
4974 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
4975 fc_lport_error(lport, fp);
4976 }
4977 @@ -1043,9 +1149,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
4978 struct fc_ns_fts *lps;
4979 int i;
4980
4981 - if (fc_lport_debug)
4982 - FC_DBG("Port (%6x) entered RFT_ID state from %s state\n",
4983 - lport->fid, fc_lport_state(lport));
4984 + FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
4985 + fc_host_port_id(lport->host), fc_lport_state(lport));
4986
4987 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
4988
4989 @@ -1069,14 +1174,14 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
4990 FC_NS_RFT_ID,
4991 sizeof(*req) -
4992 sizeof(struct fc_ct_hdr));
4993 - hton24(req->fid.fp_fid, lport->fid);
4994 + hton24(req->fid.fp_fid, fc_host_port_id(lport->host));
4995 req->fts = *lps;
4996 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
4997
4998 if (!lport->tt.exch_seq_send(lport, fp,
4999 fc_lport_rft_id_resp, NULL,
5000 lport, lport->e_d_tov,
5001 - lport->fid,
5002 + fc_host_port_id(lport->host),
5003 FC_FID_DIR_SERV,
5004 FC_FC_SEQ_INIT |
5005 FC_FC_END_SEQ))
5006 @@ -1099,9 +1204,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
5007 struct fc_ns_rn_id rn;
5008 } *req;
5009
5010 - if (fc_lport_debug)
5011 - FC_DBG("Port (%6x) entered RPN_ID state from %s state\n",
5012 - lport->fid, fc_lport_state(lport));
5013 + FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
5014 + fc_host_port_id(lport->host), fc_lport_state(lport));
5015
5016 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
5017
5018 @@ -1114,14 +1218,14 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
5019 req = fc_frame_payload_get(fp, sizeof(*req));
5020 memset(req, 0, sizeof(*req));
5021 fc_fill_dns_hdr(lport, &req->ct, FC_NS_RPN_ID, sizeof(req->rn));
5022 - hton24(req->rn.fr_fid.fp_fid, lport->fid);
5023 + hton24(req->rn.fr_fid.fp_fid, fc_host_port_id(lport->host));
5024 put_unaligned_be64(lport->wwpn, &req->rn.fr_wwn);
5025 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
5026
5027 if (!lport->tt.exch_seq_send(lport, fp,
5028 fc_lport_rpn_id_resp, NULL,
5029 lport, lport->e_d_tov,
5030 - lport->fid,
5031 + fc_host_port_id(lport->host),
5032 FC_FID_DIR_SERV,
5033 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5034 fc_lport_error(lport, fp);
5035 @@ -1147,20 +1251,18 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
5036 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
5037 dp.lp = lport;
5038
5039 - if (fc_lport_debug)
5040 - FC_DBG("Port (%6x) entered DNS state from %s state\n",
5041 - lport->fid, fc_lport_state(lport));
5042 + FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
5043 + fc_host_port_id(lport->host), fc_lport_state(lport));
5044
5045 fc_lport_state_enter(lport, LPORT_ST_DNS);
5046
5047 if (!lport->dns_rp) {
5048 - /* Set up a dummy rport to directory server */
5049 - rport = fc_rport_dummy_create(&dp);
5050 + /* Set up a rogue rport to directory server */
5051 + rport = fc_rport_rogue_create(&dp);
5052
5053 if (!rport)
5054 goto err;
5055 lport->dns_rp = rport;
5056 - FC_DBG("created an rport for the NS\n");
5057 }
5058
5059 rport = lport->dns_rp;
5060 @@ -1232,8 +1334,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5061
5062 mutex_lock(&lport->lp_mutex);
5063
5064 - if (fc_lport_debug)
5065 - FC_DBG("Received a LOGO response\n");
5066 + FC_DEBUG_LPORT("Received a LOGO response\n");
5067
5068 if (lport->state != LPORT_ST_LOGO) {
5069 FC_DBG("Received a LOGO response, but in state %s\n",
5070 @@ -1243,7 +1344,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5071
5072 if (IS_ERR(fp)) {
5073 fc_lport_error(lport, fp);
5074 - goto out;
5075 + goto err;
5076 }
5077
5078 op = fc_frame_payload_op(fp);
5079 @@ -1253,8 +1354,9 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5080 fc_lport_error(lport, fp);
5081
5082 out:
5083 - mutex_unlock(&lport->lp_mutex);
5084 fc_frame_free(fp);
5085 +err:
5086 + mutex_unlock(&lport->lp_mutex);
5087 }
5088
5089 /**
5090 @@ -1269,15 +1371,14 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5091 struct fc_frame *fp;
5092 struct fc_els_logo *logo;
5093
5094 - if (fc_lport_debug)
5095 - FC_DBG("Port (%6x) entered LOGO state from %s state\n",
5096 - lport->fid, fc_lport_state(lport));
5097 + FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
5098 + fc_host_port_id(lport->host), fc_lport_state(lport));
5099
5100 fc_lport_state_enter(lport, LPORT_ST_LOGO);
5101
5102 /* DNS session should be closed so we can release it here */
5103 if (lport->dns_rp) {
5104 - fc_remote_port_delete(lport->dns_rp);
5105 + lport->tt.rport_logout(lport->dns_rp);
5106 lport->dns_rp = NULL;
5107 }
5108
5109 @@ -1290,7 +1391,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5110 logo = fc_frame_payload_get(fp, sizeof(*logo));
5111 memset(logo, 0, sizeof(*logo));
5112 logo->fl_cmd = ELS_LOGO;
5113 - hton24(logo->fl_n_port_id, lport->fid);
5114 + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
5115 logo->fl_n_port_wwn = htonll(lport->wwpn);
5116 fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5117 fc_frame_set_offset(fp, 0);
5118 @@ -1298,7 +1399,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5119 if (!lport->tt.exch_seq_send(lport, fp,
5120 fc_lport_logo_resp, NULL,
5121 lport, lport->e_d_tov,
5122 - lport->fid, FC_FID_FLOGI,
5123 + fc_host_port_id(lport->host), FC_FID_FLOGI,
5124 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5125 fc_lport_error(lport, fp);
5126 }
5127 @@ -1327,8 +1428,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5128
5129 mutex_lock(&lport->lp_mutex);
5130
5131 - if (fc_lport_debug)
5132 - FC_DBG("Received a FLOGI response\n");
5133 + FC_DEBUG_LPORT("Received a FLOGI response\n");
5134
5135 if (lport->state != LPORT_ST_FLOGI) {
5136 FC_DBG("Received a FLOGI response, but in state %s\n",
5137 @@ -1338,16 +1438,16 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5138
5139 if (IS_ERR(fp)) {
5140 fc_lport_error(lport, fp);
5141 - goto out;
5142 + goto err;
5143 }
5144
5145 fh = fc_frame_header_get(fp);
5146 did = ntoh24(fh->fh_d_id);
5147 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
5148 - if (fc_lport_debug)
5149 - FC_DBG("Assigned fid %x\n", did);
5150
5151 - lport->fid = did;
5152 + FC_DEBUG_LPORT("Assigned fid %x\n", did);
5153 + fc_host_port_id(lport->host) = did;
5154 +
5155 flp = fc_frame_payload_get(fp, sizeof(*flp));
5156 if (flp) {
5157 mfs = ntohs(flp->fl_csp.sp_bb_data) &
5158 @@ -1391,8 +1491,9 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5159 }
5160
5161 out:
5162 - mutex_unlock(&lport->lp_mutex);
5163 fc_frame_free(fp);
5164 +err:
5165 + mutex_unlock(&lport->lp_mutex);
5166 }
5167
5168 /**
5169 @@ -1407,8 +1508,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
5170 struct fc_frame *fp;
5171 struct fc_els_flogi *flp;
5172
5173 - if (fc_lport_debug)
5174 - FC_DBG("Processing FLOGI state\n");
5175 + FC_DEBUG_LPORT("Processing FLOGI state\n");
5176
5177 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
5178
5179 @@ -1436,6 +1536,7 @@ int fc_lport_config(struct fc_lport *lport)
5180 {
5181 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
5182 mutex_init(&lport->lp_mutex);
5183 + INIT_LIST_HEAD(&lport->rports);
5184
5185 fc_lport_state_enter(lport, LPORT_ST_NONE);
5186
5187 @@ -1456,6 +1557,9 @@ int fc_lport_init(struct fc_lport *lport)
5188 if (!lport->tt.lport_reset)
5189 lport->tt.lport_reset = fc_lport_reset;
5190
5191 + if (!lport->tt.rport_lookup)
5192 + lport->tt.rport_lookup = fc_lport_lookup_rport;
5193 +
5194 if (!lport->tt.event_callback)
5195 lport->tt.event_callback = fc_lport_rport_event;
5196
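
The locking notes added to fc_lport.c above define the lport mutex as the outer lock and the rport mutex as the inner lock, with rport-to-lport callbacks made after dropping the rport lock. Below is a minimal userspace sketch of that ordering using pthread mutexes, not part of the patch; the names and bodies are placeholders, not libfc code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lp_mutex = PTHREAD_MUTEX_INITIALIZER;	/* outer lock */
static pthread_mutex_t rp_mutex = PTHREAD_MUTEX_INITIALIZER;	/* inner lock */

/* lport path: may take the rport lock while holding its own (outer -> inner). */
static void lport_uses_rport(void)
{
	pthread_mutex_lock(&lp_mutex);
	pthread_mutex_lock(&rp_mutex);
	puts("lport -> rport: allowed nesting");
	pthread_mutex_unlock(&rp_mutex);
	pthread_mutex_unlock(&lp_mutex);
}

/* rport event path: drop the rport lock before calling back into the lport. */
static void rport_notifies_lport(void)
{
	pthread_mutex_lock(&rp_mutex);
	/* ... update rport state ... */
	pthread_mutex_unlock(&rp_mutex);

	pthread_mutex_lock(&lp_mutex);	/* the callback may take the lport lock */
	puts("rport callback made without holding rp_mutex");
	pthread_mutex_unlock(&lp_mutex);
}

int main(void)
{
	lport_uses_rport();
	rport_notifies_lport();
	return 0;
}
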
5197 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
5198 index 107b304..651a3ed 100644
5199 --- a/drivers/scsi/libfc/fc_rport.c
5200 +++ b/drivers/scsi/libfc/fc_rport.c
5201 @@ -18,10 +18,27 @@
5202 */
5203
5204 /*
5205 - * Remote Port support.
5206 + * This file contains all processing regarding fc_rports. It contains the
5207 + * rport state machine and does all rport interaction with the transport class.
5208 + * There should be no other places in libfc that interact directly with the
5209 + * transport class with regard to adding and deleting rports.
5210 *
5211 - * A remote port structure contains information about an N port to which we
5212 - * will create sessions.
5213 + * fc_rports represent N_Ports within the fabric.
5214 + *
5215 + * rport locking notes:
5216 + *
5217 + * The rport should never hold the rport mutex and then lock the lport
5218 + * mutex. The rport's mutex is considered lesser than the lport's mutex, so
5219 + * the lport mutex can be held before locking the rport mutex, but not the
5220 + * other way around. See the comment block at the top of fc_lport.c for more
5221 + * details.
5222 + *
5223 + * The locking strategy is similar to the lport's strategy. The lock protects
5224 + * the rport's states and is held and released by the entry points to the rport
5225 + * block. All _enter_* functions correspond to rport states and expect the rport
5226 + * mutex to be locked before calling them. This means that rports only handle one
5227 + * request or response at a time; since they're not critical for the I/O path,
5228 + * this potential over-use of the mutex is acceptable.
5229 */
5230
5231 #include <linux/kernel.h>
5232 @@ -34,7 +51,15 @@
5233
5234 #include <scsi/libfc/libfc.h>
5235
5236 -static int fc_rp_debug;
5237 +static int fc_rport_debug;
5238 +
5239 +#define FC_DEBUG_RPORT(fmt...) \
5240 + do { \
5241 + if (fc_rport_debug) \
5242 + FC_DBG(fmt); \
5243 + } while (0)
5244 +
5245 +static struct workqueue_struct *rport_event_queue;
5246
5247 static void fc_rport_enter_plogi(struct fc_rport *);
5248 static void fc_rport_enter_prli(struct fc_rport *);
5249 @@ -52,6 +77,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *,
5250 struct fc_seq *, struct fc_frame *);
5251 static void fc_rport_timeout(struct work_struct *);
5252 static void fc_rport_error(struct fc_rport *, struct fc_frame *);
5253 +static void fc_rport_work(struct work_struct *);
5254
5255 static const char *fc_rport_state_names[] = {
5256 [RPORT_ST_NONE] = "None",
5257 @@ -63,7 +89,7 @@ static const char *fc_rport_state_names[] = {
5258 [RPORT_ST_LOGO] = "LOGO",
5259 };
5260
5261 -struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *dp)
5262 +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
5263 {
5264 struct fc_rport *rport;
5265 struct fc_rport_libfc_priv *rdata;
5266 @@ -91,11 +117,17 @@ struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *dp)
5267 rdata->e_d_tov = dp->lp->e_d_tov;
5268 rdata->r_a_tov = dp->lp->r_a_tov;
5269 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
5270 + INIT_WORK(&rdata->event_work, fc_rport_work);
5271 + /*
5272 + * For good measure, but not necessary as we should only
5273 + * add REAL rports to the lport list.
5274 + */
5275 + INIT_LIST_HEAD(&rdata->peers);
5276
5277 return rport;
5278 }
5279
5280 -void fc_rport_dummy_destroy(struct fc_rport *rport)
5281 +void fc_rport_rogue_destroy(struct fc_rport *rport)
5282 {
5283 kfree(rport);
5284 }
5285 @@ -116,30 +148,6 @@ static const char *fc_rport_state(struct fc_rport *rport)
5286 }
5287
5288 /**
5289 - * fc_rport_lookup - lookup a remote port by port_id
5290 - * @lp: Fibre Channel host port instance
5291 - * @fid: remote port port_id to match
5292 - */
5293 -struct fc_rport *fc_rport_lookup(const struct fc_lport *lp, u32 fid)
5294 -{
5295 - struct Scsi_Host *shost = lp->host;
5296 - struct fc_rport *rport, *found;
5297 - unsigned long flags;
5298 -
5299 - found = NULL;
5300 - spin_lock_irqsave(shost->host_lock, flags);
5301 - list_for_each_entry(rport, &fc_host_rports(shost), peers)
5302 - if (rport->port_id == fid &&
5303 - rport->port_state == FC_PORTSTATE_ONLINE) {
5304 - found = rport;
5305 - get_device(&found->dev);
5306 - break;
5307 - }
5308 - spin_unlock_irqrestore(shost->host_lock, flags);
5309 - return found;
5310 -}
5311 -
5312 -/**
5313 * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
5314 * @rport: Pointer to Fibre Channel remote port structure
5315 * @timeout: timeout in seconds
5316 @@ -229,15 +237,20 @@ static void fc_rport_state_enter(struct fc_rport *rport,
5317 rdata->rp_state = new;
5318 }
5319
5320 -static void fc_rport_unlock(struct fc_rport *rport)
5321 +static void fc_rport_work(struct work_struct *work)
5322 {
5323 - struct fc_rport_libfc_priv *rdata = rport->dd_data;
5324 - enum fc_lport_event event = rdata->event;
5325 + struct fc_rport_libfc_priv *rdata =
5326 + container_of(work, struct fc_rport_libfc_priv, event_work);
5327 + enum fc_lport_event event;
5328 + enum fc_rport_trans_state trans_state;
5329 struct fc_lport *lport = rdata->local_port;
5330 - u32 fid = rport->port_id;
5331 - void (*event_callback)(struct fc_lport *, u32,
5332 - enum fc_lport_event) =
5333 - rdata->event_callback;
5334 + void (*event_callback)(struct fc_lport *, struct fc_rport *,
5335 + enum fc_lport_event);
5336 + struct fc_rport *rport = PRIV_TO_RPORT(rdata);
5337 +
5338 + mutex_lock(&rdata->rp_mutex);
5339 + event = rdata->event;
5340 + event_callback = rdata->event_callback;
5341
5342 if (event == LPORT_EV_RPORT_CREATED) {
5343 struct fc_rport *new_rport;
5344 @@ -249,10 +262,12 @@ static void fc_rport_unlock(struct fc_rport *rport)
5345 ids.port_name = rport->port_name;
5346 ids.node_name = rport->node_name;
5347
5348 + mutex_unlock(&rdata->rp_mutex);
5349 +
5350 new_rport = fc_remote_port_add(lport->host, 0, &ids);
5351 if (new_rport) {
5352 /*
5353 - * Switch from the dummy rport to the rport
5354 + * Switch from the rogue rport to the rport
5355 * returned by the FC class.
5356 */
5357 new_rport->maxframe_size = rport->maxframe_size;
5358 @@ -267,36 +282,32 @@ static void fc_rport_unlock(struct fc_rport *rport)
5359 mutex_init(&new_rdata->rp_mutex);
5360 INIT_DELAYED_WORK(&new_rdata->retry_work,
5361 fc_rport_timeout);
5362 + INIT_LIST_HEAD(&new_rdata->peers);
5363 + INIT_WORK(&new_rdata->event_work, fc_rport_work);
5364
5365 fc_rport_state_enter(new_rport, RPORT_ST_READY);
5366 - fc_remote_port_rolechg(new_rport, rdata->roles);
5367 } else {
5368 FC_DBG("Failed to create the rport for port "
5369 "(%6x).\n", ids.port_id);
5370 event = LPORT_EV_RPORT_FAILED;
5371 }
5372 -
5373 - mutex_unlock(&rdata->rp_mutex);
5374 - fc_rport_dummy_destroy(rport);
5375 + fc_rport_rogue_destroy(rport);
5376 rport = new_rport;
5377 rdata = new_rport->dd_data;
5378 + event_callback(lport, rport, event);
5379 } else if ((event == LPORT_EV_RPORT_FAILED) ||
5380 - (event == LPORT_EV_RPORT_LOGO)) {
5381 - if (rdata->trans_state == FC_PORTSTATE_ROGUE) {
5382 - mutex_unlock(&rdata->rp_mutex);
5383 - fc_rport_dummy_destroy(rport);
5384 - } else {
5385 - mutex_unlock(&rdata->rp_mutex);
5386 + (event == LPORT_EV_RPORT_LOGO) ||
5387 + (event == LPORT_EV_RPORT_STOP)) {
5388 +
5389 + trans_state = rdata->trans_state;
5390 + mutex_unlock(&rdata->rp_mutex);
5391 + event_callback(lport, rport, event);
5392 + if (trans_state == FC_PORTSTATE_ROGUE)
5393 + fc_rport_rogue_destroy(rport);
5394 + else
5395 fc_remote_port_delete(rport);
5396 - }
5397 - } else {
5398 + } else
5399 mutex_unlock(&rdata->rp_mutex);
5400 - }
5401 -
5402 - if (event != LPORT_EV_RPORT_NONE && event_callback) {
5403 - event_callback(lport, fid, event);
5404 - rdata->event = LPORT_EV_RPORT_NONE;
5405 - }
5406 }
5407
5408 /**
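fc_rport_unlock() used to invoke the lport's event callback directly from whichever context dropped the lock; the replacement fc_rport_work() only snapshots rdata->event and event_callback under rp_mutex, drops the mutex, and then calls into the FC transport (fc_remote_port_add()/fc_remote_port_delete()) from the rport_event_queue worker. A generic, hedged sketch of that record-under-lock, act-in-worker pattern (struct my_rport and friends are placeholder names):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

enum my_event { MY_EV_NONE, MY_EV_CREATED, MY_EV_FAILED };

struct my_rport {
        struct mutex            lock;
        enum my_event           event;
        struct work_struct      event_work;
};

static struct workqueue_struct *my_event_wq;    /* cf. rport_event_queue */

static void my_rport_work(struct work_struct *work)
{
        struct my_rport *rp = container_of(work, struct my_rport, event_work);
        enum my_event ev;

        mutex_lock(&rp->lock);
        ev = rp->event;                 /* snapshot the posted event...     */
        mutex_unlock(&rp->lock);        /* ...and drop the lock before
                                         * calling out, as fc_rport_work()
                                         * does around fc_remote_port_add() */
        if (ev == MY_EV_CREATED)
                pr_info("remote port is ready\n");
}

static void my_rport_init(struct my_rport *rp)
{
        mutex_init(&rp->lock);
        INIT_WORK(&rp->event_work, my_rport_work); /* cf. fc_rport_rogue_create() */
}

static void my_rport_post_event(struct my_rport *rp, enum my_event ev)
{
        mutex_lock(&rp->lock);
        rp->event = ev;                            /* record under the lock */
        queue_work(my_event_wq, &rp->event_work);  /* defer the heavy work  */
        mutex_unlock(&rp->lock);
}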
5409 @@ -313,12 +324,11 @@ int fc_rport_login(struct fc_rport *rport)
5410
5411 mutex_lock(&rdata->rp_mutex);
5412
5413 - if (fc_rp_debug)
5414 - FC_DBG("Login to port (%6x)\n", rport->port_id);
5415 + FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
5416
5417 fc_rport_enter_plogi(rport);
5418
5419 - fc_rport_unlock(rport);
5420 + mutex_unlock(&rdata->rp_mutex);
5421
5422 return 0;
5423 }
5424 @@ -337,57 +347,37 @@ int fc_rport_logout(struct fc_rport *rport)
5425
5426 mutex_lock(&rdata->rp_mutex);
5427
5428 - if (fc_rp_debug)
5429 - FC_DBG("Logout of port (%6x)\n", rport->port_id);
5430 + FC_DEBUG_RPORT("Logout of port (%6x)\n", rport->port_id);
5431
5432 fc_rport_enter_logo(rport);
5433 - fc_rport_unlock(rport);
5434 +
5435 + mutex_unlock(&rdata->rp_mutex);
5436
5437 return 0;
5438 }
5439
5440 /**
5441 - * fc_rport_reset - Reset the remote port
5442 - * @rport: Fibre Channel remote port
5443 - *
5444 - * XXX - This functionality is currently broken
5445 + * fc_rport_stop - Stop and remove an rport
5446 + * @rport: Fibre Channel remote port to be removed
5447 *
5448 * Locking Note: Called without the rport lock held. This
5449 * function will hold the rport lock, call an _enter_*
5450 * function and then unlock the rport.
5451 */
5452 -void fc_rport_reset(struct fc_rport *rport)
5453 +int fc_rport_stop(struct fc_rport *rport)
5454 {
5455 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5456
5457 mutex_lock(&rdata->rp_mutex);
5458
5459 - if (fc_rp_debug)
5460 - FC_DBG("Reset port (%6x)\n", rport->port_id);
5461 + FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
5462
5463 - fc_rport_enter_plogi(rport);
5464 -
5465 - fc_rport_unlock(rport);
5466 -}
5467 + rdata->event = LPORT_EV_RPORT_STOP;
5468 + queue_work(rport_event_queue, &rdata->event_work);
5469
5470 -/**
5471 - * fc_rport_reset_list - Reset all sessions for a local port session list.
5472 - * @lport: The lport whose rports should be reset
5473 - *
5474 - * Locking Note: TBD
5475 - */
5476 -void fc_rport_reset_list(struct fc_lport *lport)
5477 -{
5478 - struct Scsi_Host *shost = lport->host;
5479 - struct fc_rport *rport;
5480 - struct fc_rport *next;
5481 - unsigned long flags;
5482 + mutex_unlock(&rdata->rp_mutex);
5483
5484 - spin_lock_irqsave(shost->host_lock, flags);
5485 - list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
5486 - lport->tt.rport_reset(rport);
5487 - }
5488 - spin_unlock_irqrestore(shost->host_lock, flags);
5489 + return 0;
5490 }
5491
5492 /**
5493 @@ -403,10 +393,10 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
5494
5495 fc_rport_state_enter(rport, RPORT_ST_READY);
5496
5497 - if (fc_rp_debug)
5498 - FC_DBG("Port (%6x) is Ready\n", rport->port_id);
5499 + FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
5500
5501 rdata->event = LPORT_EV_RPORT_CREATED;
5502 + queue_work(rport_event_queue, &rdata->event_work);
5503 }
5504
5505 /**
5506 @@ -447,7 +437,7 @@ static void fc_rport_timeout(struct work_struct *work)
5507 }
5508 put_device(&rport->dev);
5509
5510 - fc_rport_unlock(rport);
5511 + mutex_unlock(&rdata->rp_mutex);
5512 }
5513
5514 /**
5515 @@ -467,37 +457,37 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
5516 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5517 unsigned long delay = 0;
5518
5519 - if (fc_rp_debug)
5520 - FC_DBG("Error %ld in state %s, retries %d\n",
5521 + FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
5522 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
5523
5524 - if (rdata->retries < rdata->local_port->max_retry_count) {
5525 - rdata->retries++;
5526 - if (!fp)
5527 - delay = msecs_to_jiffies(500);
5528 - get_device(&rport->dev);
5529 - schedule_delayed_work(&rdata->retry_work, delay);
5530 - } else {
5531 - switch (rdata->rp_state) {
5532 - case RPORT_ST_PLOGI:
5533 - case RPORT_ST_PRLI:
5534 - case RPORT_ST_LOGO:
5535 - if (fc_rp_debug)
5536 - FC_DBG("Remote port (%6x) closed.\n",
5537 - rport->port_id);
5538 -
5539 - fc_remote_port_delete(rport);
5540 -
5541 - rdata->event = LPORT_EV_RPORT_FAILED;
5542 - break;
5543 - case RPORT_ST_RTV:
5544 - fc_rport_enter_ready(rport);
5545 - break;
5546 - case RPORT_ST_NONE:
5547 - case RPORT_ST_READY:
5548 - case RPORT_ST_INIT:
5549 - BUG();
5550 - break;
5551 + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
5552 + /*
5553 + * Memory allocation failure, or the exchange timed out.
5554 + * Retry after delay
5555 + */
5556 + if (rdata->retries < rdata->local_port->max_retry_count) {
5557 + rdata->retries++;
5558 + if (!fp)
5559 + delay = msecs_to_jiffies(500);
5560 + get_device(&rport->dev);
5561 + schedule_delayed_work(&rdata->retry_work, delay);
5562 + } else {
5563 + switch (rdata->rp_state) {
5564 + case RPORT_ST_PLOGI:
5565 + case RPORT_ST_PRLI:
5566 + case RPORT_ST_LOGO:
5567 + rdata->event = LPORT_EV_RPORT_FAILED;
5568 + queue_work(rport_event_queue, &rdata->event_work);
5569 + break;
5570 + case RPORT_ST_RTV:
5571 + fc_rport_enter_ready(rport);
5572 + break;
5573 + case RPORT_ST_NONE:
5574 + case RPORT_ST_READY:
5575 + case RPORT_ST_INIT:
5576 + BUG();
5577 + break;
5578 + }
5579 }
5580 }
5581 }
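In the reworked fc_rport_error(), only a NULL frame (allocation failure) or -FC_EX_TIMEOUT triggers a retry; other errors fall straight through to the state machine. The retry path itself is the usual bounded delayed-work idiom: bump the counter, back off 500 ms when nothing came back at all, and reschedule retry_work until the limit is exhausted. A small hedged sketch of that idiom in isolation (my_retry and retry_limit are illustrative names):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_retry {
        struct delayed_work     retry_work;     /* runs the next attempt */
        unsigned int            retries;
        unsigned int            retry_limit;    /* cf. max_retry_count   */
};

/* Returns true if another attempt was scheduled, false if we gave up. */
static bool my_schedule_retry(struct my_retry *r, bool no_frame)
{
        unsigned long delay = 0;

        if (r->retries >= r->retry_limit)
                return false;           /* caller escalates, e.g. posts
                                         * LPORT_EV_RPORT_FAILED         */

        r->retries++;
        if (no_frame)                   /* nothing came back: wait 500 ms */
                delay = msecs_to_jiffies(500);
        schedule_delayed_work(&r->retry_work, delay);
        return true;
}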
5582 @@ -526,8 +516,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5583
5584 mutex_lock(&rdata->rp_mutex);
5585
5586 - if (fc_rp_debug)
5587 - FC_DBG("Received a PLOGI response\n");
5588 + FC_DEBUG_RPORT("Received a PLOGI response\n");
5589
5590 if (rdata->rp_state != RPORT_ST_PLOGI) {
5591 FC_DBG("Received a PLOGI response, but in state %s\n",
5592 @@ -537,12 +526,15 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5593
5594 if (IS_ERR(fp)) {
5595 fc_rport_error(rport, fp);
5596 - goto out;
5597 + goto err;
5598 }
5599
5600 op = fc_frame_payload_op(fp);
5601 if (op == ELS_LS_ACC &&
5602 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
5603 + rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
5604 + rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
5605 +
5606 tov = ntohl(plp->fl_csp.sp_e_d_tov);
5607 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
5608 tov /= 1000;
5609 @@ -568,8 +560,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5610 fc_rport_error(rport, fp);
5611
5612 out:
5613 - fc_rport_unlock(rport);
5614 fc_frame_free(fp);
5615 +err:
5616 + mutex_unlock(&rdata->rp_mutex);
5617 }
5618
5619 /**
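The response handlers now split their exit path in two: err: only releases rp_mutex, while out: additionally frees the frame. That matters because on error the fp argument is an ERR_PTR() encoding, not a real frame, so fc_frame_free() must be skipped. A short hedged sketch of the idiom (handle_resp() is an illustrative name):

#include <linux/err.h>
#include <linux/mutex.h>
#include <scsi/libfc/libfc.h>

static void handle_resp(struct mutex *lock, struct fc_frame *fp)
{
        mutex_lock(lock);

        if (IS_ERR(fp))                 /* fp encodes an errno such as     */
                goto err;               /* -FC_EX_TIMEOUT: nothing to free */

        if (fc_frame_payload_op(fp) != ELS_LS_ACC)
                goto out;               /* bad response: free it, unlock   */

        /* ... accept path: parse the payload ... */

out:
        fc_frame_free(fp);              /* only reached with a real frame  */
err:
        mutex_unlock(lock);             /* always drop the rport mutex     */
}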
5620 @@ -586,8 +579,7 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
5621 struct fc_frame *fp;
5622 struct fc_els_flogi *plogi;
5623
5624 - if (fc_rp_debug)
5625 - FC_DBG("Port (%6x) entered PLOGI state from %s state\n",
5626 + FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
5627 rport->port_id, fc_rport_state(rport));
5628
5629 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
5630 @@ -607,7 +599,7 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
5631 if (!lport->tt.exch_seq_send(lport, fp,
5632 fc_rport_plogi_resp, NULL,
5633 rport, lport->e_d_tov,
5634 - rdata->local_port->fid,
5635 + fc_host_port_id(rdata->local_port->host),
5636 rport->port_id,
5637 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5638 fc_rport_error(rport, fp);
5639 @@ -638,8 +630,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5640
5641 mutex_lock(&rdata->rp_mutex);
5642
5643 - if (fc_rp_debug)
5644 - FC_DBG("Received a PRLI response\n");
5645 + FC_DEBUG_RPORT("Received a PRLI response\n");
5646
5647 if (rdata->rp_state != RPORT_ST_PRLI) {
5648 FC_DBG("Received a PRLI response, but in state %s\n",
5649 @@ -649,7 +640,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5650
5651 if (IS_ERR(fp)) {
5652 fc_rport_error(rport, fp);
5653 - goto out;
5654 + goto err;
5655 }
5656
5657 op = fc_frame_payload_op(fp);
5658 @@ -667,18 +658,19 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5659 if (fcp_parm & FCP_SPPF_TARG_FCN)
5660 roles |= FC_RPORT_ROLE_FCP_TARGET;
5661
5662 - rdata->roles = roles;
5663 + rport->roles = roles;
5664 fc_rport_enter_rtv(rport);
5665
5666 } else {
5667 FC_DBG("Bad ELS response\n");
5668 rdata->event = LPORT_EV_RPORT_FAILED;
5669 - fc_remote_port_delete(rport);
5670 + queue_work(rport_event_queue, &rdata->event_work);
5671 }
5672
5673 out:
5674 - fc_rport_unlock(rport);
5675 fc_frame_free(fp);
5676 +err:
5677 + mutex_unlock(&rdata->rp_mutex);
5678 }
5679
5680 /**
5681 @@ -700,8 +692,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5682
5683 mutex_lock(&rdata->rp_mutex);
5684
5685 - if (fc_rp_debug)
5686 - FC_DBG("Received a LOGO response\n");
5687 + FC_DEBUG_RPORT("Received a LOGO response\n");
5688
5689 if (rdata->rp_state != RPORT_ST_LOGO) {
5690 FC_DBG("Received a LOGO response, but in state %s\n",
5691 @@ -711,22 +702,22 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5692
5693 if (IS_ERR(fp)) {
5694 fc_rport_error(rport, fp);
5695 - goto out;
5696 + goto err;
5697 }
5698
5699 op = fc_frame_payload_op(fp);
5700 if (op == ELS_LS_ACC) {
5701 fc_rport_enter_rtv(rport);
5702 -
5703 } else {
5704 FC_DBG("Bad ELS response\n");
5705 rdata->event = LPORT_EV_RPORT_LOGO;
5706 - fc_remote_port_delete(rport);
5707 + queue_work(rport_event_queue, &rdata->event_work);
5708 }
5709
5710 out:
5711 - fc_rport_unlock(rport);
5712 fc_frame_free(fp);
5713 +err:
5714 + mutex_unlock(&rdata->rp_mutex);
5715 }
5716
5717 /**
5718 @@ -746,8 +737,7 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
5719 } *pp;
5720 struct fc_frame *fp;
5721
5722 - if (fc_rp_debug)
5723 - FC_DBG("Port (%6x) entered PRLI state from %s state\n",
5724 + FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
5725 rport->port_id, fc_rport_state(rport));
5726
5727 fc_rport_state_enter(rport, RPORT_ST_PRLI);
5728 @@ -771,7 +761,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
5729 if (!lport->tt.exch_seq_send(lport, fp,
5730 fc_rport_prli_resp, NULL,
5731 rport, lport->e_d_tov,
5732 - lport->fid, rport->port_id,
5733 + fc_host_port_id(lport->host),
5734 + rport->port_id,
5735 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5736 fc_rport_error(rport, fp);
5737 }
5738 @@ -797,8 +788,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5739
5740 mutex_lock(&rdata->rp_mutex);
5741
5742 - if (fc_rp_debug)
5743 - FC_DBG("Received a RTV response\n");
5744 + FC_DEBUG_RPORT("Received a RTV response\n");
5745
5746 if (rdata->rp_state != RPORT_ST_RTV) {
5747 FC_DBG("Received a RTV response, but in state %s\n",
5748 @@ -808,7 +798,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5749
5750 if (IS_ERR(fp)) {
5751 fc_rport_error(rport, fp);
5752 - goto out;
5753 + goto err;
5754 }
5755
5756 op = fc_frame_payload_op(fp);
5757 @@ -836,8 +826,9 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5758 fc_rport_enter_ready(rport);
5759
5760 out:
5761 - fc_rport_unlock(rport);
5762 fc_frame_free(fp);
5763 +err:
5764 + mutex_unlock(&rdata->rp_mutex);
5765 }
5766
5767 /**
5768 @@ -854,8 +845,7 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
5769 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5770 struct fc_lport *lport = rdata->local_port;
5771
5772 - if (fc_rp_debug)
5773 - FC_DBG("Port (%6x) entered RTV state from %s state\n",
5774 + FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
5775 rport->port_id, fc_rport_state(rport));
5776
5777 fc_rport_state_enter(rport, RPORT_ST_RTV);
5778 @@ -874,7 +864,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
5779 if (!lport->tt.exch_seq_send(lport, fp,
5780 fc_rport_rtv_resp, NULL,
5781 rport, lport->e_d_tov,
5782 - lport->fid, rport->port_id,
5783 + fc_host_port_id(lport->host),
5784 + rport->port_id,
5785 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5786 fc_rport_error(rport, fp);
5787 }
5788 @@ -893,8 +884,7 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
5789 struct fc_frame *fp;
5790 struct fc_els_logo *logo;
5791
5792 - if (fc_rp_debug)
5793 - FC_DBG("Port (%6x) entered LOGO state from %s state\n",
5794 + FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
5795 rport->port_id, fc_rport_state(rport));
5796
5797 fc_rport_state_enter(rport, RPORT_ST_LOGO);
5798 @@ -908,14 +898,15 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
5799 logo = fc_frame_payload_get(fp, sizeof(*logo));
5800 memset(logo, 0, sizeof(*logo));
5801 logo->fl_cmd = ELS_LOGO;
5802 - hton24(logo->fl_n_port_id, lport->fid);
5803 + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
5804 logo->fl_n_port_wwn = htonll(lport->wwpn);
5805 fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5806
5807 if (!lport->tt.exch_seq_send(lport, fp,
5808 fc_rport_logo_resp, NULL,
5809 rport, lport->e_d_tov,
5810 - lport->fid, rport->port_id,
5811 + fc_host_port_id(lport->host),
5812 + rport->port_id,
5813 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5814 fc_rport_error(rport, fp);
5815 }
5816 @@ -979,7 +970,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
5817 }
5818 }
5819
5820 - fc_rport_unlock(rport);
5821 + mutex_unlock(&rdata->rp_mutex);
5822 fc_frame_free(fp);
5823 }
5824
5825 @@ -1011,8 +1002,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
5826
5827 fh = fc_frame_header_get(fp);
5828
5829 - if (fc_rp_debug)
5830 - FC_DBG("Received PLOGI request from port (%6x) "
5831 + FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
5832 "while in state %s\n", ntoh24(fh->fh_s_id),
5833 fc_rport_state(rport));
5834
5835 @@ -1041,29 +1031,25 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
5836 */
5837 switch (rdata->rp_state) {
5838 case RPORT_ST_INIT:
5839 - if (fc_rp_debug)
5840 - FC_DBG("incoming PLOGI from %6x wwpn %llx state INIT "
5841 + FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
5842 "- reject\n", sid, wwpn);
5843 reject = ELS_RJT_UNSUP;
5844 break;
5845 case RPORT_ST_PLOGI:
5846 - if (fc_rp_debug)
5847 - FC_DBG("incoming PLOGI from %x in PLOGI state %d\n",
5848 + FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
5849 sid, rdata->rp_state);
5850 if (wwpn < lport->wwpn)
5851 reject = ELS_RJT_INPROG;
5852 break;
5853 case RPORT_ST_PRLI:
5854 case RPORT_ST_READY:
5855 - if (fc_rp_debug)
5856 - FC_DBG("incoming PLOGI from %x in logged-in state %d "
5857 + FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
5858 "- ignored for now\n", sid, rdata->rp_state);
5859 /* XXX TBD - should reset */
5860 break;
5861 case RPORT_ST_NONE:
5862 default:
5863 - if (fc_rp_debug)
5864 - FC_DBG("incoming PLOGI from %x in unexpected "
5865 + FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
5866 "state %d\n", sid, rdata->rp_state);
5867 break;
5868 }
5869 @@ -1145,8 +1131,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
5870
5871 fh = fc_frame_header_get(rx_fp);
5872
5873 - if (fc_rp_debug)
5874 - FC_DBG("Received PRLI request from port (%6x) "
5875 + FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
5876 "while in state %s\n", ntoh24(fh->fh_s_id),
5877 fc_rport_state(rport));
5878
5879 @@ -1220,7 +1205,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
5880 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
5881 if (fcp_parm & FCP_SPPF_TARG_FCN)
5882 roles |= FC_RPORT_ROLE_FCP_TARGET;
5883 - rdata->roles = roles;
5884 + rport->roles = roles;
5885
5886 spp->spp_params =
5887 htonl(lport->service_params);
5888 @@ -1278,8 +1263,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
5889
5890 fh = fc_frame_header_get(fp);
5891
5892 - if (fc_rp_debug)
5893 - FC_DBG("Received PRLO request from port (%6x) "
5894 + FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
5895 "while in state %s\n", ntoh24(fh->fh_s_id),
5896 fc_rport_state(rport));
5897
5898 @@ -1308,12 +1292,12 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
5899
5900 fh = fc_frame_header_get(fp);
5901
5902 - if (fc_rp_debug)
5903 - FC_DBG("Received LOGO request from port (%6x) "
5904 + FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
5905 "while in state %s\n", ntoh24(fh->fh_s_id),
5906 fc_rport_state(rport));
5907
5908 rdata->event = LPORT_EV_RPORT_LOGO;
5909 + queue_work(rport_event_queue, &rdata->event_work);
5910
5911 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
5912 fc_frame_free(fp);
5913 @@ -1327,63 +1311,37 @@ int fc_rport_init(struct fc_lport *lport)
5914 if (!lport->tt.rport_logout)
5915 lport->tt.rport_logout = fc_rport_logout;
5916
5917 + if (!lport->tt.rport_stop)
5918 + lport->tt.rport_stop = fc_rport_stop;
5919 +
5920 if (!lport->tt.rport_recv_req)
5921 lport->tt.rport_recv_req = fc_rport_recv_req;
5922
5923 - if (!lport->tt.rport_lookup)
5924 - lport->tt.rport_lookup = fc_rport_lookup;
5925 -
5926 - if (!lport->tt.rport_reset)
5927 - lport->tt.rport_reset = fc_rport_reset;
5928 -
5929 - if (!lport->tt.rport_reset_list)
5930 - lport->tt.rport_reset_list = fc_rport_reset_list;
5931 -
5932 return 0;
5933 }
5934 EXPORT_SYMBOL(fc_rport_init);
5935
5936 -/**
5937 - * fc_block_rports - delete all the remote ports, on reset or link down
5938 - * @lp: libfc local port instance
5939 - *
5940 - * This routine temporarily removes any online remote ports from the fc_host
5941 - * rport list, then drops the host lock in order to call fc_remote_port_delete()
5942 - * on each rport in turn, and finally splices the list back onto the fc_host.
5943 - */
5944 -void fc_block_rports(struct fc_lport *lp)
5945 +int fc_setup_rport()
5946 {
5947 - struct Scsi_Host *shost = lp->host;
5948 - struct fc_rport *rport, *next;
5949 - unsigned long flags;
5950 - LIST_HEAD(rports);
5951 -
5952 - spin_lock_irqsave(shost->host_lock, flags);
5953 - list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
5954 - /* protect the name service remote port */
5955 - if (rport->port_id == FC_FID_DIR_SERV)
5956 - continue;
5957 - if (rport->port_state != FC_PORTSTATE_ONLINE)
5958 - continue;
5959 - list_move_tail(&rport->peers, &rports);
5960 - }
5961 - spin_unlock_irqrestore(shost->host_lock, flags);
5962 -
5963 - list_for_each_entry(rport, &rports, peers) {
5964 - fc_remote_port_delete(rport);
5965 - }
5966 + rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
5967 + if (!rport_event_queue)
5968 + return -ENOMEM;
5969 + return 0;
5970 +}
5971 +EXPORT_SYMBOL(fc_setup_rport);
5972
5973 - spin_lock_irqsave(shost->host_lock, flags);
5974 - list_splice(&rports, &fc_host_rports(shost));
5975 - spin_unlock_irqrestore(shost->host_lock, flags);
5976 +void fc_destroy_rport()
5977 +{
5978 + destroy_workqueue(rport_event_queue);
5979 }
5980 +EXPORT_SYMBOL(fc_destroy_rport);
5981
5982 void fc_rport_terminate_io(struct fc_rport *rport)
5983 {
5984 - struct fc_rport_libfc_priv *rp = rport->dd_data;
5985 - struct fc_lport *lp = rp->local_port;
5986 + struct fc_rport_libfc_priv *rdata = rport->dd_data;
5987 + struct fc_lport *lport = rdata->local_port;
5988
5989 - lp->tt.exch_mgr_reset(lp->emp, 0, rport->port_id);
5990 - lp->tt.exch_mgr_reset(lp->emp, rport->port_id, 0);
5991 + lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
5992 + lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
5993 }
5994 EXPORT_SYMBOL(fc_rport_terminate_io);
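fc_block_rports() and the reset helpers are gone; in their place the rport layer gains its own single-threaded event workqueue, created in fc_setup_rport() and torn down in fc_destroy_rport(). These two exports are presumably called once from libfc's module init/exit; a hedged sketch of that pairing (the module-init function names here are illustrative, the patch does not show them):

#include <linux/init.h>
#include <linux/module.h>
#include <scsi/libfc/libfc.h>

static int __init libfc_example_init(void)
{
        int rc;

        rc = fc_setup_rport();          /* creates the "fc_rport_eq" queue  */
        if (rc)
                return rc;              /* -ENOMEM if the workqueue failed  */
        /* ... set up the exchange manager etc. ... */
        return 0;
}

static void __exit libfc_example_exit(void)
{
        fc_destroy_rport();             /* flushes and frees the workqueue  */
}

module_init(libfc_example_init);
module_exit(libfc_example_exit);
MODULE_LICENSE("GPL");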
5995 diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h
5996 index c7a52bb..9508e55 100644
5997 --- a/include/scsi/libfc/fc_frame.h
5998 +++ b/include/scsi/libfc/fc_frame.h
5999 @@ -51,6 +51,7 @@
6000 #define fr_sof(fp) (fr_cb(fp)->fr_sof)
6001 #define fr_eof(fp) (fr_cb(fp)->fr_eof)
6002 #define fr_flags(fp) (fr_cb(fp)->fr_flags)
6003 +#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
6004
6005 struct fc_frame {
6006 struct sk_buff skb;
6007 @@ -63,6 +64,7 @@ struct fcoe_rcv_info {
6008 enum fc_sof fr_sof; /* start of frame delimiter */
6009 enum fc_eof fr_eof; /* end of frame delimiter */
6010 u8 fr_flags; /* flags - see below */
6011 + u16 fr_max_payload; /* max FC payload */
6012 };
6013
6014 /*
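fc_frame.h grows an fr_max_payload accessor alongside fr_sof/fr_eof/fr_flags: each of these macros reaches into the struct fcoe_rcv_info that libfc keeps with the frame's embedded sk_buff. A hedged sketch of how the new accessor is used; the exact fr_cb() expansion described in the comment is an assumption, it is not part of this hunk:

#include <linux/types.h>
#include <scsi/libfc/fc_frame.h>

/*
 * fr_cb() (defined earlier in this header) is assumed to resolve to the
 * struct fcoe_rcv_info stored in the frame's skb->cb[] scratch area,
 * roughly ((struct fcoe_rcv_info *)&((fp)->skb.cb[0])).
 */
static u16 set_and_read_max_payload(struct fc_frame *fp, u16 mfs)
{
        /* The accessor macros are lvalues, usable on either side of '='. */
        fr_max_payload(fp) = mfs;
        return fr_max_payload(fp);
}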
6015 diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h
6016 index 24d3fcb..7e5e6be 100644
6017 --- a/include/scsi/libfc/libfc.h
6018 +++ b/include/scsi/libfc/libfc.h
6019 @@ -36,12 +36,10 @@
6020 #define LIBFC_DEBUG
6021
6022 #ifdef LIBFC_DEBUG
6023 -/*
6024 - * Log message.
6025 - */
6026 +/* Log messages */
6027 #define FC_DBG(fmt, args...) \
6028 do { \
6029 - printk(KERN_INFO "%s " fmt, __func__, ##args); \
6030 + printk(KERN_INFO "%s " fmt, __func__, ##args); \
6031 } while (0)
6032 #else
6033 #define FC_DBG(fmt, args...)
6034 @@ -59,35 +57,22 @@
6035 #define ntohll(x) be64_to_cpu(x)
6036 #define htonll(x) cpu_to_be64(x)
6037
6038 -#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
6039 +#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
6040
6041 -#define hton24(p, v) do { \
6042 - p[0] = (((v) >> 16) & 0xFF); \
6043 - p[1] = (((v) >> 8) & 0xFF); \
6044 - p[2] = ((v) & 0xFF); \
6045 -} while (0)
6046 +#define hton24(p, v) do { \
6047 + p[0] = (((v) >> 16) & 0xFF); \
6048 + p[1] = (((v) >> 8) & 0xFF); \
6049 + p[2] = ((v) & 0xFF); \
6050 + } while (0)
6051
6052 struct fc_exch_mgr;
6053
6054 /*
6055 - * tgt_flags
6056 - */
6057 -#define FC_TGT_REC_SUPPORTED (1 << 0)
6058 -
6059 -/*
6060 * FC HBA status
6061 */
6062 #define FC_PAUSE (1 << 1)
6063 #define FC_LINK_UP (1 << 0)
6064
6065 -/* for fc_softc */
6066 -#define FC_MAX_OUTSTANDING_COMMANDS 1024
6067 -
6068 -/*
6069 - * Transport Capabilities
6070 - */
6071 -#define TRANS_C_SG (1 << 0) /* Scatter gather */
6072 -
6073 enum fc_lport_state {
6074 LPORT_ST_NONE = 0,
6075 LPORT_ST_FLOGI,
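The ntoh24()/hton24() helpers above pack and unpack the 24-bit FC_IDs that Fibre Channel uses for port addresses into three big-endian bytes. A small user-space round-trip, with the two macros repeated verbatim so the snippet stands alone:

#include <stdio.h>

#define ntoh24(p)       (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
#define hton24(p, v)    do {                            \
                                p[0] = (((v) >> 16) & 0xFF); \
                                p[1] = (((v) >> 8) & 0xFF);  \
                                p[2] = ((v) & 0xFF);         \
                        } while (0)

int main(void)
{
        unsigned char buf[3];
        unsigned int fid = 0x010203;    /* domain 01, area 02, port 03 */

        hton24(buf, fid);               /* buf = { 0x01, 0x02, 0x03 }  */
        printf("%02x %02x %02x -> %06x\n",
               buf[0], buf[1], buf[2], (unsigned int)ntoh24(buf));
        return 0;
}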
6076 @@ -104,6 +89,7 @@ enum fc_lport_event {
6077 LPORT_EV_RPORT_NONE = 0,
6078 LPORT_EV_RPORT_CREATED,
6079 LPORT_EV_RPORT_FAILED,
6080 + LPORT_EV_RPORT_STOP,
6081 LPORT_EV_RPORT_LOGO
6082 };
6083
6084 @@ -163,9 +149,11 @@ struct fc_rport_libfc_priv {
6085 struct mutex rp_mutex;
6086 struct delayed_work retry_work;
6087 enum fc_lport_event event;
6088 - void (*event_callback)(struct fc_lport *, u32,
6089 + void (*event_callback)(struct fc_lport *,
6090 + struct fc_rport *,
6091 enum fc_lport_event);
6092 - u32 roles;
6093 + struct list_head peers;
6094 + struct work_struct event_work;
6095 };
6096
6097 #define PRIV_TO_RPORT(x) \
6098 @@ -173,8 +161,8 @@ struct fc_rport_libfc_priv {
6099 #define RPORT_TO_PRIV(x) \
6100 (struct fc_rport_libfc_priv*)((void *)x + sizeof(struct fc_rport));
6101
6102 -struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *);
6103 -void fc_rport_dummy_destroy(struct fc_rport *);
6104 +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
6105 +void fc_rport_rogue_destroy(struct fc_rport *);
6106
6107 static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
6108 {
6109 @@ -360,7 +348,7 @@ struct libfc_function_template {
6110
6111 int (*lport_reset)(struct fc_lport *);
6112
6113 - void (*event_callback)(struct fc_lport *, u32,
6114 + void (*event_callback)(struct fc_lport *, struct fc_rport *,
6115 enum fc_lport_event);
6116
6117 /**
6118 @@ -384,15 +372,21 @@ struct libfc_function_template {
6119 */
6120 int (*rport_logout)(struct fc_rport *rport);
6121
6122 + /*
6123 + * Delete the rport and remove it from the transport if
6124 + * it has been added. This will not send a LOGO; use
6125 + * rport_logout for a graceful logout.
6126 + */
6127 + int (*rport_stop)(struct fc_rport *rport);
6128 +
6129 + /*
6130 + * Receive a request from a remote port.
6131 + */
6132 void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
6133 struct fc_rport *);
6134
6135 struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
6136
6137 - void (*rport_reset)(struct fc_rport *);
6138 -
6139 - void (*rport_reset_list)(struct fc_lport *);
6140 -
6141 /**
6142 * SCSI interfaces
6143 */
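The template's event_callback now receives the struct fc_rport itself rather than a bare 32-bit FC_ID, and rport_stop is the new non-graceful teardown next to rport_logout. A hedged sketch of what a transport consumer's callback might look like against the new signature (fcoe_example_rport_event() is an illustrative name, not the fcoe module's actual handler):

#include <linux/kernel.h>
#include <scsi/libfc/libfc.h>

static void fcoe_example_rport_event(struct fc_lport *lport,
                                     struct fc_rport *rport,
                                     enum fc_lport_event event)
{
        switch (event) {
        case LPORT_EV_RPORT_CREATED:
                /* rport is the real transport-class rport by now */
                printk(KERN_INFO "rport %6x ready\n", rport->port_id);
                break;
        case LPORT_EV_RPORT_FAILED:
        case LPORT_EV_RPORT_LOGO:
        case LPORT_EV_RPORT_STOP:
                printk(KERN_INFO "rport %6x gone (%d)\n",
                       rport->port_id, event);
                break;
        case LPORT_EV_RPORT_NONE:
        default:
                break;
        }
}

/* Hooked up before fc_rport_init(), e.g.:
 *      lport->tt.event_callback = fcoe_example_rport_event;
 */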
6144 @@ -429,6 +423,7 @@ struct fc_lport {
6145 struct fc_rport *dns_rp;
6146 struct fc_rport *ptp_rp;
6147 void *scsi_priv;
6148 + struct list_head rports;
6149
6150 /* Operational Information */
6151 struct libfc_function_template tt;
6152 @@ -442,7 +437,6 @@ struct fc_lport {
6153
6154 u64 wwpn;
6155 u64 wwnn;
6156 - u32 fid;
6157 u8 retry_count;
6158 unsigned char disc_retry_count;
6159 unsigned char disc_delay;
6160 @@ -452,8 +446,8 @@ struct fc_lport {
6161 unsigned char disc_buf_len;
6162
6163 /* Capabilities */
6164 - char ifname[IFNAMSIZ];
6165 - u32 capabilities;
6166 + u32 sg_supp:1; /* scatter gather supported */
6167 + u32 seq_offload:1; /* seq offload supported */
6168 u32 mfs; /* max FC payload size */
6169 unsigned int service_params;
6170 unsigned int e_d_tov;
6171 @@ -484,11 +478,6 @@ static inline int fc_lport_test_ready(struct fc_lport *lp)
6172 return lp->state == LPORT_ST_READY;
6173 }
6174
6175 -static inline u32 fc_lport_get_fid(const struct fc_lport *lp)
6176 -{
6177 - return lp->fid;
6178 -}
6179 -
6180 static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
6181 {
6182 lp->wwnn = wwnn;
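With the fid field and fc_lport_get_fid() removed from struct fc_lport, the assigned FC_ID lives only in the fc_host attributes, which is why the .c hunks above switch every lport->fid use to fc_host_port_id(lport->host). A one-function sketch of reading and assigning it through the transport attribute (set_local_fid() is an illustrative helper):

#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc/libfc.h>

static u32 set_local_fid(struct fc_lport *lport, u32 fid)
{
        /* fc_host_port_id() is an lvalue macro over the Scsi_Host
         * attributes, so it replaces both reads and writes of lport->fid. */
        fc_host_port_id(lport->host) = fid;
        return fc_host_port_id(lport->host);
}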
6183 @@ -586,8 +575,6 @@ int fc_set_mfs(struct fc_lport *lp, u32 mfs);
6184 *****************************/
6185 int fc_rport_init(struct fc_lport *lp);
6186 void fc_rport_terminate_io(struct fc_rport *rp);
6187 -void fc_block_rports(struct fc_lport *lp);
6188 -
6189
6190 /**
6191 * DISCOVERY LAYER
6192 @@ -776,6 +763,7 @@ void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data);
6193 * Functions for fc_functions_template
6194 */
6195 void fc_get_host_speed(struct Scsi_Host *shost);
6196 +void fc_get_host_port_type(struct Scsi_Host *shost);
6197 void fc_get_host_port_state(struct Scsi_Host *shost);
6198 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
6199 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
6200 @@ -785,6 +773,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
6201 */
6202 int fc_setup_exch_mgr(void);
6203 void fc_destroy_exch_mgr(void);
6204 -
6205 +int fc_setup_rport(void);
6206 +void fc_destroy_rport(void);
6207
6208 #endif /* _LIBFC_H_ */