1 Subject: Open-FCoE: Update for Beta4
2 From: John Fastabend <john.r.fastabend@intel.com>
3 Date: Thu Nov 6 13:08:49 2008 +0100:
4 Git: c66b456a7eb389e5f19d5bf23170b47a3e01d755
7 Incremental Open-FCoE update for Beta4.
9 Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
10 Acked-by: Hannes Reinecke <hare@suse.de>
12 diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
13 index 342e2ad..b78da06 100644
14 --- a/drivers/scsi/fcoe/Makefile
15 +++ b/drivers/scsi/fcoe/Makefile
17 obj-$(CONFIG_FCOE) += fcoe.o
26 diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
28 index 0000000..e11d36b
30 +++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
33 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
35 + * This program is free software; you can redistribute it and/or modify it
36 + * under the terms and conditions of the GNU General Public License,
37 + * version 2, as published by the Free Software Foundation.
39 + * This program is distributed in the hope it will be useful, but WITHOUT
40 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
41 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
44 + * You should have received a copy of the GNU General Public License along with
45 + * this program; if not, write to the Free Software Foundation, Inc.,
46 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
48 + * Maintained at www.Open-FCoE.org
51 +#include <linux/module.h>
52 +#include <linux/version.h>
53 +#include <linux/kernel.h>
54 +#include <linux/kthread.h>
55 +#include <linux/spinlock.h>
56 +#include <linux/cpu.h>
57 +#include <linux/netdevice.h>
58 +#include <linux/etherdevice.h>
59 +#include <linux/ethtool.h>
60 +#include <linux/if_ether.h>
61 +#include <linux/fs.h>
62 +#include <linux/sysfs.h>
63 +#include <linux/ctype.h>
65 +#include <scsi/libfc/libfc.h>
67 +#include "fcoe_def.h"
69 +MODULE_AUTHOR("Open-FCoE.org");
70 +MODULE_DESCRIPTION("FCoE");
71 +MODULE_LICENSE("GPL");
74 + * Static functions and variables definitions
76 +#ifdef CONFIG_HOTPLUG_CPU
77 +static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
78 +#endif /* CONFIG_HOTPLUG_CPU */
79 +static int fcoe_device_notification(struct notifier_block *, ulong, void *);
80 +static void fcoe_dev_setup(void);
81 +static void fcoe_dev_cleanup(void);
83 +#ifdef CONFIG_HOTPLUG_CPU
84 +static struct notifier_block fcoe_cpu_notifier = {
85 + .notifier_call = fcoe_cpu_callback,
87 +#endif /* CONFIG_HOTPLUG_CPU */
90 + * notification function from net device
92 +static struct notifier_block fcoe_notifier = {
93 + .notifier_call = fcoe_device_notification,
96 +#ifdef CONFIG_HOTPLUG_CPU
98 + * create percpu stats block
99 + * called by cpu add/remove notifier
101 +static void fcoe_create_percpu_data(int cpu)
103 + struct fc_lport *lp;
104 + struct fcoe_softc *fc;
105 + struct fcoe_dev_stats *p;
107 + write_lock_bh(&fcoe_hostlist_lock);
108 + list_for_each_entry(fc, &fcoe_hostlist, list) {
110 + if (lp->dev_stats[cpu] == NULL) {
111 + p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
113 + lp->dev_stats[cpu] = p;
116 + write_unlock_bh(&fcoe_hostlist_lock);
120 + * destroy percpu stats block
121 + * called by cpu add/remove notifier
123 +static void fcoe_destroy_percpu_data(int cpu)
125 + struct fcoe_dev_stats *p;
126 + struct fc_lport *lp;
127 + struct fcoe_softc *fc;
129 + write_lock_bh(&fcoe_hostlist_lock);
130 + list_for_each_entry(fc, &fcoe_hostlist, list) {
132 + p = lp->dev_stats[cpu];
134 + lp->dev_stats[cpu] = NULL;
138 + write_unlock_bh(&fcoe_hostlist_lock);
142 + * Get notified when a cpu comes on/off. Be hotplug friendly.
144 +static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
147 + unsigned int cpu = (unsigned long)hcpu;
151 + fcoe_create_percpu_data(cpu);
154 + fcoe_destroy_percpu_data(cpu);
161 +#endif /* CONFIG_HOTPLUG_CPU */
164 + * function to setup link change notification interface
166 +static void fcoe_dev_setup(void)
169 + * here setup a interface specific wd time to
170 + * monitor the link state
172 + register_netdevice_notifier(&fcoe_notifier);
176 + * function to cleanup link change notification interface
178 +static void fcoe_dev_cleanup(void)
180 + unregister_netdevice_notifier(&fcoe_notifier);
184 + * This function is called by the ethernet driver
185 + * this is called in case of link change event
187 +static int fcoe_device_notification(struct notifier_block *notifier,
188 + ulong event, void *ptr)
190 + struct fc_lport *lp = NULL;
191 + struct net_device *real_dev = ptr;
192 + struct fcoe_softc *fc;
193 + struct fcoe_dev_stats *stats;
196 + int rc = NOTIFY_OK;
198 + read_lock(&fcoe_hostlist_lock);
199 + list_for_each_entry(fc, &fcoe_hostlist, list) {
200 + if (fc->real_dev == real_dev) {
205 + read_unlock(&fcoe_hostlist_lock);
211 + new_status = lp->link_status;
214 + case NETDEV_GOING_DOWN:
215 + new_status &= ~FC_LINK_UP;
218 + case NETDEV_CHANGE:
219 + new_status &= ~FC_LINK_UP;
220 + if (!fcoe_link_ok(lp))
221 + new_status |= FC_LINK_UP;
223 + case NETDEV_CHANGEMTU:
224 + mfs = fc->real_dev->mtu -
225 + (sizeof(struct fcoe_hdr) +
226 + sizeof(struct fcoe_crc_eof));
227 + if (fc->user_mfs && fc->user_mfs < mfs)
228 + mfs = fc->user_mfs;
229 + if (mfs >= FC_MIN_MAX_FRAME)
230 + fc_set_mfs(lp, mfs);
231 + new_status &= ~FC_LINK_UP;
232 + if (!fcoe_link_ok(lp))
233 + new_status |= FC_LINK_UP;
235 + case NETDEV_REGISTER:
238 + FC_DBG("unknown event %ld call", event);
240 + if (lp->link_status != new_status) {
241 + if ((new_status & FC_LINK_UP) == FC_LINK_UP)
244 + stats = lp->dev_stats[smp_processor_id()];
245 + stats->LinkFailureCount++;
247 + fcoe_clean_pending_queue(lp);
254 +static void trimstr(char *str, int len)
256 + char *cp = str + len;
257 + while (--cp >= str && *cp == '\n')
261 +static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr,
262 + const char *buffer, size_t size)
264 + struct net_device *netdev;
265 + char ifname[IFNAMSIZ + 2];
267 + strlcpy(ifname, buffer, IFNAMSIZ);
268 + trimstr(ifname, strlen(ifname));
269 + netdev = dev_get_by_name(&init_net, ifname);
271 + fcoe_destroy_interface(netdev);
277 +static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr,
278 + const char *buffer, size_t size)
280 + struct net_device *netdev;
281 + char ifname[IFNAMSIZ + 2];
283 + strlcpy(ifname, buffer, IFNAMSIZ);
284 + trimstr(ifname, strlen(ifname));
285 + netdev = dev_get_by_name(&init_net, ifname);
287 + fcoe_create_interface(netdev);
293 +static const struct kobj_attribute fcoe_destroyattr = \
294 + __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy);
295 +static const struct kobj_attribute fcoe_createattr = \
296 + __ATTR(create, S_IWUSR, NULL, fcoe_create);
299 + * Initialization routine
300 + * 1. Will create fc transport software structure
301 + * 2. initialize the link list of port information structure
303 +static int __init fcoe_init(void)
307 + struct fcoe_percpu_s *p;
309 + rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
310 + &fcoe_destroyattr.attr);
312 + rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
313 + &fcoe_createattr.attr);
318 + rwlock_init(&fcoe_hostlist_lock);
320 +#ifdef CONFIG_HOTPLUG_CPU
321 + register_cpu_notifier(&fcoe_cpu_notifier);
322 +#endif /* CONFIG_HOTPLUG_CPU */
325 + * initialize per CPU interrupt thread
327 + for_each_online_cpu(cpu) {
328 + p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
330 + p->thread = kthread_create(fcoe_percpu_receive_thread,
332 + "fcoethread/%d", cpu);
335 + * if there is no error then bind the thread to the cpu
336 + * initialize the semaphore and skb queue head
338 + if (likely(!IS_ERR(p->thread))) {
340 + fcoe_percpu[cpu] = p;
341 + skb_queue_head_init(&p->fcoe_rx_list);
342 + kthread_bind(p->thread, cpu);
343 + wake_up_process(p->thread);
345 + fcoe_percpu[cpu] = NULL;
352 + FC_DBG("failed to initialize proc intrerface\n");
358 + * setup link change notification
362 + init_timer(&fcoe_timer);
363 + fcoe_timer.data = 0;
364 + fcoe_timer.function = fcoe_watchdog;
365 + fcoe_timer.expires = (jiffies + (10 * HZ));
366 + add_timer(&fcoe_timer);
368 + if (fcoe_sw_init() != 0) {
369 + FC_DBG("fail to attach fc transport");
376 +#ifdef CONFIG_HOTPLUG_CPU
377 + unregister_cpu_notifier(&fcoe_cpu_notifier);
378 +#endif /* CONFIG_HOTPLUG_CPU */
381 +module_init(fcoe_init);
383 +static void __exit fcoe_exit(void)
386 + struct fcoe_softc *fc, *tmp;
387 + struct fcoe_percpu_s *p;
388 + struct sk_buff *skb;
391 + * Stop all call back interfaces
393 +#ifdef CONFIG_HOTPLUG_CPU
394 + unregister_cpu_notifier(&fcoe_cpu_notifier);
395 +#endif /* CONFIG_HOTPLUG_CPU */
396 + fcoe_dev_cleanup();
401 + del_timer_sync(&fcoe_timer);
404 + * assuming that at this time there will be no
405 + * ioctl in progress, therefore we do not need to lock the
408 + list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
409 + fcoe_destroy_interface(fc->real_dev);
411 + for (idx = 0; idx < NR_CPUS; idx++) {
412 + if (fcoe_percpu[idx]) {
413 + kthread_stop(fcoe_percpu[idx]->thread);
414 + p = fcoe_percpu[idx];
415 + spin_lock_bh(&p->fcoe_rx_list.lock);
416 + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
418 + spin_unlock_bh(&p->fcoe_rx_list.lock);
419 + if (fcoe_percpu[idx]->crc_eof_page)
420 + put_page(fcoe_percpu[idx]->crc_eof_page);
421 + kfree(fcoe_percpu[idx]);
427 +module_exit(fcoe_exit);
428 diff --git a/drivers/scsi/fcoe/fcoe_def.h b/drivers/scsi/fcoe/fcoe_def.h
429 index 12bf69c..b00e14b 100644
430 --- a/drivers/scsi/fcoe/fcoe_def.h
431 +++ b/drivers/scsi/fcoe/fcoe_def.h
434 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
435 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
437 * This program is free software; you can redistribute it and/or modify it
438 * under the terms and conditions of the GNU General Public License,
439 @@ -48,16 +48,10 @@ struct fcoe_percpu_s {
444 - struct timer_list timer;
446 - * fcoe host list is protected by the following read/write lock
448 - rwlock_t fcoe_hostlist_lock;
449 - struct list_head fcoe_hostlist;
451 - struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
453 +extern struct timer_list fcoe_timer;
454 +extern rwlock_t fcoe_hostlist_lock;
455 +extern struct list_head fcoe_hostlist;
456 +extern struct fcoe_percpu_s *fcoe_percpu[];
459 struct list_head list;
460 @@ -79,22 +73,20 @@ struct fcoe_softc {
464 -extern int debug_fcoe;
465 -extern struct fcoe_percpu_s *fcoe_percpu[];
466 -extern struct scsi_transport_template *fcoe_transport_template;
467 int fcoe_percpu_receive_thread(void *arg);
470 * HBA transport ops prototypes
472 -extern struct fcoe_info fcoei;
474 void fcoe_clean_pending_queue(struct fc_lport *fd);
475 void fcoe_watchdog(ulong vp);
476 -int fcoe_destroy_interface(const char *ifname);
477 -int fcoe_create_interface(const char *ifname);
478 +int fcoe_destroy_interface(struct net_device *);
479 +int fcoe_create_interface(struct net_device *);
480 int fcoe_xmit(struct fc_lport *, struct fc_frame *);
481 int fcoe_rcv(struct sk_buff *, struct net_device *,
482 struct packet_type *, struct net_device *);
483 int fcoe_link_ok(struct fc_lport *);
485 +int __init fcoe_sw_init(void);
486 +void __exit fcoe_sw_exit(void);
487 #endif /* _FCOE_DEF_H_ */
488 diff --git a/drivers/scsi/fcoe/fcoe_dev.c b/drivers/scsi/fcoe/fcoe_dev.c
489 deleted file mode 100644
490 index d5a354f..0000000
491 --- a/drivers/scsi/fcoe/fcoe_dev.c
495 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
497 - * This program is free software; you can redistribute it and/or modify it
498 - * under the terms and conditions of the GNU General Public License,
499 - * version 2, as published by the Free Software Foundation.
501 - * This program is distributed in the hope it will be useful, but WITHOUT
502 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
503 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
506 - * You should have received a copy of the GNU General Public License along with
507 - * this program; if not, write to the Free Software Foundation, Inc.,
508 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
510 - * Maintained at www.Open-FCoE.org
514 - * FCOE protocol file
517 -#include <linux/module.h>
518 -#include <linux/version.h>
519 -#include <linux/kernel.h>
520 -#include <linux/spinlock.h>
521 -#include <linux/skbuff.h>
522 -#include <linux/netdevice.h>
523 -#include <linux/etherdevice.h>
524 -#include <linux/if_ether.h>
525 -#include <linux/kthread.h>
526 -#include <linux/crc32.h>
527 -#include <scsi/scsi_tcq.h>
528 -#include <scsi/scsicam.h>
529 -#include <scsi/scsi_transport.h>
530 -#include <scsi/scsi_transport_fc.h>
531 -#include <net/rtnetlink.h>
533 -#include <scsi/fc/fc_encaps.h>
535 -#include <scsi/libfc/libfc.h>
536 -#include <scsi/libfc/fc_frame.h>
538 -#include <scsi/fc/fc_fcoe.h>
539 -#include "fcoe_def.h"
541 -#define FCOE_MAX_QUEUE_DEPTH 256
543 -/* destination address mode */
544 -#define FCOE_GW_ADDR_MODE 0x00
545 -#define FCOE_FCOUI_ADDR_MODE 0x01
547 -/* Function Prototyes */
548 -static int fcoe_check_wait_queue(struct fc_lport *);
549 -static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
550 -static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
551 -static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
554 - * this is the fcoe receive function
555 - * called by NET_RX_SOFTIRQ
556 - * this function will receive the packet and
557 - * build fc frame and pass it up
559 -int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
560 - struct packet_type *ptype, struct net_device *olddev)
562 - struct fc_lport *lp;
563 - struct fcoe_rcv_info *fr;
564 - struct fcoe_softc *fc;
565 - struct fcoe_dev_stats *stats;
567 - struct fc_frame_header *fh;
568 - unsigned short oxid;
570 - struct fcoe_percpu_s *fps;
571 - struct fcoe_info *fci = &fcoei;
573 - fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
575 - if (unlikely(lp == NULL)) {
576 - FC_DBG("cannot find hba structure");
580 - if (unlikely(debug_fcoe)) {
581 - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
582 - "end:%p sum:%d dev:%s", skb->len, skb->data_len,
583 - skb->head, skb->data, skb_tail_pointer(skb),
584 - skb_end_pointer(skb), skb->csum,
585 - skb->dev ? skb->dev->name : "<NULL>");
589 - /* check for FCOE packet type */
590 - if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
591 - FC_DBG("wrong FC type frame");
595 - data += sizeof(struct fcoe_hdr);
596 - fh = (struct fc_frame_header *)data;
597 - oxid = ntohs(fh->fh_ox_id);
599 - fr = fcoe_dev_from_skb(skb);
605 - * The exchange ID are ANDed with num of online CPUs,
606 - * so that will have the least lock contention in
607 - * handling the exchange. if there is no thread
608 - * for a given idx then use first online cpu.
610 - cpu_idx = oxid & (num_online_cpus() >> 1);
611 - if (fci->fcoe_percpu[cpu_idx] == NULL)
612 - cpu_idx = first_cpu(cpu_online_map);
614 - fps = fci->fcoe_percpu[cpu_idx];
616 - spin_lock_bh(&fps->fcoe_rx_list.lock);
617 - __skb_queue_tail(&fps->fcoe_rx_list, skb);
618 - if (fps->fcoe_rx_list.qlen == 1)
619 - wake_up_process(fps->thread);
621 - spin_unlock_bh(&fps->fcoe_rx_list.lock);
626 - stats = lp->dev_stats[smp_processor_id()];
628 - stats = lp->dev_stats[0];
630 - stats->ErrorFrames++;
637 -static inline int fcoe_start_io(struct sk_buff *skb)
642 - rc = dev_queue_xmit(skb);
649 -static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
651 - struct fcoe_info *fci = &fcoei;
652 - struct fcoe_percpu_s *fps;
656 - cpu_idx = get_cpu();
657 - fps = fci->fcoe_percpu[cpu_idx];
658 - page = fps->crc_eof_page;
660 - page = alloc_page(GFP_ATOMIC);
665 - fps->crc_eof_page = page;
666 - WARN_ON(fps->crc_eof_offset != 0);
670 - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
671 - fps->crc_eof_offset, tlen);
673 - skb->data_len += tlen;
674 - skb->truesize += tlen;
675 - fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
677 - if (fps->crc_eof_offset >= PAGE_SIZE) {
678 - fps->crc_eof_page = NULL;
679 - fps->crc_eof_offset = 0;
687 - * this is the frame xmit routine
689 -int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
695 - struct fcoe_crc_eof *cp;
696 - struct sk_buff *skb;
697 - struct fcoe_dev_stats *stats;
698 - struct fc_frame_header *fh;
699 - unsigned int hlen; /* header length implies the version */
700 - unsigned int tlen; /* trailer length */
701 - int flogi_in_progress = 0;
702 - struct fcoe_softc *fc;
705 - struct fcoe_hdr *hp;
707 - WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
709 - fc = (struct fcoe_softc *)lp->drv_priv;
711 - * if it is a flogi then we need to learn gw-addr
714 - fh = fc_frame_header_get(fp);
715 - if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
716 - if (fc_frame_payload_op(fp) == ELS_FLOGI) {
717 - fc->flogi_oxid = ntohs(fh->fh_ox_id);
718 - fc->address_mode = FCOE_FCOUI_ADDR_MODE;
719 - fc->flogi_progress = 1;
720 - flogi_in_progress = 1;
721 - } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
723 - * Here we must've gotten an SID by accepting an FLOGI
724 - * from a point-to-point connection. Switch to using
725 - * the source mac based on the SID. The destination
726 - * MAC in this case would have been set by receving the
729 - fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
730 - fc->flogi_progress = 0;
739 - crc = crc32(crc, skb->data, skb_headlen(skb));
741 - for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) {
742 - skb_frag_t *frag = &skb_shinfo(skb)->frags[indx];
743 - unsigned long off = frag->page_offset;
744 - unsigned long len = frag->size;
747 - unsigned long clen;
749 - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
750 - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
751 - KM_SKB_DATA_SOFTIRQ);
752 - crc = crc32(crc, data + (off & ~PAGE_MASK),
754 - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
761 - * Get header and trailer lengths.
762 - * This is temporary code until we get rid of the old protocol.
763 - * Both versions have essentially the same trailer layout but T11
764 - * has padding afterwards.
766 - hlen = sizeof(struct fcoe_hdr);
767 - tlen = sizeof(struct fcoe_crc_eof);
770 - * copy fc crc and eof to the skb buff
771 - * Use utility buffer in the fc_frame part of the sk_buff for the
773 - * We don't do a get_page for this frag, since that page may not be
774 - * managed that way. So that skb_free() doesn't do that either, we
775 - * setup the destructor to remove this frag.
777 - if (skb_is_nonlinear(skb)) {
779 - if (fcoe_get_paged_crc_eof(skb, tlen)) {
783 - frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
784 - cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
785 - + frag->page_offset;
787 - cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
790 - cp->fcoe_eof = eof;
791 - cp->fcoe_crc32 = cpu_to_le32(~crc);
792 - if (tlen == sizeof(*cp))
793 - memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
794 - wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
796 - if (skb_is_nonlinear(skb)) {
797 - kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
802 - * Fill in the control structures
804 - skb->ip_summed = CHECKSUM_NONE;
805 - eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr));
806 - if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
807 - fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
809 - /* insert GW address */
810 - memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
812 - if (unlikely(flogi_in_progress))
813 - memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
815 - memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
817 - eh->h_proto = htons(ETH_P_FCOE);
818 - skb->protocol = htons(ETH_P_802_3);
819 - skb_reset_mac_header(skb);
820 - skb_reset_network_header(skb);
822 - hp = (struct fcoe_hdr *)(eh + 1);
823 - memset(hp, 0, sizeof(*hp));
825 - FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
826 - hp->fcoe_sof = sof;
828 - stats = lp->dev_stats[smp_processor_id()];
830 - stats->TxWords += wlen;
831 - skb->dev = fc->real_dev;
834 - if (fc->fcoe_pending_queue.qlen)
835 - rc = fcoe_check_wait_queue(lp);
838 - rc = fcoe_start_io(skb);
841 - fcoe_insert_wait_queue(lp, skb);
842 - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
849 -int fcoe_percpu_receive_thread(void *arg)
851 - struct fcoe_percpu_s *p = arg;
855 - struct fc_lport *lp;
856 - struct fcoe_rcv_info *fr;
857 - struct fcoe_dev_stats *stats;
858 - struct fc_frame_header *fh;
859 - struct sk_buff *skb;
860 - struct fcoe_crc_eof *cp;
862 - struct fc_frame *fp;
864 - struct fcoe_softc *fc;
865 - struct fcoe_hdr *hp;
867 - set_user_nice(current, 19);
869 - while (!kthread_should_stop()) {
871 - spin_lock_bh(&p->fcoe_rx_list.lock);
872 - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
873 - set_current_state(TASK_INTERRUPTIBLE);
874 - spin_unlock_bh(&p->fcoe_rx_list.lock);
876 - set_current_state(TASK_RUNNING);
877 - if (kthread_should_stop())
879 - spin_lock_bh(&p->fcoe_rx_list.lock);
881 - spin_unlock_bh(&p->fcoe_rx_list.lock);
882 - fr = fcoe_dev_from_skb(skb);
884 - if (unlikely(lp == NULL)) {
885 - FC_DBG("invalid HBA Structure");
890 - stats = lp->dev_stats[smp_processor_id()];
892 - if (unlikely(debug_fcoe)) {
893 - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
894 - "tail:%p end:%p sum:%d dev:%s",
895 - skb->len, skb->data_len,
896 - skb->head, skb->data, skb_tail_pointer(skb),
897 - skb_end_pointer(skb), skb->csum,
898 - skb->dev ? skb->dev->name : "<NULL>");
902 - * Save source MAC address before discarding header.
905 - if (unlikely(fc->flogi_progress))
906 - mac = eth_hdr(skb)->h_source;
908 - if (skb_is_nonlinear(skb))
909 - skb_linearize(skb); /* not ideal */
912 - * Check the header and pull it off.
914 - hlen = sizeof(struct fcoe_hdr);
916 - hp = (struct fcoe_hdr *)skb->data;
917 - if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
918 - if (stats->ErrorFrames < 5)
919 - FC_DBG("unknown FCoE version %x",
920 - FC_FCOE_DECAPS_VER(hp));
921 - stats->ErrorFrames++;
925 - sof = hp->fcoe_sof;
926 - skb_pull(skb, sizeof(*hp));
927 - fr_len = skb->len - sizeof(struct fcoe_crc_eof);
928 - skb_trim(skb, fr_len);
929 - tlen = sizeof(struct fcoe_crc_eof);
931 - if (unlikely(fr_len > skb->len)) {
932 - if (stats->ErrorFrames < 5)
933 - FC_DBG("length error fr_len 0x%x skb->len 0x%x",
935 - stats->ErrorFrames++;
940 - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
942 - fp = (struct fc_frame *) skb;
944 - cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
945 - fr_eof(fp) = cp->fcoe_eof;
950 - * Check the CRC here, unless it's solicited data for SCSI.
951 - * In that case, the SCSI layer can check it during the copy,
952 - * and it'll be more cache-efficient.
954 - fh = fc_frame_header_get(fp);
955 - if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
956 - fh->fh_type == FC_TYPE_FCP) {
957 - fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
958 - fc_exch_recv(lp, lp->emp, fp);
959 - } else if (le32_to_cpu(cp->fcoe_crc32) ==
960 - ~crc32(~0, skb->data, fr_len)) {
961 - if (unlikely(fc->flogi_progress))
962 - fcoe_recv_flogi(fc, fp, mac);
963 - fc_exch_recv(lp, lp->emp, fp);
965 - if (debug_fcoe || stats->InvalidCRCCount < 5) {
966 - printk(KERN_WARNING \
967 - "fcoe: dropping frame with CRC error");
969 - stats->InvalidCRCCount++;
970 - stats->ErrorFrames++;
978 - * Snoop potential response to FLOGI or even incoming FLOGI.
980 -static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
982 - struct fc_frame_header *fh;
985 - fh = fc_frame_header_get(fp);
986 - if (fh->fh_type != FC_TYPE_ELS)
988 - op = fc_frame_payload_op(fp);
989 - if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
990 - fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
993 - * If the src mac addr is FC_OUI-based, then we mark the
994 - * address_mode flag to use FC_OUI-based Ethernet DA.
995 - * Otherwise we use the FCoE gateway addr
997 - if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
998 - fc->address_mode = FCOE_FCOUI_ADDR_MODE;
1000 - memcpy(fc->dest_addr, sa, ETH_ALEN);
1001 - fc->address_mode = FCOE_GW_ADDR_MODE;
1005 - * Remove any previously-set unicast MAC filter.
1006 - * Add secondary FCoE MAC address filter for our OUI.
1009 - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1010 - dev_unicast_delete(fc->real_dev, fc->data_src_addr,
1012 - fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
1013 - dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1016 - fc->flogi_progress = 0;
1017 - } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
1019 - * Save source MAC for point-to-point responses.
1021 - memcpy(fc->dest_addr, sa, ETH_ALEN);
1022 - fc->address_mode = FCOE_GW_ADDR_MODE;
1026 -void fcoe_watchdog(ulong vp)
1028 - struct fc_lport *lp;
1029 - struct fcoe_softc *fc;
1030 - struct fcoe_info *fci = &fcoei;
1033 - read_lock(&fci->fcoe_hostlist_lock);
1034 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
1037 - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1039 - if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
1045 - read_unlock(&fci->fcoe_hostlist_lock);
1047 - fci->timer.expires = jiffies + (1 * HZ);
1048 - add_timer(&fci->timer);
1052 - * the wait_queue is used when the skb transmit fails. skb will go
1053 - * in the wait_queue which will be emptied by the time function OR
1054 - * by the next skb transmit.
1059 - * Function name : fcoe_check_wait_queue()
1061 - * Return Values : 0 or error
1063 - * Description : empties the wait_queue
1064 - * dequeue the head of the wait_queue queue and
1065 - * calls fcoe_start_io() for each packet
1066 - * if all skb have been transmitted, return 0
1067 - * if a error occurs, then restore wait_queue and try again
1072 -static int fcoe_check_wait_queue(struct fc_lport *lp)
1074 - int rc, unpause = 0;
1076 - struct sk_buff *skb;
1077 - struct fcoe_softc *fc;
1079 - fc = (struct fcoe_softc *)lp->drv_priv;
1080 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1083 - * is this interface paused?
1085 - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1087 - if (fc->fcoe_pending_queue.qlen) {
1088 - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1089 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1090 - rc = fcoe_start_io(skb);
1092 - fcoe_insert_wait_queue_head(lp, skb);
1095 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1097 - if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
1100 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1101 - if ((unpause) && (paused))
1103 - return fc->fcoe_pending_queue.qlen;
1106 -static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
1107 - struct sk_buff *skb)
1109 - struct fcoe_softc *fc;
1111 - fc = (struct fcoe_softc *)lp->drv_priv;
1112 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1113 - __skb_queue_head(&fc->fcoe_pending_queue, skb);
1114 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1117 -static void fcoe_insert_wait_queue(struct fc_lport *lp,
1118 - struct sk_buff *skb)
1120 - struct fcoe_softc *fc;
1122 - fc = (struct fcoe_softc *)lp->drv_priv;
1123 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1124 - __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1125 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1127 diff --git a/drivers/scsi/fcoe/fcoe_if.c b/drivers/scsi/fcoe/fcoe_if.c
1128 deleted file mode 100644
1129 index 73b83ce..0000000
1130 --- a/drivers/scsi/fcoe/fcoe_if.c
1134 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
1136 - * This program is free software; you can redistribute it and/or modify it
1137 - * under the terms and conditions of the GNU General Public License,
1138 - * version 2, as published by the Free Software Foundation.
1140 - * This program is distributed in the hope it will be useful, but WITHOUT
1141 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1142 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1145 - * You should have received a copy of the GNU General Public License along with
1146 - * this program; if not, write to the Free Software Foundation, Inc.,
1147 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1149 - * Maintained at www.Open-FCoE.org
1153 - * FCOE protocol file
1156 -#include <linux/module.h>
1157 -#include <linux/version.h>
1158 -#include <linux/kernel.h>
1159 -#include <linux/init.h>
1160 -#include <linux/spinlock.h>
1161 -#include <linux/netdevice.h>
1162 -#include <linux/etherdevice.h>
1163 -#include <linux/ethtool.h>
1164 -#include <linux/if_ether.h>
1165 -#include <linux/if_vlan.h>
1166 -#include <net/rtnetlink.h>
1168 -#include <scsi/fc/fc_els.h>
1169 -#include <scsi/fc/fc_encaps.h>
1170 -#include <scsi/fc/fc_fs.h>
1171 -#include <scsi/scsi_transport.h>
1172 -#include <scsi/scsi_transport_fc.h>
1174 -#include <scsi/libfc/libfc.h>
1176 -#include <scsi/fc/fc_fcoe.h>
1177 -#include "fcoe_def.h"
1179 -#define FCOE_VERSION "0.1"
1181 -#define FCOE_MAX_LUN 255
1182 -#define FCOE_MAX_FCP_TARGET 256
1184 -#define FCOE_MIN_XID 0x0004
1185 -#define FCOE_MAX_XID 0x07ef
1189 -struct fcoe_info fcoei = {
1190 - .fcoe_hostlist = LIST_HEAD_INIT(fcoei.fcoe_hostlist),
1193 -static struct fcoe_softc *fcoe_find_fc_lport(const char *name)
1195 - struct fcoe_softc *fc;
1196 - struct fc_lport *lp;
1197 - struct fcoe_info *fci = &fcoei;
1199 - read_lock(&fci->fcoe_hostlist_lock);
1200 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
1202 - if (!strncmp(name, lp->ifname, IFNAMSIZ)) {
1203 - read_unlock(&fci->fcoe_hostlist_lock);
1207 - read_unlock(&fci->fcoe_hostlist_lock);
1212 - * Convert 48-bit IEEE MAC address to 64-bit FC WWN.
1214 -static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1215 - unsigned int scheme, unsigned int port)
1220 - /* The MAC is in NO, so flip only the low 48 bits */
1221 - host_mac = ((u64) mac[0] << 40) |
1222 - ((u64) mac[1] << 32) |
1223 - ((u64) mac[2] << 24) |
1224 - ((u64) mac[3] << 16) |
1225 - ((u64) mac[4] << 8) |
1228 - WARN_ON(host_mac >= (1ULL << 48));
1229 - wwn = host_mac | ((u64) scheme << 60);
1232 - WARN_ON(port != 0);
1235 - WARN_ON(port >= 0xfff);
1236 - wwn |= (u64) port << 48;
1246 -static struct scsi_host_template fcoe_driver_template = {
1247 - .module = THIS_MODULE,
1248 - .name = "FCoE Driver",
1249 - .proc_name = FCOE_DRIVER_NAME,
1250 - .queuecommand = fc_queuecommand,
1251 - .eh_abort_handler = fc_eh_abort,
1252 - .eh_device_reset_handler = fc_eh_device_reset,
1253 - .eh_host_reset_handler = fc_eh_host_reset,
1254 - .slave_alloc = fc_slave_alloc,
1255 - .change_queue_depth = fc_change_queue_depth,
1256 - .change_queue_type = fc_change_queue_type,
1258 - .cmd_per_lun = 32,
1259 - .can_queue = FC_MAX_OUTSTANDING_COMMANDS,
1260 - .use_clustering = ENABLE_CLUSTERING,
1261 - .sg_tablesize = 4,
1262 - .max_sectors = 0xffff,
1265 -int fcoe_destroy_interface(const char *ifname)
1268 - struct fcoe_dev_stats *p;
1269 - struct fcoe_percpu_s *pp;
1270 - struct fcoe_softc *fc;
1271 - struct fcoe_rcv_info *fr;
1272 - struct fcoe_info *fci = &fcoei;
1273 - struct sk_buff_head *list;
1274 - struct sk_buff *skb, *next;
1275 - struct sk_buff *head;
1276 - struct fc_lport *lp;
1277 - u8 flogi_maddr[ETH_ALEN];
1279 - fc = fcoe_find_fc_lport(ifname);
1285 - /* Remove the instance from fcoe's list */
1286 - write_lock_bh(&fci->fcoe_hostlist_lock);
1287 - list_del(&fc->list);
1288 - write_unlock_bh(&fci->fcoe_hostlist_lock);
1290 - /* Don't listen for Ethernet packets anymore */
1291 - dev_remove_pack(&fc->fcoe_packet_type);
1293 - /* Detach from the scsi-ml */
1294 - fc_remove_host(lp->host);
1295 - scsi_remove_host(lp->host);
1297 - /* Cleanup the fc_lport */
1298 - fc_lport_destroy(lp);
1299 - fc_fcp_destroy(lp);
1301 - fc_exch_mgr_free(lp->emp);
1303 - /* Delete secondary MAC addresses */
1305 - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1306 - dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
1307 - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1308 - dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1311 - /* Free the per-CPU revieve threads */
1312 - for (idx = 0; idx < NR_CPUS; idx++) {
1313 - if (fci->fcoe_percpu[idx]) {
1314 - pp = fci->fcoe_percpu[idx];
1315 - spin_lock_bh(&pp->fcoe_rx_list.lock);
1316 - list = &pp->fcoe_rx_list;
1317 - head = list->next;
1318 - for (skb = head; skb != (struct sk_buff *)list;
1321 - fr = fcoe_dev_from_skb(skb);
1322 - if (fr->fr_dev == fc->lp) {
1323 - __skb_unlink(skb, list);
1327 - spin_unlock_bh(&pp->fcoe_rx_list.lock);
1331 - /* Free existing skbs */
1332 - fcoe_clean_pending_queue(lp);
1334 - /* Free memory used by statistical counters */
1335 - for_each_online_cpu(cpu) {
1336 - p = lp->dev_stats[cpu];
1338 - lp->dev_stats[cpu] = NULL;
1343 - /* Release the net_device and Scsi_Host */
1344 - dev_put(fc->real_dev);
1345 - scsi_host_put(lp->host);
1350 - * Return zero if link is OK for use by FCoE.
1351 - * Any permanently-disqualifying conditions have been previously checked.
1352 - * This also updates the speed setting, which may change with link for 100/1000.
1354 - * This function should probably be checking for PAUSE support at some point
1355 - * in the future. Currently Per-priority-pause is not determinable using
1356 - * ethtool, so we shouldn't be restrictive until that problem is resolved.
1358 -int fcoe_link_ok(struct fc_lport *lp)
1360 - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1361 - struct net_device *dev = fc->real_dev;
1362 - struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1365 - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1366 - dev = fc->phys_dev;
1367 - if (dev->ethtool_ops->get_settings) {
1368 - dev->ethtool_ops->get_settings(dev, &ecmd);
1369 - lp->link_supported_speeds &=
1370 - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1371 - if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1372 - SUPPORTED_1000baseT_Full))
1373 - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1374 - if (ecmd.supported & SUPPORTED_10000baseT_Full)
1375 - lp->link_supported_speeds |=
1376 - FC_PORTSPEED_10GBIT;
1377 - if (ecmd.speed == SPEED_1000)
1378 - lp->link_speed = FC_PORTSPEED_1GBIT;
1379 - if (ecmd.speed == SPEED_10000)
1380 - lp->link_speed = FC_PORTSPEED_10GBIT;
1388 -static struct libfc_function_template fcoe_libfc_fcn_templ = {
1389 - .frame_send = fcoe_xmit,
1392 -static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
1395 - struct fcoe_dev_stats *p;
1398 - lp->drv_priv = (void *)(lp + 1);
1400 - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
1401 - FCOE_MIN_XID, FCOE_MAX_XID);
1405 - lp->link_status = 0;
1406 - lp->max_retry_count = 3;
1407 - lp->e_d_tov = 2 * 1000; /* FC-FS default */
1408 - lp->r_a_tov = 2 * 2 * 1000;
1409 - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1410 - FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1413 - * allocate per cpu stats block
1415 - for_each_online_cpu(i) {
1416 - p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
1418 - lp->dev_stats[i] = p;
1421 - /* Finish fc_lport configuration */
1422 - fc_lport_config(lp);
1427 -static int net_config(struct fc_lport *lp)
1431 - struct net_device *net_dev;
1432 - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1433 - u8 flogi_maddr[ETH_ALEN];
1435 - /* Require support for get_pauseparam ethtool op. */
1436 - net_dev = fc->real_dev;
1437 - if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN))
1438 - net_dev = vlan_dev_real_dev(net_dev);
1439 - if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam)
1440 - return -EOPNOTSUPP;
1442 - fc->phys_dev = net_dev;
1444 - /* Do not support for bonding device */
1445 - if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
1446 - (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
1447 - (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
1448 - return -EOPNOTSUPP;
1452 - * Determine max frame size based on underlying device and optional
1453 - * user-configured limit. If the MFS is too low, fcoe_link_ok()
1454 - * will return 0, so do this first.
1456 - mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
1457 - sizeof(struct fcoe_crc_eof));
1458 - fc_set_mfs(lp, mfs);
1460 - lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
1461 - if (!fcoe_link_ok(lp))
1462 - lp->link_status |= FC_LINK_UP;
1464 - if (fc->real_dev->features & NETIF_F_SG)
1465 - lp->capabilities = TRANS_C_SG;
1468 - skb_queue_head_init(&fc->fcoe_pending_queue);
1470 - memcpy(lp->ifname, fc->real_dev->name, IFNAMSIZ);
1472 - /* setup Source Mac Address */
1473 - memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
1474 - fc->real_dev->addr_len);
1476 - wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
1477 - fc_set_wwnn(lp, wwnn);
1478 - /* XXX - 3rd arg needs to be vlan id */
1479 - wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
1480 - fc_set_wwpn(lp, wwpn);
1483 - * Add FCoE MAC address as second unicast MAC address
1484 - * or enter promiscuous mode if not capable of listening
1485 - * for multiple unicast MACs.
1488 - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1489 - dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
1493 - * setup the receive function from ethernet driver
1494 - * on the ethertype for the given device
1496 - fc->fcoe_packet_type.func = fcoe_rcv;
1497 - fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
1498 - fc->fcoe_packet_type.dev = fc->real_dev;
1499 - dev_add_pack(&fc->fcoe_packet_type);
1504 -static void shost_config(struct fc_lport *lp)
1506 - lp->host->max_lun = FCOE_MAX_LUN;
1507 - lp->host->max_id = FCOE_MAX_FCP_TARGET;
1508 - lp->host->max_channel = 0;
1509 - lp->host->transportt = fcoe_transport_template;
1512 -static int libfc_config(struct fc_lport *lp)
1514 - /* Set the function pointers set by the LLDD */
1515 - memcpy(&lp->tt, &fcoe_libfc_fcn_templ,
1516 - sizeof(struct libfc_function_template));
1518 - if (fc_fcp_init(lp))
1521 - fc_lport_init(lp);
1522 - fc_rport_init(lp);
1529 - * This function creates the fcoe interface
1530 - * create struct fcdev which is a shared structure between opefc
1531 - * and transport level protocol.
1533 -int fcoe_create_interface(const char *ifname)
1535 - struct fc_lport *lp = NULL;
1536 - struct fcoe_softc *fc;
1537 - struct net_device *net_dev;
1538 - struct Scsi_Host *shost;
1539 - struct fcoe_info *fci = &fcoei;
1542 - net_dev = dev_get_by_name(&init_net, ifname);
1543 - if (net_dev == NULL) {
1544 - FC_DBG("could not get network device for %s",
1549 - if (fcoe_find_fc_lport(net_dev->name) != NULL) {
1554 - shost = scsi_host_alloc(&fcoe_driver_template,
1555 - sizeof(struct fc_lport) +
1556 - sizeof(struct fcoe_softc));
1559 - FC_DBG("Could not allocate host structure\n");
1564 - lp = shost_priv(shost);
1565 - rc = lport_config(lp, shost);
1567 - goto out_host_put;
1569 - /* Configure the fcoe_softc */
1570 - fc = (struct fcoe_softc *)lp->drv_priv;
1572 - fc->real_dev = net_dev;
1576 - /* Add the new host to the SCSI-ml */
1577 - rc = scsi_add_host(lp->host, NULL);
1579 - FC_DBG("error on scsi_add_host\n");
1580 - goto out_lp_destroy;
1583 - sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
1584 - FCOE_DRIVER_NAME, FCOE_VERSION,
1587 - /* Configure netdev and networking properties of the lp */
1588 - rc = net_config(lp);
1590 - goto out_lp_destroy;
1592 - /* Initialize the library */
1593 - rc = libfc_config(lp);
1595 - goto out_lp_destroy;
1597 - write_lock_bh(&fci->fcoe_hostlist_lock);
1598 - list_add_tail(&fc->list, &fci->fcoe_hostlist);
1599 - write_unlock_bh(&fci->fcoe_hostlist_lock);
1601 - lp->boot_time = jiffies;
1603 - fc_fabric_login(lp);
1608 - fc_exch_mgr_free(lp->emp); /* Free the EM */
1610 - scsi_host_put(lp->host);
1616 -void fcoe_clean_pending_queue(struct fc_lport *lp)
1618 - struct fcoe_softc *fc = lp->drv_priv;
1619 - struct sk_buff *skb;
1621 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1622 - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1623 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1625 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1627 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1629 diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
1630 new file mode 100644
1631 index 0000000..3cf5ad6
1633 +++ b/drivers/scsi/fcoe/fcoe_sw.c
1636 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
1638 + * This program is free software; you can redistribute it and/or modify it
1639 + * under the terms and conditions of the GNU General Public License,
1640 + * version 2, as published by the Free Software Foundation.
1642 + * This program is distributed in the hope it will be useful, but WITHOUT
1643 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1644 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1647 + * You should have received a copy of the GNU General Public License along with
1648 + * this program; if not, write to the Free Software Foundation, Inc.,
1649 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
1651 + * Maintained at www.Open-FCoE.org
1655 + * FCOE protocol file
1658 +#include <linux/module.h>
1659 +#include <linux/version.h>
1660 +#include <linux/kernel.h>
1661 +#include <linux/init.h>
1662 +#include <linux/spinlock.h>
1663 +#include <linux/netdevice.h>
1664 +#include <linux/etherdevice.h>
1665 +#include <linux/ethtool.h>
1666 +#include <linux/if_ether.h>
1667 +#include <linux/if_vlan.h>
1668 +#include <net/rtnetlink.h>
1670 +#include <scsi/fc/fc_els.h>
1671 +#include <scsi/fc/fc_encaps.h>
1672 +#include <scsi/fc/fc_fs.h>
1673 +#include <scsi/scsi_transport.h>
1674 +#include <scsi/scsi_transport_fc.h>
1676 +#include <scsi/libfc/libfc.h>
1678 +#include <scsi/fc/fc_fcoe.h>
1679 +#include "fcoe_def.h"
1681 +#define FCOE_VERSION "0.1"
1683 +#define FCOE_MAX_LUN 255
1684 +#define FCOE_MAX_FCP_TARGET 256
1686 +#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
1688 +#define FCOE_MIN_XID 0x0004
1689 +#define FCOE_MAX_XID 0x07ef
1691 +LIST_HEAD(fcoe_hostlist);
1692 +DEFINE_RWLOCK(fcoe_hostlist_lock);
1693 +DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
1694 +struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
1696 +static struct scsi_transport_template *fcoe_transport_template;
1698 +static int fcoe_reset(struct Scsi_Host *shost)
1700 + struct fc_lport *lport = shost_priv(shost);
1701 + fc_lport_reset(lport);
1705 +struct fc_function_template fcoe_transport_function = {
1706 + .show_host_node_name = 1,
1707 + .show_host_port_name = 1,
1708 + .show_host_supported_classes = 1,
1709 + .show_host_supported_fc4s = 1,
1710 + .show_host_active_fc4s = 1,
1711 + .show_host_maxframe_size = 1,
1713 + .show_host_port_id = 1,
1714 + .show_host_supported_speeds = 1,
1715 + .get_host_speed = fc_get_host_speed,
1716 + .show_host_speed = 1,
1717 + .show_host_port_type = 1,
1718 + .get_host_port_state = fc_get_host_port_state,
1719 + .show_host_port_state = 1,
1720 + .show_host_symbolic_name = 1,
1722 + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
1723 + .show_rport_maxframe_size = 1,
1724 + .show_rport_supported_classes = 1,
1726 + .show_host_fabric_name = 1,
1727 + .show_starget_node_name = 1,
1728 + .show_starget_port_name = 1,
1729 + .show_starget_port_id = 1,
1730 + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
1731 + .show_rport_dev_loss_tmo = 1,
1732 + .get_fc_host_stats = fc_get_host_stats,
1733 + .issue_fc_host_lip = fcoe_reset,
1735 + .terminate_rport_io = fc_rport_terminate_io,
1738 +static struct fcoe_softc *fcoe_find_fc_lport(const struct net_device *netdev)
1740 + struct fcoe_softc *fc;
1742 + read_lock(&fcoe_hostlist_lock);
1743 + list_for_each_entry(fc, &fcoe_hostlist, list) {
1744 + if (fc->real_dev == netdev) {
1745 + read_unlock(&fcoe_hostlist_lock);
1749 + read_unlock(&fcoe_hostlist_lock);
1754 + * Convert 48-bit IEEE MAC address to 64-bit FC WWN.
1756 +static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1757 + unsigned int scheme, unsigned int port)
1762 +	/* The MAC is in network order (NO), so flip only the low 48 bits */
1763 + host_mac = ((u64) mac[0] << 40) |
1764 + ((u64) mac[1] << 32) |
1765 + ((u64) mac[2] << 24) |
1766 + ((u64) mac[3] << 16) |
1767 + ((u64) mac[4] << 8) |
1770 + WARN_ON(host_mac >= (1ULL << 48));
1771 + wwn = host_mac | ((u64) scheme << 60);
1774 + WARN_ON(port != 0);
1777 + WARN_ON(port >= 0xfff);
1778 + wwn |= (u64) port << 48;
1788 +static struct scsi_host_template fcoe_driver_template = {
1789 + .module = THIS_MODULE,
1790 + .name = "FCoE Driver",
1791 + .proc_name = FCOE_DRIVER_NAME,
1792 + .queuecommand = fc_queuecommand,
1793 + .eh_abort_handler = fc_eh_abort,
1794 + .eh_device_reset_handler = fc_eh_device_reset,
1795 + .eh_host_reset_handler = fc_eh_host_reset,
1796 + .slave_alloc = fc_slave_alloc,
1797 + .change_queue_depth = fc_change_queue_depth,
1798 + .change_queue_type = fc_change_queue_type,
1800 + .cmd_per_lun = 32,
1801 + .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
1802 + .use_clustering = ENABLE_CLUSTERING,
1803 + .sg_tablesize = 4,
1804 + .max_sectors = 0xffff,
1807 +int fcoe_destroy_interface(struct net_device *netdev)
1810 + struct fcoe_dev_stats *p;
1811 + struct fcoe_percpu_s *pp;
1812 + struct fcoe_softc *fc;
1813 + struct fcoe_rcv_info *fr;
1814 + struct sk_buff_head *list;
1815 + struct sk_buff *skb, *next;
1816 + struct sk_buff *head;
1817 + struct fc_lport *lp;
1818 + u8 flogi_maddr[ETH_ALEN];
1820 + fc = fcoe_find_fc_lport(netdev);
1826 + /* Remove the instance from fcoe's list */
1827 + write_lock_bh(&fcoe_hostlist_lock);
1828 + list_del(&fc->list);
1829 + write_unlock_bh(&fcoe_hostlist_lock);
1831 + /* Don't listen for Ethernet packets anymore */
1832 + dev_remove_pack(&fc->fcoe_packet_type);
1834 + /* Detach from the scsi-ml */
1835 + fc_remove_host(lp->host);
1836 + scsi_remove_host(lp->host);
1838 + /* Cleanup the fc_lport */
1839 + fc_lport_destroy(lp);
1840 + fc_fcp_destroy(lp);
1842 + fc_exch_mgr_free(lp->emp);
1844 + /* Delete secondary MAC addresses */
1846 + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1847 + dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
1848 + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1849 + dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1852 +	/* Free the per-CPU receive threads */
1853 + for (idx = 0; idx < NR_CPUS; idx++) {
1854 + if (fcoe_percpu[idx]) {
1855 + pp = fcoe_percpu[idx];
1856 + spin_lock_bh(&pp->fcoe_rx_list.lock);
1857 + list = &pp->fcoe_rx_list;
1858 + head = list->next;
1859 + for (skb = head; skb != (struct sk_buff *)list;
1862 + fr = fcoe_dev_from_skb(skb);
1863 + if (fr->fr_dev == fc->lp) {
1864 + __skb_unlink(skb, list);
1868 + spin_unlock_bh(&pp->fcoe_rx_list.lock);
1872 + /* Free existing skbs */
1873 + fcoe_clean_pending_queue(lp);
1875 + /* Free memory used by statistical counters */
1876 + for_each_online_cpu(cpu) {
1877 + p = lp->dev_stats[cpu];
1879 + lp->dev_stats[cpu] = NULL;
1884 + /* Release the net_device and Scsi_Host */
1885 + dev_put(fc->real_dev);
1886 + scsi_host_put(lp->host);
1891 + * Return zero if link is OK for use by FCoE.
1892 + * Any permanently-disqualifying conditions have been previously checked.
1893 + * This also updates the speed setting, which may change with link for 100/1000.
1895 + * This function should probably be checking for PAUSE support at some point
1896 + * in the future. Currently Per-priority-pause is not determinable using
1897 + * ethtool, so we shouldn't be restrictive until that problem is resolved.
1899 +int fcoe_link_ok(struct fc_lport *lp)
1901 + struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1902 + struct net_device *dev = fc->real_dev;
1903 + struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1906 + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1907 + dev = fc->phys_dev;
1908 + if (dev->ethtool_ops->get_settings) {
1909 + dev->ethtool_ops->get_settings(dev, &ecmd);
1910 + lp->link_supported_speeds &=
1911 + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1912 + if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1913 + SUPPORTED_1000baseT_Full))
1914 + lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1915 + if (ecmd.supported & SUPPORTED_10000baseT_Full)
1916 + lp->link_supported_speeds |=
1917 + FC_PORTSPEED_10GBIT;
1918 + if (ecmd.speed == SPEED_1000)
1919 + lp->link_speed = FC_PORTSPEED_1GBIT;
1920 + if (ecmd.speed == SPEED_10000)
1921 + lp->link_speed = FC_PORTSPEED_10GBIT;
1929 +static struct libfc_function_template fcoe_libfc_fcn_templ = {
1930 + .frame_send = fcoe_xmit,
1933 +static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
1936 + struct fcoe_dev_stats *p;
1939 + lp->drv_priv = (void *)(lp + 1);
1941 + lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
1942 + FCOE_MIN_XID, FCOE_MAX_XID);
1946 + lp->link_status = 0;
1947 + lp->max_retry_count = 3;
1948 + lp->e_d_tov = 2 * 1000; /* FC-FS default */
1949 + lp->r_a_tov = 2 * 2 * 1000;
1950 + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1951 + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1954 + * allocate per cpu stats block
1956 + for_each_online_cpu(i) {
1957 + p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
1959 + lp->dev_stats[i] = p;
1962 + /* Finish fc_lport configuration */
1963 + fc_lport_config(lp);
1968 +static int net_config(struct fc_lport *lp)
1972 + struct net_device *net_dev;
1973 + struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1974 + u8 flogi_maddr[ETH_ALEN];
1976 + /* Require support for get_pauseparam ethtool op. */
1977 + net_dev = fc->real_dev;
1978 + if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN))
1979 + net_dev = vlan_dev_real_dev(net_dev);
1980 + if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam)
1981 + return -EOPNOTSUPP;
1983 + fc->phys_dev = net_dev;
1985 +	/* Bonding devices are not supported */
1986 + if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
1987 + (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
1988 + (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
1989 + return -EOPNOTSUPP;
1993 + * Determine max frame size based on underlying device and optional
1994 + * user-configured limit. If the MFS is too low, fcoe_link_ok()
1995 + * will return 0, so do this first.
1997 + mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
1998 + sizeof(struct fcoe_crc_eof));
1999 + fc_set_mfs(lp, mfs);
2001 + lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
2002 + if (!fcoe_link_ok(lp))
2003 + lp->link_status |= FC_LINK_UP;
2005 + if (fc->real_dev->features & NETIF_F_SG)
2009 + skb_queue_head_init(&fc->fcoe_pending_queue);
2011 + /* setup Source Mac Address */
2012 + memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
2013 + fc->real_dev->addr_len);
2015 + wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
2016 + fc_set_wwnn(lp, wwnn);
2017 + /* XXX - 3rd arg needs to be vlan id */
2018 + wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
2019 + fc_set_wwpn(lp, wwpn);
2022 + * Add FCoE MAC address as second unicast MAC address
2023 + * or enter promiscuous mode if not capable of listening
2024 + * for multiple unicast MACs.
2027 + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
2028 + dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
2032 + * setup the receive function from ethernet driver
2033 + * on the ethertype for the given device
2035 + fc->fcoe_packet_type.func = fcoe_rcv;
2036 + fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
2037 + fc->fcoe_packet_type.dev = fc->real_dev;
2038 + dev_add_pack(&fc->fcoe_packet_type);
2043 +static void shost_config(struct fc_lport *lp)
2045 + lp->host->max_lun = FCOE_MAX_LUN;
2046 + lp->host->max_id = FCOE_MAX_FCP_TARGET;
2047 + lp->host->max_channel = 0;
2048 + lp->host->transportt = fcoe_transport_template;
2051 +static int libfc_config(struct fc_lport *lp)
2053 + /* Set the function pointers set by the LLDD */
2054 + memcpy(&lp->tt, &fcoe_libfc_fcn_templ,
2055 + sizeof(struct libfc_function_template));
2057 + if (fc_fcp_init(lp))
2060 + fc_lport_init(lp);
2061 + fc_rport_init(lp);
2068 + * This function creates the fcoe interface
2069 + * create struct fcdev which is a shared structure between opefc
2070 + * and transport level protocol.
2072 +int fcoe_create_interface(struct net_device *netdev)
2074 + struct fc_lport *lp = NULL;
2075 + struct fcoe_softc *fc;
2076 + struct Scsi_Host *shost;
2079 + if (fcoe_find_fc_lport(netdev) != NULL)
2082 + shost = scsi_host_alloc(&fcoe_driver_template,
2083 + sizeof(struct fc_lport) +
2084 + sizeof(struct fcoe_softc));
2087 + FC_DBG("Could not allocate host structure\n");
2091 + lp = shost_priv(shost);
2092 + rc = lport_config(lp, shost);
2094 + goto out_host_put;
2096 + /* Configure the fcoe_softc */
2097 + fc = (struct fcoe_softc *)lp->drv_priv;
2099 + fc->real_dev = netdev;
2103 + /* Add the new host to the SCSI-ml */
2104 + rc = scsi_add_host(lp->host, NULL);
2106 + FC_DBG("error on scsi_add_host\n");
2107 + goto out_lp_destroy;
2110 + sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
2111 + FCOE_DRIVER_NAME, FCOE_VERSION,
2114 + /* Configure netdev and networking properties of the lp */
2115 + rc = net_config(lp);
2117 + goto out_lp_destroy;
2119 + /* Initialize the library */
2120 + rc = libfc_config(lp);
2122 + goto out_lp_destroy;
2124 + write_lock_bh(&fcoe_hostlist_lock);
2125 + list_add_tail(&fc->list, &fcoe_hostlist);
2126 + write_unlock_bh(&fcoe_hostlist_lock);
2128 + lp->boot_time = jiffies;
2130 + fc_fabric_login(lp);
2136 + fc_exch_mgr_free(lp->emp); /* Free the EM */
2138 + scsi_host_put(lp->host);
2142 +void fcoe_clean_pending_queue(struct fc_lport *lp)
2144 + struct fcoe_softc *fc = lp->drv_priv;
2145 + struct sk_buff *skb;
2147 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
2148 + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
2149 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2151 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
2153 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2156 +int __init fcoe_sw_init(void)
2158 + fcoe_transport_template =
2159 + fc_attach_transport(&fcoe_transport_function);
2160 + return fcoe_transport_template ? 0 : -1;
2163 +void __exit fcoe_sw_exit(void)
2165 + fc_release_transport(fcoe_transport_template);
2167 diff --git a/drivers/scsi/fcoe/fcoeinit.c b/drivers/scsi/fcoe/fcoeinit.c
2168 deleted file mode 100644
2169 index 7d52ed5..0000000
2170 --- a/drivers/scsi/fcoe/fcoeinit.c
2174 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
2176 - * This program is free software; you can redistribute it and/or modify it
2177 - * under the terms and conditions of the GNU General Public License,
2178 - * version 2, as published by the Free Software Foundation.
2180 - * This program is distributed in the hope it will be useful, but WITHOUT
2181 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2182 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2185 - * You should have received a copy of the GNU General Public License along with
2186 - * this program; if not, write to the Free Software Foundation, Inc.,
2187 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2189 - * Maintained at www.Open-FCoE.org
2192 -#include <linux/module.h>
2193 -#include <linux/version.h>
2194 -#include <linux/kernel.h>
2195 -#include <linux/kthread.h>
2196 -#include <linux/spinlock.h>
2197 -#include <linux/cpu.h>
2198 -#include <linux/netdevice.h>
2199 -#include <linux/etherdevice.h>
2200 -#include <linux/ethtool.h>
2201 -#include <linux/if_ether.h>
2202 -#include <linux/fs.h>
2203 -#include <linux/sysfs.h>
2204 -#include <linux/ctype.h>
2206 -#include <scsi/libfc/libfc.h>
2208 -#include "fcoe_def.h"
2210 -MODULE_AUTHOR("Open-FCoE.org");
2211 -MODULE_DESCRIPTION("FCoE");
2212 -MODULE_LICENSE("GPL");
2215 - * Static functions and variables definations
2217 -#ifdef CONFIG_HOTPLUG_CPU
2218 -static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
2219 -#endif /* CONFIG_HOTPLUG_CPU */
2220 -static int fcoe_device_notification(struct notifier_block *, ulong, void *);
2221 -static void fcoe_dev_setup(void);
2222 -static void fcoe_dev_cleanup(void);
2224 -struct scsi_transport_template *fcoe_transport_template;
2226 -static int fcoe_reset(struct Scsi_Host *shost)
2228 - struct fc_lport *lport = shost_priv(shost);
2229 - fc_lport_reset(lport);
2233 -struct fc_function_template fcoe_transport_function = {
2234 - .show_host_node_name = 1,
2235 - .show_host_port_name = 1,
2236 - .show_host_supported_classes = 1,
2237 - .show_host_supported_fc4s = 1,
2238 - .show_host_active_fc4s = 1,
2239 - .show_host_maxframe_size = 1,
2241 - .show_host_port_id = 1,
2242 - .show_host_supported_speeds = 1,
2243 - .get_host_speed = fc_get_host_speed,
2244 - .show_host_speed = 1,
2245 - .show_host_port_type = 1,
2246 - .get_host_port_state = fc_get_host_port_state,
2247 - .show_host_port_state = 1,
2248 - .show_host_symbolic_name = 1,
2250 - .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
2251 - .show_rport_maxframe_size = 1,
2252 - .show_rport_supported_classes = 1,
2254 - .show_host_fabric_name = 1,
2255 - .show_starget_node_name = 1,
2256 - .show_starget_port_name = 1,
2257 - .show_starget_port_id = 1,
2258 - .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2259 - .show_rport_dev_loss_tmo = 1,
2260 - .get_fc_host_stats = fc_get_host_stats,
2261 - .issue_fc_host_lip = fcoe_reset,
2263 - .terminate_rport_io = fc_rport_terminate_io,
2266 -struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
2268 -#ifdef CONFIG_HOTPLUG_CPU
2269 -static struct notifier_block fcoe_cpu_notifier = {
2270 - .notifier_call = fcoe_cpu_callback,
2272 -#endif /* CONFIG_HOTPLUG_CPU */
2275 - * notification function from net device
2277 -static struct notifier_block fcoe_notifier = {
2278 - .notifier_call = fcoe_device_notification,
2281 -#ifdef CONFIG_HOTPLUG_CPU
2283 - * create percpu stats block
2284 - * called by cpu add/remove notifier
2286 -static void fcoe_create_percpu_data(int cpu)
2288 - struct fc_lport *lp;
2289 - struct fcoe_softc *fc;
2290 - struct fcoe_dev_stats *p;
2291 - struct fcoe_info *fci = &fcoei;
2293 - write_lock_bh(&fci->fcoe_hostlist_lock);
2294 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
2296 - if (lp->dev_stats[cpu] == NULL) {
2297 - p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
2299 - lp->dev_stats[cpu] = p;
2302 - write_unlock_bh(&fci->fcoe_hostlist_lock);
2306 - * destroy percpu stats block
2307 - * called by cpu add/remove notifier
2309 -static void fcoe_destroy_percpu_data(int cpu)
2311 - struct fcoe_dev_stats *p;
2312 - struct fc_lport *lp;
2313 - struct fcoe_softc *fc;
2314 - struct fcoe_info *fci = &fcoei;
2316 - write_lock_bh(&fci->fcoe_hostlist_lock);
2317 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
2319 - p = lp->dev_stats[cpu];
2321 - lp->dev_stats[cpu] = NULL;
2325 - write_unlock_bh(&fci->fcoe_hostlist_lock);
2329 - * Get notified when a cpu comes on/off. Be hotplug friendly.
2331 -static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
2334 - unsigned int cpu = (unsigned long)hcpu;
2338 - fcoe_create_percpu_data(cpu);
2341 - fcoe_destroy_percpu_data(cpu);
2348 -#endif /* CONFIG_HOTPLUG_CPU */
2351 - * function to setup link change notification interface
2353 -static void fcoe_dev_setup(void)
2356 - * here setup a interface specific wd time to
2357 - * monitor the link state
2359 - register_netdevice_notifier(&fcoe_notifier);
2363 - * function to cleanup link change notification interface
2365 -static void fcoe_dev_cleanup(void)
2367 - unregister_netdevice_notifier(&fcoe_notifier);
2371 - * This function is called by the ethernet driver
2372 - * this is called in case of link change event
2374 -static int fcoe_device_notification(struct notifier_block *notifier,
2375 - ulong event, void *ptr)
2377 - struct fc_lport *lp = NULL;
2378 - struct net_device *real_dev = ptr;
2379 - struct fcoe_softc *fc;
2380 - struct fcoe_dev_stats *stats;
2381 - struct fcoe_info *fci = &fcoei;
2384 - int rc = NOTIFY_OK;
2386 - read_lock(&fci->fcoe_hostlist_lock);
2387 - list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
2388 - if (fc->real_dev == real_dev) {
2393 - read_unlock(&fci->fcoe_hostlist_lock);
2399 - new_status = lp->link_status;
2402 - case NETDEV_GOING_DOWN:
2403 - new_status &= ~FC_LINK_UP;
2406 - case NETDEV_CHANGE:
2407 - new_status &= ~FC_LINK_UP;
2408 - if (!fcoe_link_ok(lp))
2409 - new_status |= FC_LINK_UP;
2411 - case NETDEV_CHANGEMTU:
2412 - mfs = fc->real_dev->mtu -
2413 - (sizeof(struct fcoe_hdr) +
2414 - sizeof(struct fcoe_crc_eof));
2415 - if (fc->user_mfs && fc->user_mfs < mfs)
2416 - mfs = fc->user_mfs;
2417 - if (mfs >= FC_MIN_MAX_FRAME)
2418 - fc_set_mfs(lp, mfs);
2419 - new_status &= ~FC_LINK_UP;
2420 - if (!fcoe_link_ok(lp))
2421 - new_status |= FC_LINK_UP;
2423 - case NETDEV_REGISTER:
2426 - FC_DBG("unknown event %ld call", event);
2428 - if (lp->link_status != new_status) {
2429 - if ((new_status & FC_LINK_UP) == FC_LINK_UP)
2432 - stats = lp->dev_stats[smp_processor_id()];
2433 - stats->LinkFailureCount++;
2435 - fcoe_clean_pending_queue(lp);
2442 -static void trimstr(char *str, int len)
2444 - char *cp = str + len;
2445 - while (--cp >= str && *cp == '\n')
2449 -static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr,
2450 - const char *buffer, size_t size)
2453 - strcpy(ifname, buffer);
2454 - trimstr(ifname, strlen(ifname));
2455 - fcoe_destroy_interface(ifname);
2459 -static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr,
2460 - const char *buffer, size_t size)
2463 - strcpy(ifname, buffer);
2464 - trimstr(ifname, strlen(ifname));
2465 - fcoe_create_interface(ifname);
2469 -static const struct kobj_attribute fcoe_destroyattr = \
2470 - __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy);
2471 -static const struct kobj_attribute fcoe_createattr = \
2472 - __ATTR(create, S_IWUSR, NULL, fcoe_create);
2475 - * Initialization routine
2476 - * 1. Will create fc transport software structure
2477 - * 2. initialize the link list of port information structure
2479 -static int __init fcoeinit(void)
2483 - struct fcoe_percpu_s *p;
2484 - struct fcoe_info *fci = &fcoei;
2486 - rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
2487 - &fcoe_destroyattr.attr);
2489 - rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj,
2490 - &fcoe_createattr.attr);
2495 - rwlock_init(&fci->fcoe_hostlist_lock);
2497 -#ifdef CONFIG_HOTPLUG_CPU
2498 - register_cpu_notifier(&fcoe_cpu_notifier);
2499 -#endif /* CONFIG_HOTPLUG_CPU */
2502 - * initialize per CPU interrupt thread
2504 - for_each_online_cpu(cpu) {
2505 - p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
2507 - p->thread = kthread_create(fcoe_percpu_receive_thread,
2509 - "fcoethread/%d", cpu);
2512 - * if there is no error then bind the thread to the cpu
2513 - * initialize the semaphore and skb queue head
2515 - if (likely(!IS_ERR(p->thread))) {
2517 - fci->fcoe_percpu[cpu] = p;
2518 - skb_queue_head_init(&p->fcoe_rx_list);
2519 - kthread_bind(p->thread, cpu);
2520 - wake_up_process(p->thread);
2522 - fci->fcoe_percpu[cpu] = NULL;
2529 - FC_DBG("failed to initialize proc intrerface\n");
2535 - * setup link change notification
2539 - init_timer(&fci->timer);
2540 - fci->timer.data = (ulong) fci;
2541 - fci->timer.function = fcoe_watchdog;
2542 - fci->timer.expires = (jiffies + (10 * HZ));
2543 - add_timer(&fci->timer);
2545 - fcoe_transport_template =
2546 - fc_attach_transport(&fcoe_transport_function);
2548 - if (fcoe_transport_template == NULL) {
2549 - FC_DBG("fail to attach fc transport");
2556 -#ifdef CONFIG_HOTPLUG_CPU
2557 - unregister_cpu_notifier(&fcoe_cpu_notifier);
2558 -#endif /* CONFIG_HOTPLUG_CPU */
2562 -static void __exit fcoe_exit(void)
2565 - struct fcoe_softc *fc, *tmp;
2566 - struct fc_lport *lp;
2567 - struct fcoe_info *fci = &fcoei;
2568 - struct fcoe_percpu_s *p;
2569 - struct sk_buff *skb;
2572 - * Stop all call back interfaces
2574 -#ifdef CONFIG_HOTPLUG_CPU
2575 - unregister_cpu_notifier(&fcoe_cpu_notifier);
2576 -#endif /* CONFIG_HOTPLUG_CPU */
2577 - fcoe_dev_cleanup();
2582 - del_timer_sync(&fci->timer);
2585 - * assuming that at this time there will be no
2586 - * ioctl in prograss, therefore we do not need to lock the
2589 - list_for_each_entry_safe(fc, tmp, &fci->fcoe_hostlist, list) {
2591 - fcoe_destroy_interface(lp->ifname);
2594 - for (idx = 0; idx < NR_CPUS; idx++) {
2595 - if (fci->fcoe_percpu[idx]) {
2596 - kthread_stop(fci->fcoe_percpu[idx]->thread);
2597 - p = fci->fcoe_percpu[idx];
2598 - spin_lock_bh(&p->fcoe_rx_list.lock);
2599 - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
2601 - spin_unlock_bh(&p->fcoe_rx_list.lock);
2602 - if (fci->fcoe_percpu[idx]->crc_eof_page)
2603 - put_page(fci->fcoe_percpu[idx]->crc_eof_page);
2604 - kfree(fci->fcoe_percpu[idx]);
2608 - fc_release_transport(fcoe_transport_template);
2611 -module_init(fcoeinit);
2612 -module_exit(fcoe_exit);
2613 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
2614 new file mode 100644
2615 index 0000000..93c47aa
2617 +++ b/drivers/scsi/fcoe/libfcoe.c
2620 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
2622 + * This program is free software; you can redistribute it and/or modify it
2623 + * under the terms and conditions of the GNU General Public License,
2624 + * version 2, as published by the Free Software Foundation.
2626 + * This program is distributed in the hope it will be useful, but WITHOUT
2627 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
2628 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
2631 + * You should have received a copy of the GNU General Public License along with
2632 + * this program; if not, write to the Free Software Foundation, Inc.,
2633 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
2635 + * Maintained at www.Open-FCoE.org
2639 + * FCOE protocol file
2642 +#include <linux/module.h>
2643 +#include <linux/version.h>
2644 +#include <linux/kernel.h>
2645 +#include <linux/spinlock.h>
2646 +#include <linux/skbuff.h>
2647 +#include <linux/netdevice.h>
2648 +#include <linux/etherdevice.h>
2649 +#include <linux/if_ether.h>
2650 +#include <linux/kthread.h>
2651 +#include <linux/crc32.h>
2652 +#include <scsi/scsi_tcq.h>
2653 +#include <scsi/scsicam.h>
2654 +#include <scsi/scsi_transport.h>
2655 +#include <scsi/scsi_transport_fc.h>
2656 +#include <net/rtnetlink.h>
2658 +#include <scsi/fc/fc_encaps.h>
2660 +#include <scsi/libfc/libfc.h>
2661 +#include <scsi/libfc/fc_frame.h>
2663 +#include <scsi/fc/fc_fcoe.h>
2664 +#include "fcoe_def.h"
2666 +static int debug_fcoe;
2668 +#define FCOE_MAX_QUEUE_DEPTH 256
2670 +/* destination address mode */
2671 +#define FCOE_GW_ADDR_MODE 0x00
2672 +#define FCOE_FCOUI_ADDR_MODE 0x01
2674 +/* Function Prototyes */
2675 +static int fcoe_check_wait_queue(struct fc_lport *);
2676 +static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
2677 +static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
2678 +static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
2681 + * this is the fcoe receive function
2682 + * called by NET_RX_SOFTIRQ
2683 + * this function will receive the packet and
2684 + * build fc frame and pass it up
2686 +int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
2687 + struct packet_type *ptype, struct net_device *olddev)
2689 + struct fc_lport *lp;
2690 + struct fcoe_rcv_info *fr;
2691 + struct fcoe_softc *fc;
2692 + struct fcoe_dev_stats *stats;
2694 + struct fc_frame_header *fh;
2695 + unsigned short oxid;
2697 + struct fcoe_percpu_s *fps;
2699 + fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
2701 + if (unlikely(lp == NULL)) {
2702 + FC_DBG("cannot find hba structure");
2706 + if (unlikely(debug_fcoe)) {
2707 + FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
2708 + "end:%p sum:%d dev:%s", skb->len, skb->data_len,
2709 + skb->head, skb->data, skb_tail_pointer(skb),
2710 + skb_end_pointer(skb), skb->csum,
2711 + skb->dev ? skb->dev->name : "<NULL>");
2715 + /* check for FCOE packet type */
2716 + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
2717 + FC_DBG("wrong FC type frame");
2721 + data += sizeof(struct fcoe_hdr);
2722 + fh = (struct fc_frame_header *)data;
2723 + oxid = ntohs(fh->fh_ox_id);
2725 + fr = fcoe_dev_from_skb(skb);
2727 + fr->ptype = ptype;
2731 + * The exchange ID are ANDed with num of online CPUs,
2732 + * so that will have the least lock contention in
2733 + * handling the exchange. if there is no thread
2734 + * for a given idx then use first online cpu.
2736 + cpu_idx = oxid & (num_online_cpus() >> 1);
2737 + if (fcoe_percpu[cpu_idx] == NULL)
2738 + cpu_idx = first_cpu(cpu_online_map);
2740 + fps = fcoe_percpu[cpu_idx];
2742 + spin_lock_bh(&fps->fcoe_rx_list.lock);
2743 + __skb_queue_tail(&fps->fcoe_rx_list, skb);
2744 + if (fps->fcoe_rx_list.qlen == 1)
2745 + wake_up_process(fps->thread);
2747 + spin_unlock_bh(&fps->fcoe_rx_list.lock);
2752 + stats = lp->dev_stats[smp_processor_id()];
2754 + stats = lp->dev_stats[0];
2756 + stats->ErrorFrames++;
2763 +static inline int fcoe_start_io(struct sk_buff *skb)
2768 + rc = dev_queue_xmit(skb);
2775 +static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
2777 + struct fcoe_percpu_s *fps;
2778 + struct page *page;
2781 + cpu_idx = get_cpu();
2782 + fps = fcoe_percpu[cpu_idx];
2783 + page = fps->crc_eof_page;
2785 + page = alloc_page(GFP_ATOMIC);
2790 + fps->crc_eof_page = page;
2791 + WARN_ON(fps->crc_eof_offset != 0);
2795 + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
2796 + fps->crc_eof_offset, tlen);
2798 + skb->data_len += tlen;
2799 + skb->truesize += tlen;
2800 + fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
2802 + if (fps->crc_eof_offset >= PAGE_SIZE) {
2803 + fps->crc_eof_page = NULL;
2804 + fps->crc_eof_offset = 0;
2812 + * this is the frame xmit routine
2814 +int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
2819 + struct ethhdr *eh;
2820 + struct fcoe_crc_eof *cp;
2821 + struct sk_buff *skb;
2822 + struct fcoe_dev_stats *stats;
2823 + struct fc_frame_header *fh;
2824 + unsigned int hlen; /* header length implies the version */
2825 + unsigned int tlen; /* trailer length */
2826 + int flogi_in_progress = 0;
2827 + struct fcoe_softc *fc;
2830 + struct fcoe_hdr *hp;
2832 + WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
2834 + fc = (struct fcoe_softc *)lp->drv_priv;
2836 + * if it is a flogi then we need to learn gw-addr
2839 + fh = fc_frame_header_get(fp);
2840 + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
2841 + if (fc_frame_payload_op(fp) == ELS_FLOGI) {
2842 + fc->flogi_oxid = ntohs(fh->fh_ox_id);
2843 + fc->address_mode = FCOE_FCOUI_ADDR_MODE;
2844 + fc->flogi_progress = 1;
2845 + flogi_in_progress = 1;
2846 + } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
2848 + * Here we must've gotten an SID by accepting an FLOGI
2849 + * from a point-to-point connection. Switch to using
2850 + * the source mac based on the SID. The destination
2851 + * MAC in this case would have been set by receving the
2854 + fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
2855 + fc->flogi_progress = 0;
2864 + crc = crc32(crc, skb->data, skb_headlen(skb));
2866 + for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) {
2867 + skb_frag_t *frag = &skb_shinfo(skb)->frags[indx];
2868 + unsigned long off = frag->page_offset;
2869 + unsigned long len = frag->size;
2872 + unsigned long clen;
2874 + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
2875 + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
2876 + KM_SKB_DATA_SOFTIRQ);
2877 + crc = crc32(crc, data + (off & ~PAGE_MASK),
2879 + kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
2886 + * Get header and trailer lengths.
2887 + * This is temporary code until we get rid of the old protocol.
2888 + * Both versions have essentially the same trailer layout but T11
2889 + * has padding afterwards.
2891 + hlen = sizeof(struct fcoe_hdr);
2892 + tlen = sizeof(struct fcoe_crc_eof);
2895 + * copy fc crc and eof to the skb buff
2896 + * Use utility buffer in the fc_frame part of the sk_buff for the
2898 + * We don't do a get_page for this frag, since that page may not be
2899 + * managed that way. So that skb_free() doesn't do that either, we
2900 + * setup the destructor to remove this frag.
2902 + if (skb_is_nonlinear(skb)) {
2904 + if (fcoe_get_paged_crc_eof(skb, tlen)) {
2908 + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2909 + cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
2910 + + frag->page_offset;
2912 + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
2915 + cp->fcoe_eof = eof;
2916 + cp->fcoe_crc32 = cpu_to_le32(~crc);
2917 + if (tlen == sizeof(*cp))
2918 + memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
2919 + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
2921 + if (skb_is_nonlinear(skb)) {
2922 + kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
2927 + * Fill in the control structures
2929 + skb->ip_summed = CHECKSUM_NONE;
2930 + eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr));
2931 + if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
2932 + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
2934 + /* insert GW address */
2935 + memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
2937 + if (unlikely(flogi_in_progress))
2938 + memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
2940 + memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
2942 + eh->h_proto = htons(ETH_P_FCOE);
2943 + skb->protocol = htons(ETH_P_802_3);
2944 + skb_reset_mac_header(skb);
2945 + skb_reset_network_header(skb);
2947 + hp = (struct fcoe_hdr *)(eh + 1);
2948 + memset(hp, 0, sizeof(*hp));
2950 + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
2951 + hp->fcoe_sof = sof;
2953 + stats = lp->dev_stats[smp_processor_id()];
2954 + stats->TxFrames++;
2955 + stats->TxWords += wlen;
2956 + skb->dev = fc->real_dev;
2959 + if (fc->fcoe_pending_queue.qlen)
2960 + rc = fcoe_check_wait_queue(lp);
2963 + rc = fcoe_start_io(skb);
2966 + fcoe_insert_wait_queue(lp, skb);
2967 + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
2974 +int fcoe_percpu_receive_thread(void *arg)
2976 + struct fcoe_percpu_s *p = arg;
2978 + unsigned int hlen;
2979 + unsigned int tlen;
2980 + struct fc_lport *lp;
2981 + struct fcoe_rcv_info *fr;
2982 + struct fcoe_dev_stats *stats;
2983 + struct fc_frame_header *fh;
2984 + struct sk_buff *skb;
2985 + struct fcoe_crc_eof *cp;
2987 + struct fc_frame *fp;
2989 + struct fcoe_softc *fc;
2990 + struct fcoe_hdr *hp;
2992 + set_user_nice(current, 19);
2994 + while (!kthread_should_stop()) {
2996 + spin_lock_bh(&p->fcoe_rx_list.lock);
2997 + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
2998 + set_current_state(TASK_INTERRUPTIBLE);
2999 + spin_unlock_bh(&p->fcoe_rx_list.lock);
3001 + set_current_state(TASK_RUNNING);
3002 + if (kthread_should_stop())
3004 + spin_lock_bh(&p->fcoe_rx_list.lock);
3006 + spin_unlock_bh(&p->fcoe_rx_list.lock);
3007 + fr = fcoe_dev_from_skb(skb);
3009 + if (unlikely(lp == NULL)) {
3010 + FC_DBG("invalid HBA Structure");
3015 + stats = lp->dev_stats[smp_processor_id()];
3017 + if (unlikely(debug_fcoe)) {
3018 + FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
3019 + "tail:%p end:%p sum:%d dev:%s",
3020 + skb->len, skb->data_len,
3021 + skb->head, skb->data, skb_tail_pointer(skb),
3022 + skb_end_pointer(skb), skb->csum,
3023 + skb->dev ? skb->dev->name : "<NULL>");
3027 + * Save source MAC address before discarding header.
3029 + fc = lp->drv_priv;
3030 + if (unlikely(fc->flogi_progress))
3031 + mac = eth_hdr(skb)->h_source;
3033 + if (skb_is_nonlinear(skb))
3034 + skb_linearize(skb); /* not ideal */
3037 + * Check the header and pull it off.
3039 + hlen = sizeof(struct fcoe_hdr);
3041 + hp = (struct fcoe_hdr *)skb->data;
3042 + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
3043 + if (stats->ErrorFrames < 5)
3044 + FC_DBG("unknown FCoE version %x",
3045 + FC_FCOE_DECAPS_VER(hp));
3046 + stats->ErrorFrames++;
3050 + sof = hp->fcoe_sof;
3051 + skb_pull(skb, sizeof(*hp));
3052 + fr_len = skb->len - sizeof(struct fcoe_crc_eof);
3053 + skb_trim(skb, fr_len);
3054 + tlen = sizeof(struct fcoe_crc_eof);
3056 + if (unlikely(fr_len > skb->len)) {
3057 + if (stats->ErrorFrames < 5)
3058 + FC_DBG("length error fr_len 0x%x skb->len 0x%x",
3059 + fr_len, skb->len);
3060 + stats->ErrorFrames++;
3064 + stats->RxFrames++;
3065 + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
3067 + fp = (struct fc_frame *) skb;
3068 + fc_frame_init(fp);
3069 + cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
3070 + fr_eof(fp) = cp->fcoe_eof;
3075 + * Check the CRC here, unless it's solicited data for SCSI.
3076 + * In that case, the SCSI layer can check it during the copy,
3077 + * and it'll be more cache-efficient.
3079 + fh = fc_frame_header_get(fp);
3080 + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
3081 + fh->fh_type == FC_TYPE_FCP) {
3082 + fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
3083 + fc_exch_recv(lp, lp->emp, fp);
3084 + } else if (le32_to_cpu(cp->fcoe_crc32) ==
3085 + ~crc32(~0, skb->data, fr_len)) {
3086 + if (unlikely(fc->flogi_progress))
3087 + fcoe_recv_flogi(fc, fp, mac);
3088 + fc_exch_recv(lp, lp->emp, fp);
3090 + if (debug_fcoe || stats->InvalidCRCCount < 5) {
3091 + printk(KERN_WARNING \
3092 + "fcoe: dropping frame with CRC error");
3094 + stats->InvalidCRCCount++;
3095 + stats->ErrorFrames++;
3096 + fc_frame_free(fp);
3103 + * Snoop potential response to FLOGI or even incoming FLOGI.
3105 +static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
3107 + struct fc_frame_header *fh;
3110 + fh = fc_frame_header_get(fp);
3111 + if (fh->fh_type != FC_TYPE_ELS)
3113 + op = fc_frame_payload_op(fp);
3114 + if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
3115 + fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
3118 + * If the src mac addr is FC_OUI-based, then we mark the
3119 + * address_mode flag to use FC_OUI-based Ethernet DA.
3120 + * Otherwise we use the FCoE gateway addr
3122 + if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
3123 + fc->address_mode = FCOE_FCOUI_ADDR_MODE;
3125 + memcpy(fc->dest_addr, sa, ETH_ALEN);
3126 + fc->address_mode = FCOE_GW_ADDR_MODE;
3130 + * Remove any previously-set unicast MAC filter.
3131 + * Add secondary FCoE MAC address filter for our OUI.
3134 + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
3135 + dev_unicast_delete(fc->real_dev, fc->data_src_addr,
3137 + fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
3138 + dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
3141 + fc->flogi_progress = 0;
3142 + } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
3144 + * Save source MAC for point-to-point responses.
3146 + memcpy(fc->dest_addr, sa, ETH_ALEN);
3147 + fc->address_mode = FCOE_GW_ADDR_MODE;
3151 +void fcoe_watchdog(ulong vp)
3153 + struct fc_lport *lp;
3154 + struct fcoe_softc *fc;
3157 + read_lock(&fcoe_hostlist_lock);
3158 + list_for_each_entry(fc, &fcoe_hostlist, list) {
3161 + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
3163 + if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
3169 + read_unlock(&fcoe_hostlist_lock);
3171 + fcoe_timer.expires = jiffies + (1 * HZ);
3172 + add_timer(&fcoe_timer);
3176 + * the wait_queue is used when the skb transmit fails. skb will go
3177 + * in the wait_queue which will be emptied by the time function OR
3178 + * by the next skb transmit.
3183 + * Function name : fcoe_check_wait_queue()
3185 + * Return Values : 0 or error
3187 + * Description : empties the wait_queue
3188 + * dequeue the head of the wait_queue queue and
3189 + * calls fcoe_start_io() for each packet
3190 + * if all skb have been transmitted, return 0
3191 + * if a error occurs, then restore wait_queue and try again
3196 +static int fcoe_check_wait_queue(struct fc_lport *lp)
3198 + int rc, unpause = 0;
3200 + struct sk_buff *skb;
3201 + struct fcoe_softc *fc;
3203 + fc = (struct fcoe_softc *)lp->drv_priv;
3204 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3207 + * is this interface paused?
3209 + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
3211 + if (fc->fcoe_pending_queue.qlen) {
3212 + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
3213 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3214 + rc = fcoe_start_io(skb);
3216 + fcoe_insert_wait_queue_head(lp, skb);
3219 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3221 + if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
3224 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3225 + if ((unpause) && (paused))
3227 + return fc->fcoe_pending_queue.qlen;
3230 +static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
3231 + struct sk_buff *skb)
3233 + struct fcoe_softc *fc;
3235 + fc = (struct fcoe_softc *)lp->drv_priv;
3236 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3237 + __skb_queue_head(&fc->fcoe_pending_queue, skb);
3238 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3241 +static void fcoe_insert_wait_queue(struct fc_lport *lp,
3242 + struct sk_buff *skb)
3244 + struct fcoe_softc *fc;
3246 + fc = (struct fcoe_softc *)lp->drv_priv;
3247 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
3248 + __skb_queue_tail(&fc->fcoe_pending_queue, skb);
3249 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
3251 diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
3252 index 30403aa..f724dd2 100644
3253 --- a/drivers/scsi/libfc/fc_disc.c
3254 +++ b/drivers/scsi/libfc/fc_disc.c
3259 - * Actually, this discovers all FC-4 remote ports, including FCP initiators.
3261 + * This block discovers all FC-4 remote ports, including FCP initiators. It
3262 + * also handles RSCN events and re-discovery if necessary.
3265 #include <linux/timer.h>
3267 #define FC_DISC_RETRY_LIMIT 3 /* max retries */
3268 #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
3271 +static int fc_disc_debug;
3273 +#define FC_DEBUG_DISC(fmt...) \
3275 + if (fc_disc_debug) \
3279 static void fc_disc_gpn_ft_req(struct fc_lport *);
3280 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
3281 static int fc_disc_new_target(struct fc_lport *, struct fc_rport *,
3282 - struct fc_rport_identifiers *);
3283 + struct fc_rport_identifiers *);
3284 static void fc_disc_del_target(struct fc_lport *, struct fc_rport *);
3285 static void fc_disc_done(struct fc_lport *);
3286 static void fc_disc_error(struct fc_lport *, struct fc_frame *);
3287 @@ -47,13 +55,13 @@ static void fc_disc_single(struct fc_lport *, struct fc_disc_port *);
3288 static int fc_disc_restart(struct fc_lport *);
3291 - * fc_disc_rscn_req - Handle Registered State Change Notification (RSCN)
3292 + * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
3293 * @sp: Current sequence of the RSCN exchange
3295 - * @lp: Fibre Channel host port instance
3296 + * @lport: Fibre Channel host port instance
3298 -static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3299 - struct fc_lport *lp)
3300 +static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3301 + struct fc_lport *lport)
3303 struct fc_els_rscn *rp;
3304 struct fc_els_rscn_page *pp;
3305 @@ -86,12 +94,14 @@ static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3308 case ELS_ADDR_FMT_PORT:
3309 + FC_DEBUG_DISC("Port address format for port (%6x)\n",
3310 + ntoh24(pp->rscn_fid));
3311 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
3318 dp->ids.port_id = ntoh24(pp->rscn_fid);
3319 dp->ids.port_name = -1;
3320 dp->ids.node_name = -1;
3321 @@ -102,27 +112,26 @@ static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3322 case ELS_ADDR_FMT_DOM:
3323 case ELS_ADDR_FMT_FAB:
3325 + FC_DEBUG_DISC("Address format is (%d)\n", fmt);
3330 - lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
3331 + lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
3333 - if (fc_disc_debug)
3334 - FC_DBG("RSCN received: rediscovering\n");
3335 + FC_DEBUG_DISC("RSCN received: rediscovering\n");
3336 list_for_each_entry_safe(dp, next, &disc_list, peers) {
3337 list_del(&dp->peers);
3340 - fc_disc_restart(lp);
3341 + fc_disc_restart(lport);
3343 - if (fc_disc_debug)
3344 - FC_DBG("RSCN received: not rediscovering. "
3345 - "redisc %d state %d in_prog %d\n",
3346 - redisc, lp->state, lp->disc_pending);
3347 + FC_DEBUG_DISC("RSCN received: not rediscovering. "
3348 + "redisc %d state %d in_prog %d\n",
3349 + redisc, lport->state, lport->disc_pending);
3350 list_for_each_entry_safe(dp, next, &disc_list, peers) {
3351 list_del(&dp->peers);
3352 - fc_disc_single(lp, dp);
3353 + fc_disc_single(lport, dp);
3357 @@ -131,48 +140,53 @@ reject:
3359 rjt_data.reason = ELS_RJT_LOGIC;
3360 rjt_data.explan = ELS_EXPL_NONE;
3361 - lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
3362 + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
3367 + * fc_disc_recv_req - Handle incoming requests
3368 + * @sp: Current sequence of the request exchange
3370 + * @lport: The FC local port
3372 static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
3373 - struct fc_lport *lp)
3374 + struct fc_lport *lport)
3376 - switch (fc_frame_payload_op(fp)) {
3379 + op = fc_frame_payload_op(fp);
3382 - fc_disc_rscn_req(sp, fp, lp);
3383 + fc_disc_recv_rscn_req(sp, fp, lport);
3386 - FC_DBG("fc_disc recieved an unexpected request\n");
3387 + FC_DBG("Received an unsupported request. opcode (%x)\n", op);
3393 - * Refresh target discovery, perhaps due to an RSCN.
3394 - * A configurable delay is introduced to collect any subsequent RSCNs.
3396 + * fc_disc_restart - Restart discovery
3397 + * @lport: FC local port
3399 -static int fc_disc_restart(struct fc_lport *lp)
3400 +static int fc_disc_restart(struct fc_lport *lport)
3402 - if (!lp->disc_requested && !lp->disc_pending) {
3403 - schedule_delayed_work(&lp->disc_work,
3404 - msecs_to_jiffies(lp->disc_delay * 1000));
3405 + if (!lport->disc_requested && !lport->disc_pending) {
3406 + schedule_delayed_work(&lport->disc_work,
3407 + msecs_to_jiffies(lport->disc_delay * 1000));
3409 - lp->disc_requested = 1;
3410 + lport->disc_requested = 1;
3415 - * Fibre Channel Target discovery.
3417 + * fc_disc_start - Fibre Channel Target discovery
3418 + * @lport: FC local port
3420 * Returns non-zero if discovery cannot be started.
3422 - * Callback is called for each target remote port found in discovery.
3423 - * When discovery is complete, the callback is called with a NULL remote port.
3424 - * Discovery may be restarted after an RSCN is received, causing the
3425 - * callback to be called after discovery complete is indicated.
3427 -int fc_disc_start(struct fc_lport *lp)
3428 +static int fc_disc_start(struct fc_lport *lport)
3430 struct fc_rport *rport;
3432 @@ -181,20 +195,20 @@ int fc_disc_start(struct fc_lport *lp)
3434 * If not ready, or already running discovery, just set request flag.
3436 - if (!fc_lport_test_ready(lp) || lp->disc_pending) {
3437 - lp->disc_requested = 1;
3438 + if (!fc_lport_test_ready(lport) || lport->disc_pending) {
3439 + lport->disc_requested = 1;
3443 - lp->disc_pending = 1;
3444 - lp->disc_requested = 0;
3445 - lp->disc_retry_count = 0;
3446 + lport->disc_pending = 1;
3447 + lport->disc_requested = 0;
3448 + lport->disc_retry_count = 0;
3451 * Handle point-to-point mode as a simple discovery
3452 * of the remote port.
3454 - rport = lp->ptp_rp;
3455 + rport = lport->ptp_rp;
3457 ids.port_id = rport->port_id;
3458 ids.port_name = rport->port_name;
3459 @@ -202,46 +216,41 @@ int fc_disc_start(struct fc_lport *lp)
3460 ids.roles = FC_RPORT_ROLE_UNKNOWN;
3461 get_device(&rport->dev);
3463 - error = fc_disc_new_target(lp, rport, &ids);
3464 + error = fc_disc_new_target(lport, rport, &ids);
3465 put_device(&rport->dev);
3468 + fc_disc_done(lport);
3470 - fc_block_rports(lp);
3471 - fc_disc_gpn_ft_req(lp); /* get ports by FC-4 type */
3472 + fc_disc_gpn_ft_req(lport); /* get ports by FC-4 type */
3479 - * Restart discovery after a delay due to resource shortages.
3480 - * If the error persists, the discovery will be abandoned.
3482 + * fc_disc_retry - Retry discovery
3483 + * @lport: FC local port
3485 -static void fc_disc_retry(struct fc_lport *lp)
3486 +static void fc_disc_retry(struct fc_lport *lport)
3488 unsigned long delay = FC_DISC_RETRY_DELAY;
3490 - if (!lp->disc_retry_count)
3491 + if (!lport->disc_retry_count)
3492 delay /= 4; /* timeout faster first time */
3493 - if (lp->disc_retry_count++ < FC_DISC_RETRY_LIMIT)
3494 - schedule_delayed_work(&lp->disc_work,
3495 + if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT)
3496 + schedule_delayed_work(&lport->disc_work,
3497 msecs_to_jiffies(delay));
3500 + fc_disc_done(lport);
3504 - * Handle new target found by discovery.
3505 - * Create remote port and session if needed.
3506 - * Ignore returns of our own FID & WWPN.
3508 - * If a non-NULL rp is passed in, it is held for the caller, but not for us.
3510 - * Events delivered are:
3511 - * FC_EV_READY, when remote port is rediscovered.
3513 + * fc_disc_new_target - Handle new target found by discovery
3514 + * @lport: FC local port
3515 + * @rport: The previous FC remote port (NULL if new remote port)
3516 + * @ids: Identifiers for the new FC remote port
3518 -static int fc_disc_new_target(struct fc_lport *lp,
3519 +static int fc_disc_new_target(struct fc_lport *lport,
3520 struct fc_rport *rport,
3521 struct fc_rport_identifiers *ids)
3523 @@ -263,61 +272,64 @@ static int fc_disc_new_target(struct fc_lport *lp,
3524 * assigned the same FCID. This should be rare.
3525 * Delete the old one and fall thru to re-create.
3527 - fc_disc_del_target(lp, rport);
3528 + fc_disc_del_target(lport, rport);
3532 if (((ids->port_name != -1) || (ids->port_id != -1)) &&
3533 - ids->port_id != lp->fid && ids->port_name != lp->wwpn) {
3534 + ids->port_id != fc_host_port_id(lport->host) &&
3535 + ids->port_name != lport->wwpn) {
3537 - rport = lp->tt.rport_lookup(lp, ids->port_id);
3538 + rport = lport->tt.rport_lookup(lport, ids->port_id);
3540 struct fc_disc_port dp;
3543 dp.ids.port_id = ids->port_id;
3544 dp.ids.port_name = ids->port_name;
3545 dp.ids.node_name = ids->node_name;
3546 dp.ids.roles = ids->roles;
3547 - rport = fc_rport_dummy_create(&dp);
3548 + rport = fc_rport_rogue_create(&dp);
3554 rp = rport->dd_data;
3555 - rp->event_callback = lp->tt.event_callback;
3556 + rp->event_callback = lport->tt.event_callback;
3557 rp->rp_state = RPORT_ST_INIT;
3558 - lp->tt.rport_login(rport);
3559 + lport->tt.rport_login(rport);
3566 - * Delete the remote port.
3568 + * fc_disc_del_target - Delete a target
3569 + * @lport: FC local port
3570 + * @rport: The remote port to be removed
3572 -static void fc_disc_del_target(struct fc_lport *lp, struct fc_rport *rport)
3573 +static void fc_disc_del_target(struct fc_lport *lport, struct fc_rport *rport)
3575 - lp->tt.rport_reset(rport);
3576 - fc_remote_port_delete(rport); /* release hold from create */
3577 + lport->tt.rport_stop(rport);
3581 - * Done with discovery
3583 + * fc_disc_done - Discovery has been completed
3584 + * @lport: FC local port
3586 -static void fc_disc_done(struct fc_lport *lp)
3587 +static void fc_disc_done(struct fc_lport *lport)
3589 - lp->disc_done = 1;
3590 - lp->disc_pending = 0;
3591 - if (lp->disc_requested)
3592 - lp->tt.disc_start(lp);
3593 + lport->disc_done = 1;
3594 + lport->disc_pending = 0;
3595 + if (lport->disc_requested)
3596 + lport->tt.disc_start(lport);
3600 * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
3601 - * @lp: Fibre Channel host port instance
3602 + * @lport: FC local port
3604 -static void fc_disc_gpn_ft_req(struct fc_lport *lp)
3605 +static void fc_disc_gpn_ft_req(struct fc_lport *lport)
3607 struct fc_frame *fp;
3608 struct fc_seq *sp = NULL;
3609 @@ -327,60 +339,64 @@ static void fc_disc_gpn_ft_req(struct fc_lport *lp)
3613 - lp->disc_buf_len = 0;
3614 - lp->disc_seq_count = 0;
3615 - fp = fc_frame_alloc(lp, sizeof(*rp));
3617 + lport->disc_buf_len = 0;
3618 + lport->disc_seq_count = 0;
3619 + fp = fc_frame_alloc(lport, sizeof(*rp));
3623 rp = fc_frame_payload_get(fp, sizeof(*rp));
3624 - fc_fill_dns_hdr(lp, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
3625 + fc_fill_dns_hdr(lport, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
3626 rp->gid.fn_fc4_type = FC_TYPE_FCP;
3628 - WARN_ON(!fc_lport_test_ready(lp));
3629 + WARN_ON(!fc_lport_test_ready(lport));
3631 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
3632 - sp = lp->tt.exch_seq_send(lp, fp,
3633 - fc_disc_gpn_ft_resp, NULL,
3637 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3638 + sp = lport->tt.exch_seq_send(lport, fp,
3639 + fc_disc_gpn_ft_resp, NULL,
3640 + lport, lport->e_d_tov,
3641 + fc_host_port_id(lport->host),
3643 + FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3645 - if (error || sp == NULL)
3646 - fc_disc_retry(lp);
3648 + fc_disc_retry(lport);
3652 - * Handle error on dNS request.
3654 + * fc_disc_error - Handle error on dNS request
3655 + * @lport: FC local port
3656 + * @fp: The frame pointer
3658 -static void fc_disc_error(struct fc_lport *lp, struct fc_frame *fp)
3659 +static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp)
3661 - int err = PTR_ERR(fp);
3662 + long err = PTR_ERR(fp);
3664 + FC_DEBUG_DISC("Error %ld, retries %d/%d\n", PTR_ERR(fp),
3665 + lport->retry_count, FC_DISC_RETRY_LIMIT);
3668 case -FC_EX_TIMEOUT:
3669 - if (lp->disc_retry_count++ < FC_DISC_RETRY_LIMIT) {
3670 - fc_disc_gpn_ft_req(lp);
3671 + if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) {
3672 + fc_disc_gpn_ft_req(lport);
3674 - FC_DBG("err %d - ending\n", err);
3676 + fc_disc_done(lport);
3680 - FC_DBG("err %d - ending\n", err);
3682 + FC_DBG("Error code %ld not supported\n", err);
3683 + fc_disc_done(lport);
3689 * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
3690 - * @lp: Fibre Channel host port instance
3691 + * @lport: Fibre Channel host port instance
3692 * @buf: GPN_FT response buffer
3693 * @len: size of response buffer
3695 -static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3696 +static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3698 struct fc_gpn_ft_resp *np;
3700 @@ -388,8 +404,8 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3703 struct fc_disc_port dp;
3704 - struct fc_rport *rp;
3705 - struct fc_rport_libfc_priv *rpp;
3706 + struct fc_rport *rport;
3707 + struct fc_rport_libfc_priv *rdata;
3710 * Handle partial name record left over from previous call.
3711 @@ -397,7 +413,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3714 np = (struct fc_gpn_ft_resp *)bp;
3715 - tlen = lp->disc_buf_len;
3716 + tlen = lport->disc_buf_len;
3718 WARN_ON(tlen >= sizeof(*np));
3719 plen = sizeof(*np) - tlen;
3720 @@ -405,7 +421,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3721 WARN_ON(plen >= sizeof(*np));
3724 - np = &lp->disc_buf;
3725 + np = &lport->disc_buf;
3726 memcpy((char *)np + tlen, bp, plen);
3729 @@ -415,9 +431,9 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3733 - lp->disc_buf_len = (unsigned char) plen;
3734 + lport->disc_buf_len = (unsigned char) plen;
3735 if (plen == sizeof(*np))
3736 - lp->disc_buf_len = 0;
3737 + lport->disc_buf_len = 0;
3741 @@ -428,19 +444,20 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3742 * After the first time through the loop, things return to "normal".
3744 while (plen >= sizeof(*np)) {
3747 dp.ids.port_id = ntoh24(np->fp_fid);
3748 dp.ids.port_name = ntohll(np->fp_wwpn);
3749 dp.ids.node_name = -1;
3750 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
3752 - if ((dp.ids.port_id != lp->fid) &&
3753 - (dp.ids.port_name != lp->wwpn)) {
3754 - rp = fc_rport_dummy_create(&dp);
3756 - rpp = rp->dd_data;
3757 - rpp->local_port = lp;
3758 - lp->tt.rport_login(rp);
3759 + if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
3760 + (dp.ids.port_name != lport->wwpn)) {
3761 + rport = fc_rport_rogue_create(&dp);
3763 + rdata = rport->dd_data;
3764 + rdata->event_callback = lport->tt.event_callback;
3765 + rdata->local_port = lport;
3766 + lport->tt.rport_login(rport);
3768 FC_DBG("Failed to allocate memory for "
3769 "the newly discovered port (%6x)\n",
3770 @@ -448,7 +465,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3773 if (np->fp_flags & FC_NS_FID_LAST) {
3775 + fc_disc_done(lport);
3779 @@ -462,11 +479,11 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3780 * Save any partial record at the end of the buffer for next time.
3782 if (error == 0 && len > 0 && len < sizeof(*np)) {
3783 - if (np != &lp->disc_buf)
3784 - memcpy(&lp->disc_buf, np, len);
3785 - lp->disc_buf_len = (unsigned char) len;
3786 + if (np != &lport->disc_buf)
3787 + memcpy(&lport->disc_buf, np, len);
3788 + lport->disc_buf_len = (unsigned char) len;
3790 - lp->disc_buf_len = 0;
3791 + lport->disc_buf_len = 0;
3795 @@ -476,14 +493,14 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
3797 static void fc_disc_timeout(struct work_struct *work)
3799 - struct fc_lport *lp;
3800 + struct fc_lport *lport;
3802 - lp = container_of(work, struct fc_lport, disc_work.work);
3803 + lport = container_of(work, struct fc_lport, disc_work.work);
3805 - if (lp->disc_pending)
3806 - fc_disc_gpn_ft_req(lp);
3807 + if (lport->disc_pending)
3808 + fc_disc_gpn_ft_req(lport);
3810 - lp->tt.disc_start(lp);
3811 + lport->tt.disc_start(lport);
3815 @@ -495,9 +512,9 @@ static void fc_disc_timeout(struct work_struct *work)
3816 * The response may be in multiple frames
3818 static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3822 - struct fc_lport *lp = lp_arg;
3823 + struct fc_lport *lport = lp_arg;
3824 struct fc_ct_hdr *cp;
3825 struct fc_frame_header *fh;
3826 unsigned int seq_cnt;
3827 @@ -506,7 +523,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3831 - fc_disc_error(lp, fp);
3832 + fc_disc_error(lport, fp);
3836 @@ -515,7 +532,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3837 len = fr_len(fp) - sizeof(*fh);
3838 seq_cnt = ntohs(fh->fh_seq_cnt);
3839 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
3840 - lp->disc_seq_count == 0) {
3841 + lport->disc_seq_count == 0) {
3842 cp = fc_frame_payload_get(fp, sizeof(*cp));
3844 FC_DBG("GPN_FT response too short, len %d\n",
3845 @@ -531,68 +548,76 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3846 FC_DBG("GPN_FT rejected reason %x exp %x "
3847 "(check zoning)\n", cp->ct_reason,
3850 + fc_disc_done(lport);
3852 FC_DBG("GPN_FT unexpected response code %x\n",
3855 } else if (fr_sof(fp) == FC_SOF_N3 &&
3856 - seq_cnt == lp->disc_seq_count) {
3857 + seq_cnt == lport->disc_seq_count) {
3860 FC_DBG("GPN_FT unexpected frame - out of sequence? "
3861 "seq_cnt %x expected %x sof %x eof %x\n",
3862 - seq_cnt, lp->disc_seq_count, fr_sof(fp), fr_eof(fp));
3863 + seq_cnt, lport->disc_seq_count, fr_sof(fp), fr_eof(fp));
3866 - error = fc_disc_gpn_ft_parse(lp, buf, len);
3867 + error = fc_disc_gpn_ft_parse(lport, buf, len);
3869 - fc_disc_retry(lp);
3870 + fc_disc_retry(lport);
3872 - lp->disc_seq_count++;
3873 + lport->disc_seq_count++;
3879 - * Discover the directory information for a single target.
3881 + * fc_disc_single - Discover the directory information for a single target
3882 + * @lport: FC local port
3883 + * @dp: The port to rediscover
3885 * This could be from an RSCN that reported a change for the target.
3887 -static void fc_disc_single(struct fc_lport *lp, struct fc_disc_port *dp)
3888 +static void fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
3890 struct fc_rport *rport;
3891 - struct fc_rport *rp;
3892 - struct fc_rport_libfc_priv *rpp;
3893 + struct fc_rport *new_rport;
3894 + struct fc_rport_libfc_priv *rdata;
3896 - if (dp->ids.port_id == lp->fid)
3897 + if (dp->ids.port_id == fc_host_port_id(lport->host))
3900 - rport = lp->tt.rport_lookup(lp, dp->ids.port_id);
3901 + rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
3903 - fc_disc_del_target(lp, rport);
3904 + fc_disc_del_target(lport, rport);
3905 put_device(&rport->dev); /* hold from lookup */
3908 - rp = fc_rport_dummy_create(dp);
3910 - rpp = rp->dd_data;
3911 + new_rport = fc_rport_rogue_create(dp);
3913 + rdata = new_rport->dd_data;
3914 + rdata->event_callback = lport->tt.event_callback;
3916 - lp->tt.rport_login(rp);
3917 + lport->tt.rport_login(new_rport);
3924 -int fc_disc_init(struct fc_lport *lp)
3926 + * fc_disc_init - Initialize the discovery block
3927 + * @lport: FC local port
3929 +int fc_disc_init(struct fc_lport *lport)
3931 - INIT_DELAYED_WORK(&lp->disc_work, fc_disc_timeout);
3932 + INIT_DELAYED_WORK(&lport->disc_work, fc_disc_timeout);
3934 - if (!lp->tt.disc_start)
3935 - lp->tt.disc_start = fc_disc_start;
3936 + if (!lport->tt.disc_start)
3937 + lport->tt.disc_start = fc_disc_start;
3939 - if (!lp->tt.disc_recv_req)
3940 - lp->tt.disc_recv_req = fc_disc_recv_req;
3941 + if (!lport->tt.disc_recv_req)
3942 + lport->tt.disc_recv_req = fc_disc_recv_req;
3946 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
3947 index ed74d95..80dc1ef 100644
3948 --- a/drivers/scsi/libfc/fc_exch.c
3949 +++ b/drivers/scsi/libfc/fc_exch.c
3951 * fc_exch_debug can be set in debugger or at compile time to get more logs.
3953 static int fc_exch_debug;
3955 +#define FC_DEBUG_EXCH(fmt...) \
3957 + if (fc_exch_debug) \
3961 static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
3964 @@ -86,7 +93,7 @@ struct fc_exch {
3965 struct list_head ex_list; /* free or busy list linkage */
3966 spinlock_t ex_lock; /* lock covering exchange state */
3967 atomic_t ex_refcnt; /* reference counter */
3968 - struct timer_list ex_timer; /* timer for upper level protocols */
3969 + struct delayed_work timeout_work; /* timer for upper level protocols */
3970 struct fc_lport *lp; /* fc device instance */
3971 u16 oxid; /* originator's exchange ID */
3972 u16 rxid; /* responder's exchange ID */
3973 @@ -310,7 +317,6 @@ static void fc_exch_release(struct fc_exch *ep)
3974 if (ep->lp->tt.exch_put)
3975 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
3976 WARN_ON(!ep->esb_stat & ESB_ST_COMPLETE);
3977 - WARN_ON(timer_pending(&ep->ex_timer));
3978 mempool_free(ep, mp->ep_pool);
3981 @@ -332,7 +338,7 @@ static int fc_exch_done_locked(struct fc_exch *ep)
3983 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
3984 ep->state |= FC_EX_DONE;
3985 - if (del_timer(&ep->ex_timer))
3986 + if (cancel_delayed_work(&ep->timeout_work))
3987 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
3990 @@ -362,7 +368,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
3991 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
3994 - if (!mod_timer(&ep->ex_timer, jiffies + msecs_to_jiffies(timer_msec)))
3995 + FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
3997 + if (schedule_delayed_work(&ep->timeout_work,
3998 + jiffies + msecs_to_jiffies(timer_msec)))
3999 fc_exch_hold(ep); /* hold for timer */
4002 @@ -435,9 +444,10 @@ EXPORT_SYMBOL(fc_seq_exch_abort);
4003 * Exchange timeout - handle exchange timer expiration.
4004 * The timer will have been cancelled before this is called.
4006 -static void fc_exch_timeout(unsigned long ep_arg)
4007 +static void fc_exch_timeout(struct work_struct *work)
4009 - struct fc_exch *ep = (struct fc_exch *)ep_arg;
4010 + struct fc_exch *ep = container_of(work, struct fc_exch,
4011 + timeout_work.work);
4012 struct fc_seq *sp = &ep->seq;
4013 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
4015 @@ -584,7 +594,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
4016 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
4017 ep->rxid = FC_XID_UNKNOWN;
4018 ep->class = mp->class;
4019 - setup_timer(&ep->ex_timer, fc_exch_timeout, (unsigned long)ep);
4020 + INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
4024 @@ -843,9 +853,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
4025 struct fc_exch *ep = fc_seq_exch(sp);
4027 sp = fc_seq_alloc(ep, ep->seq_id++);
4028 - if (fc_exch_debug)
4029 - FC_DBG("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
4030 - ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
4031 + FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
4032 + ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
4036 @@ -913,7 +922,18 @@ int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
4039 hton24(fh->fh_f_ctl, f_ctl | fill);
4040 - fh->fh_seq_cnt = htons(sp->cnt++);
4041 + fh->fh_seq_cnt = htons(sp->cnt);
4044 + * update sequence count if this frame is carrying
4045 + * multiple FC frames when sequence offload is enabled
4048 + if (fr_max_payload(fp))
4049 + sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
4050 + fr_max_payload(fp));
4056 @@ -1185,8 +1205,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
4057 lp->tt.lport_recv(lp, sp, fp);
4058 fc_exch_release(ep); /* release from lookup */
4060 - if (fc_exch_debug)
4061 - FC_DBG("exch/seq lookup failed: reject %x\n", reject);
4062 + FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
4066 @@ -1290,12 +1309,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
4067 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
4069 atomic_inc(&mp->stats.xid_not_found);
4070 - if (fc_exch_debug)
4071 - FC_DBG("seq lookup failed\n");
4072 + FC_DEBUG_EXCH("seq lookup failed\n");
4074 atomic_inc(&mp->stats.non_bls_resp);
4075 - if (fc_exch_debug)
4076 - FC_DBG("non-BLS response to sequence");
4077 + FC_DEBUG_EXCH("non-BLS response to sequence");
4081 @@ -1316,11 +1333,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
4082 int rc = 1, has_rec = 0;
4084 fh = fc_frame_header_get(fp);
4085 - if (fc_exch_debug)
4086 - FC_DBG("exch: BLS rctl %x - %s\n",
4087 - fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
4088 + FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
4089 + fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
4091 - if (del_timer_sync(&ep->ex_timer))
4092 + if (cancel_delayed_work_sync(&ep->timeout_work))
4093 fc_exch_release(ep); /* release from pending timer hold */
4095 spin_lock_bh(&ep->ex_lock);
4096 @@ -1410,10 +1426,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
4100 - if (fc_exch_debug)
4101 - FC_DBG("BLS rctl %x - %s received",
4103 - fc_exch_rctl_name(fh->fh_r_ctl));
4104 + FC_DEBUG_EXCH("BLS rctl %x - %s received",
4106 + fc_exch_rctl_name(fh->fh_r_ctl));
4110 @@ -1498,7 +1513,7 @@ static void fc_exch_reset(struct fc_exch *ep)
4111 * functions can also grab the lport lock which could cause
4114 - if (del_timer(&ep->ex_timer))
4115 + if (cancel_delayed_work(&ep->timeout_work))
4116 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4119 @@ -1720,7 +1735,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
4120 if (ep->esb_stat & ESB_ST_RESP)
4122 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
4123 - lp->e_d_tov, lp->fid, did,
4124 + lp->e_d_tov, fc_host_port_id(lp->host), did,
4125 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4127 ep->esb_stat |= ESB_ST_REC_QUAL;
4128 @@ -1774,8 +1789,10 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
4129 ep->esb_stat &= ~ESB_ST_REC_QUAL;
4130 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
4132 - if ((ep->esb_stat & ESB_ST_COMPLETE) && (del_timer(&ep->ex_timer)))
4133 - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4134 + if (ep->esb_stat & ESB_ST_COMPLETE) {
4135 + if (cancel_delayed_work(&ep->timeout_work))
4136 + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4139 spin_unlock_bh(&ep->ex_lock);
4141 diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
4142 index 2566eed..bf8202f 100644
4143 --- a/drivers/scsi/libfc/fc_fcp.c
4144 +++ b/drivers/scsi/libfc/fc_fcp.c
4145 @@ -42,22 +42,29 @@ MODULE_AUTHOR("Open-FCoE.org");
4146 MODULE_DESCRIPTION("libfc");
4147 MODULE_LICENSE("GPL");
4150 +static int fc_fcp_debug;
4152 +#define FC_DEBUG_FCP(fmt...) \
4154 + if (fc_fcp_debug) \
4158 static struct kmem_cache *scsi_pkt_cachep;
4160 /* SRB state definitions */
4161 -#define FC_SRB_FREE 0 /* cmd is free */
4162 -#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
4163 -#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
4164 -#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
4165 -#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */
4166 -#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
4167 -#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
4168 +#define FC_SRB_FREE 0 /* cmd is free */
4169 +#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
4170 +#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
4171 +#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
4172 +#define FC_SRB_ABORTED		    (1 << 3)	/* abort acknowledged */
4173 +#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
4174 +#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
4175 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
4176 -#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
4177 +#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
4179 -#define FC_SRB_READ (1 << 1)
4180 -#define FC_SRB_WRITE (1 << 0)
4181 +#define FC_SRB_READ (1 << 1)
4182 +#define FC_SRB_WRITE (1 << 0)
4185 * scsi request structure, one for each scsi request
4186 @@ -184,8 +191,8 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
4187 #define FC_SCSI_REC_TOV (2 * HZ)
4188 #define FC_HOST_RESET_TIMEOUT (30 * HZ)
4190 -#define FC_MAX_ERROR_CNT 5
4191 -#define FC_MAX_RECOV_RETRY 3
4192 +#define FC_MAX_ERROR_CNT 5
4193 +#define FC_MAX_RECOV_RETRY 3
4195 #define FC_FCP_DFLT_QUEUE_DEPTH 32
4197 @@ -353,11 +360,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
4198 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
4199 fc_frame_crc_check(fp))
4201 - if (fc_fcp_debug) {
4202 - FC_DBG("data received past end. "
4203 - "len %zx offset %zx "
4204 - "data_len %x\n", len, offset, fsp->data_len);
4206 + FC_DEBUG_FCP("data received past end. len %zx offset %zx "
4207 + "data_len %x\n", len, offset, fsp->data_len);
4208 fc_fcp_retry_cmd(fsp);
4211 @@ -449,55 +453,54 @@ crc_err:
4213 * Send SCSI data to target.
4214 * Called after receiving a Transfer Ready data descriptor.
4215 + * if LLD is capable of seq offload then send down seq_blen
4216 + * size of data in single frame, otherwise send multiple FC
4217 + * frames of max FC frame payload supported by target port.
4219 static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4220 - size_t offset, size_t len,
4221 - struct fc_frame *oldfp, int sg_supp)
4222 + size_t offset, size_t seq_blen)
4224 struct scsi_cmnd *sc;
4225 struct scatterlist *sg;
4226 struct fc_frame *fp = NULL;
4227 struct fc_lport *lp = fsp->lp;
4233 size_t frame_offset;
4237 - int using_sg = sg_supp;
4238 + int using_sg = lp->sg_supp;
4241 - if (unlikely(offset + len > fsp->data_len)) {
4243 - * this should never happen
4245 - if (fc_fcp_debug) {
4246 - FC_DBG("xfer-ready past end. len %zx offset %zx\n",
4249 + WARN_ON(seq_blen <= 0);
4250 + if (unlikely(offset + seq_blen > fsp->data_len)) {
4251 + /* this should never happen */
4252 + FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
4253 + seq_blen, offset);
4254 fc_fcp_send_abort(fsp);
4256 } else if (offset != fsp->xfer_len) {
4258 - * Out of Order Data Request - no problem, but unexpected.
4260 - if (fc_fcp_debug) {
4261 - FC_DBG("xfer-ready non-contiguous. "
4262 - "len %zx offset %zx\n", len, offset);
4264 + /* Out of Order Data Request - no problem, but unexpected. */
4265 + FC_DEBUG_FCP("xfer-ready non-contiguous. "
4266 + "seq_blen %zx offset %zx\n", seq_blen, offset);
4268 - mfs = fsp->max_payload;
4269 - WARN_ON(mfs > FC_MAX_PAYLOAD);
4270 - WARN_ON(mfs < FC_MIN_MAX_PAYLOAD);
4272 - mfs &= ~(512 - 1); /* round down to block size */
4273 - WARN_ON(mfs < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
4274 - WARN_ON(len <= 0);
4277 + * if LLD is capable of seq_offload then set transport
4278 + * burst length (t_blen) to seq_blen, otherwise set t_blen
4279 + * to max FC frame payload previously set in fsp->max_payload.
4281 + t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
4282 + WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
4284 + t_blen &= ~(512 - 1); /* round down to block size */
4285 + WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
4289 + remaining = seq_blen;
4290 frame_offset = offset;
4292 sp = lp->tt.seq_start_next(sp);
4293 @@ -540,7 +543,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4297 - tlen = min(mfs, remaining);
4298 + tlen = min(t_blen, remaining);
4301 * TODO. Temporary workaround. fc_seq_send() can't
4302 @@ -563,6 +566,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4304 fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP);
4305 fc_frame_set_offset(fp, frame_offset);
4306 + fr_max_payload(fp) = fsp->max_payload;
4308 sg_bytes = min(tlen, sg->length - offset);
4310 @@ -621,7 +625,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
4314 - fsp->xfer_len += len; /* premature count? */
4315 + fsp->xfer_len += seq_blen; /* premature count? */
4319 @@ -741,8 +745,7 @@ static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
4321 rc = fc_fcp_send_data(fsp, sp,
4322 (size_t) ntohl(dd->ft_data_ro),
4323 - (size_t) ntohl(dd->ft_burst_len), fp,
4324 - lp->capabilities & TRANS_C_SG);
4325 + (size_t) ntohl(dd->ft_burst_len));
4327 lp->tt.seq_set_rec_data(sp, fsp->xfer_len);
4328 else if (rc == -ENOMEM)
4329 @@ -1066,7 +1069,7 @@ static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp)
4333 - rp->local_port->fid,
4334 + fc_host_port_id(rp->local_port->host),
4336 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4338 @@ -1175,7 +1178,7 @@ static void fc_lun_reset_send(unsigned long data)
4342 - rp->local_port->fid,
4343 + fc_host_port_id(rp->local_port->host),
4345 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4347 @@ -1367,7 +1370,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
4348 rec = fc_frame_payload_get(fp, sizeof(*rec));
4349 memset(rec, 0, sizeof(*rec));
4350 rec->rec_cmd = ELS_REC;
4351 - hton24(rec->rec_s_id, lp->fid);
4352 + hton24(rec->rec_s_id, fc_host_port_id(lp->host));
4353 rec->rec_ox_id = htons(ox_id);
4354 rec->rec_rx_id = htons(rx_id);
4356 @@ -1376,7 +1379,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
4357 sp = lp->tt.exch_seq_send(lp, fp,
4358 fc_fcp_rec_resp, NULL,
4359 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
4360 - rp->local_port->fid,
4361 + fc_host_port_id(rp->local_port->host),
4363 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4365 @@ -1425,16 +1428,13 @@ static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
4366 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
4367 switch (rjt->er_reason) {
4370 - FC_DBG("device %x unexpected REC reject "
4371 - "reason %d expl %d\n",
4372 - fsp->rport->port_id, rjt->er_reason,
4374 + FC_DEBUG_FCP("device %x unexpected REC reject "
4375 + "reason %d expl %d\n",
4376 + fsp->rport->port_id, rjt->er_reason,
4382 - FC_DBG("device does not support REC\n");
4383 + FC_DEBUG_FCP("device does not support REC\n");
4384 rp = fsp->rport->dd_data;
4386 * if we do not spport RECs or got some bogus
4387 @@ -1636,7 +1636,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
4388 sp = lp->tt.exch_seq_send(lp, fp,
4389 fc_fcp_srr_resp, NULL,
4390 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
4391 - rp->local_port->fid,
4392 + fc_host_port_id(rp->local_port->host),
4394 FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4396 @@ -2199,7 +2199,17 @@ static int __init libfc_init(void)
4398 rc = fc_setup_exch_mgr();
4400 - kmem_cache_destroy(scsi_pkt_cachep);
4401 + goto destroy_pkt_cache;
4403 + rc = fc_setup_rport();
4409 + fc_destroy_exch_mgr();
4411 + kmem_cache_destroy(scsi_pkt_cachep);
4415 @@ -2207,6 +2217,7 @@ static void __exit libfc_exit(void)
4417 kmem_cache_destroy(scsi_pkt_cachep);
4418 fc_destroy_exch_mgr();
4419 + fc_destroy_rport();
4422 module_init(libfc_init);
4423 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
4424 index b1854b9..bfbc7d4 100644
4425 --- a/drivers/scsi/libfc/fc_lport.c
4426 +++ b/drivers/scsi/libfc/fc_lport.c
4431 - * Logical interface support.
4432 + * General locking notes:
4434 + * The lport and rport blocks both have mutexes that are used to protect
4435 + * the port objects states. The main motivation for this protection is that
4436 + * we don't want to be preparing a request/response in one context while
4437 + * another thread "resets" the port in question. For example, if the lport
4438 + * block is sending a SCR request to the directory server we don't want
4439 + * the lport to be reset before we fill out the frame header's port_id. The
4440 + * problem is that a reset would cause the lport's port_id to reset to 0.
4441 + * If we don't protect the lport we'd spew incorrect frames.
4443 + * At the time of this writing there are two primary mutexes, one for the
4444 + * lport and one for the rport. Since the lport uses the rport and makes
4445 + * calls into that block the rport should never make calls that would cause
4446 + * the lport's mutex to be locked. In other words, the lport's mutex is
4447 + * considered the outer lock and the rport's lock is considered the inner
4448 + * lock. The bottom line is that you can hold a lport's mutex and then
4449 + * hold the rport's mutex, but not the other way around.
4451 + * The only complication to this rule is the callbacks from the rport to
4452 + * the lport's event_callback function. When rports become READY they make
4453 + * a callback to the lport so that it can track them. In the case of the
4454 + * directory server that callback might cause the lport to change its
4455 + * state, implying that the lport mutex would need to be held. This problem
4456 + * was solved by serializing the rport notifications to the lport and the
4457 + * callback is made without holding the rport's lock.
4459 + * lport locking notes:
4461 + * The critical sections protected by the lport's mutex are quite broad and
4462 + * may be improved upon in the future. The lport code and its locking doesn't
4463 + * influence the I/O path, so excessive locking doesn't penalize I/O
4466 + * The strategy is to lock whenever processing a request or response. Note
4467 + * that every _enter_* function corresponds to a state change. They generally
4468 + * change the lport's state and then send a request out on the wire. We lock
4469 + * before calling any of these functions to protect that state change. This
4470 + * means that the entry points into the lport block manage the locks while
4471 + * the state machine can transition between states (i.e. _enter_* functions)
4472 + * while always staying protected.
4474 + * When handling responses we also hold the lport mutex broadly. When the
4475 + * lport receives the response frame it locks the mutex and then calls the
4476 + * appropriate handler for the particular response. Generally a response will
4477 + * trigger a state change and so the lock must already be held.
4479 + * Retries also have to consider the locking. The retries occur from a work
4480 + * context and the work function will lock the lport and then retry the state
4481 + * (i.e. _enter_* function).
4483 + * The implication to all of this is that each lport can only process one
4484 + * state at a time.
4487 #include <linux/timer.h>
4490 static int fc_lport_debug;
4492 +#define FC_DEBUG_LPORT(fmt...) \
4494 + if (fc_lport_debug) \
4498 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
4500 static void fc_lport_enter_reset(struct fc_lport *);
4501 @@ -66,41 +124,71 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
4505 + * fc_lport_lookup_rport - lookup a remote port by port_id
4506 + * @lport: Fibre Channel host port instance
4507 + * @port_id: remote port port_id to match
4509 +struct fc_rport *fc_lport_lookup_rport(const struct fc_lport *lport,
4512 + struct fc_rport *rport, *found;
4513 + struct fc_rport_libfc_priv *rdata;
4517 + list_for_each_entry(rdata, &lport->rports, peers) {
4518 + rport = PRIV_TO_RPORT(rdata);
4519 + if (rport->port_id == port_id) {
4521 + get_device(&found->dev);
4531 * fc_lport_rport_event - Event handler for rport events
4532 * @lport: The lport which is receiving the event
4533 - * @port_id: The FID of the rport which the event has occured on
4534 + * @rport: The rport which the event has occured on
4535 * @event: The event that occured
4537 * Locking Note: The rport lock should not be held when calling
4540 -static void fc_lport_rport_event(struct fc_lport *lport, u32 port_id,
4541 +static void fc_lport_rport_event(struct fc_lport *lport,
4542 + struct fc_rport *rport,
4543 enum fc_lport_event event)
4545 - struct fc_rport *rport = lport->tt.rport_lookup(lport, port_id);
4546 + struct fc_rport_libfc_priv *rdata = rport->dd_data;
4548 - if (fc_lport_debug)
4549 - FC_DBG("Received a %d event for port (%6x)\n", event, port_id);
4550 + FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
4553 - if (port_id == FC_FID_DIR_SERV) {
4554 - mutex_lock(&lport->lp_mutex);
4556 - case LPORT_EV_RPORT_CREATED:
4558 - lport->dns_rp = rport;
4559 - fc_lport_enter_rpn_id(lport);
4562 - case LPORT_EV_RPORT_LOGO:
4563 - case LPORT_EV_RPORT_FAILED:
4564 - lport->dns_rp = NULL;
4565 - fc_lport_enter_dns(lport);
4567 - case LPORT_EV_RPORT_NONE:
4569 + mutex_lock(&lport->lp_mutex);
4571 + case LPORT_EV_RPORT_CREATED:
4572 + if (rport->port_id == FC_FID_DIR_SERV) {
4573 + lport->dns_rp = rport;
4574 + fc_lport_enter_rpn_id(lport);
4576 + list_add_tail(&rdata->peers, &lport->rports);
4578 - mutex_unlock(&lport->lp_mutex);
4580 + case LPORT_EV_RPORT_LOGO:
4581 + case LPORT_EV_RPORT_FAILED:
4582 + case LPORT_EV_RPORT_STOP:
4583 + if (rport->port_id == FC_FID_DIR_SERV)
4584 + lport->dns_rp = NULL;
4586 + list_del(&rdata->peers);
4588 + case LPORT_EV_RPORT_NONE:
4591 + mutex_unlock(&lport->lp_mutex);
4595 @@ -118,18 +206,6 @@ static const char *fc_lport_state(struct fc_lport *lport)
4599 - * fc_lport_ptp_clear - Delete the ptp rport
4600 - * @lport: The lport whose ptp rport should be removed
4602 -static void fc_lport_ptp_clear(struct fc_lport *lport)
4604 - if (lport->ptp_rp) {
4605 - fc_remote_port_delete(lport->ptp_rp);
4606 - lport->ptp_rp = NULL;
4611 * fc_lport_ptp_setup - Create an rport for point-to-point mode
4612 * @lport: The lport to attach the ptp rport to
4613 * @fid: The FID of the ptp rport
4614 @@ -148,19 +224,25 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
4615 dp.ids.node_name = remote_wwnn;
4616 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
4618 - fc_lport_ptp_clear(lport);
4619 + if (lport->ptp_rp) {
4620 + lport->tt.rport_stop(lport->ptp_rp);
4621 + lport->ptp_rp = NULL;
4624 - lport->ptp_rp = fc_rport_dummy_create(&dp);
4625 + lport->ptp_rp = fc_rport_rogue_create(&dp);
4627 lport->tt.rport_login(lport->ptp_rp);
4629 fc_lport_enter_ready(lport);
4633 - * fc_get_host_port_state - supports fc_function_template
4634 - * @shost: The host whose port state should be returned
4636 +void fc_get_host_port_type(struct Scsi_Host *shost)
4638 + /* TODO - currently just NPORT */
4639 + fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
4641 +EXPORT_SYMBOL(fc_get_host_port_type);
4643 void fc_get_host_port_state(struct Scsi_Host *shost)
4645 struct fc_lport *lp = shost_priv(shost);
4646 @@ -277,8 +359,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
4647 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
4648 struct fc_lport *lport)
4650 - if (fc_lport_debug)
4651 - FC_DBG("Received RLIR request while in state %s\n",
4652 + FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
4653 fc_lport_state(lport));
4655 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
4656 @@ -303,8 +384,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
4660 - if (fc_lport_debug)
4661 - FC_DBG("Received RLIR request while in state %s\n",
4662 + FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
4663 fc_lport_state(lport));
4665 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
4666 @@ -350,8 +430,7 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
4670 - if (fc_lport_debug)
4671 - FC_DBG("Received RNID request while in state %s\n",
4672 + FC_DEBUG_LPORT("Received RNID request while in state %s\n",
4673 fc_lport_state(lport));
4675 req = fc_frame_payload_get(in_fp, sizeof(*req));
4676 @@ -520,12 +599,10 @@ EXPORT_SYMBOL(fc_fabric_logoff);
4678 int fc_lport_destroy(struct fc_lport *lport)
4680 - mutex_lock(&lport->lp_mutex);
4681 cancel_delayed_work_sync(&lport->disc_work);
4682 lport->tt.scsi_abort_io(lport);
4683 lport->tt.frame_send = fc_frame_drop;
4684 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
4685 - mutex_unlock(&lport->lp_mutex);
4688 EXPORT_SYMBOL(fc_lport_destroy);
4689 @@ -569,9 +646,8 @@ EXPORT_SYMBOL(fc_set_mfs);
4691 static void fc_lport_enter_ready(struct fc_lport *lport)
4693 - if (fc_lport_debug)
4694 - FC_DBG("Port (%6x) entered Ready from state %s\n",
4695 - lport->fid, fc_lport_state(lport));
4696 + FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
4697 + fc_host_port_id(lport->host), fc_lport_state(lport));
4699 fc_lport_state_enter(lport, LPORT_ST_READY);
4701 @@ -605,8 +681,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
4705 - if (fc_lport_debug)
4706 - FC_DBG("Received FLOGI request while in state %s\n",
4707 + FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
4708 fc_lport_state(lport));
4710 fh = fc_frame_header_get(rx_fp);
4711 @@ -636,7 +711,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
4712 remote_fid = FC_LOCAL_PTP_FID_HI;
4715 - lport->fid = local_fid;
4716 + fc_host_port_id(lport->host) = local_fid;
4718 fp = fc_frame_alloc(lport, sizeof(*flp));
4720 @@ -733,7 +808,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
4721 s_id = ntoh24(fh->fh_s_id);
4722 d_id = ntoh24(fh->fh_d_id);
4724 - rport = lport->tt.rport_lookup(lport, s_id);
4725 + rport = fc_lport_lookup_rport(lport, s_id);
4727 lport->tt.rport_recv_req(sp, fp, rport);
4728 put_device(&rport->dev); /* hold from lookup */
4729 @@ -752,6 +827,12 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
4732 mutex_unlock(&lport->lp_mutex);
4735 + * The common exch_done for all request may not be good
4736 +	 * if any request requires longer hold on exchange. XXX
4738 + lport->tt.exch_done(sp);
4742 @@ -771,6 +852,24 @@ int fc_lport_reset(struct fc_lport *lport)
4743 EXPORT_SYMBOL(fc_lport_reset);
4746 + * fc_lport_stop_rports - delete all the remote ports associated with the lport
4747 + * @lport: libfc local port instance
4749 + * Locking Note: This function expects that the lport mutex is locked before
4752 +void fc_lport_stop_rports(struct fc_lport *lport)
4754 + struct fc_rport *rport;
4755 + struct fc_rport_libfc_priv *rdata;
4757 + list_for_each_entry(rdata, &lport->rports, peers) {
4758 + rport = PRIV_TO_RPORT(rdata);
4759 + lport->tt.rport_stop(rport);
4764 * fc_rport_enter_reset - Reset the local port
4765 * @lport: Fibre Channel local port to be reset
4767 @@ -779,24 +878,26 @@ EXPORT_SYMBOL(fc_lport_reset);
4769 static void fc_lport_enter_reset(struct fc_lport *lport)
4771 - if (fc_lport_debug)
4772 - FC_DBG("Port (%6x) entered RESET state from %s state\n",
4773 - lport->fid, fc_lport_state(lport));
4774 + FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
4775 + fc_host_port_id(lport->host), fc_lport_state(lport));
4777 fc_lport_state_enter(lport, LPORT_ST_RESET);
4779 if (lport->dns_rp) {
4780 - fc_remote_port_delete(lport->dns_rp);
4781 + lport->tt.rport_stop(lport->dns_rp);
4782 lport->dns_rp = NULL;
4784 - fc_lport_ptp_clear(lport);
4786 - fc_block_rports(lport);
4787 + if (lport->ptp_rp) {
4788 + lport->tt.rport_stop(lport->ptp_rp);
4789 + lport->ptp_rp = NULL;
4792 + fc_lport_stop_rports(lport);
4794 - lport->tt.rport_reset_list(lport);
4795 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
4796 fc_host_fabric_name(lport->host) = 0;
4798 + fc_host_port_id(lport->host) = 0;
4800 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
4801 fc_lport_enter_flogi(lport);
4802 @@ -814,33 +915,38 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
4803 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
4805 unsigned long delay = 0;
4806 - if (fc_lport_debug)
4807 - FC_DBG("Error %ld in state %s, retries %d\n",
4808 + FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
4809 PTR_ERR(fp), fc_lport_state(lport),
4810 lport->retry_count);
4812 - if (lport->retry_count < lport->max_retry_count) {
4813 - lport->retry_count++;
4815 - delay = msecs_to_jiffies(500);
4818 - msecs_to_jiffies(lport->e_d_tov);
4820 - schedule_delayed_work(&lport->retry_work, delay);
4822 - switch (lport->state) {
4823 - case LPORT_ST_NONE:
4824 - case LPORT_ST_READY:
4825 - case LPORT_ST_RESET:
4826 - case LPORT_ST_RPN_ID:
4827 - case LPORT_ST_RFT_ID:
4828 - case LPORT_ST_SCR:
4829 - case LPORT_ST_DNS:
4830 - case LPORT_ST_FLOGI:
4831 - case LPORT_ST_LOGO:
4832 - fc_lport_enter_reset(lport);
4834 + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
4836 + * Memory allocation failure, or the exchange timed out.
4837 + * Retry after delay
4839 + if (lport->retry_count < lport->max_retry_count) {
4840 + lport->retry_count++;
4842 + delay = msecs_to_jiffies(500);
4845 + msecs_to_jiffies(lport->e_d_tov);
4847 + schedule_delayed_work(&lport->retry_work, delay);
4849 + switch (lport->state) {
4850 + case LPORT_ST_NONE:
4851 + case LPORT_ST_READY:
4852 + case LPORT_ST_RESET:
4853 + case LPORT_ST_RPN_ID:
4854 + case LPORT_ST_RFT_ID:
4855 + case LPORT_ST_SCR:
4856 + case LPORT_ST_DNS:
4857 + case LPORT_ST_FLOGI:
4858 + case LPORT_ST_LOGO:
4859 + fc_lport_enter_reset(lport);
4865 @@ -865,8 +971,7 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4867 mutex_lock(&lport->lp_mutex);
4869 - if (fc_lport_debug)
4870 - FC_DBG("Received a RFT_ID response\n");
4871 + FC_DEBUG_LPORT("Received a RFT_ID response\n");
4873 if (lport->state != LPORT_ST_RFT_ID) {
4874 FC_DBG("Received a RFT_ID response, but in state %s\n",
4875 @@ -876,7 +981,7 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4878 fc_lport_error(lport, fp);
4883 fh = fc_frame_header_get(fp);
4884 @@ -890,8 +995,9 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4886 fc_lport_error(lport, fp);
4888 - mutex_unlock(&lport->lp_mutex);
4891 + mutex_unlock(&lport->lp_mutex);
4895 @@ -914,8 +1020,7 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4897 mutex_lock(&lport->lp_mutex);
4899 - if (fc_lport_debug)
4900 - FC_DBG("Received a RPN_ID response\n");
4901 + FC_DEBUG_LPORT("Received a RPN_ID response\n");
4903 if (lport->state != LPORT_ST_RPN_ID) {
4904 FC_DBG("Received a RPN_ID response, but in state %s\n",
4905 @@ -925,7 +1030,7 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4908 fc_lport_error(lport, fp);
4913 fh = fc_frame_header_get(fp);
4914 @@ -939,8 +1044,9 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
4915 fc_lport_error(lport, fp);
4918 - mutex_unlock(&lport->lp_mutex);
4921 + mutex_unlock(&lport->lp_mutex);
4925 @@ -961,8 +1067,7 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
4927 mutex_lock(&lport->lp_mutex);
4929 - if (fc_lport_debug)
4930 - FC_DBG("Received a SCR response\n");
4931 + FC_DEBUG_LPORT("Received a SCR response\n");
4933 if (lport->state != LPORT_ST_SCR) {
4934 FC_DBG("Received a SCR response, but in state %s\n",
4935 @@ -972,7 +1077,7 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
4938 fc_lport_error(lport, fp);
4943 op = fc_frame_payload_op(fp);
4944 @@ -982,8 +1087,9 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
4945 fc_lport_error(lport, fp);
4948 - mutex_unlock(&lport->lp_mutex);
4951 + mutex_unlock(&lport->lp_mutex);
4955 @@ -998,9 +1104,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
4956 struct fc_frame *fp;
4957 struct fc_els_scr *scr;
4959 - if (fc_lport_debug)
4960 - FC_DBG("Port (%6x) entered SCR state from %s state\n",
4961 - lport->fid, fc_lport_state(lport));
4962 + FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
4963 + fc_host_port_id(lport->host), fc_lport_state(lport));
4965 fc_lport_state_enter(lport, LPORT_ST_SCR);
4967 @@ -1020,7 +1125,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
4968 if (!lport->tt.exch_seq_send(lport, fp,
4969 fc_lport_scr_resp, NULL,
4970 lport, lport->e_d_tov,
4971 - lport->fid, FC_FID_FCTRL,
4972 + fc_host_port_id(lport->host),
4974 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
4975 fc_lport_error(lport, fp);
4977 @@ -1043,9 +1149,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
4978 struct fc_ns_fts *lps;
4981 - if (fc_lport_debug)
4982 - FC_DBG("Port (%6x) entered RFT_ID state from %s state\n",
4983 - lport->fid, fc_lport_state(lport));
4984 + FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
4985 + fc_host_port_id(lport->host), fc_lport_state(lport));
4987 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
4989 @@ -1069,14 +1174,14 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
4992 sizeof(struct fc_ct_hdr));
4993 - hton24(req->fid.fp_fid, lport->fid);
4994 + hton24(req->fid.fp_fid, fc_host_port_id(lport->host));
4996 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
4998 if (!lport->tt.exch_seq_send(lport, fp,
4999 fc_lport_rft_id_resp, NULL,
5000 lport, lport->e_d_tov,
5002 + fc_host_port_id(lport->host),
5006 @@ -1099,9 +1204,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
5007 struct fc_ns_rn_id rn;
5010 - if (fc_lport_debug)
5011 - FC_DBG("Port (%6x) entered RPN_ID state from %s state\n",
5012 - lport->fid, fc_lport_state(lport));
5013 + FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
5014 + fc_host_port_id(lport->host), fc_lport_state(lport));
5016 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
5018 @@ -1114,14 +1218,14 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
5019 req = fc_frame_payload_get(fp, sizeof(*req));
5020 memset(req, 0, sizeof(*req));
5021 fc_fill_dns_hdr(lport, &req->ct, FC_NS_RPN_ID, sizeof(req->rn));
5022 - hton24(req->rn.fr_fid.fp_fid, lport->fid);
5023 + hton24(req->rn.fr_fid.fp_fid, fc_host_port_id(lport->host));
5024 put_unaligned_be64(lport->wwpn, &req->rn.fr_wwn);
5025 fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
5027 if (!lport->tt.exch_seq_send(lport, fp,
5028 fc_lport_rpn_id_resp, NULL,
5029 lport, lport->e_d_tov,
5031 + fc_host_port_id(lport->host),
5033 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5034 fc_lport_error(lport, fp);
5035 @@ -1147,20 +1251,18 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
5036 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
5039 - if (fc_lport_debug)
5040 - FC_DBG("Port (%6x) entered DNS state from %s state\n",
5041 - lport->fid, fc_lport_state(lport));
5042 + FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
5043 + fc_host_port_id(lport->host), fc_lport_state(lport));
5045 fc_lport_state_enter(lport, LPORT_ST_DNS);
5047 if (!lport->dns_rp) {
5048 - /* Set up a dummy rport to directory server */
5049 - rport = fc_rport_dummy_create(&dp);
5050 + /* Set up a rogue rport to directory server */
5051 + rport = fc_rport_rogue_create(&dp);
5055 lport->dns_rp = rport;
5056 - FC_DBG("created an rport for the NS\n");
5059 rport = lport->dns_rp;
5060 @@ -1232,8 +1334,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5062 mutex_lock(&lport->lp_mutex);
5064 - if (fc_lport_debug)
5065 - FC_DBG("Received a LOGO response\n");
5066 + FC_DEBUG_LPORT("Received a LOGO response\n");
5068 if (lport->state != LPORT_ST_LOGO) {
5069 FC_DBG("Received a LOGO response, but in state %s\n",
5070 @@ -1243,7 +1344,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5073 fc_lport_error(lport, fp);
5078 op = fc_frame_payload_op(fp);
5079 @@ -1253,8 +1354,9 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5080 fc_lport_error(lport, fp);
5083 - mutex_unlock(&lport->lp_mutex);
5086 + mutex_unlock(&lport->lp_mutex);
5090 @@ -1269,15 +1371,14 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5091 struct fc_frame *fp;
5092 struct fc_els_logo *logo;
5094 - if (fc_lport_debug)
5095 - FC_DBG("Port (%6x) entered LOGO state from %s state\n",
5096 - lport->fid, fc_lport_state(lport));
5097 + FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
5098 + fc_host_port_id(lport->host), fc_lport_state(lport));
5100 fc_lport_state_enter(lport, LPORT_ST_LOGO);
5102 /* DNS session should be closed so we can release it here */
5103 if (lport->dns_rp) {
5104 - fc_remote_port_delete(lport->dns_rp);
5105 + lport->tt.rport_logout(lport->dns_rp);
5106 lport->dns_rp = NULL;
5109 @@ -1290,7 +1391,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5110 logo = fc_frame_payload_get(fp, sizeof(*logo));
5111 memset(logo, 0, sizeof(*logo));
5112 logo->fl_cmd = ELS_LOGO;
5113 - hton24(logo->fl_n_port_id, lport->fid);
5114 + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
5115 logo->fl_n_port_wwn = htonll(lport->wwpn);
5116 fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5117 fc_frame_set_offset(fp, 0);
5118 @@ -1298,7 +1399,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5119 if (!lport->tt.exch_seq_send(lport, fp,
5120 fc_lport_logo_resp, NULL,
5121 lport, lport->e_d_tov,
5122 - lport->fid, FC_FID_FLOGI,
5123 + fc_host_port_id(lport->host), FC_FID_FLOGI,
5124 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5125 fc_lport_error(lport, fp);
5127 @@ -1327,8 +1428,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5129 mutex_lock(&lport->lp_mutex);
5131 - if (fc_lport_debug)
5132 - FC_DBG("Received a FLOGI response\n");
5133 + FC_DEBUG_LPORT("Received a FLOGI response\n");
5135 if (lport->state != LPORT_ST_FLOGI) {
5136 FC_DBG("Received a FLOGI response, but in state %s\n",
5137 @@ -1338,16 +1438,16 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5140 fc_lport_error(lport, fp);
5145 fh = fc_frame_header_get(fp);
5146 did = ntoh24(fh->fh_d_id);
5147 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
5148 - if (fc_lport_debug)
5149 - FC_DBG("Assigned fid %x\n", did);
5152 + FC_DEBUG_LPORT("Assigned fid %x\n", did);
5153 + fc_host_port_id(lport->host) = did;
5155 flp = fc_frame_payload_get(fp, sizeof(*flp));
5157 mfs = ntohs(flp->fl_csp.sp_bb_data) &
5158 @@ -1391,8 +1491,9 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5162 - mutex_unlock(&lport->lp_mutex);
5165 + mutex_unlock(&lport->lp_mutex);
5169 @@ -1407,8 +1508,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
5170 struct fc_frame *fp;
5171 struct fc_els_flogi *flp;
5173 - if (fc_lport_debug)
5174 - FC_DBG("Processing FLOGI state\n");
5175 + FC_DEBUG_LPORT("Processing FLOGI state\n");
5177 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
5179 @@ -1436,6 +1536,7 @@ int fc_lport_config(struct fc_lport *lport)
5181 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
5182 mutex_init(&lport->lp_mutex);
5183 + INIT_LIST_HEAD(&lport->rports);
5185 fc_lport_state_enter(lport, LPORT_ST_NONE);
5187 @@ -1456,6 +1557,9 @@ int fc_lport_init(struct fc_lport *lport)
5188 if (!lport->tt.lport_reset)
5189 lport->tt.lport_reset = fc_lport_reset;
5191 + if (!lport->tt.rport_lookup)
5192 + lport->tt.rport_lookup = fc_lport_lookup_rport;
5194 if (!lport->tt.event_callback)
5195 lport->tt.event_callback = fc_lport_rport_event;
5197 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
5198 index 107b304..651a3ed 100644
5199 --- a/drivers/scsi/libfc/fc_rport.c
5200 +++ b/drivers/scsi/libfc/fc_rport.c
5205 - * Remote Port support.
5206 + * This file contains all processing regarding fc_rports. It contains the
5207 + * rport state machine and does all rport interaction with the transport class.
5208 + * There should be no other places in libfc that interact directly with the
5209 + * transport class in regards to adding and deleting rports.
5211 - * A remote port structure contains information about an N port to which we
5212 - * will create sessions.
5213 + * fc_rport's represent N_Port's within the fabric.
5215 + * rport locking notes:
5217 + * The rport should never hold the rport mutex and then lock the lport
5218 + * mutex. The rport's mutex is considered lesser than the lport's mutex, so
5219 + * the lport mutex can be held before locking the rport mutex, but not the
5220 + * other way around. See the comment block at the top of fc_lport.c for more
5223 + * The locking strategy is similar to the lport's strategy. The lock protects
5224 + * the rport's states and is held and released by the entry points to the rport
5225 + * block. All _enter_* functions correspond to rport states and expect the rport
5226 + mutex to be locked before calling them. This means that rports only handle one
5227 + request or response at a time; since they're not critical for the I/O path,
5228 + this potential over-use of the mutex is acceptable.
5231 #include <linux/kernel.h>
5234 #include <scsi/libfc/libfc.h>
5236 -static int fc_rp_debug;
5237 +static int fc_rport_debug;
5239 +#define FC_DEBUG_RPORT(fmt...) \
5241 + if (fc_rport_debug) \
5245 +static struct workqueue_struct *rport_event_queue;
5247 static void fc_rport_enter_plogi(struct fc_rport *);
5248 static void fc_rport_enter_prli(struct fc_rport *);
5249 @@ -52,6 +77,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *,
5250 struct fc_seq *, struct fc_frame *);
5251 static void fc_rport_timeout(struct work_struct *);
5252 static void fc_rport_error(struct fc_rport *, struct fc_frame *);
5253 +static void fc_rport_work(struct work_struct *);
5255 static const char *fc_rport_state_names[] = {
5256 [RPORT_ST_NONE] = "None",
5257 @@ -63,7 +89,7 @@ static const char *fc_rport_state_names[] = {
5258 [RPORT_ST_LOGO] = "LOGO",
5261 -struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *dp)
5262 +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
5264 struct fc_rport *rport;
5265 struct fc_rport_libfc_priv *rdata;
5266 @@ -91,11 +117,17 @@ struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *dp)
5267 rdata->e_d_tov = dp->lp->e_d_tov;
5268 rdata->r_a_tov = dp->lp->r_a_tov;
5269 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
5270 + INIT_WORK(&rdata->event_work, fc_rport_work);
5272 + * For good measure, but not necessary as we should only
5273 + * add REAL rports to the lport list.
5275 + INIT_LIST_HEAD(&rdata->peers);
5280 -void fc_rport_dummy_destroy(struct fc_rport *rport)
5281 +void fc_rport_rogue_destroy(struct fc_rport *rport)
5285 @@ -116,30 +148,6 @@ static const char *fc_rport_state(struct fc_rport *rport)
5289 - * fc_rport_lookup - lookup a remote port by port_id
5290 - * @lp: Fibre Channel host port instance
5291 - * @fid: remote port port_id to match
5293 -struct fc_rport *fc_rport_lookup(const struct fc_lport *lp, u32 fid)
5295 - struct Scsi_Host *shost = lp->host;
5296 - struct fc_rport *rport, *found;
5297 - unsigned long flags;
5300 - spin_lock_irqsave(shost->host_lock, flags);
5301 - list_for_each_entry(rport, &fc_host_rports(shost), peers)
5302 - if (rport->port_id == fid &&
5303 - rport->port_state == FC_PORTSTATE_ONLINE) {
5305 - get_device(&found->dev);
5308 - spin_unlock_irqrestore(shost->host_lock, flags);
5313 * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
5314 * @rport: Pointer to Fibre Channel remote port structure
5315 * @timeout: timeout in seconds
5316 @@ -229,15 +237,20 @@ static void fc_rport_state_enter(struct fc_rport *rport,
5317 rdata->rp_state = new;
5320 -static void fc_rport_unlock(struct fc_rport *rport)
5321 +static void fc_rport_work(struct work_struct *work)
5323 - struct fc_rport_libfc_priv *rdata = rport->dd_data;
5324 - enum fc_lport_event event = rdata->event;
5325 + struct fc_rport_libfc_priv *rdata =
5326 + container_of(work, struct fc_rport_libfc_priv, event_work);
5327 + enum fc_lport_event event;
5328 + enum fc_rport_trans_state trans_state;
5329 struct fc_lport *lport = rdata->local_port;
5330 - u32 fid = rport->port_id;
5331 - void (*event_callback)(struct fc_lport *, u32,
5332 - enum fc_lport_event) =
5333 - rdata->event_callback;
5334 + void (*event_callback)(struct fc_lport *, struct fc_rport *,
5335 + enum fc_lport_event);
5336 + struct fc_rport *rport = PRIV_TO_RPORT(rdata);
5338 + mutex_lock(&rdata->rp_mutex);
5339 + event = rdata->event;
5340 + event_callback = rdata->event_callback;
5342 if (event == LPORT_EV_RPORT_CREATED) {
5343 struct fc_rport *new_rport;
5344 @@ -249,10 +262,12 @@ static void fc_rport_unlock(struct fc_rport *rport)
5345 ids.port_name = rport->port_name;
5346 ids.node_name = rport->node_name;
5348 + mutex_unlock(&rdata->rp_mutex);
5350 new_rport = fc_remote_port_add(lport->host, 0, &ids);
5353 - * Switch from the dummy rport to the rport
5354 + * Switch from the rogue rport to the rport
5355 * returned by the FC class.
5357 new_rport->maxframe_size = rport->maxframe_size;
5358 @@ -267,36 +282,32 @@ static void fc_rport_unlock(struct fc_rport *rport)
5359 mutex_init(&new_rdata->rp_mutex);
5360 INIT_DELAYED_WORK(&new_rdata->retry_work,
5362 + INIT_LIST_HEAD(&new_rdata->peers);
5363 + INIT_WORK(&new_rdata->event_work, fc_rport_work);
5365 fc_rport_state_enter(new_rport, RPORT_ST_READY);
5366 - fc_remote_port_rolechg(new_rport, rdata->roles);
5368 FC_DBG("Failed to create the rport for port "
5369 "(%6x).\n", ids.port_id);
5370 event = LPORT_EV_RPORT_FAILED;
5373 - mutex_unlock(&rdata->rp_mutex);
5374 - fc_rport_dummy_destroy(rport);
5375 + fc_rport_rogue_destroy(rport);
5377 rdata = new_rport->dd_data;
5378 + event_callback(lport, rport, event);
5379 } else if ((event == LPORT_EV_RPORT_FAILED) ||
5380 - (event == LPORT_EV_RPORT_LOGO)) {
5381 - if (rdata->trans_state == FC_PORTSTATE_ROGUE) {
5382 - mutex_unlock(&rdata->rp_mutex);
5383 - fc_rport_dummy_destroy(rport);
5385 - mutex_unlock(&rdata->rp_mutex);
5386 + (event == LPORT_EV_RPORT_LOGO) ||
5387 + (event == LPORT_EV_RPORT_STOP)) {
5389 + trans_state = rdata->trans_state;
5390 + mutex_unlock(&rdata->rp_mutex);
5391 + event_callback(lport, rport, event);
5392 + if (trans_state == FC_PORTSTATE_ROGUE)
5393 + fc_rport_rogue_destroy(rport);
5395 fc_remote_port_delete(rport);
5399 mutex_unlock(&rdata->rp_mutex);
5402 - if (event != LPORT_EV_RPORT_NONE && event_callback) {
5403 - event_callback(lport, fid, event);
5404 - rdata->event = LPORT_EV_RPORT_NONE;
5409 @@ -313,12 +324,11 @@ int fc_rport_login(struct fc_rport *rport)
5411 mutex_lock(&rdata->rp_mutex);
5414 - FC_DBG("Login to port (%6x)\n", rport->port_id);
5415 + FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
5417 fc_rport_enter_plogi(rport);
5419 - fc_rport_unlock(rport);
5420 + mutex_unlock(&rdata->rp_mutex);
5424 @@ -337,57 +347,37 @@ int fc_rport_logout(struct fc_rport *rport)
5426 mutex_lock(&rdata->rp_mutex);
5429 - FC_DBG("Logout of port (%6x)\n", rport->port_id);
5430 + FC_DEBUG_RPORT("Logout of port (%6x)\n", rport->port_id);
5432 fc_rport_enter_logo(rport);
5433 - fc_rport_unlock(rport);
5435 + mutex_unlock(&rdata->rp_mutex);
5441 - * fc_rport_reset - Reset the remote port
5442 - * @rport: Fibre Channel remote port
5444 - * XXX - This functionality is currently broken
5445 + * fc_rport_remove - Remove an rport
5446 + * @rport: Fibre Channel remote port to be removed
5448 * Locking Note: Called without the rport lock held. This
5449 * function will hold the rport lock, call an _enter_*
5450 * function and then unlock the rport.
5452 -void fc_rport_reset(struct fc_rport *rport)
5453 +int fc_rport_stop(struct fc_rport *rport)
5455 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5457 mutex_lock(&rdata->rp_mutex);
5460 - FC_DBG("Reset port (%6x)\n", rport->port_id);
5461 + FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
5463 - fc_rport_enter_plogi(rport);
5465 - fc_rport_unlock(rport);
5467 + rdata->event = LPORT_EV_RPORT_STOP;
5468 + queue_work(rport_event_queue, &rdata->event_work);
5471 - * fc_rport_reset_list - Reset all sessions for a local port session list.
5472 - * @lport: The lport whose rports should be reset
5474 - * Locking Note: TBD
5476 -void fc_rport_reset_list(struct fc_lport *lport)
5478 - struct Scsi_Host *shost = lport->host;
5479 - struct fc_rport *rport;
5480 - struct fc_rport *next;
5481 - unsigned long flags;
5482 + mutex_unlock(&rdata->rp_mutex);
5484 - spin_lock_irqsave(shost->host_lock, flags);
5485 - list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
5486 - lport->tt.rport_reset(rport);
5488 - spin_unlock_irqrestore(shost->host_lock, flags);
5493 @@ -403,10 +393,10 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
5495 fc_rport_state_enter(rport, RPORT_ST_READY);
5498 - FC_DBG("Port (%6x) is Ready\n", rport->port_id);
5499 + FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
5501 rdata->event = LPORT_EV_RPORT_CREATED;
5502 + queue_work(rport_event_queue, &rdata->event_work);
5506 @@ -447,7 +437,7 @@ static void fc_rport_timeout(struct work_struct *work)
5508 put_device(&rport->dev);
5510 - fc_rport_unlock(rport);
5511 + mutex_unlock(&rdata->rp_mutex);
5515 @@ -467,37 +457,37 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
5516 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5517 unsigned long delay = 0;
5520 - FC_DBG("Error %ld in state %s, retries %d\n",
5521 + FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
5522 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
5524 - if (rdata->retries < rdata->local_port->max_retry_count) {
5527 - delay = msecs_to_jiffies(500);
5528 - get_device(&rport->dev);
5529 - schedule_delayed_work(&rdata->retry_work, delay);
5531 - switch (rdata->rp_state) {
5532 - case RPORT_ST_PLOGI:
5533 - case RPORT_ST_PRLI:
5534 - case RPORT_ST_LOGO:
5536 - FC_DBG("Remote port (%6x) closed.\n",
5539 - fc_remote_port_delete(rport);
5541 - rdata->event = LPORT_EV_RPORT_FAILED;
5543 - case RPORT_ST_RTV:
5544 - fc_rport_enter_ready(rport);
5546 - case RPORT_ST_NONE:
5547 - case RPORT_ST_READY:
5548 - case RPORT_ST_INIT:
5551 + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
5553 + * Memory allocation failure, or the exchange timed out.
5554 + * Retry after delay
5556 + if (rdata->retries < rdata->local_port->max_retry_count) {
5559 + delay = msecs_to_jiffies(500);
5560 + get_device(&rport->dev);
5561 + schedule_delayed_work(&rdata->retry_work, delay);
5563 + switch (rdata->rp_state) {
5564 + case RPORT_ST_PLOGI:
5565 + case RPORT_ST_PRLI:
5566 + case RPORT_ST_LOGO:
5567 + rdata->event = LPORT_EV_RPORT_FAILED;
5568 + queue_work(rport_event_queue, &rdata->event_work);
5570 + case RPORT_ST_RTV:
5571 + fc_rport_enter_ready(rport);
5573 + case RPORT_ST_NONE:
5574 + case RPORT_ST_READY:
5575 + case RPORT_ST_INIT:
5582 @@ -526,8 +516,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5584 mutex_lock(&rdata->rp_mutex);
5587 - FC_DBG("Received a PLOGI response\n");
5588 + FC_DEBUG_RPORT("Received a PLOGI response\n");
5590 if (rdata->rp_state != RPORT_ST_PLOGI) {
5591 FC_DBG("Received a PLOGI response, but in state %s\n",
5592 @@ -537,12 +526,15 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5595 fc_rport_error(rport, fp);
5600 op = fc_frame_payload_op(fp);
5601 if (op == ELS_LS_ACC &&
5602 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
5603 + rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
5604 + rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
5606 tov = ntohl(plp->fl_csp.sp_e_d_tov);
5607 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
5609 @@ -568,8 +560,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5610 fc_rport_error(rport, fp);
5613 - fc_rport_unlock(rport);
5616 + mutex_unlock(&rdata->rp_mutex);
5620 @@ -586,8 +579,7 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
5621 struct fc_frame *fp;
5622 struct fc_els_flogi *plogi;
5625 - FC_DBG("Port (%6x) entered PLOGI state from %s state\n",
5626 + FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
5627 rport->port_id, fc_rport_state(rport));
5629 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
5630 @@ -607,7 +599,7 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
5631 if (!lport->tt.exch_seq_send(lport, fp,
5632 fc_rport_plogi_resp, NULL,
5633 rport, lport->e_d_tov,
5634 - rdata->local_port->fid,
5635 + fc_host_port_id(rdata->local_port->host),
5637 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5638 fc_rport_error(rport, fp);
5639 @@ -638,8 +630,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5641 mutex_lock(&rdata->rp_mutex);
5644 - FC_DBG("Received a PRLI response\n");
5645 + FC_DEBUG_RPORT("Received a PRLI response\n");
5647 if (rdata->rp_state != RPORT_ST_PRLI) {
5648 FC_DBG("Received a PRLI response, but in state %s\n",
5649 @@ -649,7 +640,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5652 fc_rport_error(rport, fp);
5657 op = fc_frame_payload_op(fp);
5658 @@ -667,18 +658,19 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5659 if (fcp_parm & FCP_SPPF_TARG_FCN)
5660 roles |= FC_RPORT_ROLE_FCP_TARGET;
5662 - rdata->roles = roles;
5663 + rport->roles = roles;
5664 fc_rport_enter_rtv(rport);
5667 FC_DBG("Bad ELS response\n");
5668 rdata->event = LPORT_EV_RPORT_FAILED;
5669 - fc_remote_port_delete(rport);
5670 + queue_work(rport_event_queue, &rdata->event_work);
5674 - fc_rport_unlock(rport);
5677 + mutex_unlock(&rdata->rp_mutex);
5681 @@ -700,8 +692,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5683 mutex_lock(&rdata->rp_mutex);
5686 - FC_DBG("Received a LOGO response\n");
5687 + FC_DEBUG_RPORT("Received a LOGO response\n");
5689 if (rdata->rp_state != RPORT_ST_LOGO) {
5690 FC_DBG("Received a LOGO response, but in state %s\n",
5691 @@ -711,22 +702,22 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5694 fc_rport_error(rport, fp);
5699 op = fc_frame_payload_op(fp);
5700 if (op == ELS_LS_ACC) {
5701 fc_rport_enter_rtv(rport);
5704 FC_DBG("Bad ELS response\n");
5705 rdata->event = LPORT_EV_RPORT_LOGO;
5706 - fc_remote_port_delete(rport);
5707 + queue_work(rport_event_queue, &rdata->event_work);
5711 - fc_rport_unlock(rport);
5714 + mutex_unlock(&rdata->rp_mutex);
5718 @@ -746,8 +737,7 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
5720 struct fc_frame *fp;
5723 - FC_DBG("Port (%6x) entered PRLI state from %s state\n",
5724 + FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
5725 rport->port_id, fc_rport_state(rport));
5727 fc_rport_state_enter(rport, RPORT_ST_PRLI);
5728 @@ -771,7 +761,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
5729 if (!lport->tt.exch_seq_send(lport, fp,
5730 fc_rport_prli_resp, NULL,
5731 rport, lport->e_d_tov,
5732 - lport->fid, rport->port_id,
5733 + fc_host_port_id(lport->host),
5735 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5736 fc_rport_error(rport, fp);
5738 @@ -797,8 +788,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5740 mutex_lock(&rdata->rp_mutex);
5743 - FC_DBG("Received a RTV response\n");
5744 + FC_DEBUG_RPORT("Received a RTV response\n");
5746 if (rdata->rp_state != RPORT_ST_RTV) {
5747 FC_DBG("Received a RTV response, but in state %s\n",
5748 @@ -808,7 +798,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5751 fc_rport_error(rport, fp);
5756 op = fc_frame_payload_op(fp);
5757 @@ -836,8 +826,9 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5758 fc_rport_enter_ready(rport);
5761 - fc_rport_unlock(rport);
5764 + mutex_unlock(&rdata->rp_mutex);
5768 @@ -854,8 +845,7 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
5769 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5770 struct fc_lport *lport = rdata->local_port;
5773 - FC_DBG("Port (%6x) entered RTV state from %s state\n",
5774 + FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
5775 rport->port_id, fc_rport_state(rport));
5777 fc_rport_state_enter(rport, RPORT_ST_RTV);
5778 @@ -874,7 +864,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
5779 if (!lport->tt.exch_seq_send(lport, fp,
5780 fc_rport_rtv_resp, NULL,
5781 rport, lport->e_d_tov,
5782 - lport->fid, rport->port_id,
5783 + fc_host_port_id(lport->host),
5785 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5786 fc_rport_error(rport, fp);
5788 @@ -893,8 +884,7 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
5789 struct fc_frame *fp;
5790 struct fc_els_logo *logo;
5793 - FC_DBG("Port (%6x) entered LOGO state from %s state\n",
5794 + FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
5795 rport->port_id, fc_rport_state(rport));
5797 fc_rport_state_enter(rport, RPORT_ST_LOGO);
5798 @@ -908,14 +898,15 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
5799 logo = fc_frame_payload_get(fp, sizeof(*logo));
5800 memset(logo, 0, sizeof(*logo));
5801 logo->fl_cmd = ELS_LOGO;
5802 - hton24(logo->fl_n_port_id, lport->fid);
5803 + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
5804 logo->fl_n_port_wwn = htonll(lport->wwpn);
5805 fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5807 if (!lport->tt.exch_seq_send(lport, fp,
5808 fc_rport_logo_resp, NULL,
5809 rport, lport->e_d_tov,
5810 - lport->fid, rport->port_id,
5811 + fc_host_port_id(lport->host),
5813 FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5814 fc_rport_error(rport, fp);
5816 @@ -979,7 +970,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
5820 - fc_rport_unlock(rport);
5821 + mutex_unlock(&rdata->rp_mutex);
5825 @@ -1011,8 +1002,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
5827 fh = fc_frame_header_get(fp);
5830 - FC_DBG("Received PLOGI request from port (%6x) "
5831 + FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
5832 "while in state %s\n", ntoh24(fh->fh_s_id),
5833 fc_rport_state(rport));
5835 @@ -1041,29 +1031,25 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
5837 switch (rdata->rp_state) {
5840 - FC_DBG("incoming PLOGI from %6x wwpn %llx state INIT "
5841 + FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
5842 "- reject\n", sid, wwpn);
5843 reject = ELS_RJT_UNSUP;
5845 case RPORT_ST_PLOGI:
5847 - FC_DBG("incoming PLOGI from %x in PLOGI state %d\n",
5848 + FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
5849 sid, rdata->rp_state);
5850 if (wwpn < lport->wwpn)
5851 reject = ELS_RJT_INPROG;
5854 case RPORT_ST_READY:
5856 - FC_DBG("incoming PLOGI from %x in logged-in state %d "
5857 + FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
5858 "- ignored for now\n", sid, rdata->rp_state);
5859 /* XXX TBD - should reset */
5864 - FC_DBG("incoming PLOGI from %x in unexpected "
5865 + FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
5866 "state %d\n", sid, rdata->rp_state);
5869 @@ -1145,8 +1131,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
5871 fh = fc_frame_header_get(rx_fp);
5874 - FC_DBG("Received PRLI request from port (%6x) "
5875 + FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
5876 "while in state %s\n", ntoh24(fh->fh_s_id),
5877 fc_rport_state(rport));
5879 @@ -1220,7 +1205,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
5880 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
5881 if (fcp_parm & FCP_SPPF_TARG_FCN)
5882 roles |= FC_RPORT_ROLE_FCP_TARGET;
5883 - rdata->roles = roles;
5884 + rport->roles = roles;
5887 htonl(lport->service_params);
5888 @@ -1278,8 +1263,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
5890 fh = fc_frame_header_get(fp);
5893 - FC_DBG("Received PRLO request from port (%6x) "
5894 + FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
5895 "while in state %s\n", ntoh24(fh->fh_s_id),
5896 fc_rport_state(rport));
5898 @@ -1308,12 +1292,12 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
5900 fh = fc_frame_header_get(fp);
5903 - FC_DBG("Received LOGO request from port (%6x) "
5904 + FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
5905 "while in state %s\n", ntoh24(fh->fh_s_id),
5906 fc_rport_state(rport));
5908 rdata->event = LPORT_EV_RPORT_LOGO;
5909 + queue_work(rport_event_queue, &rdata->event_work);
5911 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
5913 @@ -1327,63 +1311,37 @@ int fc_rport_init(struct fc_lport *lport)
5914 if (!lport->tt.rport_logout)
5915 lport->tt.rport_logout = fc_rport_logout;
5917 + if (!lport->tt.rport_stop)
5918 + lport->tt.rport_stop = fc_rport_stop;
5920 if (!lport->tt.rport_recv_req)
5921 lport->tt.rport_recv_req = fc_rport_recv_req;
5923 - if (!lport->tt.rport_lookup)
5924 - lport->tt.rport_lookup = fc_rport_lookup;
5926 - if (!lport->tt.rport_reset)
5927 - lport->tt.rport_reset = fc_rport_reset;
5929 - if (!lport->tt.rport_reset_list)
5930 - lport->tt.rport_reset_list = fc_rport_reset_list;
5934 EXPORT_SYMBOL(fc_rport_init);
5937 - * fc_block_rports - delete all the remote ports, on reset or link down
5938 - * @lp: libfc local port instance
5940 - * This routine temporarily removes any online remote ports from the fc_host
5941 - * rport list, then drops the host lock in order to call fc_remote_port_delete()
5942 - * on each rport in turn, and finally splices the list back onto the fc_host.
5944 -void fc_block_rports(struct fc_lport *lp)
5945 +int fc_setup_rport()
5947 - struct Scsi_Host *shost = lp->host;
5948 - struct fc_rport *rport, *next;
5949 - unsigned long flags;
5950 - LIST_HEAD(rports);
5952 - spin_lock_irqsave(shost->host_lock, flags);
5953 - list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
5954 - /* protect the name service remote port */
5955 - if (rport->port_id == FC_FID_DIR_SERV)
5957 - if (rport->port_state != FC_PORTSTATE_ONLINE)
5959 - list_move_tail(&rport->peers, &rports);
5961 - spin_unlock_irqrestore(shost->host_lock, flags);
5963 - list_for_each_entry(rport, &rports, peers) {
5964 - fc_remote_port_delete(rport);
5966 + rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
5967 + if (!rport_event_queue)
5971 +EXPORT_SYMBOL(fc_setup_rport);
5973 - spin_lock_irqsave(shost->host_lock, flags);
5974 - list_splice(&rports, &fc_host_rports(shost));
5975 - spin_unlock_irqrestore(shost->host_lock, flags);
5976 +void fc_destroy_rport()
5978 + destroy_workqueue(rport_event_queue);
5980 +EXPORT_SYMBOL(fc_destroy_rport);
5982 void fc_rport_terminate_io(struct fc_rport *rport)
5984 - struct fc_rport_libfc_priv *rp = rport->dd_data;
5985 - struct fc_lport *lp = rp->local_port;
5986 + struct fc_rport_libfc_priv *rdata = rport->dd_data;
5987 + struct fc_lport *lport = rdata->local_port;
5989 - lp->tt.exch_mgr_reset(lp->emp, 0, rport->port_id);
5990 - lp->tt.exch_mgr_reset(lp->emp, rport->port_id, 0);
5991 + lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
5992 + lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
5994 EXPORT_SYMBOL(fc_rport_terminate_io);
5995 diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h
5996 index c7a52bb..9508e55 100644
5997 --- a/include/scsi/libfc/fc_frame.h
5998 +++ b/include/scsi/libfc/fc_frame.h
6000 #define fr_sof(fp) (fr_cb(fp)->fr_sof)
6001 #define fr_eof(fp) (fr_cb(fp)->fr_eof)
6002 #define fr_flags(fp) (fr_cb(fp)->fr_flags)
6003 +#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
6007 @@ -63,6 +64,7 @@ struct fcoe_rcv_info {
6008 enum fc_sof fr_sof; /* start of frame delimiter */
6009 enum fc_eof fr_eof; /* end of frame delimiter */
6010 u8 fr_flags; /* flags - see below */
6011 + u16 fr_max_payload; /* max FC payload */
6015 diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h
6016 index 24d3fcb..7e5e6be 100644
6017 --- a/include/scsi/libfc/libfc.h
6018 +++ b/include/scsi/libfc/libfc.h
6027 #define FC_DBG(fmt, args...) \
6029 - printk(KERN_INFO "%s " fmt, __func__, ##args); \
6030 + printk(KERN_INFO "%s " fmt, __func__, ##args); \
6033 #define FC_DBG(fmt, args...)
6035 #define ntohll(x) be64_to_cpu(x)
6036 #define htonll(x) cpu_to_be64(x)
6038 -#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
6039 +#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
6041 -#define hton24(p, v) do { \
6042 - p[0] = (((v) >> 16) & 0xFF); \
6043 - p[1] = (((v) >> 8) & 0xFF); \
6044 - p[2] = ((v) & 0xFF); \
6046 +#define hton24(p, v) do { \
6047 + p[0] = (((v) >> 16) & 0xFF); \
6048 + p[1] = (((v) >> 8) & 0xFF); \
6049 + p[2] = ((v) & 0xFF); \
6057 -#define FC_TGT_REC_SUPPORTED (1 << 0)
6062 #define FC_PAUSE (1 << 1)
6063 #define FC_LINK_UP (1 << 0)
6066 -#define FC_MAX_OUTSTANDING_COMMANDS 1024
6069 - * Transport Capabilities
6071 -#define TRANS_C_SG (1 << 0) /* Scatter gather */
6073 enum fc_lport_state {
6076 @@ -104,6 +89,7 @@ enum fc_lport_event {
6077 LPORT_EV_RPORT_NONE = 0,
6078 LPORT_EV_RPORT_CREATED,
6079 LPORT_EV_RPORT_FAILED,
6080 + LPORT_EV_RPORT_STOP,
6084 @@ -163,9 +149,11 @@ struct fc_rport_libfc_priv {
6085 struct mutex rp_mutex;
6086 struct delayed_work retry_work;
6087 enum fc_lport_event event;
6088 - void (*event_callback)(struct fc_lport *, u32,
6089 + void (*event_callback)(struct fc_lport *,
6090 + struct fc_rport *,
6091 enum fc_lport_event);
6093 + struct list_head peers;
6094 + struct work_struct event_work;
6097 #define PRIV_TO_RPORT(x) \
6098 @@ -173,8 +161,8 @@ struct fc_rport_libfc_priv {
6099 #define RPORT_TO_PRIV(x) \
6100 (struct fc_rport_libfc_priv*)((void *)x + sizeof(struct fc_rport));
6102 -struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *);
6103 -void fc_rport_dummy_destroy(struct fc_rport *);
6104 +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
6105 +void fc_rport_rogue_destroy(struct fc_rport *);
6107 static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
6109 @@ -360,7 +348,7 @@ struct libfc_function_template {
6111 int (*lport_reset)(struct fc_lport *);
6113 - void (*event_callback)(struct fc_lport *, u32,
6114 + void (*event_callback)(struct fc_lport *, struct fc_rport *,
6115 enum fc_lport_event);
6118 @@ -384,15 +372,21 @@ struct libfc_function_template {
6120 int (*rport_logout)(struct fc_rport *rport);
6123 + * Delete the rport and remove it from the transport if
6124 + * it had been added. This will not send a LOGO, use
6125 + * rport_logout for a graceful logout.
6127 + int (*rport_stop)(struct fc_rport *rport);
6130 + * Receive a request from a remote port.
6132 void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
6135 struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
6137 - void (*rport_reset)(struct fc_rport *);
6139 - void (*rport_reset_list)(struct fc_lport *);
6144 @@ -429,6 +423,7 @@ struct fc_lport {
6145 struct fc_rport *dns_rp;
6146 struct fc_rport *ptp_rp;
6148 + struct list_head rports;
6150 /* Operational Information */
6151 struct libfc_function_template tt;
6152 @@ -442,7 +437,6 @@ struct fc_lport {
6158 unsigned char disc_retry_count;
6159 unsigned char disc_delay;
6160 @@ -452,8 +446,8 @@ struct fc_lport {
6161 unsigned char disc_buf_len;
6164 - char ifname[IFNAMSIZ];
6166 + u32 sg_supp:1; /* scatter gather supported */
6167 + u32 seq_offload:1; /* seq offload supported */
6168 u32 mfs; /* max FC payload size */
6169 unsigned int service_params;
6170 unsigned int e_d_tov;
6171 @@ -484,11 +478,6 @@ static inline int fc_lport_test_ready(struct fc_lport *lp)
6172 return lp->state == LPORT_ST_READY;
6175 -static inline u32 fc_lport_get_fid(const struct fc_lport *lp)
6180 static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
6183 @@ -586,8 +575,6 @@ int fc_set_mfs(struct fc_lport *lp, u32 mfs);
6184 *****************************/
6185 int fc_rport_init(struct fc_lport *lp);
6186 void fc_rport_terminate_io(struct fc_rport *rp);
6187 -void fc_block_rports(struct fc_lport *lp);
6192 @@ -776,6 +763,7 @@ void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data);
6193 * Functions for fc_functions_template
6195 void fc_get_host_speed(struct Scsi_Host *shost);
6196 +void fc_get_host_port_type(struct Scsi_Host *shost);
6197 void fc_get_host_port_state(struct Scsi_Host *shost);
6198 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
6199 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
6200 @@ -785,6 +773,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
6202 int fc_setup_exch_mgr(void);
6203 void fc_destroy_exch_mgr(void);
6205 +int fc_setup_rport(void);
6206 +void fc_destroy_rport(void);
6208 #endif /* _LIBFC_H_ */