Subject: Open-FCoE: Update for Beta4 From: John Fastabend Date: Thu Nov 6 13:08:49 2008 +0100: Git: c66b456a7eb389e5f19d5bf23170b47a3e01d755 References: bnc#438954 Incremental Open-FCoE update for Beta4. Signed-off-by: John Fastabend Acked-by: Hannes Reinecke diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile index 342e2ad..b78da06 100644 --- a/drivers/scsi/fcoe/Makefile +++ b/drivers/scsi/fcoe/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_FCOE) += fcoe.o fcoe-y := \ - fcoe_dev.o \ - fcoe_if.o \ - fcoeinit.o + libfcoe.o \ + fcoe_sw.o \ + fc_transport_fcoe.o diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c new file mode 100644 index 0000000..e11d36b --- /dev/null +++ b/drivers/scsi/fcoe/fc_transport_fcoe.c @@ -0,0 +1,396 @@ +/* + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "fcoe_def.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FCoE"); +MODULE_LICENSE("GPL"); + +/* + * Static functions and variables definations + */ +#ifdef CONFIG_HOTPLUG_CPU +static int fcoe_cpu_callback(struct notifier_block *, ulong, void *); +#endif /* CONFIG_HOTPLUG_CPU */ +static int fcoe_device_notification(struct notifier_block *, ulong, void *); +static void fcoe_dev_setup(void); +static void fcoe_dev_cleanup(void); + +#ifdef CONFIG_HOTPLUG_CPU +static struct notifier_block fcoe_cpu_notifier = { + .notifier_call = fcoe_cpu_callback, +}; +#endif /* CONFIG_HOTPLUG_CPU */ + +/* + * notification function from net device + */ +static struct notifier_block fcoe_notifier = { + .notifier_call = fcoe_device_notification, +}; + +#ifdef CONFIG_HOTPLUG_CPU +/* + * create percpu stats block + * called by cpu add/remove notifier + */ +static void fcoe_create_percpu_data(int cpu) +{ + struct fc_lport *lp; + struct fcoe_softc *fc; + struct fcoe_dev_stats *p; + + write_lock_bh(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + lp = fc->lp; + if (lp->dev_stats[cpu] == NULL) { + p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL); + if (p) + lp->dev_stats[cpu] = p; + } + } + write_unlock_bh(&fcoe_hostlist_lock); +} + +/* + * destroy percpu stats block + * called by cpu add/remove notifier + */ +static void fcoe_destroy_percpu_data(int cpu) +{ + struct fcoe_dev_stats *p; + struct fc_lport *lp; + struct fcoe_softc *fc; + + write_lock_bh(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + lp = fc->lp; + p = lp->dev_stats[cpu]; + if (p != NULL) { + lp->dev_stats[cpu] = NULL; + kfree(p); + } + } + write_unlock_bh(&fcoe_hostlist_lock); +} + +/* + * Get notified when a cpu comes on/off. Be hotplug friendly. 
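As context for the per-CPU statistics handling above: the driver keeps an NR_CPUS-sized array of kzalloc()'d fcoe_dev_stats blocks and patches it up from the CPU hotplug notifier. For comparison only, a minimal sketch of the same bookkeeping using the kernel's dynamic per-CPU allocator is shown below. This is an alternative the patch does not use, and the structure and function names here are placeholders.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholder statistics block standing in for struct fcoe_dev_stats. */
struct example_stats {
        u64 tx_frames;
        u64 rx_frames;
};

static struct example_stats *example_stats_pcpu;

static int example_stats_alloc(void)
{
        /* One zeroed copy per possible CPU; no hotplug callback required. */
        example_stats_pcpu = alloc_percpu(struct example_stats);
        return example_stats_pcpu ? 0 : -ENOMEM;
}

static void example_stats_count_tx(void)
{
        struct example_stats *s;

        /* Pin to the current CPU while touching its private block. */
        s = per_cpu_ptr(example_stats_pcpu, get_cpu());
        s->tx_frames++;
        put_cpu();
}

static void example_stats_free(void)
{
        free_percpu(example_stats_pcpu);
}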
+ */ +static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + fcoe_create_percpu_data(cpu); + break; + case CPU_DEAD: + fcoe_destroy_percpu_data(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} +#endif /* CONFIG_HOTPLUG_CPU */ + +/* + * function to setup link change notification interface + */ +static void fcoe_dev_setup(void) +{ + /* + * here setup a interface specific wd time to + * monitor the link state + */ + register_netdevice_notifier(&fcoe_notifier); +} + +/* + * function to cleanup link change notification interface + */ +static void fcoe_dev_cleanup(void) +{ + unregister_netdevice_notifier(&fcoe_notifier); +} + +/* + * This function is called by the ethernet driver + * this is called in case of link change event + */ +static int fcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct fc_lport *lp = NULL; + struct net_device *real_dev = ptr; + struct fcoe_softc *fc; + struct fcoe_dev_stats *stats; + u16 new_status; + u32 mfs; + int rc = NOTIFY_OK; + + read_lock(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + if (fc->real_dev == real_dev) { + lp = fc->lp; + break; + } + } + read_unlock(&fcoe_hostlist_lock); + if (lp == NULL) { + rc = NOTIFY_DONE; + goto out; + } + + new_status = lp->link_status; + switch (event) { + case NETDEV_DOWN: + case NETDEV_GOING_DOWN: + new_status &= ~FC_LINK_UP; + break; + case NETDEV_UP: + case NETDEV_CHANGE: + new_status &= ~FC_LINK_UP; + if (!fcoe_link_ok(lp)) + new_status |= FC_LINK_UP; + break; + case NETDEV_CHANGEMTU: + mfs = fc->real_dev->mtu - + (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); + if (fc->user_mfs && fc->user_mfs < mfs) + mfs = fc->user_mfs; + if (mfs >= FC_MIN_MAX_FRAME) + fc_set_mfs(lp, mfs); + new_status &= ~FC_LINK_UP; + if (!fcoe_link_ok(lp)) + new_status |= FC_LINK_UP; + break; + case NETDEV_REGISTER: + break; + default: + FC_DBG("unknown event %ld call", event); + } + if (lp->link_status != new_status) { + if ((new_status & FC_LINK_UP) == FC_LINK_UP) + fc_linkup(lp); + else { + stats = lp->dev_stats[smp_processor_id()]; + stats->LinkFailureCount++; + fc_linkdown(lp); + fcoe_clean_pending_queue(lp); + } + } +out: + return rc; +} + +static void trimstr(char *str, int len) +{ + char *cp = str + len; + while (--cp >= str && *cp == '\n') + *cp = '\0'; +} + +static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr, + const char *buffer, size_t size) +{ + struct net_device *netdev; + char ifname[IFNAMSIZ + 2]; + + strlcpy(ifname, buffer, IFNAMSIZ); + trimstr(ifname, strlen(ifname)); + netdev = dev_get_by_name(&init_net, ifname); + if (netdev) { + fcoe_destroy_interface(netdev); + dev_put(netdev); + } + return size; +} + +static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr, + const char *buffer, size_t size) +{ + struct net_device *netdev; + char ifname[IFNAMSIZ + 2]; + + strlcpy(ifname, buffer, IFNAMSIZ); + trimstr(ifname, strlen(ifname)); + netdev = dev_get_by_name(&init_net, ifname); + if (netdev) { + fcoe_create_interface(netdev); + dev_put(netdev); + } + return size; +} + +static const struct kobj_attribute fcoe_destroyattr = \ + __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy); +static const struct kobj_attribute fcoe_createattr = \ + __ATTR(create, S_IWUSR, NULL, fcoe_create); + +/* + * Initialization routine + * 1. Will create fc transport software structure + * 2. 
initialize the link list of port information structure + */ +static int __init fcoe_init(void) +{ + int rc = 0; + int cpu; + struct fcoe_percpu_s *p; + + rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj, + &fcoe_destroyattr.attr); + if (!rc) + rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj, + &fcoe_createattr.attr); + + if (rc) + return rc; + + rwlock_init(&fcoe_hostlist_lock); + +#ifdef CONFIG_HOTPLUG_CPU + register_cpu_notifier(&fcoe_cpu_notifier); +#endif /* CONFIG_HOTPLUG_CPU */ + + /* + * initialize per CPU interrupt thread + */ + for_each_online_cpu(cpu) { + p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL); + if (p) { + p->thread = kthread_create(fcoe_percpu_receive_thread, + (void *)p, + "fcoethread/%d", cpu); + + /* + * if there is no error then bind the thread to the cpu + * initialize the semaphore and skb queue head + */ + if (likely(!IS_ERR(p->thread))) { + p->cpu = cpu; + fcoe_percpu[cpu] = p; + skb_queue_head_init(&p->fcoe_rx_list); + kthread_bind(p->thread, cpu); + wake_up_process(p->thread); + } else { + fcoe_percpu[cpu] = NULL; + kfree(p); + + } + } + } + if (rc < 0) { + FC_DBG("failed to initialize proc intrerface\n"); + rc = -ENODEV; + goto out_chrdev; + } + + /* + * setup link change notification + */ + fcoe_dev_setup(); + + init_timer(&fcoe_timer); + fcoe_timer.data = 0; + fcoe_timer.function = fcoe_watchdog; + fcoe_timer.expires = (jiffies + (10 * HZ)); + add_timer(&fcoe_timer); + + if (fcoe_sw_init() != 0) { + FC_DBG("fail to attach fc transport"); + return -1; + } + + return 0; + +out_chrdev: +#ifdef CONFIG_HOTPLUG_CPU + unregister_cpu_notifier(&fcoe_cpu_notifier); +#endif /* CONFIG_HOTPLUG_CPU */ + return rc; +} +module_init(fcoe_init); + +static void __exit fcoe_exit(void) +{ + u32 idx; + struct fcoe_softc *fc, *tmp; + struct fcoe_percpu_s *p; + struct sk_buff *skb; + + /* + * Stop all call back interfaces + */ +#ifdef CONFIG_HOTPLUG_CPU + unregister_cpu_notifier(&fcoe_cpu_notifier); +#endif /* CONFIG_HOTPLUG_CPU */ + fcoe_dev_cleanup(); + + /* + * stop timer + */ + del_timer_sync(&fcoe_timer); + + /* + * assuming that at this time there will be no + * ioctl in prograss, therefore we do not need to lock the + * list. + */ + list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) + fcoe_destroy_interface(fc->real_dev); + + for (idx = 0; idx < NR_CPUS; idx++) { + if (fcoe_percpu[idx]) { + kthread_stop(fcoe_percpu[idx]->thread); + p = fcoe_percpu[idx]; + spin_lock_bh(&p->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + if (fcoe_percpu[idx]->crc_eof_page) + put_page(fcoe_percpu[idx]->crc_eof_page); + kfree(fcoe_percpu[idx]); + } + } + + fcoe_sw_exit(); +} +module_exit(fcoe_exit); diff --git a/drivers/scsi/fcoe/fcoe_def.h b/drivers/scsi/fcoe/fcoe_def.h index 12bf69c..b00e14b 100644 --- a/drivers/scsi/fcoe/fcoe_def.h +++ b/drivers/scsi/fcoe/fcoe_def.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2007 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 
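For completeness, the create and destroy attributes that fcoe_init() registers on the module kobject are driven from user space by writing an interface name into them. A minimal sketch is below; the /sys/module/fcoe/create path is inferred from the use of THIS_MODULE->mkobj.kobj, and the interface name is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Ask the fcoe module to create or destroy an instance on one interface. */
static int fcoe_instance_ctl(const char *attr, const char *ifname)
{
        char path[128];
        ssize_t rc;
        int fd;

        /* Path assumed from sysfs_create_file() on the module kobject. */
        snprintf(path, sizeof(path), "/sys/module/fcoe/%s", attr);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        rc = write(fd, ifname, strlen(ifname));
        close(fd);
        return rc < 0 ? -1 : 0;
}

int main(void)
{
        /* "eth3" is only an example interface name. */
        return fcoe_instance_ctl("create", "eth3");
}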
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,16 +48,10 @@ struct fcoe_percpu_s { int crc_eof_offset; }; -struct fcoe_info { - struct timer_list timer; - /* - * fcoe host list is protected by the following read/write lock - */ - rwlock_t fcoe_hostlist_lock; - struct list_head fcoe_hostlist; - - struct fcoe_percpu_s *fcoe_percpu[NR_CPUS]; -}; +extern struct timer_list fcoe_timer; +extern rwlock_t fcoe_hostlist_lock; +extern struct list_head fcoe_hostlist; +extern struct fcoe_percpu_s *fcoe_percpu[]; struct fcoe_softc { struct list_head list; @@ -79,22 +73,20 @@ struct fcoe_softc { u8 address_mode; }; -extern int debug_fcoe; -extern struct fcoe_percpu_s *fcoe_percpu[]; -extern struct scsi_transport_template *fcoe_transport_template; int fcoe_percpu_receive_thread(void *arg); /* * HBA transport ops prototypes */ -extern struct fcoe_info fcoei; - void fcoe_clean_pending_queue(struct fc_lport *fd); void fcoe_watchdog(ulong vp); -int fcoe_destroy_interface(const char *ifname); -int fcoe_create_interface(const char *ifname); +int fcoe_destroy_interface(struct net_device *); +int fcoe_create_interface(struct net_device *); int fcoe_xmit(struct fc_lport *, struct fc_frame *); int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); int fcoe_link_ok(struct fc_lport *); + +int __init fcoe_sw_init(void); +void __exit fcoe_sw_exit(void); #endif /* _FCOE_DEF_H_ */ diff --git a/drivers/scsi/fcoe/fcoe_dev.c b/drivers/scsi/fcoe/fcoe_dev.c deleted file mode 100644 index d5a354f..0000000 --- a/drivers/scsi/fcoe/fcoe_dev.c +++ /dev/null @@ -1,633 +0,0 @@ -/* - * Copyright(c) 2007 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
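The fcoe_timer and fcoe_watchdog() pair exported here follow the usual self-rearming kernel timer pattern: armed once in fcoe_init(), re-armed from the handler itself, and stopped with del_timer_sync() in fcoe_exit(). A stripped-down sketch of that pattern, with a placeholder handler instead of the real queue-checking logic, looks like this (the driver re-arms by resetting .expires and calling add_timer() again; mod_timer() below is the equivalent shortcut):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;

static void example_watchdog(unsigned long data)
{
        /* ... check the pending queues here, as fcoe_watchdog() does ... */

        /* Re-arm for one second from now. */
        mod_timer(&example_timer, jiffies + HZ);
}

static void example_timer_start(void)
{
        init_timer(&example_timer);
        example_timer.function = example_watchdog;
        example_timer.data = 0;
        example_timer.expires = jiffies + 10 * HZ;
        add_timer(&example_timer);
}

static void example_timer_stop(void)
{
        /* Make sure the handler is not still running before unload. */
        del_timer_sync(&example_timer);
}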
- * - * Maintained at www.Open-FCoE.org - */ - -/* - * FCOE protocol file - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include "fcoe_def.h" - -#define FCOE_MAX_QUEUE_DEPTH 256 - -/* destination address mode */ -#define FCOE_GW_ADDR_MODE 0x00 -#define FCOE_FCOUI_ADDR_MODE 0x01 - -/* Function Prototyes */ -static int fcoe_check_wait_queue(struct fc_lport *); -static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *); -static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *); -static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *); - -/* - * this is the fcoe receive function - * called by NET_RX_SOFTIRQ - * this function will receive the packet and - * build fc frame and pass it up - */ -int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, struct net_device *olddev) -{ - struct fc_lport *lp; - struct fcoe_rcv_info *fr; - struct fcoe_softc *fc; - struct fcoe_dev_stats *stats; - u8 *data; - struct fc_frame_header *fh; - unsigned short oxid; - int cpu_idx; - struct fcoe_percpu_s *fps; - struct fcoe_info *fci = &fcoei; - - fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); - lp = fc->lp; - if (unlikely(lp == NULL)) { - FC_DBG("cannot find hba structure"); - goto err2; - } - - if (unlikely(debug_fcoe)) { - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p " - "end:%p sum:%d dev:%s", skb->len, skb->data_len, - skb->head, skb->data, skb_tail_pointer(skb), - skb_end_pointer(skb), skb->csum, - skb->dev ? skb->dev->name : ""); - - } - - /* check for FCOE packet type */ - if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { - FC_DBG("wrong FC type frame"); - goto err; - } - data = skb->data; - data += sizeof(struct fcoe_hdr); - fh = (struct fc_frame_header *)data; - oxid = ntohs(fh->fh_ox_id); - - fr = fcoe_dev_from_skb(skb); - fr->fr_dev = lp; - fr->ptype = ptype; - cpu_idx = 0; -#ifdef CONFIG_SMP - /* - * The exchange ID are ANDed with num of online CPUs, - * so that will have the least lock contention in - * handling the exchange. if there is no thread - * for a given idx then use first online cpu. 
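To make the receive-side distribution described above concrete: frames are steered to a per-CPU thread keyed off the FC exchange ID (OX_ID), so all frames of one exchange land on the same thread and lock contention stays low. A small user-space illustration of that steering idea follows; it uses a simple modulo rather than the bitmask the driver applies, purely to show the grouping behaviour.

#include <stdint.h>
#include <stdio.h>

/* Map an FC exchange ID to one of n receive threads. */
static unsigned int rx_thread_for_oxid(uint16_t oxid, unsigned int nthreads)
{
        return oxid % nthreads;         /* the driver masks instead of dividing */
}

int main(void)
{
        uint16_t oxids[] = { 0x0004, 0x0005, 0x0104, 0x07ef };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("OX_ID 0x%04x -> thread %u\n",
                       (unsigned int)oxids[i], rx_thread_for_oxid(oxids[i], 4));
        return 0;
}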
- */ - cpu_idx = oxid & (num_online_cpus() >> 1); - if (fci->fcoe_percpu[cpu_idx] == NULL) - cpu_idx = first_cpu(cpu_online_map); -#endif - fps = fci->fcoe_percpu[cpu_idx]; - - spin_lock_bh(&fps->fcoe_rx_list.lock); - __skb_queue_tail(&fps->fcoe_rx_list, skb); - if (fps->fcoe_rx_list.qlen == 1) - wake_up_process(fps->thread); - - spin_unlock_bh(&fps->fcoe_rx_list.lock); - - return 0; -err: -#ifdef CONFIG_SMP - stats = lp->dev_stats[smp_processor_id()]; -#else - stats = lp->dev_stats[0]; -#endif - stats->ErrorFrames++; - -err2: - kfree_skb(skb); - return -1; -} - -static inline int fcoe_start_io(struct sk_buff *skb) -{ - int rc; - - skb_get(skb); - rc = dev_queue_xmit(skb); - if (rc != 0) - return rc; - kfree_skb(skb); - return 0; -} - -static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) -{ - struct fcoe_info *fci = &fcoei; - struct fcoe_percpu_s *fps; - struct page *page; - int cpu_idx; - - cpu_idx = get_cpu(); - fps = fci->fcoe_percpu[cpu_idx]; - page = fps->crc_eof_page; - if (!page) { - page = alloc_page(GFP_ATOMIC); - if (!page) { - put_cpu(); - return -ENOMEM; - } - fps->crc_eof_page = page; - WARN_ON(fps->crc_eof_offset != 0); - } - - get_page(page); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, - fps->crc_eof_offset, tlen); - skb->len += tlen; - skb->data_len += tlen; - skb->truesize += tlen; - fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); - - if (fps->crc_eof_offset >= PAGE_SIZE) { - fps->crc_eof_page = NULL; - fps->crc_eof_offset = 0; - put_page(page); - } - put_cpu(); - return 0; -} - -/* - * this is the frame xmit routine - */ -int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) -{ - int indx; - int wlen, rc = 0; - u32 crc; - struct ethhdr *eh; - struct fcoe_crc_eof *cp; - struct sk_buff *skb; - struct fcoe_dev_stats *stats; - struct fc_frame_header *fh; - unsigned int hlen; /* header length implies the version */ - unsigned int tlen; /* trailer length */ - int flogi_in_progress = 0; - struct fcoe_softc *fc; - void *data; - u8 sof, eof; - struct fcoe_hdr *hp; - - WARN_ON((fr_len(fp) % sizeof(u32)) != 0); - - fc = (struct fcoe_softc *)lp->drv_priv; - /* - * if it is a flogi then we need to learn gw-addr - * and my own fcid - */ - fh = fc_frame_header_get(fp); - if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { - if (fc_frame_payload_op(fp) == ELS_FLOGI) { - fc->flogi_oxid = ntohs(fh->fh_ox_id); - fc->address_mode = FCOE_FCOUI_ADDR_MODE; - fc->flogi_progress = 1; - flogi_in_progress = 1; - } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) { - /* - * Here we must've gotten an SID by accepting an FLOGI - * from a point-to-point connection. Switch to using - * the source mac based on the SID. The destination - * MAC in this case would have been set by receving the - * FLOGI. - */ - fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id); - fc->flogi_progress = 0; - } - } - - skb = fp_skb(fp); - sof = fr_sof(fp); - eof = fr_eof(fp); - - crc = ~0; - crc = crc32(crc, skb->data, skb_headlen(skb)); - - for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[indx]; - unsigned long off = frag->page_offset; - unsigned long len = frag->size; - - while (len > 0) { - unsigned long clen; - - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), - KM_SKB_DATA_SOFTIRQ); - crc = crc32(crc, data + (off & ~PAGE_MASK), - clen); - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); - off += clen; - len -= clen; - } - } - - /* - * Get header and trailer lengths. 
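The transmit path seeds the CRC with ~0, runs the kernel's crc32() over the frame head and page fragments, and stores the bitwise complement little-endian in the fcoe_crc_eof trailer together with the EOF byte. A standalone user-space sketch of that convention is shown here, with a local bitwise CRC-32 equivalent to the kernel's crc32_le; the frame contents and EOF value are placeholders.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (poly 0xEDB88320), same convention as the kernel crc32(). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

int main(void)
{
        uint8_t frame[64] = "example FC frame payload";
        uint8_t trailer[8] = { 0 };      /* 4-byte CRC, EOF, 3 reserved bytes */
        uint32_t crc = crc32_le(~0u, frame, sizeof(frame));
        uint32_t stored = ~crc;          /* complement, as fcoe_xmit() does */
        int i;

        for (i = 0; i < 4; i++)          /* little-endian, like cpu_to_le32() */
                trailer[i] = (stored >> (8 * i)) & 0xff;
        trailer[4] = 0x41;               /* EOF code is only a placeholder */

        printf("FCoE trailer CRC: 0x%08x\n", stored);
        return 0;
}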
- * This is temporary code until we get rid of the old protocol. - * Both versions have essentially the same trailer layout but T11 - * has padding afterwards. - */ - hlen = sizeof(struct fcoe_hdr); - tlen = sizeof(struct fcoe_crc_eof); - - /* - * copy fc crc and eof to the skb buff - * Use utility buffer in the fc_frame part of the sk_buff for the - * trailer. - * We don't do a get_page for this frag, since that page may not be - * managed that way. So that skb_free() doesn't do that either, we - * setup the destructor to remove this frag. - */ - if (skb_is_nonlinear(skb)) { - skb_frag_t *frag; - if (fcoe_get_paged_crc_eof(skb, tlen)) { - kfree(skb); - return -ENOMEM; - } - frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) - + frag->page_offset; - } else { - cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); - } - - cp->fcoe_eof = eof; - cp->fcoe_crc32 = cpu_to_le32(~crc); - if (tlen == sizeof(*cp)) - memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd)); - wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; - - if (skb_is_nonlinear(skb)) { - kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); - cp = NULL; - } - - /* - * Fill in the control structures - */ - skb->ip_summed = CHECKSUM_NONE; - eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr)); - if (fc->address_mode == FCOE_FCOUI_ADDR_MODE) - fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); - else - /* insert GW address */ - memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN); - - if (unlikely(flogi_in_progress)) - memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN); - else - memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN); - - eh->h_proto = htons(ETH_P_FCOE); - skb->protocol = htons(ETH_P_802_3); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - - hp = (struct fcoe_hdr *)(eh + 1); - memset(hp, 0, sizeof(*hp)); - if (FC_FCOE_VER) - FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); - hp->fcoe_sof = sof; - - stats = lp->dev_stats[smp_processor_id()]; - stats->TxFrames++; - stats->TxWords += wlen; - skb->dev = fc->real_dev; - - fr_dev(fp) = lp; - if (fc->fcoe_pending_queue.qlen) - rc = fcoe_check_wait_queue(lp); - - if (rc == 0) - rc = fcoe_start_io(skb); - - if (rc) { - fcoe_insert_wait_queue(lp, skb); - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - fc_pause(lp); - } - - return 0; -} - -int fcoe_percpu_receive_thread(void *arg) -{ - struct fcoe_percpu_s *p = arg; - u32 fr_len; - unsigned int hlen; - unsigned int tlen; - struct fc_lport *lp; - struct fcoe_rcv_info *fr; - struct fcoe_dev_stats *stats; - struct fc_frame_header *fh; - struct sk_buff *skb; - struct fcoe_crc_eof *cp; - enum fc_sof sof; - struct fc_frame *fp; - u8 *mac = NULL; - struct fcoe_softc *fc; - struct fcoe_hdr *hp; - - set_user_nice(current, 19); - - while (!kthread_should_stop()) { - - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_bh(&p->fcoe_rx_list.lock); - schedule(); - set_current_state(TASK_RUNNING); - if (kthread_should_stop()) - return 0; - spin_lock_bh(&p->fcoe_rx_list.lock); - } - spin_unlock_bh(&p->fcoe_rx_list.lock); - fr = fcoe_dev_from_skb(skb); - lp = fr->fr_dev; - if (unlikely(lp == NULL)) { - FC_DBG("invalid HBA Structure"); - kfree_skb(skb); - continue; - } - - stats = lp->dev_stats[smp_processor_id()]; - - if (unlikely(debug_fcoe)) { - FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " - "tail:%p end:%p sum:%d dev:%s", - skb->len, skb->data_len, - skb->head, skb->data, 
skb_tail_pointer(skb), - skb_end_pointer(skb), skb->csum, - skb->dev ? skb->dev->name : ""); - } - - /* - * Save source MAC address before discarding header. - */ - fc = lp->drv_priv; - if (unlikely(fc->flogi_progress)) - mac = eth_hdr(skb)->h_source; - - if (skb_is_nonlinear(skb)) - skb_linearize(skb); /* not ideal */ - - /* - * Check the header and pull it off. - */ - hlen = sizeof(struct fcoe_hdr); - - hp = (struct fcoe_hdr *)skb->data; - if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { - if (stats->ErrorFrames < 5) - FC_DBG("unknown FCoE version %x", - FC_FCOE_DECAPS_VER(hp)); - stats->ErrorFrames++; - kfree_skb(skb); - continue; - } - sof = hp->fcoe_sof; - skb_pull(skb, sizeof(*hp)); - fr_len = skb->len - sizeof(struct fcoe_crc_eof); - skb_trim(skb, fr_len); - tlen = sizeof(struct fcoe_crc_eof); - - if (unlikely(fr_len > skb->len)) { - if (stats->ErrorFrames < 5) - FC_DBG("length error fr_len 0x%x skb->len 0x%x", - fr_len, skb->len); - stats->ErrorFrames++; - kfree_skb(skb); - continue; - } - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - - fp = (struct fc_frame *) skb; - fc_frame_init(fp); - cp = (struct fcoe_crc_eof *)(skb->data + fr_len); - fr_eof(fp) = cp->fcoe_eof; - fr_sof(fp) = sof; - fr_dev(fp) = lp; - - /* - * Check the CRC here, unless it's solicited data for SCSI. - * In that case, the SCSI layer can check it during the copy, - * and it'll be more cache-efficient. - */ - fh = fc_frame_header_get(fp); - if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && - fh->fh_type == FC_TYPE_FCP) { - fr_flags(fp) |= FCPHF_CRC_UNCHECKED; - fc_exch_recv(lp, lp->emp, fp); - } else if (le32_to_cpu(cp->fcoe_crc32) == - ~crc32(~0, skb->data, fr_len)) { - if (unlikely(fc->flogi_progress)) - fcoe_recv_flogi(fc, fp, mac); - fc_exch_recv(lp, lp->emp, fp); - } else { - if (debug_fcoe || stats->InvalidCRCCount < 5) { - printk(KERN_WARNING \ - "fcoe: dropping frame with CRC error"); - } - stats->InvalidCRCCount++; - stats->ErrorFrames++; - fc_frame_free(fp); - } - } - return 0; -} - -/* - * Snoop potential response to FLOGI or even incoming FLOGI. - */ -static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) -{ - struct fc_frame_header *fh; - u8 op; - - fh = fc_frame_header_get(fp); - if (fh->fh_type != FC_TYPE_ELS) - return; - op = fc_frame_payload_op(fp); - if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && - fc->flogi_oxid == ntohs(fh->fh_ox_id)) { - /* - * FLOGI accepted. - * If the src mac addr is FC_OUI-based, then we mark the - * address_mode flag to use FC_OUI-based Ethernet DA. - * Otherwise we use the FCoE gateway addr - */ - if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) { - fc->address_mode = FCOE_FCOUI_ADDR_MODE; - } else { - memcpy(fc->dest_addr, sa, ETH_ALEN); - fc->address_mode = FCOE_GW_ADDR_MODE; - } - - /* - * Remove any previously-set unicast MAC filter. - * Add secondary FCoE MAC address filter for our OUI. - */ - rtnl_lock(); - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) - dev_unicast_delete(fc->real_dev, fc->data_src_addr, - ETH_ALEN); - fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id); - dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN); - rtnl_unlock(); - - fc->flogi_progress = 0; - } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { - /* - * Save source MAC for point-to-point responses. 
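The FLOGI snooping above relies on fc_fcoe_set_mac(), which derives a local Ethernet address from the 24-bit FC ID assigned in the FLOGI accept. A quick user-space illustration of that mapping is below; the 0E:FC:00 prefix is the conventional FC-MAP/OUI for this scheme and is assumed here rather than taken from fc_fcoe.h, and the FC ID is an example value.

#include <stdint.h>
#include <stdio.h>

/* Build an FCoE MAC address from a 24-bit FC ID (FC-MAP prefix + FC ID). */
static void fcoe_mac_from_fcid(uint8_t mac[6], uint32_t fcid)
{
        mac[0] = 0x0e;                   /* assumed FC-MAP prefix 0E:FC:00 */
        mac[1] = 0xfc;
        mac[2] = 0x00;
        mac[3] = (fcid >> 16) & 0xff;
        mac[4] = (fcid >> 8) & 0xff;
        mac[5] = fcid & 0xff;
}

int main(void)
{
        uint8_t mac[6];

        fcoe_mac_from_fcid(mac, 0x010203);      /* example FC ID */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}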
- */ - memcpy(fc->dest_addr, sa, ETH_ALEN); - fc->address_mode = FCOE_GW_ADDR_MODE; - } -} - -void fcoe_watchdog(ulong vp) -{ - struct fc_lport *lp; - struct fcoe_softc *fc; - struct fcoe_info *fci = &fcoei; - int paused = 0; - - read_lock(&fci->fcoe_hostlist_lock); - list_for_each_entry(fc, &fci->fcoe_hostlist, list) { - lp = fc->lp; - if (lp) { - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - paused = 1; - if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) { - if (paused) - fc_unpause(lp); - } - } - } - read_unlock(&fci->fcoe_hostlist_lock); - - fci->timer.expires = jiffies + (1 * HZ); - add_timer(&fci->timer); -} - -/* - * the wait_queue is used when the skb transmit fails. skb will go - * in the wait_queue which will be emptied by the time function OR - * by the next skb transmit. - * - */ - -/* - * Function name : fcoe_check_wait_queue() - * - * Return Values : 0 or error - * - * Description : empties the wait_queue - * dequeue the head of the wait_queue queue and - * calls fcoe_start_io() for each packet - * if all skb have been transmitted, return 0 - * if a error occurs, then restore wait_queue and try again - * later - * - */ - -static int fcoe_check_wait_queue(struct fc_lport *lp) -{ - int rc, unpause = 0; - int paused = 0; - struct sk_buff *skb; - struct fcoe_softc *fc; - - fc = (struct fcoe_softc *)lp->drv_priv; - spin_lock_bh(&fc->fcoe_pending_queue.lock); - - /* - * is this interface paused? - */ - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - paused = 1; - if (fc->fcoe_pending_queue.qlen) { - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { - spin_unlock_bh(&fc->fcoe_pending_queue.lock); - rc = fcoe_start_io(skb); - if (rc) { - fcoe_insert_wait_queue_head(lp, skb); - return rc; - } - spin_lock_bh(&fc->fcoe_pending_queue.lock); - } - if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH) - unpause = 1; - } - spin_unlock_bh(&fc->fcoe_pending_queue.lock); - if ((unpause) && (paused)) - fc_unpause(lp); - return fc->fcoe_pending_queue.qlen; -} - -static void fcoe_insert_wait_queue_head(struct fc_lport *lp, - struct sk_buff *skb) -{ - struct fcoe_softc *fc; - - fc = (struct fcoe_softc *)lp->drv_priv; - spin_lock_bh(&fc->fcoe_pending_queue.lock); - __skb_queue_head(&fc->fcoe_pending_queue, skb); - spin_unlock_bh(&fc->fcoe_pending_queue.lock); -} - -static void fcoe_insert_wait_queue(struct fc_lport *lp, - struct sk_buff *skb) -{ - struct fcoe_softc *fc; - - fc = (struct fcoe_softc *)lp->drv_priv; - spin_lock_bh(&fc->fcoe_pending_queue.lock); - __skb_queue_tail(&fc->fcoe_pending_queue, skb); - spin_unlock_bh(&fc->fcoe_pending_queue.lock); -} diff --git a/drivers/scsi/fcoe/fcoe_if.c b/drivers/scsi/fcoe/fcoe_if.c deleted file mode 100644 index 73b83ce..0000000 --- a/drivers/scsi/fcoe/fcoe_if.c +++ /dev/null @@ -1,496 +0,0 @@ -/* - * Copyright(c) 2007 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * Maintained at www.Open-FCoE.org - */ - -/* - * FCOE protocol file - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include - -#include -#include "fcoe_def.h" - -#define FCOE_VERSION "0.1" - -#define FCOE_MAX_LUN 255 -#define FCOE_MAX_FCP_TARGET 256 - -#define FCOE_MIN_XID 0x0004 -#define FCOE_MAX_XID 0x07ef - -int debug_fcoe; - -struct fcoe_info fcoei = { - .fcoe_hostlist = LIST_HEAD_INIT(fcoei.fcoe_hostlist), -}; - -static struct fcoe_softc *fcoe_find_fc_lport(const char *name) -{ - struct fcoe_softc *fc; - struct fc_lport *lp; - struct fcoe_info *fci = &fcoei; - - read_lock(&fci->fcoe_hostlist_lock); - list_for_each_entry(fc, &fci->fcoe_hostlist, list) { - lp = fc->lp; - if (!strncmp(name, lp->ifname, IFNAMSIZ)) { - read_unlock(&fci->fcoe_hostlist_lock); - return fc; - } - } - read_unlock(&fci->fcoe_hostlist_lock); - return NULL; -} - -/* - * Convert 48-bit IEEE MAC address to 64-bit FC WWN. - */ -static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], - unsigned int scheme, unsigned int port) -{ - u64 wwn; - u64 host_mac; - - /* The MAC is in NO, so flip only the low 48 bits */ - host_mac = ((u64) mac[0] << 40) | - ((u64) mac[1] << 32) | - ((u64) mac[2] << 24) | - ((u64) mac[3] << 16) | - ((u64) mac[4] << 8) | - (u64) mac[5]; - - WARN_ON(host_mac >= (1ULL << 48)); - wwn = host_mac | ((u64) scheme << 60); - switch (scheme) { - case 1: - WARN_ON(port != 0); - break; - case 2: - WARN_ON(port >= 0xfff); - wwn |= (u64) port << 48; - break; - default: - WARN_ON(1); - break; - } - - return wwn; -} - -static struct scsi_host_template fcoe_driver_template = { - .module = THIS_MODULE, - .name = "FCoE Driver", - .proc_name = FCOE_DRIVER_NAME, - .queuecommand = fc_queuecommand, - .eh_abort_handler = fc_eh_abort, - .eh_device_reset_handler = fc_eh_device_reset, - .eh_host_reset_handler = fc_eh_host_reset, - .slave_alloc = fc_slave_alloc, - .change_queue_depth = fc_change_queue_depth, - .change_queue_type = fc_change_queue_type, - .this_id = -1, - .cmd_per_lun = 32, - .can_queue = FC_MAX_OUTSTANDING_COMMANDS, - .use_clustering = ENABLE_CLUSTERING, - .sg_tablesize = 4, - .max_sectors = 0xffff, -}; - -int fcoe_destroy_interface(const char *ifname) -{ - int cpu, idx; - struct fcoe_dev_stats *p; - struct fcoe_percpu_s *pp; - struct fcoe_softc *fc; - struct fcoe_rcv_info *fr; - struct fcoe_info *fci = &fcoei; - struct sk_buff_head *list; - struct sk_buff *skb, *next; - struct sk_buff *head; - struct fc_lport *lp; - u8 flogi_maddr[ETH_ALEN]; - - fc = fcoe_find_fc_lport(ifname); - if (!fc) - return -ENODEV; - - lp = fc->lp; - - /* Remove the instance from fcoe's list */ - write_lock_bh(&fci->fcoe_hostlist_lock); - list_del(&fc->list); - write_unlock_bh(&fci->fcoe_hostlist_lock); - - /* Don't listen for Ethernet packets anymore */ - dev_remove_pack(&fc->fcoe_packet_type); - - /* Detach from the scsi-ml */ - fc_remove_host(lp->host); - scsi_remove_host(lp->host); - - /* Cleanup the fc_lport */ - fc_lport_destroy(lp); - fc_fcp_destroy(lp); - if (lp->emp) - fc_exch_mgr_free(lp->emp); - - /* Delete secondary MAC addresses */ - rtnl_lock(); - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN); - if 
(compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) - dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN); - rtnl_unlock(); - - /* Free the per-CPU revieve threads */ - for (idx = 0; idx < NR_CPUS; idx++) { - if (fci->fcoe_percpu[idx]) { - pp = fci->fcoe_percpu[idx]; - spin_lock_bh(&pp->fcoe_rx_list.lock); - list = &pp->fcoe_rx_list; - head = list->next; - for (skb = head; skb != (struct sk_buff *)list; - skb = next) { - next = skb->next; - fr = fcoe_dev_from_skb(skb); - if (fr->fr_dev == fc->lp) { - __skb_unlink(skb, list); - kfree_skb(skb); - } - } - spin_unlock_bh(&pp->fcoe_rx_list.lock); - } - } - - /* Free existing skbs */ - fcoe_clean_pending_queue(lp); - - /* Free memory used by statistical counters */ - for_each_online_cpu(cpu) { - p = lp->dev_stats[cpu]; - if (p) { - lp->dev_stats[cpu] = NULL; - kfree(p); - } - } - - /* Release the net_device and Scsi_Host */ - dev_put(fc->real_dev); - scsi_host_put(lp->host); - return 0; -} - -/* - * Return zero if link is OK for use by FCoE. - * Any permanently-disqualifying conditions have been previously checked. - * This also updates the speed setting, which may change with link for 100/1000. - * - * This function should probably be checking for PAUSE support at some point - * in the future. Currently Per-priority-pause is not determinable using - * ethtool, so we shouldn't be restrictive until that problem is resolved. - */ -int fcoe_link_ok(struct fc_lport *lp) -{ - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv; - struct net_device *dev = fc->real_dev; - struct ethtool_cmd ecmd = { ETHTOOL_GSET }; - int rc = 0; - - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { - dev = fc->phys_dev; - if (dev->ethtool_ops->get_settings) { - dev->ethtool_ops->get_settings(dev, &ecmd); - lp->link_supported_speeds &= - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); - if (ecmd.supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full)) - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; - if (ecmd.supported & SUPPORTED_10000baseT_Full) - lp->link_supported_speeds |= - FC_PORTSPEED_10GBIT; - if (ecmd.speed == SPEED_1000) - lp->link_speed = FC_PORTSPEED_1GBIT; - if (ecmd.speed == SPEED_10000) - lp->link_speed = FC_PORTSPEED_10GBIT; - } - } else - rc = -1; - - return rc; -} - -static struct libfc_function_template fcoe_libfc_fcn_templ = { - .frame_send = fcoe_xmit, -}; - -static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost) -{ - int i = 0; - struct fcoe_dev_stats *p; - - lp->host = shost; - lp->drv_priv = (void *)(lp + 1); - - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, - FCOE_MIN_XID, FCOE_MAX_XID); - if (!lp->emp) - return -ENOMEM; - - lp->link_status = 0; - lp->max_retry_count = 3; - lp->e_d_tov = 2 * 1000; /* FC-FS default */ - lp->r_a_tov = 2 * 2 * 1000; - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | - FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); - - /* - * allocate per cpu stats block - */ - for_each_online_cpu(i) { - p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL); - if (p) - lp->dev_stats[i] = p; - } - - /* Finish fc_lport configuration */ - fc_lport_config(lp); - - return 0; -} - -static int net_config(struct fc_lport *lp) -{ - u32 mfs; - u64 wwnn, wwpn; - struct net_device *net_dev; - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv; - u8 flogi_maddr[ETH_ALEN]; - - /* Require support for get_pauseparam ethtool op. 
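fcoe_link_ok() above reads carrier state and speed through the netdevice's ethtool_ops in kernel space. The same information is reachable from user space with the ETHTOOL_GSET ioctl; a minimal sketch of that query follows, with the interface name as an example and error handling kept deliberately simple.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth3", IFNAMSIZ - 1);    /* example interface */
        ifr.ifr_data = (char *)&ecmd;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("speed: %u Mb/s, supported mask: 0x%x\n",
                       ecmd.speed, ecmd.supported);

        close(fd);
        return 0;
}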
*/ - net_dev = fc->real_dev; - if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN)) - net_dev = vlan_dev_real_dev(net_dev); - if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam) - return -EOPNOTSUPP; - - fc->phys_dev = net_dev; - - /* Do not support for bonding device */ - if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) || - (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) || - (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) { - return -EOPNOTSUPP; - } - - /* - * Determine max frame size based on underlying device and optional - * user-configured limit. If the MFS is too low, fcoe_link_ok() - * will return 0, so do this first. - */ - mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); - fc_set_mfs(lp, mfs); - - lp->link_status = ~FC_PAUSE & ~FC_LINK_UP; - if (!fcoe_link_ok(lp)) - lp->link_status |= FC_LINK_UP; - - if (fc->real_dev->features & NETIF_F_SG) - lp->capabilities = TRANS_C_SG; - - - skb_queue_head_init(&fc->fcoe_pending_queue); - - memcpy(lp->ifname, fc->real_dev->name, IFNAMSIZ); - - /* setup Source Mac Address */ - memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr, - fc->real_dev->addr_len); - - wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0); - fc_set_wwnn(lp, wwnn); - /* XXX - 3rd arg needs to be vlan id */ - wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0); - fc_set_wwpn(lp, wwpn); - - /* - * Add FCoE MAC address as second unicast MAC address - * or enter promiscuous mode if not capable of listening - * for multiple unicast MACs. - */ - rtnl_lock(); - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN); - rtnl_unlock(); - - /* - * setup the receive function from ethernet driver - * on the ethertype for the given device - */ - fc->fcoe_packet_type.func = fcoe_rcv; - fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); - fc->fcoe_packet_type.dev = fc->real_dev; - dev_add_pack(&fc->fcoe_packet_type); - - return 0; -} - -static void shost_config(struct fc_lport *lp) -{ - lp->host->max_lun = FCOE_MAX_LUN; - lp->host->max_id = FCOE_MAX_FCP_TARGET; - lp->host->max_channel = 0; - lp->host->transportt = fcoe_transport_template; -} - -static int libfc_config(struct fc_lport *lp) -{ - /* Set the function pointers set by the LLDD */ - memcpy(&lp->tt, &fcoe_libfc_fcn_templ, - sizeof(struct libfc_function_template)); - - if (fc_fcp_init(lp)) - return -ENOMEM; - fc_exch_init(lp); - fc_lport_init(lp); - fc_rport_init(lp); - fc_disc_init(lp); - - return 0; -} - -/* - * This function creates the fcoe interface - * create struct fcdev which is a shared structure between opefc - * and transport level protocol. 
- */ -int fcoe_create_interface(const char *ifname) -{ - struct fc_lport *lp = NULL; - struct fcoe_softc *fc; - struct net_device *net_dev; - struct Scsi_Host *shost; - struct fcoe_info *fci = &fcoei; - int rc = 0; - - net_dev = dev_get_by_name(&init_net, ifname); - if (net_dev == NULL) { - FC_DBG("could not get network device for %s", - ifname); - return -ENODEV; - } - - if (fcoe_find_fc_lport(net_dev->name) != NULL) { - rc = -EEXIST; - goto out_put_dev; - } - - shost = scsi_host_alloc(&fcoe_driver_template, - sizeof(struct fc_lport) + - sizeof(struct fcoe_softc)); - - if (!shost) { - FC_DBG("Could not allocate host structure\n"); - rc = -ENOMEM; - goto out_put_dev; - } - - lp = shost_priv(shost); - rc = lport_config(lp, shost); - if (rc) - goto out_host_put; - - /* Configure the fcoe_softc */ - fc = (struct fcoe_softc *)lp->drv_priv; - fc->lp = lp; - fc->real_dev = net_dev; - shost_config(lp); - - - /* Add the new host to the SCSI-ml */ - rc = scsi_add_host(lp->host, NULL); - if (rc) { - FC_DBG("error on scsi_add_host\n"); - goto out_lp_destroy; - } - - sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", - FCOE_DRIVER_NAME, FCOE_VERSION, - ifname); - - /* Configure netdev and networking properties of the lp */ - rc = net_config(lp); - if (rc) - goto out_lp_destroy; - - /* Initialize the library */ - rc = libfc_config(lp); - if (rc) - goto out_lp_destroy; - - write_lock_bh(&fci->fcoe_hostlist_lock); - list_add_tail(&fc->list, &fci->fcoe_hostlist); - write_unlock_bh(&fci->fcoe_hostlist_lock); - - lp->boot_time = jiffies; - - fc_fabric_login(lp); - - return rc; - -out_lp_destroy: - fc_exch_mgr_free(lp->emp); /* Free the EM */ -out_host_put: - scsi_host_put(lp->host); -out_put_dev: - dev_put(net_dev); - return rc; -} - -void fcoe_clean_pending_queue(struct fc_lport *lp) -{ - struct fcoe_softc *fc = lp->drv_priv; - struct sk_buff *skb; - - spin_lock_bh(&fc->fcoe_pending_queue.lock); - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { - spin_unlock_bh(&fc->fcoe_pending_queue.lock); - kfree_skb(skb); - spin_lock_bh(&fc->fcoe_pending_queue.lock); - } - spin_unlock_bh(&fc->fcoe_pending_queue.lock); -} diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c new file mode 100644 index 0000000..3cf5ad6 --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_sw.c @@ -0,0 +1,532 @@ +/* + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Maintained at www.Open-FCoE.org + */ + +/* + * FCOE protocol file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include "fcoe_def.h" + +#define FCOE_VERSION "0.1" + +#define FCOE_MAX_LUN 255 +#define FCOE_MAX_FCP_TARGET 256 + +#define FCOE_MAX_OUTSTANDING_COMMANDS 1024 + +#define FCOE_MIN_XID 0x0004 +#define FCOE_MAX_XID 0x07ef + +LIST_HEAD(fcoe_hostlist); +DEFINE_RWLOCK(fcoe_hostlist_lock); +DEFINE_TIMER(fcoe_timer, NULL, 0, 0); +struct fcoe_percpu_s *fcoe_percpu[NR_CPUS]; + +static struct scsi_transport_template *fcoe_transport_template; + +static int fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + fc_lport_reset(lport); + return 0; +} + +struct fc_function_template fcoe_transport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = fcoe_reset, + + .terminate_rport_io = fc_rport_terminate_io, +}; + +static struct fcoe_softc *fcoe_find_fc_lport(const struct net_device *netdev) +{ + struct fcoe_softc *fc; + + read_lock(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + if (fc->real_dev == netdev) { + read_unlock(&fcoe_hostlist_lock); + return fc; + } + } + read_unlock(&fcoe_hostlist_lock); + return NULL; +} + +/* + * Convert 48-bit IEEE MAC address to 64-bit FC WWN. 
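To make the MAC-to-WWN conversion described by this comment easy to check by hand, here is a standalone user-space version of the same bit layout: the 48-bit MAC occupies the low bits, the NAA scheme goes in the top nibble, and for scheme 2 a 12-bit port number sits above the MAC. The MAC value below is an example only.

#include <stdint.h>
#include <stdio.h>

/* Same layout as fcoe_wwn_from_mac(): scheme in bits 63:60,
 * optional 12-bit port in bits 59:48, MAC in bits 47:0. */
static uint64_t wwn_from_mac(const uint8_t mac[6], unsigned int scheme,
                             unsigned int port)
{
        uint64_t wwn = 0;
        int i;

        for (i = 0; i < 6; i++)
                wwn = (wwn << 8) | mac[i];
        wwn |= (uint64_t)scheme << 60;
        if (scheme == 2)
                wwn |= (uint64_t)(port & 0xfff) << 48;
        return wwn;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x01, 0x02, 0x03 };

        printf("WWNN: 0x%016llx\n",
               (unsigned long long)wwn_from_mac(mac, 1, 0));
        printf("WWPN: 0x%016llx\n",
               (unsigned long long)wwn_from_mac(mac, 2, 0));
        return 0;
}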
+ */ +static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], + unsigned int scheme, unsigned int port) +{ + u64 wwn; + u64 host_mac; + + /* The MAC is in NO, so flip only the low 48 bits */ + host_mac = ((u64) mac[0] << 40) | + ((u64) mac[1] << 32) | + ((u64) mac[2] << 24) | + ((u64) mac[3] << 16) | + ((u64) mac[4] << 8) | + (u64) mac[5]; + + WARN_ON(host_mac >= (1ULL << 48)); + wwn = host_mac | ((u64) scheme << 60); + switch (scheme) { + case 1: + WARN_ON(port != 0); + break; + case 2: + WARN_ON(port >= 0xfff); + wwn |= (u64) port << 48; + break; + default: + WARN_ON(1); + break; + } + + return wwn; +} + +static struct scsi_host_template fcoe_driver_template = { + .module = THIS_MODULE, + .name = "FCoE Driver", + .proc_name = FCOE_DRIVER_NAME, + .queuecommand = fc_queuecommand, + .eh_abort_handler = fc_eh_abort, + .eh_device_reset_handler = fc_eh_device_reset, + .eh_host_reset_handler = fc_eh_host_reset, + .slave_alloc = fc_slave_alloc, + .change_queue_depth = fc_change_queue_depth, + .change_queue_type = fc_change_queue_type, + .this_id = -1, + .cmd_per_lun = 32, + .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, + .use_clustering = ENABLE_CLUSTERING, + .sg_tablesize = 4, + .max_sectors = 0xffff, +}; + +int fcoe_destroy_interface(struct net_device *netdev) +{ + int cpu, idx; + struct fcoe_dev_stats *p; + struct fcoe_percpu_s *pp; + struct fcoe_softc *fc; + struct fcoe_rcv_info *fr; + struct sk_buff_head *list; + struct sk_buff *skb, *next; + struct sk_buff *head; + struct fc_lport *lp; + u8 flogi_maddr[ETH_ALEN]; + + fc = fcoe_find_fc_lport(netdev); + if (!fc) + return -ENODEV; + + lp = fc->lp; + + /* Remove the instance from fcoe's list */ + write_lock_bh(&fcoe_hostlist_lock); + list_del(&fc->list); + write_unlock_bh(&fcoe_hostlist_lock); + + /* Don't listen for Ethernet packets anymore */ + dev_remove_pack(&fc->fcoe_packet_type); + + /* Detach from the scsi-ml */ + fc_remove_host(lp->host); + scsi_remove_host(lp->host); + + /* Cleanup the fc_lport */ + fc_lport_destroy(lp); + fc_fcp_destroy(lp); + if (lp->emp) + fc_exch_mgr_free(lp->emp); + + /* Delete secondary MAC addresses */ + rtnl_lock(); + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); + dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN); + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) + dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN); + rtnl_unlock(); + + /* Free the per-CPU revieve threads */ + for (idx = 0; idx < NR_CPUS; idx++) { + if (fcoe_percpu[idx]) { + pp = fcoe_percpu[idx]; + spin_lock_bh(&pp->fcoe_rx_list.lock); + list = &pp->fcoe_rx_list; + head = list->next; + for (skb = head; skb != (struct sk_buff *)list; + skb = next) { + next = skb->next; + fr = fcoe_dev_from_skb(skb); + if (fr->fr_dev == fc->lp) { + __skb_unlink(skb, list); + kfree_skb(skb); + } + } + spin_unlock_bh(&pp->fcoe_rx_list.lock); + } + } + + /* Free existing skbs */ + fcoe_clean_pending_queue(lp); + + /* Free memory used by statistical counters */ + for_each_online_cpu(cpu) { + p = lp->dev_stats[cpu]; + if (p) { + lp->dev_stats[cpu] = NULL; + kfree(p); + } + } + + /* Release the net_device and Scsi_Host */ + dev_put(fc->real_dev); + scsi_host_put(lp->host); + return 0; +} + +/* + * Return zero if link is OK for use by FCoE. + * Any permanently-disqualifying conditions have been previously checked. + * This also updates the speed setting, which may change with link for 100/1000. + * + * This function should probably be checking for PAUSE support at some point + * in the future. 
Currently Per-priority-pause is not determinable using + * ethtool, so we shouldn't be restrictive until that problem is resolved. + */ +int fcoe_link_ok(struct fc_lport *lp) +{ + struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv; + struct net_device *dev = fc->real_dev; + struct ethtool_cmd ecmd = { ETHTOOL_GSET }; + int rc = 0; + + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { + dev = fc->phys_dev; + if (dev->ethtool_ops->get_settings) { + dev->ethtool_ops->get_settings(dev, &ecmd); + lp->link_supported_speeds &= + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + if (ecmd.supported & (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full)) + lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; + if (ecmd.supported & SUPPORTED_10000baseT_Full) + lp->link_supported_speeds |= + FC_PORTSPEED_10GBIT; + if (ecmd.speed == SPEED_1000) + lp->link_speed = FC_PORTSPEED_1GBIT; + if (ecmd.speed == SPEED_10000) + lp->link_speed = FC_PORTSPEED_10GBIT; + } + } else + rc = -1; + + return rc; +} + +static struct libfc_function_template fcoe_libfc_fcn_templ = { + .frame_send = fcoe_xmit, +}; + +static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost) +{ + int i = 0; + struct fcoe_dev_stats *p; + + lp->host = shost; + lp->drv_priv = (void *)(lp + 1); + + lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, + FCOE_MIN_XID, FCOE_MAX_XID); + if (!lp->emp) + return -ENOMEM; + + lp->link_status = 0; + lp->max_retry_count = 3; + lp->e_d_tov = 2 * 1000; /* FC-FS default */ + lp->r_a_tov = 2 * 2 * 1000; + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + + /* + * allocate per cpu stats block + */ + for_each_online_cpu(i) { + p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL); + if (p) + lp->dev_stats[i] = p; + } + + /* Finish fc_lport configuration */ + fc_lport_config(lp); + + return 0; +} + +static int net_config(struct fc_lport *lp) +{ + u32 mfs; + u64 wwnn, wwpn; + struct net_device *net_dev; + struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv; + u8 flogi_maddr[ETH_ALEN]; + + /* Require support for get_pauseparam ethtool op. */ + net_dev = fc->real_dev; + if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN)) + net_dev = vlan_dev_real_dev(net_dev); + if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + fc->phys_dev = net_dev; + + /* Do not support for bonding device */ + if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) || + (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) || + (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) { + return -EOPNOTSUPP; + } + + /* + * Determine max frame size based on underlying device and optional + * user-configured limit. If the MFS is too low, fcoe_link_ok() + * will return 0, so do this first. 
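The max-frame-size computation referred to here simply subtracts the FCoE encapsulation overhead from the Ethernet MTU. A tiny arithmetic check is below; the 14-byte header and 8-byte CRC/EOF trailer sizes are assumed from the usual T11 encapsulation layout, standing in for sizeof(struct fcoe_hdr) and sizeof(struct fcoe_crc_eof) in this code.

#include <stdio.h>

#define FCOE_HDR_LEN      14    /* assumed sizeof(struct fcoe_hdr) */
#define FCOE_TRAILER_LEN   8    /* assumed sizeof(struct fcoe_crc_eof) */

int main(void)
{
        unsigned int mtus[] = { 1500, 2500, 9000 };
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("MTU %u -> FC max frame size %u\n",
                       mtus[i], mtus[i] - FCOE_HDR_LEN - FCOE_TRAILER_LEN);
        return 0;
}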
+ */ + mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); + fc_set_mfs(lp, mfs); + + lp->link_status = ~FC_PAUSE & ~FC_LINK_UP; + if (!fcoe_link_ok(lp)) + lp->link_status |= FC_LINK_UP; + + if (fc->real_dev->features & NETIF_F_SG) + lp->sg_supp = 1; + + + skb_queue_head_init(&fc->fcoe_pending_queue); + + /* setup Source Mac Address */ + memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr, + fc->real_dev->addr_len); + + wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0); + fc_set_wwnn(lp, wwnn); + /* XXX - 3rd arg needs to be vlan id */ + wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0); + fc_set_wwpn(lp, wwpn); + + /* + * Add FCoE MAC address as second unicast MAC address + * or enter promiscuous mode if not capable of listening + * for multiple unicast MACs. + */ + rtnl_lock(); + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); + dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN); + rtnl_unlock(); + + /* + * setup the receive function from ethernet driver + * on the ethertype for the given device + */ + fc->fcoe_packet_type.func = fcoe_rcv; + fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + fc->fcoe_packet_type.dev = fc->real_dev; + dev_add_pack(&fc->fcoe_packet_type); + + return 0; +} + +static void shost_config(struct fc_lport *lp) +{ + lp->host->max_lun = FCOE_MAX_LUN; + lp->host->max_id = FCOE_MAX_FCP_TARGET; + lp->host->max_channel = 0; + lp->host->transportt = fcoe_transport_template; +} + +static int libfc_config(struct fc_lport *lp) +{ + /* Set the function pointers set by the LLDD */ + memcpy(&lp->tt, &fcoe_libfc_fcn_templ, + sizeof(struct libfc_function_template)); + + if (fc_fcp_init(lp)) + return -ENOMEM; + fc_exch_init(lp); + fc_lport_init(lp); + fc_rport_init(lp); + fc_disc_init(lp); + + return 0; +} + +/* + * This function creates the fcoe interface + * create struct fcdev which is a shared structure between opefc + * and transport level protocol. 
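The ETH_P_FCOE receive hook set up at the end of net_config() above uses the standard packet_type registration. A stripped-down sketch of that pattern follows; the handler here just frees the frame, whereas fcoe_rcv() of course queues it to the per-CPU receive threads, and the ethertype define is only a fallback if the header does not provide it.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

#ifndef ETH_P_FCOE
#define ETH_P_FCOE 0x8906       /* FCoE ethertype, if not already defined */
#endif

static int example_fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
                            struct packet_type *ptype,
                            struct net_device *orig_dev)
{
        /* A real handler would hand the frame to the FC stack here. */
        kfree_skb(skb);
        return 0;
}

static struct packet_type example_fcoe_ptype = {
        .type = __constant_htons(ETH_P_FCOE),
        .func = example_fcoe_rcv,
};

static void example_fcoe_hook(struct net_device *netdev)
{
        example_fcoe_ptype.dev = netdev;        /* limit to one interface */
        dev_add_pack(&example_fcoe_ptype);
}

static void example_fcoe_unhook(void)
{
        dev_remove_pack(&example_fcoe_ptype);
}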
+ */ +int fcoe_create_interface(struct net_device *netdev) +{ + struct fc_lport *lp = NULL; + struct fcoe_softc *fc; + struct Scsi_Host *shost; + int rc = 0; + + if (fcoe_find_fc_lport(netdev) != NULL) + return -EEXIST; + + shost = scsi_host_alloc(&fcoe_driver_template, + sizeof(struct fc_lport) + + sizeof(struct fcoe_softc)); + + if (!shost) { + FC_DBG("Could not allocate host structure\n"); + return -ENOMEM; + } + + lp = shost_priv(shost); + rc = lport_config(lp, shost); + if (rc) + goto out_host_put; + + /* Configure the fcoe_softc */ + fc = (struct fcoe_softc *)lp->drv_priv; + fc->lp = lp; + fc->real_dev = netdev; + shost_config(lp); + + + /* Add the new host to the SCSI-ml */ + rc = scsi_add_host(lp->host, NULL); + if (rc) { + FC_DBG("error on scsi_add_host\n"); + goto out_lp_destroy; + } + + sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", + FCOE_DRIVER_NAME, FCOE_VERSION, + netdev->name); + + /* Configure netdev and networking properties of the lp */ + rc = net_config(lp); + if (rc) + goto out_lp_destroy; + + /* Initialize the library */ + rc = libfc_config(lp); + if (rc) + goto out_lp_destroy; + + write_lock_bh(&fcoe_hostlist_lock); + list_add_tail(&fc->list, &fcoe_hostlist); + write_unlock_bh(&fcoe_hostlist_lock); + + lp->boot_time = jiffies; + + fc_fabric_login(lp); + + dev_hold(netdev); + return rc; + +out_lp_destroy: + fc_exch_mgr_free(lp->emp); /* Free the EM */ +out_host_put: + scsi_host_put(lp->host); + return rc; +} + +void fcoe_clean_pending_queue(struct fc_lport *lp) +{ + struct fcoe_softc *fc = lp->drv_priv; + struct sk_buff *skb; + + spin_lock_bh(&fc->fcoe_pending_queue.lock); + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&fc->fcoe_pending_queue.lock); + kfree_skb(skb); + spin_lock_bh(&fc->fcoe_pending_queue.lock); + } + spin_unlock_bh(&fc->fcoe_pending_queue.lock); +} + +int __init fcoe_sw_init(void) +{ + fcoe_transport_template = + fc_attach_transport(&fcoe_transport_function); + return fcoe_transport_template ? 0 : -1; +} + +void __exit fcoe_sw_exit(void) +{ + fc_release_transport(fcoe_transport_template); +} diff --git a/drivers/scsi/fcoe/fcoeinit.c b/drivers/scsi/fcoe/fcoeinit.c deleted file mode 100644 index 7d52ed5..0000000 --- a/drivers/scsi/fcoe/fcoeinit.c +++ /dev/null @@ -1,440 +0,0 @@ -/* - * Copyright(c) 2007 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Maintained at www.Open-FCoE.org - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "fcoe_def.h" - -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("FCoE"); -MODULE_LICENSE("GPL"); - -/* - * Static functions and variables definations - */ -#ifdef CONFIG_HOTPLUG_CPU -static int fcoe_cpu_callback(struct notifier_block *, ulong, void *); -#endif /* CONFIG_HOTPLUG_CPU */ -static int fcoe_device_notification(struct notifier_block *, ulong, void *); -static void fcoe_dev_setup(void); -static void fcoe_dev_cleanup(void); - -struct scsi_transport_template *fcoe_transport_template; - -static int fcoe_reset(struct Scsi_Host *shost) -{ - struct fc_lport *lport = shost_priv(shost); - fc_lport_reset(lport); - return 0; -} - -struct fc_function_template fcoe_transport_function = { - .show_host_node_name = 1, - .show_host_port_name = 1, - .show_host_supported_classes = 1, - .show_host_supported_fc4s = 1, - .show_host_active_fc4s = 1, - .show_host_maxframe_size = 1, - - .show_host_port_id = 1, - .show_host_supported_speeds = 1, - .get_host_speed = fc_get_host_speed, - .show_host_speed = 1, - .show_host_port_type = 1, - .get_host_port_state = fc_get_host_port_state, - .show_host_port_state = 1, - .show_host_symbolic_name = 1, - - .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), - .show_rport_maxframe_size = 1, - .show_rport_supported_classes = 1, - - .show_host_fabric_name = 1, - .show_starget_node_name = 1, - .show_starget_port_name = 1, - .show_starget_port_id = 1, - .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, - .show_rport_dev_loss_tmo = 1, - .get_fc_host_stats = fc_get_host_stats, - .issue_fc_host_lip = fcoe_reset, - - .terminate_rport_io = fc_rport_terminate_io, -}; - -struct fcoe_percpu_s *fcoe_percpu[NR_CPUS]; - -#ifdef CONFIG_HOTPLUG_CPU -static struct notifier_block fcoe_cpu_notifier = { - .notifier_call = fcoe_cpu_callback, -}; -#endif /* CONFIG_HOTPLUG_CPU */ - -/* - * notification function from net device - */ -static struct notifier_block fcoe_notifier = { - .notifier_call = fcoe_device_notification, -}; - -#ifdef CONFIG_HOTPLUG_CPU -/* - * create percpu stats block - * called by cpu add/remove notifier - */ -static void fcoe_create_percpu_data(int cpu) -{ - struct fc_lport *lp; - struct fcoe_softc *fc; - struct fcoe_dev_stats *p; - struct fcoe_info *fci = &fcoei; - - write_lock_bh(&fci->fcoe_hostlist_lock); - list_for_each_entry(fc, &fci->fcoe_hostlist, list) { - lp = fc->lp; - if (lp->dev_stats[cpu] == NULL) { - p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL); - if (p) - lp->dev_stats[cpu] = p; - } - } - write_unlock_bh(&fci->fcoe_hostlist_lock); -} - -/* - * destroy percpu stats block - * called by cpu add/remove notifier - */ -static void fcoe_destroy_percpu_data(int cpu) -{ - struct fcoe_dev_stats *p; - struct fc_lport *lp; - struct fcoe_softc *fc; - struct fcoe_info *fci = &fcoei; - - write_lock_bh(&fci->fcoe_hostlist_lock); - list_for_each_entry(fc, &fci->fcoe_hostlist, list) { - lp = fc->lp; - p = lp->dev_stats[cpu]; - if (p != NULL) { - lp->dev_stats[cpu] = NULL; - kfree(p); - } - } - write_unlock_bh(&fci->fcoe_hostlist_lock); -} - -/* - * Get notified when a cpu comes on/off. Be hotplug friendly. 
- */ -static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - fcoe_create_percpu_data(cpu); - break; - case CPU_DEAD: - fcoe_destroy_percpu_data(cpu); - break; - default: - break; - } - return NOTIFY_OK; -} -#endif /* CONFIG_HOTPLUG_CPU */ - -/* - * function to setup link change notification interface - */ -static void fcoe_dev_setup(void) -{ - /* - * here setup a interface specific wd time to - * monitor the link state - */ - register_netdevice_notifier(&fcoe_notifier); -} - -/* - * function to cleanup link change notification interface - */ -static void fcoe_dev_cleanup(void) -{ - unregister_netdevice_notifier(&fcoe_notifier); -} - -/* - * This function is called by the ethernet driver - * this is called in case of link change event - */ -static int fcoe_device_notification(struct notifier_block *notifier, - ulong event, void *ptr) -{ - struct fc_lport *lp = NULL; - struct net_device *real_dev = ptr; - struct fcoe_softc *fc; - struct fcoe_dev_stats *stats; - struct fcoe_info *fci = &fcoei; - u16 new_status; - u32 mfs; - int rc = NOTIFY_OK; - - read_lock(&fci->fcoe_hostlist_lock); - list_for_each_entry(fc, &fci->fcoe_hostlist, list) { - if (fc->real_dev == real_dev) { - lp = fc->lp; - break; - } - } - read_unlock(&fci->fcoe_hostlist_lock); - if (lp == NULL) { - rc = NOTIFY_DONE; - goto out; - } - - new_status = lp->link_status; - switch (event) { - case NETDEV_DOWN: - case NETDEV_GOING_DOWN: - new_status &= ~FC_LINK_UP; - break; - case NETDEV_UP: - case NETDEV_CHANGE: - new_status &= ~FC_LINK_UP; - if (!fcoe_link_ok(lp)) - new_status |= FC_LINK_UP; - break; - case NETDEV_CHANGEMTU: - mfs = fc->real_dev->mtu - - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); - if (fc->user_mfs && fc->user_mfs < mfs) - mfs = fc->user_mfs; - if (mfs >= FC_MIN_MAX_FRAME) - fc_set_mfs(lp, mfs); - new_status &= ~FC_LINK_UP; - if (!fcoe_link_ok(lp)) - new_status |= FC_LINK_UP; - break; - case NETDEV_REGISTER: - break; - default: - FC_DBG("unknown event %ld call", event); - } - if (lp->link_status != new_status) { - if ((new_status & FC_LINK_UP) == FC_LINK_UP) - fc_linkup(lp); - else { - stats = lp->dev_stats[smp_processor_id()]; - stats->LinkFailureCount++; - fc_linkdown(lp); - fcoe_clean_pending_queue(lp); - } - } -out: - return rc; -} - -static void trimstr(char *str, int len) -{ - char *cp = str + len; - while (--cp >= str && *cp == '\n') - *cp = '\0'; -} - -static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr, - const char *buffer, size_t size) -{ - char ifname[40]; - strcpy(ifname, buffer); - trimstr(ifname, strlen(ifname)); - fcoe_destroy_interface(ifname); - return size; -} - -static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr, - const char *buffer, size_t size) -{ - char ifname[40]; - strcpy(ifname, buffer); - trimstr(ifname, strlen(ifname)); - fcoe_create_interface(ifname); - return size; -} - -static const struct kobj_attribute fcoe_destroyattr = \ - __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy); -static const struct kobj_attribute fcoe_createattr = \ - __ATTR(create, S_IWUSR, NULL, fcoe_create); - -/* - * Initialization routine - * 1. Will create fc transport software structure - * 2. 
initialize the link list of port information structure - */ -static int __init fcoeinit(void) -{ - int rc = 0; - int cpu; - struct fcoe_percpu_s *p; - struct fcoe_info *fci = &fcoei; - - rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj, - &fcoe_destroyattr.attr); - if (!rc) - rc = sysfs_create_file(&THIS_MODULE->mkobj.kobj, - &fcoe_createattr.attr); - - if (rc) - return rc; - - rwlock_init(&fci->fcoe_hostlist_lock); - -#ifdef CONFIG_HOTPLUG_CPU - register_cpu_notifier(&fcoe_cpu_notifier); -#endif /* CONFIG_HOTPLUG_CPU */ - - /* - * initialize per CPU interrupt thread - */ - for_each_online_cpu(cpu) { - p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL); - if (p) { - p->thread = kthread_create(fcoe_percpu_receive_thread, - (void *)p, - "fcoethread/%d", cpu); - - /* - * if there is no error then bind the thread to the cpu - * initialize the semaphore and skb queue head - */ - if (likely(!IS_ERR(p->thread))) { - p->cpu = cpu; - fci->fcoe_percpu[cpu] = p; - skb_queue_head_init(&p->fcoe_rx_list); - kthread_bind(p->thread, cpu); - wake_up_process(p->thread); - } else { - fci->fcoe_percpu[cpu] = NULL; - kfree(p); - - } - } - } - if (rc < 0) { - FC_DBG("failed to initialize proc intrerface\n"); - rc = -ENODEV; - goto out_chrdev; - } - - /* - * setup link change notification - */ - fcoe_dev_setup(); - - init_timer(&fci->timer); - fci->timer.data = (ulong) fci; - fci->timer.function = fcoe_watchdog; - fci->timer.expires = (jiffies + (10 * HZ)); - add_timer(&fci->timer); - - fcoe_transport_template = - fc_attach_transport(&fcoe_transport_function); - - if (fcoe_transport_template == NULL) { - FC_DBG("fail to attach fc transport"); - return -1; - } - - return 0; - -out_chrdev: -#ifdef CONFIG_HOTPLUG_CPU - unregister_cpu_notifier(&fcoe_cpu_notifier); -#endif /* CONFIG_HOTPLUG_CPU */ - return rc; -} - -static void __exit fcoe_exit(void) -{ - u32 idx; - struct fcoe_softc *fc, *tmp; - struct fc_lport *lp; - struct fcoe_info *fci = &fcoei; - struct fcoe_percpu_s *p; - struct sk_buff *skb; - - /* - * Stop all call back interfaces - */ -#ifdef CONFIG_HOTPLUG_CPU - unregister_cpu_notifier(&fcoe_cpu_notifier); -#endif /* CONFIG_HOTPLUG_CPU */ - fcoe_dev_cleanup(); - - /* - * stop timer - */ - del_timer_sync(&fci->timer); - - /* - * assuming that at this time there will be no - * ioctl in prograss, therefore we do not need to lock the - * list. - */ - list_for_each_entry_safe(fc, tmp, &fci->fcoe_hostlist, list) { - lp = fc->lp; - fcoe_destroy_interface(lp->ifname); - } - - for (idx = 0; idx < NR_CPUS; idx++) { - if (fci->fcoe_percpu[idx]) { - kthread_stop(fci->fcoe_percpu[idx]->thread); - p = fci->fcoe_percpu[idx]; - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - if (fci->fcoe_percpu[idx]->crc_eof_page) - put_page(fci->fcoe_percpu[idx]->crc_eof_page); - kfree(fci->fcoe_percpu[idx]); - } - } - - fc_release_transport(fcoe_transport_template); -} - -module_init(fcoeinit); -module_exit(fcoe_exit); diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c new file mode 100644 index 0000000..93c47aa --- /dev/null +++ b/drivers/scsi/fcoe/libfcoe.c @@ -0,0 +1,632 @@ +/* + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +/* + * FCOE protocol file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include "fcoe_def.h" + +static int debug_fcoe; + +#define FCOE_MAX_QUEUE_DEPTH 256 + +/* destination address mode */ +#define FCOE_GW_ADDR_MODE 0x00 +#define FCOE_FCOUI_ADDR_MODE 0x01 + +/* Function Prototyes */ +static int fcoe_check_wait_queue(struct fc_lport *); +static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *); +static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *); +static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *); + +/* + * this is the fcoe receive function + * called by NET_RX_SOFTIRQ + * this function will receive the packet and + * build fc frame and pass it up + */ +int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *olddev) +{ + struct fc_lport *lp; + struct fcoe_rcv_info *fr; + struct fcoe_softc *fc; + struct fcoe_dev_stats *stats; + u8 *data; + struct fc_frame_header *fh; + unsigned short oxid; + int cpu_idx; + struct fcoe_percpu_s *fps; + + fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type); + lp = fc->lp; + if (unlikely(lp == NULL)) { + FC_DBG("cannot find hba structure"); + goto err2; + } + + if (unlikely(debug_fcoe)) { + FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p " + "end:%p sum:%d dev:%s", skb->len, skb->data_len, + skb->head, skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb), skb->csum, + skb->dev ? skb->dev->name : ""); + + } + + /* check for FCOE packet type */ + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { + FC_DBG("wrong FC type frame"); + goto err; + } + data = skb->data; + data += sizeof(struct fcoe_hdr); + fh = (struct fc_frame_header *)data; + oxid = ntohs(fh->fh_ox_id); + + fr = fcoe_dev_from_skb(skb); + fr->fr_dev = lp; + fr->ptype = ptype; + cpu_idx = 0; +#ifdef CONFIG_SMP + /* + * The exchange ID are ANDed with num of online CPUs, + * so that will have the least lock contention in + * handling the exchange. if there is no thread + * for a given idx then use first online cpu. 
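
For reference, here is a standalone model of the receive-thread selection implemented just below; it is illustrative only, with NUM_ONLINE_CPUS standing in for num_online_cpus(), index 0 standing in for first_cpu(cpu_online_map), and a plain pointer table standing in for the per-CPU fcoe_percpu threads:

#include <stdio.h>

#define NUM_ONLINE_CPUS 4		/* stand-in for num_online_cpus() */

static void *fcoe_percpu_model[NUM_ONLINE_CPUS];	/* NULL => no RX thread */

/* Model of the cpu_idx selection done in fcoe_rcv() for an incoming OXID. */
static int pick_rx_cpu(unsigned short oxid)
{
	int cpu_idx = oxid & (NUM_ONLINE_CPUS >> 1);

	if (fcoe_percpu_model[cpu_idx] == NULL)
		cpu_idx = 0;	/* stand-in for first_cpu(cpu_online_map) */
	return cpu_idx;
}

int main(void)
{
	static int t0, t2;

	/* pretend CPUs 0 and 2 have bound receive threads */
	fcoe_percpu_model[0] = &t0;
	fcoe_percpu_model[2] = &t2;

	/* with 4 CPUs online the mask is 2, so exchanges land on CPU 0 or 2 */
	printf("oxid 0x1234 -> cpu %d\n", pick_rx_cpu(0x1234));
	printf("oxid 0x1236 -> cpu %d\n", pick_rx_cpu(0x1236));
	return 0;
}

The fallback to the first online CPU covers indices that never got a receive thread, at the cost of funneling those exchanges onto one queue.
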
+ */ + cpu_idx = oxid & (num_online_cpus() >> 1); + if (fcoe_percpu[cpu_idx] == NULL) + cpu_idx = first_cpu(cpu_online_map); +#endif + fps = fcoe_percpu[cpu_idx]; + + spin_lock_bh(&fps->fcoe_rx_list.lock); + __skb_queue_tail(&fps->fcoe_rx_list, skb); + if (fps->fcoe_rx_list.qlen == 1) + wake_up_process(fps->thread); + + spin_unlock_bh(&fps->fcoe_rx_list.lock); + + return 0; +err: +#ifdef CONFIG_SMP + stats = lp->dev_stats[smp_processor_id()]; +#else + stats = lp->dev_stats[0]; +#endif + stats->ErrorFrames++; + +err2: + kfree_skb(skb); + return -1; +} + +static inline int fcoe_start_io(struct sk_buff *skb) +{ + int rc; + + skb_get(skb); + rc = dev_queue_xmit(skb); + if (rc != 0) + return rc; + kfree_skb(skb); + return 0; +} + +static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + struct fcoe_percpu_s *fps; + struct page *page; + int cpu_idx; + + cpu_idx = get_cpu(); + fps = fcoe_percpu[cpu_idx]; + page = fps->crc_eof_page; + if (!page) { + page = alloc_page(GFP_ATOMIC); + if (!page) { + put_cpu(); + return -ENOMEM; + } + fps->crc_eof_page = page; + WARN_ON(fps->crc_eof_offset != 0); + } + + get_page(page); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, + fps->crc_eof_offset, tlen); + skb->len += tlen; + skb->data_len += tlen; + skb->truesize += tlen; + fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); + + if (fps->crc_eof_offset >= PAGE_SIZE) { + fps->crc_eof_page = NULL; + fps->crc_eof_offset = 0; + put_page(page); + } + put_cpu(); + return 0; +} + +/* + * this is the frame xmit routine + */ +int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) +{ + int indx; + int wlen, rc = 0; + u32 crc; + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fcoe_dev_stats *stats; + struct fc_frame_header *fh; + unsigned int hlen; /* header length implies the version */ + unsigned int tlen; /* trailer length */ + int flogi_in_progress = 0; + struct fcoe_softc *fc; + void *data; + u8 sof, eof; + struct fcoe_hdr *hp; + + WARN_ON((fr_len(fp) % sizeof(u32)) != 0); + + fc = (struct fcoe_softc *)lp->drv_priv; + /* + * if it is a flogi then we need to learn gw-addr + * and my own fcid + */ + fh = fc_frame_header_get(fp); + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (fc_frame_payload_op(fp) == ELS_FLOGI) { + fc->flogi_oxid = ntohs(fh->fh_ox_id); + fc->address_mode = FCOE_FCOUI_ADDR_MODE; + fc->flogi_progress = 1; + flogi_in_progress = 1; + } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) { + /* + * Here we must've gotten an SID by accepting an FLOGI + * from a point-to-point connection. Switch to using + * the source mac based on the SID. The destination + * MAC in this case would have been set by receving the + * FLOGI. + */ + fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id); + fc->flogi_progress = 0; + } + } + + skb = fp_skb(fp); + sof = fr_sof(fp); + eof = fr_eof(fp); + + crc = ~0; + crc = crc32(crc, skb->data, skb_headlen(skb)); + + for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[indx]; + unsigned long off = frag->page_offset; + unsigned long len = frag->size; + + while (len > 0) { + unsigned long clen; + + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), + KM_SKB_DATA_SOFTIRQ); + crc = crc32(crc, data + (off & ~PAGE_MASK), + clen); + kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); + off += clen; + len -= clen; + } + } + + /* + * Get header and trailer lengths. + * This is temporary code until we get rid of the old protocol. 
+ * Both versions have essentially the same trailer layout but T11 + * has padding afterwards. + */ + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + + /* + * copy fc crc and eof to the skb buff + * Use utility buffer in the fc_frame part of the sk_buff for the + * trailer. + * We don't do a get_page for this frag, since that page may not be + * managed that way. So that skb_free() doesn't do that either, we + * setup the destructor to remove this frag. + */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + if (fcoe_get_paged_crc_eof(skb, tlen)) { + kfree(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) + + frag->page_offset; + } else { + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); + } + + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (tlen == sizeof(*cp)) + memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd)); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); + cp = NULL; + } + + /* + * Fill in the control structures + */ + skb->ip_summed = CHECKSUM_NONE; + eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr)); + if (fc->address_mode == FCOE_FCOUI_ADDR_MODE) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN); + + if (unlikely(flogi_in_progress)) + memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN); + else + memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN); + + eh->h_proto = htons(ETH_P_FCOE); + skb->protocol = htons(ETH_P_802_3); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + stats = lp->dev_stats[smp_processor_id()]; + stats->TxFrames++; + stats->TxWords += wlen; + skb->dev = fc->real_dev; + + fr_dev(fp) = lp; + if (fc->fcoe_pending_queue.qlen) + rc = fcoe_check_wait_queue(lp); + + if (rc == 0) + rc = fcoe_start_io(skb); + + if (rc) { + fcoe_insert_wait_queue(lp, skb); + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + fc_pause(lp); + } + + return 0; +} + +int fcoe_percpu_receive_thread(void *arg) +{ + struct fcoe_percpu_s *p = arg; + u32 fr_len; + unsigned int hlen; + unsigned int tlen; + struct fc_lport *lp; + struct fcoe_rcv_info *fr; + struct fcoe_dev_stats *stats; + struct fc_frame_header *fh; + struct sk_buff *skb; + struct fcoe_crc_eof *cp; + enum fc_sof sof; + struct fc_frame *fp; + u8 *mac = NULL; + struct fcoe_softc *fc; + struct fcoe_hdr *hp; + + set_user_nice(current, 19); + + while (!kthread_should_stop()) { + + spin_lock_bh(&p->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&p->fcoe_rx_list.lock); + schedule(); + set_current_state(TASK_RUNNING); + if (kthread_should_stop()) + return 0; + spin_lock_bh(&p->fcoe_rx_list.lock); + } + spin_unlock_bh(&p->fcoe_rx_list.lock); + fr = fcoe_dev_from_skb(skb); + lp = fr->fr_dev; + if (unlikely(lp == NULL)) { + FC_DBG("invalid HBA Structure"); + kfree_skb(skb); + continue; + } + + stats = lp->dev_stats[smp_processor_id()]; + + if (unlikely(debug_fcoe)) { + FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " + "tail:%p end:%p sum:%d dev:%s", + skb->len, skb->data_len, + skb->head, skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb), skb->csum, + skb->dev ? 
skb->dev->name : ""); + } + + /* + * Save source MAC address before discarding header. + */ + fc = lp->drv_priv; + if (unlikely(fc->flogi_progress)) + mac = eth_hdr(skb)->h_source; + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); /* not ideal */ + + /* + * Check the header and pull it off. + */ + hlen = sizeof(struct fcoe_hdr); + + hp = (struct fcoe_hdr *)skb->data; + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { + if (stats->ErrorFrames < 5) + FC_DBG("unknown FCoE version %x", + FC_FCOE_DECAPS_VER(hp)); + stats->ErrorFrames++; + kfree_skb(skb); + continue; + } + sof = hp->fcoe_sof; + skb_pull(skb, sizeof(*hp)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + skb_trim(skb, fr_len); + tlen = sizeof(struct fcoe_crc_eof); + + if (unlikely(fr_len > skb->len)) { + if (stats->ErrorFrames < 5) + FC_DBG("length error fr_len 0x%x skb->len 0x%x", + fr_len, skb->len); + stats->ErrorFrames++; + kfree_skb(skb); + continue; + } + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + + fp = (struct fc_frame *) skb; + fc_frame_init(fp); + cp = (struct fcoe_crc_eof *)(skb->data + fr_len); + fr_eof(fp) = cp->fcoe_eof; + fr_sof(fp) = sof; + fr_dev(fp) = lp; + + /* + * Check the CRC here, unless it's solicited data for SCSI. + * In that case, the SCSI layer can check it during the copy, + * and it'll be more cache-efficient. + */ + fh = fc_frame_header_get(fp); + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + fr_flags(fp) |= FCPHF_CRC_UNCHECKED; + fc_exch_recv(lp, lp->emp, fp); + } else if (le32_to_cpu(cp->fcoe_crc32) == + ~crc32(~0, skb->data, fr_len)) { + if (unlikely(fc->flogi_progress)) + fcoe_recv_flogi(fc, fp, mac); + fc_exch_recv(lp, lp->emp, fp); + } else { + if (debug_fcoe || stats->InvalidCRCCount < 5) { + printk(KERN_WARNING \ + "fcoe: dropping frame with CRC error"); + } + stats->InvalidCRCCount++; + stats->ErrorFrames++; + fc_frame_free(fp); + } + } + return 0; +} + +/* + * Snoop potential response to FLOGI or even incoming FLOGI. + */ +static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) +{ + struct fc_frame_header *fh; + u8 op; + + fh = fc_frame_header_get(fp); + if (fh->fh_type != FC_TYPE_ELS) + return; + op = fc_frame_payload_op(fp); + if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && + fc->flogi_oxid == ntohs(fh->fh_ox_id)) { + /* + * FLOGI accepted. + * If the src mac addr is FC_OUI-based, then we mark the + * address_mode flag to use FC_OUI-based Ethernet DA. + * Otherwise we use the FCoE gateway addr + */ + if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) { + fc->address_mode = FCOE_FCOUI_ADDR_MODE; + } else { + memcpy(fc->dest_addr, sa, ETH_ALEN); + fc->address_mode = FCOE_GW_ADDR_MODE; + } + + /* + * Remove any previously-set unicast MAC filter. + * Add secondary FCoE MAC address filter for our OUI. + */ + rtnl_lock(); + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) + dev_unicast_delete(fc->real_dev, fc->data_src_addr, + ETH_ALEN); + fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id); + dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN); + rtnl_unlock(); + + fc->flogi_progress = 0; + } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { + /* + * Save source MAC for point-to-point responses. 
+ */ + memcpy(fc->dest_addr, sa, ETH_ALEN); + fc->address_mode = FCOE_GW_ADDR_MODE; + } +} + +void fcoe_watchdog(ulong vp) +{ + struct fc_lport *lp; + struct fcoe_softc *fc; + int paused = 0; + + read_lock(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + lp = fc->lp; + if (lp) { + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + paused = 1; + if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) { + if (paused) + fc_unpause(lp); + } + } + } + read_unlock(&fcoe_hostlist_lock); + + fcoe_timer.expires = jiffies + (1 * HZ); + add_timer(&fcoe_timer); +} + +/* + * the wait_queue is used when the skb transmit fails. skb will go + * in the wait_queue which will be emptied by the time function OR + * by the next skb transmit. + * + */ + +/* + * Function name : fcoe_check_wait_queue() + * + * Return Values : 0 or error + * + * Description : empties the wait_queue + * dequeue the head of the wait_queue queue and + * calls fcoe_start_io() for each packet + * if all skb have been transmitted, return 0 + * if a error occurs, then restore wait_queue and try again + * later + * + */ + +static int fcoe_check_wait_queue(struct fc_lport *lp) +{ + int rc, unpause = 0; + int paused = 0; + struct sk_buff *skb; + struct fcoe_softc *fc; + + fc = (struct fcoe_softc *)lp->drv_priv; + spin_lock_bh(&fc->fcoe_pending_queue.lock); + + /* + * is this interface paused? + */ + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + paused = 1; + if (fc->fcoe_pending_queue.qlen) { + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&fc->fcoe_pending_queue.lock); + rc = fcoe_start_io(skb); + if (rc) { + fcoe_insert_wait_queue_head(lp, skb); + return rc; + } + spin_lock_bh(&fc->fcoe_pending_queue.lock); + } + if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH) + unpause = 1; + } + spin_unlock_bh(&fc->fcoe_pending_queue.lock); + if ((unpause) && (paused)) + fc_unpause(lp); + return fc->fcoe_pending_queue.qlen; +} + +static void fcoe_insert_wait_queue_head(struct fc_lport *lp, + struct sk_buff *skb) +{ + struct fcoe_softc *fc; + + fc = (struct fcoe_softc *)lp->drv_priv; + spin_lock_bh(&fc->fcoe_pending_queue.lock); + __skb_queue_head(&fc->fcoe_pending_queue, skb); + spin_unlock_bh(&fc->fcoe_pending_queue.lock); +} + +static void fcoe_insert_wait_queue(struct fc_lport *lp, + struct sk_buff *skb) +{ + struct fcoe_softc *fc; + + fc = (struct fcoe_softc *)lp->drv_priv; + spin_lock_bh(&fc->fcoe_pending_queue.lock); + __skb_queue_tail(&fc->fcoe_pending_queue, skb); + spin_unlock_bh(&fc->fcoe_pending_queue.lock); +} diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 30403aa..f724dd2 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -19,7 +19,9 @@ /* * Target Discovery - * Actually, this discovers all FC-4 remote ports, including FCP initiators. + * + * This block discovers all FC-4 remote ports, including FCP initiators. It + * also handles RSCN events and re-discovery if necessary. */ #include @@ -33,12 +35,18 @@ #define FC_DISC_RETRY_LIMIT 3 /* max retries */ #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ -int fc_disc_debug; +static int fc_disc_debug; + +#define FC_DEBUG_DISC(fmt...) 
\ + do { \ + if (fc_disc_debug) \ + FC_DBG(fmt); \ + } while (0) static void fc_disc_gpn_ft_req(struct fc_lport *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); static int fc_disc_new_target(struct fc_lport *, struct fc_rport *, - struct fc_rport_identifiers *); + struct fc_rport_identifiers *); static void fc_disc_del_target(struct fc_lport *, struct fc_rport *); static void fc_disc_done(struct fc_lport *); static void fc_disc_error(struct fc_lport *, struct fc_frame *); @@ -47,13 +55,13 @@ static void fc_disc_single(struct fc_lport *, struct fc_disc_port *); static int fc_disc_restart(struct fc_lport *); /** - * fc_disc_rscn_req - Handle Registered State Change Notification (RSCN) + * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN) * @sp: Current sequence of the RSCN exchange * @fp: RSCN Frame - * @lp: Fibre Channel host port instance + * @lport: Fibre Channel host port instance */ -static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lp) +static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, + struct fc_lport *lport) { struct fc_els_rscn *rp; struct fc_els_rscn_page *pp; @@ -86,12 +94,14 @@ static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp, */ switch (fmt) { case ELS_ADDR_FMT_PORT: + FC_DEBUG_DISC("Port address format for port (%6x)\n", + ntoh24(pp->rscn_fid)); dp = kzalloc(sizeof(*dp), GFP_KERNEL); if (!dp) { redisc = 1; break; } - dp->lp = lp; + dp->lp = lport; dp->ids.port_id = ntoh24(pp->rscn_fid); dp->ids.port_name = -1; dp->ids.node_name = -1; @@ -102,27 +112,26 @@ static void fc_disc_rscn_req(struct fc_seq *sp, struct fc_frame *fp, case ELS_ADDR_FMT_DOM: case ELS_ADDR_FMT_FAB: default: + FC_DEBUG_DISC("Address format is (%d)\n", fmt); redisc = 1; break; } } - lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); if (redisc) { - if (fc_disc_debug) - FC_DBG("RSCN received: rediscovering\n"); + FC_DEBUG_DISC("RSCN received: rediscovering\n"); list_for_each_entry_safe(dp, next, &disc_list, peers) { list_del(&dp->peers); kfree(dp); } - fc_disc_restart(lp); + fc_disc_restart(lport); } else { - if (fc_disc_debug) - FC_DBG("RSCN received: not rediscovering. " - "redisc %d state %d in_prog %d\n", - redisc, lp->state, lp->disc_pending); + FC_DEBUG_DISC("RSCN received: not rediscovering. " + "redisc %d state %d in_prog %d\n", + redisc, lport->state, lport->disc_pending); list_for_each_entry_safe(dp, next, &disc_list, peers) { list_del(&dp->peers); - fc_disc_single(lp, dp); + fc_disc_single(lport, dp); } } fc_frame_free(fp); @@ -131,48 +140,53 @@ reject: rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; - lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } +/** + * fc_disc_recv_req - Handle incoming requests + * @sp: Current sequence of the request exchange + * @fp: The frame + * @lport: The FC local port + */ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lp) + struct fc_lport *lport) { - switch (fc_frame_payload_op(fp)) { + u8 op; + + op = fc_frame_payload_op(fp); + switch (op) { case ELS_RSCN: - fc_disc_rscn_req(sp, fp, lp); + fc_disc_recv_rscn_req(sp, fp, lport); break; default: - FC_DBG("fc_disc recieved an unexpected request\n"); + FC_DBG("Received an unsupported request. opcode (%x)\n", op); break; } } -/* - * Refresh target discovery, perhaps due to an RSCN. 
- * A configurable delay is introduced to collect any subsequent RSCNs. +/** + * fc_disc_restart - Restart discovery + * @lport: FC local port */ -static int fc_disc_restart(struct fc_lport *lp) +static int fc_disc_restart(struct fc_lport *lport) { - if (!lp->disc_requested && !lp->disc_pending) { - schedule_delayed_work(&lp->disc_work, - msecs_to_jiffies(lp->disc_delay * 1000)); + if (!lport->disc_requested && !lport->disc_pending) { + schedule_delayed_work(&lport->disc_work, + msecs_to_jiffies(lport->disc_delay * 1000)); } - lp->disc_requested = 1; + lport->disc_requested = 1; return 0; } -/* - * Fibre Channel Target discovery. +/** + * fc_disc_start - Fibre Channel Target discovery + * @lport: FC local port * * Returns non-zero if discovery cannot be started. - * - * Callback is called for each target remote port found in discovery. - * When discovery is complete, the callback is called with a NULL remote port. - * Discovery may be restarted after an RSCN is received, causing the - * callback to be called after discovery complete is indicated. */ -int fc_disc_start(struct fc_lport *lp) +static int fc_disc_start(struct fc_lport *lport) { struct fc_rport *rport; int error; @@ -181,20 +195,20 @@ int fc_disc_start(struct fc_lport *lp) /* * If not ready, or already running discovery, just set request flag. */ - if (!fc_lport_test_ready(lp) || lp->disc_pending) { - lp->disc_requested = 1; + if (!fc_lport_test_ready(lport) || lport->disc_pending) { + lport->disc_requested = 1; return 0; } - lp->disc_pending = 1; - lp->disc_requested = 0; - lp->disc_retry_count = 0; + lport->disc_pending = 1; + lport->disc_requested = 0; + lport->disc_retry_count = 0; /* * Handle point-to-point mode as a simple discovery * of the remote port. */ - rport = lp->ptp_rp; + rport = lport->ptp_rp; if (rport) { ids.port_id = rport->port_id; ids.port_name = rport->port_name; @@ -202,46 +216,41 @@ int fc_disc_start(struct fc_lport *lp) ids.roles = FC_RPORT_ROLE_UNKNOWN; get_device(&rport->dev); - error = fc_disc_new_target(lp, rport, &ids); + error = fc_disc_new_target(lport, rport, &ids); put_device(&rport->dev); if (!error) - fc_disc_done(lp); + fc_disc_done(lport); } else { - fc_block_rports(lp); - fc_disc_gpn_ft_req(lp); /* get ports by FC-4 type */ + fc_disc_gpn_ft_req(lport); /* get ports by FC-4 type */ error = 0; } return error; } -/* - * Restart discovery after a delay due to resource shortages. - * If the error persists, the discovery will be abandoned. +/** + * fc_disc_retry - Retry discovery + * @lport: FC local port */ -static void fc_disc_retry(struct fc_lport *lp) +static void fc_disc_retry(struct fc_lport *lport) { unsigned long delay = FC_DISC_RETRY_DELAY; - if (!lp->disc_retry_count) + if (!lport->disc_retry_count) delay /= 4; /* timeout faster first time */ - if (lp->disc_retry_count++ < FC_DISC_RETRY_LIMIT) - schedule_delayed_work(&lp->disc_work, + if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) + schedule_delayed_work(&lport->disc_work, msecs_to_jiffies(delay)); else - fc_disc_done(lp); + fc_disc_done(lport); } -/* - * Handle new target found by discovery. - * Create remote port and session if needed. - * Ignore returns of our own FID & WWPN. - * - * If a non-NULL rp is passed in, it is held for the caller, but not for us. - * - * Events delivered are: - * FC_EV_READY, when remote port is rediscovered. 
+/** + * fc_disc_new_target - Handle new target found by discovery + * @lport: FC local port + * @rport: The previous FC remote port (NULL if new remote port) + * @ids: Identifiers for the new FC remote port */ -static int fc_disc_new_target(struct fc_lport *lp, +static int fc_disc_new_target(struct fc_lport *lport, struct fc_rport *rport, struct fc_rport_identifiers *ids) { @@ -263,61 +272,64 @@ static int fc_disc_new_target(struct fc_lport *lp, * assigned the same FCID. This should be rare. * Delete the old one and fall thru to re-create. */ - fc_disc_del_target(lp, rport); + fc_disc_del_target(lport, rport); rport = NULL; } } if (((ids->port_name != -1) || (ids->port_id != -1)) && - ids->port_id != lp->fid && ids->port_name != lp->wwpn) { + ids->port_id != fc_host_port_id(lport->host) && + ids->port_name != lport->wwpn) { if (!rport) { - rport = lp->tt.rport_lookup(lp, ids->port_id); + rport = lport->tt.rport_lookup(lport, ids->port_id); if (!rport) { struct fc_disc_port dp; - dp.lp = lp; + dp.lp = lport; dp.ids.port_id = ids->port_id; dp.ids.port_name = ids->port_name; dp.ids.node_name = ids->node_name; dp.ids.roles = ids->roles; - rport = fc_rport_dummy_create(&dp); + rport = fc_rport_rogue_create(&dp); } if (!rport) error = ENOMEM; } if (rport) { rp = rport->dd_data; - rp->event_callback = lp->tt.event_callback; + rp->event_callback = lport->tt.event_callback; rp->rp_state = RPORT_ST_INIT; - lp->tt.rport_login(rport); + lport->tt.rport_login(rport); } } return error; } -/* - * Delete the remote port. +/** + * fc_disc_del_target - Delete a target + * @lport: FC local port + * @rport: The remote port to be removed */ -static void fc_disc_del_target(struct fc_lport *lp, struct fc_rport *rport) +static void fc_disc_del_target(struct fc_lport *lport, struct fc_rport *rport) { - lp->tt.rport_reset(rport); - fc_remote_port_delete(rport); /* release hold from create */ + lport->tt.rport_stop(rport); } -/* - * Done with discovery +/** + * fc_disc_done - Discovery has been completed + * @lport: FC local port */ -static void fc_disc_done(struct fc_lport *lp) +static void fc_disc_done(struct fc_lport *lport) { - lp->disc_done = 1; - lp->disc_pending = 0; - if (lp->disc_requested) - lp->tt.disc_start(lp); + lport->disc_done = 1; + lport->disc_pending = 0; + if (lport->disc_requested) + lport->tt.disc_start(lport); } /** * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request - * @lp: Fibre Channel host port instance + * @lport: FC local port */ -static void fc_disc_gpn_ft_req(struct fc_lport *lp) +static void fc_disc_gpn_ft_req(struct fc_lport *lport) { struct fc_frame *fp; struct fc_seq *sp = NULL; @@ -327,60 +339,64 @@ static void fc_disc_gpn_ft_req(struct fc_lport *lp) } *rp; int error = 0; - lp->disc_buf_len = 0; - lp->disc_seq_count = 0; - fp = fc_frame_alloc(lp, sizeof(*rp)); - if (fp == NULL) { + lport->disc_buf_len = 0; + lport->disc_seq_count = 0; + fp = fc_frame_alloc(lport, sizeof(*rp)); + if (!fp) { error = ENOMEM; } else { rp = fc_frame_payload_get(fp, sizeof(*rp)); - fc_fill_dns_hdr(lp, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid)); + fc_fill_dns_hdr(lport, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid)); rp->gid.fn_fc4_type = FC_TYPE_FCP; - WARN_ON(!fc_lport_test_ready(lp)); + WARN_ON(!fc_lport_test_ready(lport)); fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT); - sp = lp->tt.exch_seq_send(lp, fp, - fc_disc_gpn_ft_resp, NULL, - lp, lp->e_d_tov, - lp->fid, - FC_FID_DIR_SERV, - FC_FC_SEQ_INIT | FC_FC_END_SEQ); + sp = lport->tt.exch_seq_send(lport, fp, + fc_disc_gpn_ft_resp, 
NULL, + lport, lport->e_d_tov, + fc_host_port_id(lport->host), + FC_FID_DIR_SERV, + FC_FC_SEQ_INIT | FC_FC_END_SEQ); } - if (error || sp == NULL) - fc_disc_retry(lp); + if (error || !sp) + fc_disc_retry(lport); } -/* - * Handle error on dNS request. +/** + * fc_disc_error - Handle error on dNS request + * @lport: FC local port + * @fp: The frame pointer */ -static void fc_disc_error(struct fc_lport *lp, struct fc_frame *fp) +static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp) { - int err = PTR_ERR(fp); + long err = PTR_ERR(fp); + FC_DEBUG_DISC("Error %ld, retries %d/%d\n", PTR_ERR(fp), + lport->retry_count, FC_DISC_RETRY_LIMIT); + switch (err) { case -FC_EX_TIMEOUT: - if (lp->disc_retry_count++ < FC_DISC_RETRY_LIMIT) { - fc_disc_gpn_ft_req(lp); + if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) { + fc_disc_gpn_ft_req(lport); } else { - FC_DBG("err %d - ending\n", err); - fc_disc_done(lp); + fc_disc_done(lport); } break; default: - FC_DBG("err %d - ending\n", err); - fc_disc_done(lp); + FC_DBG("Error code %ld not supported\n", err); + fc_disc_done(lport); break; } } /** * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request - * @lp: Fibre Channel host port instance + * @lport: Fibre Channel host port instance * @buf: GPN_FT response buffer * @len: size of response buffer */ -static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) +static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) { struct fc_gpn_ft_resp *np; char *bp; @@ -388,8 +404,8 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) size_t tlen; int error = 0; struct fc_disc_port dp; - struct fc_rport *rp; - struct fc_rport_libfc_priv *rpp; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rdata; /* * Handle partial name record left over from previous call. @@ -397,7 +413,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) bp = buf; plen = len; np = (struct fc_gpn_ft_resp *)bp; - tlen = lp->disc_buf_len; + tlen = lport->disc_buf_len; if (tlen) { WARN_ON(tlen >= sizeof(*np)); plen = sizeof(*np) - tlen; @@ -405,7 +421,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) WARN_ON(plen >= sizeof(*np)); if (plen > len) plen = len; - np = &lp->disc_buf; + np = &lport->disc_buf; memcpy((char *)np + tlen, bp, plen); /* @@ -415,9 +431,9 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) bp -= tlen; len += tlen; plen += tlen; - lp->disc_buf_len = (unsigned char) plen; + lport->disc_buf_len = (unsigned char) plen; if (plen == sizeof(*np)) - lp->disc_buf_len = 0; + lport->disc_buf_len = 0; } /* @@ -428,19 +444,20 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) * After the first time through the loop, things return to "normal". 
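
A simplified, standalone model of the partial-record handling described in the comments above; REC_SIZE, saved and saved_len stand in for sizeof(struct fc_gpn_ft_resp), lport->disc_buf and lport->disc_buf_len, and the FC_NS_FID_LAST handling of the real loop is omitted:

#include <stdio.h>
#include <string.h>

#define REC_SIZE 16		/* stand-in for sizeof(struct fc_gpn_ft_resp) */

static unsigned char saved[REC_SIZE];	/* models lport->disc_buf */
static size_t saved_len;		/* models lport->disc_buf_len */

static void handle_record(const unsigned char *rec)
{
	printf("complete record starting with 0x%02x\n", rec[0]);
}

/*
 * Model of one GPN_FT response payload: finish any partial record left
 * over from the previous frame, consume whole records, save the tail.
 */
static void parse_buf(const unsigned char *buf, size_t len)
{
	if (saved_len) {
		size_t plen = REC_SIZE - saved_len;

		if (plen > len)
			plen = len;
		memcpy(saved + saved_len, buf, plen);
		saved_len += plen;
		if (saved_len == REC_SIZE) {
			handle_record(saved);
			saved_len = 0;
		}
		buf += plen;
		len -= plen;
	}
	while (len >= REC_SIZE) {
		handle_record(buf);
		buf += REC_SIZE;
		len -= REC_SIZE;
	}
	if (len) {			/* partial record at the end */
		memcpy(saved, buf, len);
		saved_len = len;
	}
}

int main(void)
{
	unsigned char frame1[24], frame2[24];

	memset(frame1, 0xaa, sizeof(frame1));	/* one record plus half of another */
	memset(frame2, 0xbb, sizeof(frame2));	/* completes it, then one more */
	parse_buf(frame1, sizeof(frame1));
	parse_buf(frame2, sizeof(frame2));
	return 0;
}
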
*/ while (plen >= sizeof(*np)) { - dp.lp = lp; + dp.lp = lport; dp.ids.port_id = ntoh24(np->fp_fid); dp.ids.port_name = ntohll(np->fp_wwpn); dp.ids.node_name = -1; dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; - if ((dp.ids.port_id != lp->fid) && - (dp.ids.port_name != lp->wwpn)) { - rp = fc_rport_dummy_create(&dp); - if (rp) { - rpp = rp->dd_data; - rpp->local_port = lp; - lp->tt.rport_login(rp); + if ((dp.ids.port_id != fc_host_port_id(lport->host)) && + (dp.ids.port_name != lport->wwpn)) { + rport = fc_rport_rogue_create(&dp); + if (rport) { + rdata = rport->dd_data; + rdata->event_callback = lport->tt.event_callback; + rdata->local_port = lport; + lport->tt.rport_login(rport); } else FC_DBG("Failed to allocate memory for " "the newly discovered port (%6x)\n", @@ -448,7 +465,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) } if (np->fp_flags & FC_NS_FID_LAST) { - fc_disc_done(lp); + fc_disc_done(lport); len = 0; break; } @@ -462,11 +479,11 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) * Save any partial record at the end of the buffer for next time. */ if (error == 0 && len > 0 && len < sizeof(*np)) { - if (np != &lp->disc_buf) - memcpy(&lp->disc_buf, np, len); - lp->disc_buf_len = (unsigned char) len; + if (np != &lport->disc_buf) + memcpy(&lport->disc_buf, np, len); + lport->disc_buf_len = (unsigned char) len; } else { - lp->disc_buf_len = 0; + lport->disc_buf_len = 0; } return error; } @@ -476,14 +493,14 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len) */ static void fc_disc_timeout(struct work_struct *work) { - struct fc_lport *lp; + struct fc_lport *lport; - lp = container_of(work, struct fc_lport, disc_work.work); + lport = container_of(work, struct fc_lport, disc_work.work); - if (lp->disc_pending) - fc_disc_gpn_ft_req(lp); + if (lport->disc_pending) + fc_disc_gpn_ft_req(lport); else - lp->tt.disc_start(lp); + lport->tt.disc_start(lport); } /** @@ -495,9 +512,9 @@ static void fc_disc_timeout(struct work_struct *work) * The response may be in multiple frames */ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) + void *lp_arg) { - struct fc_lport *lp = lp_arg; + struct fc_lport *lport = lp_arg; struct fc_ct_hdr *cp; struct fc_frame_header *fh; unsigned int seq_cnt; @@ -506,7 +523,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, int error; if (IS_ERR(fp)) { - fc_disc_error(lp, fp); + fc_disc_error(lport, fp); return; } @@ -515,7 +532,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, len = fr_len(fp) - sizeof(*fh); seq_cnt = ntohs(fh->fh_seq_cnt); if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && - lp->disc_seq_count == 0) { + lport->disc_seq_count == 0) { cp = fc_frame_payload_get(fp, sizeof(*cp)); if (cp == NULL) { FC_DBG("GPN_FT response too short, len %d\n", @@ -531,68 +548,76 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, FC_DBG("GPN_FT rejected reason %x exp %x " "(check zoning)\n", cp->ct_reason, cp->ct_explan); - fc_disc_done(lp); + fc_disc_done(lport); } else { FC_DBG("GPN_FT unexpected response code %x\n", ntohs(cp->ct_cmd)); } } else if (fr_sof(fp) == FC_SOF_N3 && - seq_cnt == lp->disc_seq_count) { + seq_cnt == lport->disc_seq_count) { buf = fh + 1; } else { FC_DBG("GPN_FT unexpected frame - out of sequence? 
" "seq_cnt %x expected %x sof %x eof %x\n", - seq_cnt, lp->disc_seq_count, fr_sof(fp), fr_eof(fp)); + seq_cnt, lport->disc_seq_count, fr_sof(fp), fr_eof(fp)); } if (buf) { - error = fc_disc_gpn_ft_parse(lp, buf, len); + error = fc_disc_gpn_ft_parse(lport, buf, len); if (error) - fc_disc_retry(lp); + fc_disc_retry(lport); else - lp->disc_seq_count++; + lport->disc_seq_count++; } fc_frame_free(fp); } -/* - * Discover the directory information for a single target. +/** + * fc_disc_single - Discover the directory information for a single target + * @lport: FC local port + * @dp: The port to rediscover + * * This could be from an RSCN that reported a change for the target. */ -static void fc_disc_single(struct fc_lport *lp, struct fc_disc_port *dp) +static void fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) { struct fc_rport *rport; - struct fc_rport *rp; - struct fc_rport_libfc_priv *rpp; + struct fc_rport *new_rport; + struct fc_rport_libfc_priv *rdata; - if (dp->ids.port_id == lp->fid) + if (dp->ids.port_id == fc_host_port_id(lport->host)) goto out; - rport = lp->tt.rport_lookup(lp, dp->ids.port_id); + rport = lport->tt.rport_lookup(lport, dp->ids.port_id); if (rport) { - fc_disc_del_target(lp, rport); + fc_disc_del_target(lport, rport); put_device(&rport->dev); /* hold from lookup */ } - rp = fc_rport_dummy_create(dp); - if (rp) { - rpp = rp->dd_data; + new_rport = fc_rport_rogue_create(dp); + if (new_rport) { + rdata = new_rport->dd_data; + rdata->event_callback = lport->tt.event_callback; kfree(dp); - lp->tt.rport_login(rp); + lport->tt.rport_login(new_rport); } return; out: kfree(dp); } -int fc_disc_init(struct fc_lport *lp) +/** + * fc_disc_init - Initialize the discovery block + * @lport: FC local port + */ +int fc_disc_init(struct fc_lport *lport) { - INIT_DELAYED_WORK(&lp->disc_work, fc_disc_timeout); + INIT_DELAYED_WORK(&lport->disc_work, fc_disc_timeout); - if (!lp->tt.disc_start) - lp->tt.disc_start = fc_disc_start; + if (!lport->tt.disc_start) + lport->tt.disc_start = fc_disc_start; - if (!lp->tt.disc_recv_req) - lp->tt.disc_recv_req = fc_disc_recv_req; + if (!lport->tt.disc_recv_req) + lport->tt.disc_recv_req = fc_disc_recv_req; return 0; } diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index ed74d95..80dc1ef 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -37,6 +37,13 @@ * fc_exch_debug can be set in debugger or at compile time to get more logs. */ static int fc_exch_debug; + +#define FC_DEBUG_EXCH(fmt...) 
\ + do { \ + if (fc_exch_debug) \ + FC_DBG(fmt); \ + } while (0) + static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ /* @@ -86,7 +93,7 @@ struct fc_exch { struct list_head ex_list; /* free or busy list linkage */ spinlock_t ex_lock; /* lock covering exchange state */ atomic_t ex_refcnt; /* reference counter */ - struct timer_list ex_timer; /* timer for upper level protocols */ + struct delayed_work timeout_work; /* timer for upper level protocols */ struct fc_lport *lp; /* fc device instance */ u16 oxid; /* originator's exchange ID */ u16 rxid; /* responder's exchange ID */ @@ -310,7 +317,6 @@ static void fc_exch_release(struct fc_exch *ep) if (ep->lp->tt.exch_put) ep->lp->tt.exch_put(ep->lp, mp, ep->xid); WARN_ON(!ep->esb_stat & ESB_ST_COMPLETE); - WARN_ON(timer_pending(&ep->ex_timer)); mempool_free(ep, mp->ep_pool); } } @@ -332,7 +338,7 @@ static int fc_exch_done_locked(struct fc_exch *ep) if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { ep->state |= FC_EX_DONE; - if (del_timer(&ep->ex_timer)) + if (cancel_delayed_work(&ep->timeout_work)) atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ rc = 0; } @@ -362,7 +368,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) return; - if (!mod_timer(&ep->ex_timer, jiffies + msecs_to_jiffies(timer_msec))) + FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n", + ep->xid); + if (schedule_delayed_work(&ep->timeout_work, + jiffies + msecs_to_jiffies(timer_msec))) fc_exch_hold(ep); /* hold for timer */ } @@ -435,9 +444,10 @@ EXPORT_SYMBOL(fc_seq_exch_abort); * Exchange timeout - handle exchange timer expiration. * The timer will have been cancelled before this is called. */ -static void fc_exch_timeout(unsigned long ep_arg) +static void fc_exch_timeout(struct work_struct *work) { - struct fc_exch *ep = (struct fc_exch *)ep_arg; + struct fc_exch *ep = container_of(work, struct fc_exch, + timeout_work.work); struct fc_seq *sp = &ep->seq; void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); void *arg; @@ -584,7 +594,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid) ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; ep->class = mp->class; - setup_timer(&ep->ex_timer, fc_exch_timeout, (unsigned long)ep); + INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); out: return ep; err: @@ -843,9 +853,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp) struct fc_exch *ep = fc_seq_exch(sp); sp = fc_seq_alloc(ep, ep->seq_id++); - if (fc_exch_debug) - FC_DBG("exch %4x f_ctl %6x seq %2x f_ctl %6x\n", - ep->xid, ep->f_ctl, sp->id, sp->f_ctl); + FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x f_ctl %6x\n", + ep->xid, ep->f_ctl, sp->id, sp->f_ctl); return sp; } /* @@ -913,7 +922,18 @@ int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, } hton24(fh->fh_f_ctl, f_ctl | fill); - fh->fh_seq_cnt = htons(sp->cnt++); + fh->fh_seq_cnt = htons(sp->cnt); + + /* + * update sequence count if this frame is carrying + * multiple FC frames when sequence offload is enabled + * by LLD. + */ + if (fr_max_payload(fp)) + sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)), + fr_max_payload(fp)); + else + sp->cnt++; /* * Send the frame. 
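
To make the new SEQ_CNT arithmetic above concrete, here is a small standalone sketch; the 24-byte header size matches sizeof(struct fc_frame_header), while the 2048-byte per-frame payload is only an example of what fr_max_payload(fp) might carry:

#include <stdio.h>

#define FC_FRAME_HEADER_LEN 24		/* sizeof(struct fc_frame_header) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Model of the sequence-count update in fc_seq_send(): a frame the LLD
 * will segment (max_payload != 0) consumes one SEQ_CNT per wire frame,
 * otherwise it consumes exactly one.
 */
static unsigned int next_seq_cnt(unsigned int cnt, size_t frame_len,
				 size_t max_payload)
{
	if (max_payload)
		return cnt + DIV_ROUND_UP(frame_len - FC_FRAME_HEADER_LEN,
					  max_payload);
	return cnt + 1;
}

int main(void)
{
	/* an 8 KB burst handed down as one frame with a 2048-byte payload
	 * per wire frame advances SEQ_CNT by 4 */
	printf("%u\n", next_seq_cnt(0, 8192 + FC_FRAME_HEADER_LEN, 2048));
	/* without sequence offload the count advances by one */
	printf("%u\n", next_seq_cnt(4, 2048 + FC_FRAME_HEADER_LEN, 0));
	return 0;
}
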
@@ -1185,8 +1205,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, lp->tt.lport_recv(lp, sp, fp); fc_exch_release(ep); /* release from lookup */ } else { - if (fc_exch_debug) - FC_DBG("exch/seq lookup failed: reject %x\n", reject); + FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject); fc_frame_free(fp); } } @@ -1290,12 +1309,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ if (!sp) { atomic_inc(&mp->stats.xid_not_found); - if (fc_exch_debug) - FC_DBG("seq lookup failed\n"); + FC_DEBUG_EXCH("seq lookup failed\n"); } else { atomic_inc(&mp->stats.non_bls_resp); - if (fc_exch_debug) - FC_DBG("non-BLS response to sequence"); + FC_DEBUG_EXCH("non-BLS response to sequence"); } fc_frame_free(fp); } @@ -1316,11 +1333,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) int rc = 1, has_rec = 0; fh = fc_frame_header_get(fp); - if (fc_exch_debug) - FC_DBG("exch: BLS rctl %x - %s\n", - fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); + FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n", + fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); - if (del_timer_sync(&ep->ex_timer)) + if (cancel_delayed_work_sync(&ep->timeout_work)) fc_exch_release(ep); /* release from pending timer hold */ spin_lock_bh(&ep->ex_lock); @@ -1410,10 +1426,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) case FC_RCTL_ACK_0: break; default: - if (fc_exch_debug) - FC_DBG("BLS rctl %x - %s received", - fh->fh_r_ctl, - fc_exch_rctl_name(fh->fh_r_ctl)); + FC_DEBUG_EXCH("BLS rctl %x - %s received", + fh->fh_r_ctl, + fc_exch_rctl_name(fh->fh_r_ctl)); break; } fc_frame_free(fp); @@ -1498,7 +1513,7 @@ static void fc_exch_reset(struct fc_exch *ep) * functions can also grab the lport lock which could cause * a deadlock). */ - if (del_timer(&ep->ex_timer)) + if (cancel_delayed_work(&ep->timeout_work)) atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ resp = ep->resp; ep->resp = NULL; @@ -1720,7 +1735,7 @@ static void fc_exch_rrq(struct fc_exch *ep) if (ep->esb_stat & ESB_ST_RESP) did = ep->sid; rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, - lp->e_d_tov, lp->fid, did, + lp->e_d_tov, fc_host_port_id(lp->host), did, FC_FC_SEQ_INIT | FC_FC_END_SEQ); if (!rrq_sp) { ep->esb_stat |= ESB_ST_REC_QUAL; @@ -1774,8 +1789,10 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) ep->esb_stat &= ~ESB_ST_REC_QUAL; atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */ } - if ((ep->esb_stat & ESB_ST_COMPLETE) && (del_timer(&ep->ex_timer))) - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + if (ep->esb_stat & ESB_ST_COMPLETE) { + if (cancel_delayed_work(&ep->timeout_work)) + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + } spin_unlock_bh(&ep->ex_lock); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 2566eed..bf8202f 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -42,22 +42,29 @@ MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("libfc"); MODULE_LICENSE("GPL"); -int fc_fcp_debug; +static int fc_fcp_debug; + +#define FC_DEBUG_FCP(fmt...) 
\ + do { \ + if (fc_fcp_debug) \ + FC_DBG(fmt); \ + } while (0) + static struct kmem_cache *scsi_pkt_cachep; /* SRB state definitions */ -#define FC_SRB_FREE 0 /* cmd is free */ -#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ -#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ -#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ -#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */ -#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ -#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ +#define FC_SRB_FREE 0 /* cmd is free */ +#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ +#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ +#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ +#define FC_SRB_ABORTED (1 << 3) /* abort acknowleged */ +#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ +#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ -#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */ +#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */ -#define FC_SRB_READ (1 << 1) -#define FC_SRB_WRITE (1 << 0) +#define FC_SRB_READ (1 << 1) +#define FC_SRB_WRITE (1 << 0) /* * scsi request structure, one for each scsi request @@ -184,8 +191,8 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); #define FC_SCSI_REC_TOV (2 * HZ) #define FC_HOST_RESET_TIMEOUT (30 * HZ) -#define FC_MAX_ERROR_CNT 5 -#define FC_MAX_RECOV_RETRY 3 +#define FC_MAX_ERROR_CNT 5 +#define FC_MAX_RECOV_RETRY 3 #define FC_FCP_DFLT_QUEUE_DEPTH 32 @@ -353,11 +360,8 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && fc_frame_crc_check(fp)) goto crc_err; - if (fc_fcp_debug) { - FC_DBG("data received past end. " - "len %zx offset %zx " - "data_len %x\n", len, offset, fsp->data_len); - } + FC_DEBUG_FCP("data received past end. len %zx offset %zx " + "data_len %x\n", len, offset, fsp->data_len); fc_fcp_retry_cmd(fsp); return; } @@ -449,55 +453,54 @@ crc_err: /* * Send SCSI data to target. * Called after receiving a Transfer Ready data descriptor. + * if LLD is capable of seq offload then send down seq_blen + * size of data in single frame, otherwise send multiple FC + * frames of max FC frame payload supported by target port. */ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp, - size_t offset, size_t len, - struct fc_frame *oldfp, int sg_supp) + size_t offset, size_t seq_blen) { struct scsi_cmnd *sc; struct scatterlist *sg; struct fc_frame *fp = NULL; struct fc_lport *lp = fsp->lp; size_t remaining; - size_t mfs; + size_t t_blen; size_t tlen; size_t sg_bytes; size_t frame_offset; int error; void *data = NULL; void *page_addr; - int using_sg = sg_supp; + int using_sg = lp->sg_supp; u32 f_ctl; - if (unlikely(offset + len > fsp->data_len)) { - /* - * this should never happen - */ - if (fc_fcp_debug) { - FC_DBG("xfer-ready past end. len %zx offset %zx\n", - len, offset); - } + WARN_ON(seq_blen <= 0); + if (unlikely(offset + seq_blen > fsp->data_len)) { + /* this should never happen */ + FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n", + seq_blen, offset); fc_fcp_send_abort(fsp); return 0; } else if (offset != fsp->xfer_len) { - /* - * Out of Order Data Request - no problem, but unexpected. - */ - if (fc_fcp_debug) { - FC_DBG("xfer-ready non-contiguous. 
" - "len %zx offset %zx\n", len, offset); - } + /* Out of Order Data Request - no problem, but unexpected. */ + FC_DEBUG_FCP("xfer-ready non-contiguous. " + "seq_blen %zx offset %zx\n", seq_blen, offset); } - mfs = fsp->max_payload; - WARN_ON(mfs > FC_MAX_PAYLOAD); - WARN_ON(mfs < FC_MIN_MAX_PAYLOAD); - if (mfs > 512) - mfs &= ~(512 - 1); /* round down to block size */ - WARN_ON(mfs < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */ - WARN_ON(len <= 0); + + /* + * if LLD is capable of seq_offload then set transport + * burst length (t_blen) to seq_blen, otherwise set t_blen + * to max FC frame payload previously set in fsp->max_payload. + */ + t_blen = lp->seq_offload ? seq_blen : fsp->max_payload; + WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); + if (t_blen > 512) + t_blen &= ~(512 - 1); /* round down to block size */ + WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */ sc = fsp->cmd; - remaining = len; + remaining = seq_blen; frame_offset = offset; tlen = 0; sp = lp->tt.seq_start_next(sp); @@ -540,7 +543,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp, continue; } if (!fp) { - tlen = min(mfs, remaining); + tlen = min(t_blen, remaining); /* * TODO. Temporary workaround. fc_seq_send() can't @@ -563,6 +566,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp, } fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP); fc_frame_set_offset(fp, frame_offset); + fr_max_payload(fp) = fsp->max_payload; } sg_bytes = min(tlen, sg->length - offset); if (using_sg) { @@ -621,7 +625,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp, return 0; } } - fsp->xfer_len += len; /* premature count? */ + fsp->xfer_len += seq_blen; /* premature count? */ return 0; } @@ -741,8 +745,7 @@ static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg) rc = fc_fcp_send_data(fsp, sp, (size_t) ntohl(dd->ft_data_ro), - (size_t) ntohl(dd->ft_burst_len), fp, - lp->capabilities & TRANS_C_SG); + (size_t) ntohl(dd->ft_burst_len)); if (!rc) lp->tt.seq_set_rec_data(sp, fsp->xfer_len); else if (rc == -ENOMEM) @@ -1066,7 +1069,7 @@ static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp) fc_fcp_recv, fc_fcp_pkt_destroy, fsp, 0, - rp->local_port->fid, + fc_host_port_id(rp->local_port->host), rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ); if (!sp) { @@ -1175,7 +1178,7 @@ static void fc_lun_reset_send(unsigned long data) fc_tm_done, fc_fcp_pkt_destroy, fsp, 0, - rp->local_port->fid, + fc_host_port_id(rp->local_port->host), rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ); @@ -1367,7 +1370,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) rec = fc_frame_payload_get(fp, sizeof(*rec)); memset(rec, 0, sizeof(*rec)); rec->rec_cmd = ELS_REC; - hton24(rec->rec_s_id, lp->fid); + hton24(rec->rec_s_id, fc_host_port_id(lp->host)); rec->rec_ox_id = htons(ox_id); rec->rec_rx_id = htons(rx_id); @@ -1376,7 +1379,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) sp = lp->tt.exch_seq_send(lp, fp, fc_fcp_rec_resp, NULL, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV), - rp->local_port->fid, + fc_host_port_id(rp->local_port->host), rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ); @@ -1425,16 +1428,13 @@ static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) rjt = fc_frame_payload_get(fp, sizeof(*rjt)); switch (rjt->er_reason) { default: - if (fc_fcp_debug) - FC_DBG("device %x unexpected REC reject " - "reason %d expl %d\n", - fsp->rport->port_id, rjt->er_reason, - rjt->er_explan); + FC_DEBUG_FCP("device %x unexpected REC reject " + "reason %d expl %d\n", + 
fsp->rport->port_id, rjt->er_reason, + rjt->er_explan); /* fall through */ - case ELS_RJT_UNSUP: - if (fc_fcp_debug) - FC_DBG("device does not support REC\n"); + FC_DEBUG_FCP("device does not support REC\n"); rp = fsp->rport->dd_data; /* * if we do not spport RECs or got some bogus @@ -1636,7 +1636,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) sp = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV), - rp->local_port->fid, + fc_host_port_id(rp->local_port->host), rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ); if (!sp) { @@ -2199,7 +2199,17 @@ static int __init libfc_init(void) rc = fc_setup_exch_mgr(); if (rc) - kmem_cache_destroy(scsi_pkt_cachep); + goto destroy_pkt_cache; + + rc = fc_setup_rport(); + if (rc) + goto destroy_em; + + return rc; +destroy_em: + fc_destroy_exch_mgr(); +destroy_pkt_cache: + kmem_cache_destroy(scsi_pkt_cachep); return rc; } @@ -2207,6 +2217,7 @@ static void __exit libfc_exit(void) { kmem_cache_destroy(scsi_pkt_cachep); fc_destroy_exch_mgr(); + fc_destroy_rport(); } module_init(libfc_init); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index b1854b9..bfbc7d4 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -18,7 +18,59 @@ */ /* - * Logical interface support. + * General locking notes: + * + * The lport and rport blocks both have mutexes that are used to protect + * the port objects states. The main motivation for this protection is that + * we don't want to be preparing a request/response in one context while + * another thread "resets" the port in question. For example, if the lport + * block is sending a SCR request to the directory server we don't want + * the lport to be reset before we fill out the frame header's port_id. The + * problem is that a reset would cause the lport's port_id to reset to 0. + * If we don't protect the lport we'd spew incorrect frames. + * + * At the time of this writing there are two primary mutexes, one for the + * lport and one for the rport. Since the lport uses the rport and makes + * calls into that block the rport should never make calls that would cause + * the lport's mutex to be locked. In other words, the lport's mutex is + * considered the outer lock and the rport's lock is considered the inner + * lock. The bottom line is that you can hold a lport's mutex and then + * hold the rport's mutex, but not the other way around. + * + * The only complication to this rule is the callbacks from the rport to + * the lport's event_callback function. When rports become READY they make + * a callback to the lport so that it can track them. In the case of the + * directory server that callback might cause the lport to change its + * state, implying that the lport mutex would need to be held. This problem + * was solved by serializing the rport notifications to the lport and the + * callback is made without holding the rport's lock. + * + * lport locking notes: + * + * The critical sections protected by the lport's mutex are quite broad and + * may be improved upon in the future. The lport code and its locking doesn't + * influence the I/O path, so excessive locking doesn't penalize I/O + * performance. + * + * The strategy is to lock whenever processing a request or response. Note + * that every _enter_* function corresponds to a state change. They generally + * change the lports state and then sends a request out on the wire. 
We lock + * before calling any of these functions to protect that state change. This + * means that the entry points into the lport block to manage the locks while + * the state machine can transition between states (i.e. _enter_* functions) + * while always staying protected. + * + * When handling responses we also hold the lport mutex broadly. When the + * lport receives the response frame it locks the mutex and then calls the + * appropriate handler for the particuar response. Generally a response will + * trigger a state change and so the lock must already be held. + * + * Retries also have to consider the locking. The retries occur from a work + * context and the work function will lock the lport and then retry the state + * (i.e. _enter_* function). + * + * The implication to all of this is that each lport can only process one + * state at a time. */ #include @@ -36,6 +88,12 @@ static int fc_lport_debug; +#define FC_DEBUG_LPORT(fmt...) \ + do { \ + if (fc_lport_debug) \ + FC_DBG(fmt); \ + } while (0) + static void fc_lport_error(struct fc_lport *, struct fc_frame *); static void fc_lport_enter_reset(struct fc_lport *); @@ -66,41 +124,71 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) } /** + * fc_lport_lookup_rport - lookup a remote port by port_id + * @lport: Fibre Channel host port instance + * @port_id: remote port port_id to match + */ +struct fc_rport *fc_lport_lookup_rport(const struct fc_lport *lport, + u32 port_id) +{ + struct fc_rport *rport, *found; + struct fc_rport_libfc_priv *rdata; + + found = NULL; + + list_for_each_entry(rdata, &lport->rports, peers) { + rport = PRIV_TO_RPORT(rdata); + if (rport->port_id == port_id) { + found = rport; + get_device(&found->dev); + break; + } + } + return found; +} + + + +/** * fc_lport_rport_event - Event handler for rport events * @lport: The lport which is receiving the event - * @port_id: The FID of the rport which the event has occured on + * @rport: The rport which the event has occured on * @event: The event that occured * * Locking Note: The rport lock should not be held when calling * this function. 
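Worth noting for callers of the new fc_lport_lookup_rport(): a match comes back with a device reference taken via get_device(), and the receive path in this patch releases it with put_device() once the request has been handed off. A small usage sketch with an illustrative caller name; taking the lport mutex around the list walk is an assumption based on the locking notes, not something the patch spells out:

	static void example_find_and_use(struct fc_lport *lport, u32 port_id)
	{
		struct fc_rport *rport;

		mutex_lock(&lport->lp_mutex);	/* rports list assumed protected by the lport mutex */
		rport = fc_lport_lookup_rport(lport, port_id);
		mutex_unlock(&lport->lp_mutex);

		if (!rport)
			return;
		/* ... use rport; the lookup holds a device reference ... */
		put_device(&rport->dev);	/* release the hold taken by the lookup */
	}
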
*/ -static void fc_lport_rport_event(struct fc_lport *lport, u32 port_id, +static void fc_lport_rport_event(struct fc_lport *lport, + struct fc_rport *rport, enum fc_lport_event event) { - struct fc_rport *rport = lport->tt.rport_lookup(lport, port_id); + struct fc_rport_libfc_priv *rdata = rport->dd_data; - if (fc_lport_debug) - FC_DBG("Received a %d event for port (%6x)\n", event, port_id); + FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event, + rport->port_id); - if (port_id == FC_FID_DIR_SERV) { - mutex_lock(&lport->lp_mutex); - switch (event) { - case LPORT_EV_RPORT_CREATED: - if (rport) { - lport->dns_rp = rport; - fc_lport_enter_rpn_id(lport); - } - break; - case LPORT_EV_RPORT_LOGO: - case LPORT_EV_RPORT_FAILED: - lport->dns_rp = NULL; - fc_lport_enter_dns(lport); - break; - case LPORT_EV_RPORT_NONE: - break; + mutex_lock(&lport->lp_mutex); + switch (event) { + case LPORT_EV_RPORT_CREATED: + if (rport->port_id == FC_FID_DIR_SERV) { + lport->dns_rp = rport; + fc_lport_enter_rpn_id(lport); + } else { + list_add_tail(&rdata->peers, &lport->rports); } - mutex_unlock(&lport->lp_mutex); + break; + case LPORT_EV_RPORT_LOGO: + case LPORT_EV_RPORT_FAILED: + case LPORT_EV_RPORT_STOP: + if (rport->port_id == FC_FID_DIR_SERV) + lport->dns_rp = NULL; + else + list_del(&rdata->peers); + break; + case LPORT_EV_RPORT_NONE: + break; } + mutex_unlock(&lport->lp_mutex); } /** @@ -118,18 +206,6 @@ static const char *fc_lport_state(struct fc_lport *lport) } /** - * fc_lport_ptp_clear - Delete the ptp rport - * @lport: The lport whose ptp rport should be removed - */ -static void fc_lport_ptp_clear(struct fc_lport *lport) -{ - if (lport->ptp_rp) { - fc_remote_port_delete(lport->ptp_rp); - lport->ptp_rp = NULL; - } -} - -/** * fc_lport_ptp_setup - Create an rport for point-to-point mode * @lport: The lport to attach the ptp rport to * @fid: The FID of the ptp rport @@ -148,19 +224,25 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, dp.ids.node_name = remote_wwnn; dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; - fc_lport_ptp_clear(lport); + if (lport->ptp_rp) { + lport->tt.rport_stop(lport->ptp_rp); + lport->ptp_rp = NULL; + } - lport->ptp_rp = fc_rport_dummy_create(&dp); + lport->ptp_rp = fc_rport_rogue_create(&dp); lport->tt.rport_login(lport->ptp_rp); fc_lport_enter_ready(lport); } -/** - * fc_get_host_port_state - supports fc_function_template - * @shost: The host whose port state should be returned - */ +void fc_get_host_port_type(struct Scsi_Host *shost) +{ + /* TODO - currently just NPORT */ + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; +} +EXPORT_SYMBOL(fc_get_host_port_type); + void fc_get_host_port_state(struct Scsi_Host *shost) { struct fc_lport *lp = shost_priv(shost); @@ -277,8 +359,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, struct fc_lport *lport) { - if (fc_lport_debug) - FC_DBG("Received RLIR request while in state %s\n", + FC_DEBUG_LPORT("Received RLIR request while in state %s\n", fc_lport_state(lport)); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); @@ -303,8 +384,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, void *dp; u32 f_ctl; - if (fc_lport_debug) - FC_DBG("Received RLIR request while in state %s\n", + FC_DEBUG_LPORT("Received RLIR request while in state %s\n", fc_lport_state(lport)); len = fr_len(in_fp) - sizeof(struct fc_frame_header); @@ -350,8 +430,7 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct 
fc_frame *in_fp, size_t len; u32 f_ctl; - if (fc_lport_debug) - FC_DBG("Received RNID request while in state %s\n", + FC_DEBUG_LPORT("Received RNID request while in state %s\n", fc_lport_state(lport)); req = fc_frame_payload_get(in_fp, sizeof(*req)); @@ -520,12 +599,10 @@ EXPORT_SYMBOL(fc_fabric_logoff); **/ int fc_lport_destroy(struct fc_lport *lport) { - mutex_lock(&lport->lp_mutex); cancel_delayed_work_sync(&lport->disc_work); lport->tt.scsi_abort_io(lport); lport->tt.frame_send = fc_frame_drop; lport->tt.exch_mgr_reset(lport->emp, 0, 0); - mutex_unlock(&lport->lp_mutex); return 0; } EXPORT_SYMBOL(fc_lport_destroy); @@ -569,9 +646,8 @@ EXPORT_SYMBOL(fc_set_mfs); */ static void fc_lport_enter_ready(struct fc_lport *lport) { - if (fc_lport_debug) - FC_DBG("Port (%6x) entered Ready from state %s\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_READY); @@ -605,8 +681,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, u32 local_fid; u32 f_ctl; - if (fc_lport_debug) - FC_DBG("Received FLOGI request while in state %s\n", + FC_DEBUG_LPORT("Received FLOGI request while in state %s\n", fc_lport_state(lport)); fh = fc_frame_header_get(rx_fp); @@ -636,7 +711,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, remote_fid = FC_LOCAL_PTP_FID_HI; } - lport->fid = local_fid; + fc_host_port_id(lport->host) = local_fid; fp = fc_frame_alloc(lport, sizeof(*flp)); if (fp) { @@ -733,7 +808,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, s_id = ntoh24(fh->fh_s_id); d_id = ntoh24(fh->fh_d_id); - rport = lport->tt.rport_lookup(lport, s_id); + rport = fc_lport_lookup_rport(lport, s_id); if (rport) { lport->tt.rport_recv_req(sp, fp, rport); put_device(&rport->dev); /* hold from lookup */ @@ -752,6 +827,12 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, fc_frame_free(fp); } mutex_unlock(&lport->lp_mutex); + + /* + * The common exch_done for all request may not be good + * if any request requires longer hold on exhange. XXX + */ + lport->tt.exch_done(sp); } /** @@ -771,6 +852,24 @@ int fc_lport_reset(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_reset); /** + * fc_lport_stop_rports - delete all the remote ports associated with the lport + * @lport: libfc local port instance + * + * Locking Note: This function expects that the lport mutex is locked before + * calling it. 
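One theme running through these hunks is the removal of the private lport->fid field: every read or assignment of the local FID now goes through the FC transport class attribute instead. A tiny sketch of the pattern, with illustrative function names:

	static u32 example_read_fid(struct fc_lport *lport)
	{
		return fc_host_port_id(lport->host);	/* previously lport->fid */
	}

	static void example_assign_fid(struct fc_lport *lport, u32 did)
	{
		fc_host_port_id(lport->host) = did;	/* as done in the FLOGI paths above */
	}
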
+ */ +void fc_lport_stop_rports(struct fc_lport *lport) +{ + struct fc_rport *rport; + struct fc_rport_libfc_priv *rdata; + + list_for_each_entry(rdata, &lport->rports, peers) { + rport = PRIV_TO_RPORT(rdata); + lport->tt.rport_stop(rport); + } +} + +/** * fc_rport_enter_reset - Reset the local port * @lport: Fibre Channel local port to be reset * @@ -779,24 +878,26 @@ EXPORT_SYMBOL(fc_lport_reset); */ static void fc_lport_enter_reset(struct fc_lport *lport) { - if (fc_lport_debug) - FC_DBG("Port (%6x) entered RESET state from %s state\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_RESET); if (lport->dns_rp) { - fc_remote_port_delete(lport->dns_rp); + lport->tt.rport_stop(lport->dns_rp); lport->dns_rp = NULL; } - fc_lport_ptp_clear(lport); - fc_block_rports(lport); + if (lport->ptp_rp) { + lport->tt.rport_stop(lport->ptp_rp); + lport->ptp_rp = NULL; + } + + fc_lport_stop_rports(lport); - lport->tt.rport_reset_list(lport); lport->tt.exch_mgr_reset(lport->emp, 0, 0); fc_host_fabric_name(lport->host) = 0; - lport->fid = 0; + fc_host_port_id(lport->host) = 0; if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) fc_lport_enter_flogi(lport); @@ -814,33 +915,38 @@ static void fc_lport_enter_reset(struct fc_lport *lport) static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) { unsigned long delay = 0; - if (fc_lport_debug) - FC_DBG("Error %ld in state %s, retries %d\n", + FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n", PTR_ERR(fp), fc_lport_state(lport), lport->retry_count); - if (lport->retry_count < lport->max_retry_count) { - lport->retry_count++; - if (!fp) - delay = msecs_to_jiffies(500); - else - delay = jiffies + - msecs_to_jiffies(lport->e_d_tov); - - schedule_delayed_work(&lport->retry_work, delay); - } else { - switch (lport->state) { - case LPORT_ST_NONE: - case LPORT_ST_READY: - case LPORT_ST_RESET: - case LPORT_ST_RPN_ID: - case LPORT_ST_RFT_ID: - case LPORT_ST_SCR: - case LPORT_ST_DNS: - case LPORT_ST_FLOGI: - case LPORT_ST_LOGO: - fc_lport_enter_reset(lport); - break; + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { + /* + * Memory allocation failure, or the exchange timed out. 
+ * Retry after delay + */ + if (lport->retry_count < lport->max_retry_count) { + lport->retry_count++; + if (!fp) + delay = msecs_to_jiffies(500); + else + delay = jiffies + + msecs_to_jiffies(lport->e_d_tov); + + schedule_delayed_work(&lport->retry_work, delay); + } else { + switch (lport->state) { + case LPORT_ST_NONE: + case LPORT_ST_READY: + case LPORT_ST_RESET: + case LPORT_ST_RPN_ID: + case LPORT_ST_RFT_ID: + case LPORT_ST_SCR: + case LPORT_ST_DNS: + case LPORT_ST_FLOGI: + case LPORT_ST_LOGO: + fc_lport_enter_reset(lport); + break; + } } } } @@ -865,8 +971,7 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&lport->lp_mutex); - if (fc_lport_debug) - FC_DBG("Received a RFT_ID response\n"); + FC_DEBUG_LPORT("Received a RFT_ID response\n"); if (lport->state != LPORT_ST_RFT_ID) { FC_DBG("Received a RFT_ID response, but in state %s\n", @@ -876,7 +981,7 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_lport_error(lport, fp); - goto out; + goto err; } fh = fc_frame_header_get(fp); @@ -890,8 +995,9 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, else fc_lport_error(lport, fp); out: - mutex_unlock(&lport->lp_mutex); fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); } /** @@ -914,8 +1020,7 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&lport->lp_mutex); - if (fc_lport_debug) - FC_DBG("Received a RPN_ID response\n"); + FC_DEBUG_LPORT("Received a RPN_ID response\n"); if (lport->state != LPORT_ST_RPN_ID) { FC_DBG("Received a RPN_ID response, but in state %s\n", @@ -925,7 +1030,7 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_lport_error(lport, fp); - goto out; + goto err; } fh = fc_frame_header_get(fp); @@ -939,8 +1044,9 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, fc_lport_error(lport, fp); out: - mutex_unlock(&lport->lp_mutex); fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); } /** @@ -961,8 +1067,7 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&lport->lp_mutex); - if (fc_lport_debug) - FC_DBG("Received a SCR response\n"); + FC_DEBUG_LPORT("Received a SCR response\n"); if (lport->state != LPORT_ST_SCR) { FC_DBG("Received a SCR response, but in state %s\n", @@ -972,7 +1077,7 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_lport_error(lport, fp); - goto out; + goto err; } op = fc_frame_payload_op(fp); @@ -982,8 +1087,9 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, fc_lport_error(lport, fp); out: - mutex_unlock(&lport->lp_mutex); fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); } /** @@ -998,9 +1104,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport) struct fc_frame *fp; struct fc_els_scr *scr; - if (fc_lport_debug) - FC_DBG("Port (%6x) entered SCR state from %s state\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_SCR); @@ -1020,7 +1125,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport) if (!lport->tt.exch_seq_send(lport, fp, fc_lport_scr_resp, NULL, lport, lport->e_d_tov, - lport->fid, FC_FID_FCTRL, + fc_host_port_id(lport->host), + FC_FID_FCTRL, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_lport_error(lport, fp); } @@ -1043,9 +1149,8 @@ static void 
fc_lport_enter_rft_id(struct fc_lport *lport) struct fc_ns_fts *lps; int i; - if (fc_lport_debug) - FC_DBG("Port (%6x) entered RFT_ID state from %s state\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_RFT_ID); @@ -1069,14 +1174,14 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) FC_NS_RFT_ID, sizeof(*req) - sizeof(struct fc_ct_hdr)); - hton24(req->fid.fp_fid, lport->fid); + hton24(req->fid.fp_fid, fc_host_port_id(lport->host)); req->fts = *lps; fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT); if (!lport->tt.exch_seq_send(lport, fp, fc_lport_rft_id_resp, NULL, lport, lport->e_d_tov, - lport->fid, + fc_host_port_id(lport->host), FC_FID_DIR_SERV, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) @@ -1099,9 +1204,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) struct fc_ns_rn_id rn; } *req; - if (fc_lport_debug) - FC_DBG("Port (%6x) entered RPN_ID state from %s state\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_RPN_ID); @@ -1114,14 +1218,14 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) req = fc_frame_payload_get(fp, sizeof(*req)); memset(req, 0, sizeof(*req)); fc_fill_dns_hdr(lport, &req->ct, FC_NS_RPN_ID, sizeof(req->rn)); - hton24(req->rn.fr_fid.fp_fid, lport->fid); + hton24(req->rn.fr_fid.fp_fid, fc_host_port_id(lport->host)); put_unaligned_be64(lport->wwpn, &req->rn.fr_wwn); fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT); if (!lport->tt.exch_seq_send(lport, fp, fc_lport_rpn_id_resp, NULL, lport, lport->e_d_tov, - lport->fid, + fc_host_port_id(lport->host), FC_FID_DIR_SERV, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_lport_error(lport, fp); @@ -1147,20 +1251,18 @@ static void fc_lport_enter_dns(struct fc_lport *lport) dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; dp.lp = lport; - if (fc_lport_debug) - FC_DBG("Port (%6x) entered DNS state from %s state\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_DNS); if (!lport->dns_rp) { - /* Set up a dummy rport to directory server */ - rport = fc_rport_dummy_create(&dp); + /* Set up a rogue rport to directory server */ + rport = fc_rport_rogue_create(&dp); if (!rport) goto err; lport->dns_rp = rport; - FC_DBG("created an rport for the NS\n"); } rport = lport->dns_rp; @@ -1232,8 +1334,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&lport->lp_mutex); - if (fc_lport_debug) - FC_DBG("Received a LOGO response\n"); + FC_DEBUG_LPORT("Received a LOGO response\n"); if (lport->state != LPORT_ST_LOGO) { FC_DBG("Received a LOGO response, but in state %s\n", @@ -1243,7 +1344,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_lport_error(lport, fp); - goto out; + goto err; } op = fc_frame_payload_op(fp); @@ -1253,8 +1354,9 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, fc_lport_error(lport, fp); out: - mutex_unlock(&lport->lp_mutex); fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); } /** @@ -1269,15 +1371,14 @@ static void fc_lport_enter_logo(struct fc_lport *lport) struct fc_frame *fp; struct fc_els_logo *logo; - if (fc_lport_debug) - FC_DBG("Port (%6x) entered LOGO 
state from %s state\n", - lport->fid, fc_lport_state(lport)); + FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n", + fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_LOGO); /* DNS session should be closed so we can release it here */ if (lport->dns_rp) { - fc_remote_port_delete(lport->dns_rp); + lport->tt.rport_logout(lport->dns_rp); lport->dns_rp = NULL; } @@ -1290,7 +1391,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) logo = fc_frame_payload_get(fp, sizeof(*logo)); memset(logo, 0, sizeof(*logo)); logo->fl_cmd = ELS_LOGO; - hton24(logo->fl_n_port_id, lport->fid); + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host)); logo->fl_n_port_wwn = htonll(lport->wwpn); fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); fc_frame_set_offset(fp, 0); @@ -1298,7 +1399,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) if (!lport->tt.exch_seq_send(lport, fp, fc_lport_logo_resp, NULL, lport, lport->e_d_tov, - lport->fid, FC_FID_FLOGI, + fc_host_port_id(lport->host), FC_FID_FLOGI, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_lport_error(lport, fp); } @@ -1327,8 +1428,7 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&lport->lp_mutex); - if (fc_lport_debug) - FC_DBG("Received a FLOGI response\n"); + FC_DEBUG_LPORT("Received a FLOGI response\n"); if (lport->state != LPORT_ST_FLOGI) { FC_DBG("Received a FLOGI response, but in state %s\n", @@ -1338,16 +1438,16 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_lport_error(lport, fp); - goto out; + goto err; } fh = fc_frame_header_get(fp); did = ntoh24(fh->fh_d_id); if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { - if (fc_lport_debug) - FC_DBG("Assigned fid %x\n", did); - lport->fid = did; + FC_DEBUG_LPORT("Assigned fid %x\n", did); + fc_host_port_id(lport->host) = did; + flp = fc_frame_payload_get(fp, sizeof(*flp)); if (flp) { mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1391,8 +1491,9 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, } out: - mutex_unlock(&lport->lp_mutex); fc_frame_free(fp); +err: + mutex_unlock(&lport->lp_mutex); } /** @@ -1407,8 +1508,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) struct fc_frame *fp; struct fc_els_flogi *flp; - if (fc_lport_debug) - FC_DBG("Processing FLOGI state\n"); + FC_DEBUG_LPORT("Processing FLOGI state\n"); fc_lport_state_enter(lport, LPORT_ST_FLOGI); @@ -1436,6 +1536,7 @@ int fc_lport_config(struct fc_lport *lport) { INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); + INIT_LIST_HEAD(&lport->rports); fc_lport_state_enter(lport, LPORT_ST_NONE); @@ -1456,6 +1557,9 @@ int fc_lport_init(struct fc_lport *lport) if (!lport->tt.lport_reset) lport->tt.lport_reset = fc_lport_reset; + if (!lport->tt.rport_lookup) + lport->tt.rport_lookup = fc_lport_lookup_rport; + if (!lport->tt.event_callback) lport->tt.event_callback = fc_lport_rport_event; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 107b304..651a3ed 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -18,10 +18,27 @@ */ /* - * Remote Port support. + * This file contains all processing regarding fc_rports. It contains the + * rport state machine and does all rport interaction with the transport class. + * There should be no other places in libfc that interact directly with the + * transport class in regards to adding and deleting rports. 
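As the fc_lport_config()/fc_lport_init() hunks above show, the library now also seeds rport_lookup and the rport-aware event_callback when the LLD leaves them NULL, alongside initializing the new per-lport rports list. A rough sketch of how an LLD is expected to use that pattern, with an illustrative frame_send handler; the only libfc calls assumed are the two shown in this patch:

	static int example_lld_setup(struct fc_lport *lport,
				     int (*my_frame_send)(struct fc_lport *,
							  struct fc_frame *))
	{
		fc_lport_config(lport);			/* mutex, retry work, rports list */
		lport->tt.frame_send = my_frame_send;	/* LLD-specific transmit hook */
		return fc_lport_init(lport);		/* fill in the remaining tt defaults */
	}
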
* - * A remote port structure contains information about an N port to which we - * will create sessions. + * fc_rport's represent N_Port's within the fabric. + * + * rport locking notes: + * + * The rport should never hold the rport mutex and then lock the lport + * mutex. The rport's mutex is considered lesser than the lport's mutex, so + * the lport mutex can be held before locking the rport mutex, but not the + * other way around. See the comment block at the top of fc_lport.c for more + * details. + * + * The locking strategy is similar to the lport's strategy. The lock protects + * the rport's states and is held and released by the entry points to the rport + * block. All _enter_* functions correspond to rport states and expect the rport + * mutex to be locked before calling them. This means that rports only handle one + * request or response at a time, since they're not critical for the I/O path + * this potential over-use of the mutex is acceptable. */ #include @@ -34,7 +51,15 @@ #include -static int fc_rp_debug; +static int fc_rport_debug; + +#define FC_DEBUG_RPORT(fmt...) \ + do { \ + if (fc_rport_debug) \ + FC_DBG(fmt); \ + } while (0) + +static struct workqueue_struct *rport_event_queue; static void fc_rport_enter_plogi(struct fc_rport *); static void fc_rport_enter_prli(struct fc_rport *); @@ -52,6 +77,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *, struct fc_seq *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); static void fc_rport_error(struct fc_rport *, struct fc_frame *); +static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { [RPORT_ST_NONE] = "None", @@ -63,7 +89,7 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_LOGO] = "LOGO", }; -struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *dp) +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) { struct fc_rport *rport; struct fc_rport_libfc_priv *rdata; @@ -91,11 +117,17 @@ struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *dp) rdata->e_d_tov = dp->lp->e_d_tov; rdata->r_a_tov = dp->lp->r_a_tov; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); + INIT_WORK(&rdata->event_work, fc_rport_work); + /* + * For good measure, but not necessary as we should only + * add REAL rport to the lport list. + */ + INIT_LIST_HEAD(&rdata->peers); return rport; } -void fc_rport_dummy_destroy(struct fc_rport *rport) +void fc_rport_rogue_destroy(struct fc_rport *rport) { kfree(rport); } @@ -116,30 +148,6 @@ static const char *fc_rport_state(struct fc_rport *rport) } /** - * fc_rport_lookup - lookup a remote port by port_id - * @lp: Fibre Channel host port instance - * @fid: remote port port_id to match - */ -struct fc_rport *fc_rport_lookup(const struct fc_lport *lp, u32 fid) -{ - struct Scsi_Host *shost = lp->host; - struct fc_rport *rport, *found; - unsigned long flags; - - found = NULL; - spin_lock_irqsave(shost->host_lock, flags); - list_for_each_entry(rport, &fc_host_rports(shost), peers) - if (rport->port_id == fid && - rport->port_state == FC_PORTSTATE_ONLINE) { - found = rport; - get_device(&found->dev); - break; - } - spin_unlock_irqrestore(shost->host_lock, flags); - return found; -} - -/** * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds. 
* @rport: Pointer to Fibre Channel remote port structure * @timeout: timeout in seconds @@ -229,15 +237,20 @@ static void fc_rport_state_enter(struct fc_rport *rport, rdata->rp_state = new; } -static void fc_rport_unlock(struct fc_rport *rport) +static void fc_rport_work(struct work_struct *work) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; - enum fc_lport_event event = rdata->event; + struct fc_rport_libfc_priv *rdata = + container_of(work, struct fc_rport_libfc_priv, event_work); + enum fc_lport_event event; + enum fc_rport_trans_state trans_state; struct fc_lport *lport = rdata->local_port; - u32 fid = rport->port_id; - void (*event_callback)(struct fc_lport *, u32, - enum fc_lport_event) = - rdata->event_callback; + void (*event_callback)(struct fc_lport *, struct fc_rport *, + enum fc_lport_event); + struct fc_rport *rport = PRIV_TO_RPORT(rdata); + + mutex_lock(&rdata->rp_mutex); + event = rdata->event; + event_callback = rdata->event_callback; if (event == LPORT_EV_RPORT_CREATED) { struct fc_rport *new_rport; @@ -249,10 +262,12 @@ static void fc_rport_unlock(struct fc_rport *rport) ids.port_name = rport->port_name; ids.node_name = rport->node_name; + mutex_unlock(&rdata->rp_mutex); + new_rport = fc_remote_port_add(lport->host, 0, &ids); if (new_rport) { /* - * Switch from the dummy rport to the rport + * Switch from the rogue rport to the rport * returned by the FC class. */ new_rport->maxframe_size = rport->maxframe_size; @@ -267,36 +282,32 @@ static void fc_rport_unlock(struct fc_rport *rport) mutex_init(&new_rdata->rp_mutex); INIT_DELAYED_WORK(&new_rdata->retry_work, fc_rport_timeout); + INIT_LIST_HEAD(&new_rdata->peers); + INIT_WORK(&new_rdata->event_work, fc_rport_work); fc_rport_state_enter(new_rport, RPORT_ST_READY); - fc_remote_port_rolechg(new_rport, rdata->roles); } else { FC_DBG("Failed to create the rport for port " "(%6x).\n", ids.port_id); event = LPORT_EV_RPORT_FAILED; } - - mutex_unlock(&rdata->rp_mutex); - fc_rport_dummy_destroy(rport); + fc_rport_rogue_destroy(rport); rport = new_rport; rdata = new_rport->dd_data; + event_callback(lport, rport, event); } else if ((event == LPORT_EV_RPORT_FAILED) || - (event == LPORT_EV_RPORT_LOGO)) { - if (rdata->trans_state == FC_PORTSTATE_ROGUE) { - mutex_unlock(&rdata->rp_mutex); - fc_rport_dummy_destroy(rport); - } else { - mutex_unlock(&rdata->rp_mutex); + (event == LPORT_EV_RPORT_LOGO) || + (event == LPORT_EV_RPORT_STOP)) { + + trans_state = rdata->trans_state; + mutex_unlock(&rdata->rp_mutex); + event_callback(lport, rport, event); + if (trans_state == FC_PORTSTATE_ROGUE) + fc_rport_rogue_destroy(rport); + else fc_remote_port_delete(rport); - } - } else { + } else mutex_unlock(&rdata->rp_mutex); - } - - if (event != LPORT_EV_RPORT_NONE && event_callback) { - event_callback(lport, fid, event); - rdata->event = LPORT_EV_RPORT_NONE; - } } /** @@ -313,12 +324,11 @@ int fc_rport_login(struct fc_rport *rport) mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Login to port (%6x)\n", rport->port_id); + FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id); fc_rport_enter_plogi(rport); - fc_rport_unlock(rport); + mutex_unlock(&rdata->rp_mutex); return 0; } @@ -337,57 +347,37 @@ int fc_rport_logout(struct fc_rport *rport) mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Logout of port (%6x)\n", rport->port_id); + FC_DEBUG_RPORT("Logout of port (%6x)\n", rport->port_id); fc_rport_enter_logo(rport); - fc_rport_unlock(rport); + + mutex_unlock(&rdata->rp_mutex); return 0; } /** - * fc_rport_reset - Reset the 
remote port - * @rport: Fibre Channel remote port - * - * XXX - This functionality is currently broken + * fc_rport_remove - Remove an rport + * @rport: Fibre Channel remote port to be removed * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ -void fc_rport_reset(struct fc_rport *rport) +int fc_rport_stop(struct fc_rport *rport) { struct fc_rport_libfc_priv *rdata = rport->dd_data; mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Reset port (%6x)\n", rport->port_id); + FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); - fc_rport_enter_plogi(rport); - - fc_rport_unlock(rport); -} + rdata->event = LPORT_EV_RPORT_STOP; + queue_work(rport_event_queue, &rdata->event_work); -/** - * fc_rport_reset_list - Reset all sessions for a local port session list. - * @lport: The lport whose rports should be reset - * - * Locking Note: TBD - */ -void fc_rport_reset_list(struct fc_lport *lport) -{ - struct Scsi_Host *shost = lport->host; - struct fc_rport *rport; - struct fc_rport *next; - unsigned long flags; + mutex_unlock(&rdata->rp_mutex); - spin_lock_irqsave(shost->host_lock, flags); - list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) { - lport->tt.rport_reset(rport); - } - spin_unlock_irqrestore(shost->host_lock, flags); + return 0; } /** @@ -403,10 +393,10 @@ static void fc_rport_enter_ready(struct fc_rport *rport) fc_rport_state_enter(rport, RPORT_ST_READY); - if (fc_rp_debug) - FC_DBG("Port (%6x) is Ready\n", rport->port_id); + FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); rdata->event = LPORT_EV_RPORT_CREATED; + queue_work(rport_event_queue, &rdata->event_work); } /** @@ -447,7 +437,7 @@ static void fc_rport_timeout(struct work_struct *work) } put_device(&rport->dev); - fc_rport_unlock(rport); + mutex_unlock(&rdata->rp_mutex); } /** @@ -467,37 +457,37 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) struct fc_rport_libfc_priv *rdata = rport->dd_data; unsigned long delay = 0; - if (fc_rp_debug) - FC_DBG("Error %ld in state %s, retries %d\n", + FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", PTR_ERR(fp), fc_rport_state(rport), rdata->retries); - if (rdata->retries < rdata->local_port->max_retry_count) { - rdata->retries++; - if (!fp) - delay = msecs_to_jiffies(500); - get_device(&rport->dev); - schedule_delayed_work(&rdata->retry_work, delay); - } else { - switch (rdata->rp_state) { - case RPORT_ST_PLOGI: - case RPORT_ST_PRLI: - case RPORT_ST_LOGO: - if (fc_rp_debug) - FC_DBG("Remote port (%6x) closed.\n", - rport->port_id); - - fc_remote_port_delete(rport); - - rdata->event = LPORT_EV_RPORT_FAILED; - break; - case RPORT_ST_RTV: - fc_rport_enter_ready(rport); - break; - case RPORT_ST_NONE: - case RPORT_ST_READY: - case RPORT_ST_INIT: - BUG(); - break; + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { + /* + * Memory allocation failure, or the exchange timed out. 
+ * Retry after delay + */ + if (rdata->retries < rdata->local_port->max_retry_count) { + rdata->retries++; + if (!fp) + delay = msecs_to_jiffies(500); + get_device(&rport->dev); + schedule_delayed_work(&rdata->retry_work, delay); + } else { + switch (rdata->rp_state) { + case RPORT_ST_PLOGI: + case RPORT_ST_PRLI: + case RPORT_ST_LOGO: + rdata->event = LPORT_EV_RPORT_FAILED; + queue_work(rport_event_queue, &rdata->event_work); + break; + case RPORT_ST_RTV: + fc_rport_enter_ready(rport); + break; + case RPORT_ST_NONE: + case RPORT_ST_READY: + case RPORT_ST_INIT: + BUG(); + break; + } } } } @@ -526,8 +516,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Received a PLOGI response\n"); + FC_DEBUG_RPORT("Received a PLOGI response\n"); if (rdata->rp_state != RPORT_ST_PLOGI) { FC_DBG("Received a PLOGI response, but in state %s\n", @@ -537,12 +526,15 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_rport_error(rport, fp); - goto out; + goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC && (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { + rport->port_name = get_unaligned_be64(&plp->fl_wwpn); + rport->node_name = get_unaligned_be64(&plp->fl_wwnn); + tov = ntohl(plp->fl_csp.sp_e_d_tov); if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) tov /= 1000; @@ -568,8 +560,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_rport_error(rport, fp); out: - fc_rport_unlock(rport); fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); } /** @@ -586,8 +579,7 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) struct fc_frame *fp; struct fc_els_flogi *plogi; - if (fc_rp_debug) - FC_DBG("Port (%6x) entered PLOGI state from %s state\n", + FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", rport->port_id, fc_rport_state(rport)); fc_rport_state_enter(rport, RPORT_ST_PLOGI); @@ -607,7 +599,7 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) if (!lport->tt.exch_seq_send(lport, fp, fc_rport_plogi_resp, NULL, rport, lport->e_d_tov, - rdata->local_port->fid, + fc_host_port_id(rdata->local_port->host), rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_rport_error(rport, fp); @@ -638,8 +630,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Received a PRLI response\n"); + FC_DEBUG_RPORT("Received a PRLI response\n"); if (rdata->rp_state != RPORT_ST_PRLI) { FC_DBG("Received a PRLI response, but in state %s\n", @@ -649,7 +640,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_rport_error(rport, fp); - goto out; + goto err; } op = fc_frame_payload_op(fp); @@ -667,18 +658,19 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, if (fcp_parm & FCP_SPPF_TARG_FCN) roles |= FC_RPORT_ROLE_FCP_TARGET; - rdata->roles = roles; + rport->roles = roles; fc_rport_enter_rtv(rport); } else { FC_DBG("Bad ELS response\n"); rdata->event = LPORT_EV_RPORT_FAILED; - fc_remote_port_delete(rport); + queue_work(rport_event_queue, &rdata->event_work); } out: - fc_rport_unlock(rport); fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); } /** @@ -700,8 +692,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Received a LOGO response\n"); + FC_DEBUG_RPORT("Received a LOGO response\n"); if 
(rdata->rp_state != RPORT_ST_LOGO) { FC_DBG("Received a LOGO response, but in state %s\n", @@ -711,22 +702,22 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_rport_error(rport, fp); - goto out; + goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) { fc_rport_enter_rtv(rport); - } else { FC_DBG("Bad ELS response\n"); rdata->event = LPORT_EV_RPORT_LOGO; - fc_remote_port_delete(rport); + queue_work(rport_event_queue, &rdata->event_work); } out: - fc_rport_unlock(rport); fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); } /** @@ -746,8 +737,7 @@ static void fc_rport_enter_prli(struct fc_rport *rport) } *pp; struct fc_frame *fp; - if (fc_rp_debug) - FC_DBG("Port (%6x) entered PRLI state from %s state\n", + FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n", rport->port_id, fc_rport_state(rport)); fc_rport_state_enter(rport, RPORT_ST_PRLI); @@ -771,7 +761,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport) if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp, NULL, rport, lport->e_d_tov, - lport->fid, rport->port_id, + fc_host_port_id(lport->host), + rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_rport_error(rport, fp); } @@ -797,8 +788,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - if (fc_rp_debug) - FC_DBG("Received a RTV response\n"); + FC_DEBUG_RPORT("Received a RTV response\n"); if (rdata->rp_state != RPORT_ST_RTV) { FC_DBG("Received a RTV response, but in state %s\n", @@ -808,7 +798,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, if (IS_ERR(fp)) { fc_rport_error(rport, fp); - goto out; + goto err; } op = fc_frame_payload_op(fp); @@ -836,8 +826,9 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, fc_rport_enter_ready(rport); out: - fc_rport_unlock(rport); fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); } /** @@ -854,8 +845,7 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; - if (fc_rp_debug) - FC_DBG("Port (%6x) entered RTV state from %s state\n", + FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n", rport->port_id, fc_rport_state(rport)); fc_rport_state_enter(rport, RPORT_ST_RTV); @@ -874,7 +864,8 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) if (!lport->tt.exch_seq_send(lport, fp, fc_rport_rtv_resp, NULL, rport, lport->e_d_tov, - lport->fid, rport->port_id, + fc_host_port_id(lport->host), + rport->port_id, FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_rport_error(rport, fp); } @@ -893,8 +884,7 @@ static void fc_rport_enter_logo(struct fc_rport *rport) struct fc_frame *fp; struct fc_els_logo *logo; - if (fc_rp_debug) - FC_DBG("Port (%6x) entered LOGO state from %s state\n", + FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", rport->port_id, fc_rport_state(rport)); fc_rport_state_enter(rport, RPORT_ST_LOGO); @@ -908,14 +898,15 @@ static void fc_rport_enter_logo(struct fc_rport *rport) logo = fc_frame_payload_get(fp, sizeof(*logo)); memset(logo, 0, sizeof(*logo)); logo->fl_cmd = ELS_LOGO; - hton24(logo->fl_n_port_id, lport->fid); + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host)); logo->fl_n_port_wwn = htonll(lport->wwpn); fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); if (!lport->tt.exch_seq_send(lport, fp, fc_rport_logo_resp, NULL, rport, lport->e_d_tov, - lport->fid, rport->port_id, + fc_host_port_id(lport->host), + rport->port_id, 
FC_FC_SEQ_INIT | FC_FC_END_SEQ)) fc_rport_error(rport, fp); } @@ -979,7 +970,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, } } - fc_rport_unlock(rport); + mutex_unlock(&rdata->rp_mutex); fc_frame_free(fp); } @@ -1011,8 +1002,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, fh = fc_frame_header_get(fp); - if (fc_rp_debug) - FC_DBG("Received PLOGI request from port (%6x) " + FC_DEBUG_RPORT("Received PLOGI request from port (%6x) " "while in state %s\n", ntoh24(fh->fh_s_id), fc_rport_state(rport)); @@ -1041,29 +1031,25 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, */ switch (rdata->rp_state) { case RPORT_ST_INIT: - if (fc_rp_debug) - FC_DBG("incoming PLOGI from %6x wwpn %llx state INIT " + FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " "- reject\n", sid, wwpn); reject = ELS_RJT_UNSUP; break; case RPORT_ST_PLOGI: - if (fc_rp_debug) - FC_DBG("incoming PLOGI from %x in PLOGI state %d\n", + FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n", sid, rdata->rp_state); if (wwpn < lport->wwpn) reject = ELS_RJT_INPROG; break; case RPORT_ST_PRLI: case RPORT_ST_READY: - if (fc_rp_debug) - FC_DBG("incoming PLOGI from %x in logged-in state %d " + FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d " "- ignored for now\n", sid, rdata->rp_state); /* XXX TBD - should reset */ break; case RPORT_ST_NONE: default: - if (fc_rp_debug) - FC_DBG("incoming PLOGI from %x in unexpected " + FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected " "state %d\n", sid, rdata->rp_state); break; } @@ -1145,8 +1131,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, fh = fc_frame_header_get(rx_fp); - if (fc_rp_debug) - FC_DBG("Received PRLI request from port (%6x) " + FC_DEBUG_RPORT("Received PRLI request from port (%6x) " "while in state %s\n", ntoh24(fh->fh_s_id), fc_rport_state(rport)); @@ -1220,7 +1205,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, roles |= FC_RPORT_ROLE_FCP_INITIATOR; if (fcp_parm & FCP_SPPF_TARG_FCN) roles |= FC_RPORT_ROLE_FCP_TARGET; - rdata->roles = roles; + rport->roles = roles; spp->spp_params = htonl(lport->service_params); @@ -1278,8 +1263,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, fh = fc_frame_header_get(fp); - if (fc_rp_debug) - FC_DBG("Received PRLO request from port (%6x) " + FC_DEBUG_RPORT("Received PRLO request from port (%6x) " "while in state %s\n", ntoh24(fh->fh_s_id), fc_rport_state(rport)); @@ -1308,12 +1292,12 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, fh = fc_frame_header_get(fp); - if (fc_rp_debug) - FC_DBG("Received LOGO request from port (%6x) " + FC_DEBUG_RPORT("Received LOGO request from port (%6x) " "while in state %s\n", ntoh24(fh->fh_s_id), fc_rport_state(rport)); rdata->event = LPORT_EV_RPORT_LOGO; + queue_work(rport_event_queue, &rdata->event_work); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); fc_frame_free(fp); @@ -1327,63 +1311,37 @@ int fc_rport_init(struct fc_lport *lport) if (!lport->tt.rport_logout) lport->tt.rport_logout = fc_rport_logout; + if (!lport->tt.rport_stop) + lport->tt.rport_stop = fc_rport_stop; + if (!lport->tt.rport_recv_req) lport->tt.rport_recv_req = fc_rport_recv_req; - if (!lport->tt.rport_lookup) - lport->tt.rport_lookup = fc_rport_lookup; - - if (!lport->tt.rport_reset) - lport->tt.rport_reset = fc_rport_reset; - - if (!lport->tt.rport_reset_list) - lport->tt.rport_reset_list = fc_rport_reset_list; - return 0; } EXPORT_SYMBOL(fc_rport_init); -/** - * 
fc_block_rports - delete all the remote ports, on reset or link down - * @lp: libfc local port instance - * - * This routine temporarily removes any online remote ports from the fc_host - * rport list, then drops the host lock in order to call fc_remote_port_delete() - * on each rport in turn, and finally splices the list back onto the fc_host. - */ -void fc_block_rports(struct fc_lport *lp) +int fc_setup_rport() { - struct Scsi_Host *shost = lp->host; - struct fc_rport *rport, *next; - unsigned long flags; - LIST_HEAD(rports); - - spin_lock_irqsave(shost->host_lock, flags); - list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) { - /* protect the name service remote port */ - if (rport->port_id == FC_FID_DIR_SERV) - continue; - if (rport->port_state != FC_PORTSTATE_ONLINE) - continue; - list_move_tail(&rport->peers, &rports); - } - spin_unlock_irqrestore(shost->host_lock, flags); - - list_for_each_entry(rport, &rports, peers) { - fc_remote_port_delete(rport); - } + rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); + if (!rport_event_queue) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL(fc_setup_rport); - spin_lock_irqsave(shost->host_lock, flags); - list_splice(&rports, &fc_host_rports(shost)); - spin_unlock_irqrestore(shost->host_lock, flags); +void fc_destroy_rport() +{ + destroy_workqueue(rport_event_queue); } +EXPORT_SYMBOL(fc_destroy_rport); void fc_rport_terminate_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rp = rport->dd_data; - struct fc_lport *lp = rp->local_port; + struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_lport *lport = rdata->local_port; - lp->tt.exch_mgr_reset(lp->emp, 0, rport->port_id); - lp->tt.exch_mgr_reset(lp->emp, rport->port_id, 0); + lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id); + lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0); } EXPORT_SYMBOL(fc_rport_terminate_io); diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h index c7a52bb..9508e55 100644 --- a/include/scsi/libfc/fc_frame.h +++ b/include/scsi/libfc/fc_frame.h @@ -51,6 +51,7 @@ #define fr_sof(fp) (fr_cb(fp)->fr_sof) #define fr_eof(fp) (fr_cb(fp)->fr_eof) #define fr_flags(fp) (fr_cb(fp)->fr_flags) +#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload) struct fc_frame { struct sk_buff skb; @@ -63,6 +64,7 @@ struct fcoe_rcv_info { enum fc_sof fr_sof; /* start of frame delimiter */ enum fc_eof fr_eof; /* end of frame delimiter */ u8 fr_flags; /* flags - see below */ + u16 fr_max_payload; /* max FC payload */ }; /* diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h index 24d3fcb..7e5e6be 100644 --- a/include/scsi/libfc/libfc.h +++ b/include/scsi/libfc/libfc.h @@ -36,12 +36,10 @@ #define LIBFC_DEBUG #ifdef LIBFC_DEBUG -/* - * Log message. - */ +/* Log messages */ #define FC_DBG(fmt, args...) \ do { \ - printk(KERN_INFO "%s " fmt, __func__, ##args); \ + printk(KERN_INFO "%s " fmt, __func__, ##args); \ } while (0) #else #define FC_DBG(fmt, args...) 
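The new fr_max_payload() field added in the fc_frame.h hunk above is filled in by fc_fcp_send_data() with the command's max_payload. Presumably the intended consumer is a sequence-offload capable LLD that receives a whole burst in one fc_frame and must cut it into wire-sized data frames itself; a rough sketch under that assumption, with an illustrative function name:

	static void example_segment_burst(struct fc_frame *fp)
	{
		size_t payload = fr_max_payload(fp);	/* per-frame FC payload limit */
		size_t left = fr_len(fp) - sizeof(struct fc_frame_header);

		while (left > 0) {
			size_t chunk = min(left, payload);

			/* ... build and transmit one data frame of 'chunk' bytes ... */
			left -= chunk;
		}
	}
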
@@ -59,35 +57,22 @@ #define ntohll(x) be64_to_cpu(x) #define htonll(x) cpu_to_be64(x) -#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) +#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) -#define hton24(p, v) do { \ - p[0] = (((v) >> 16) & 0xFF); \ - p[1] = (((v) >> 8) & 0xFF); \ - p[2] = ((v) & 0xFF); \ -} while (0) +#define hton24(p, v) do { \ + p[0] = (((v) >> 16) & 0xFF); \ + p[1] = (((v) >> 8) & 0xFF); \ + p[2] = ((v) & 0xFF); \ + } while (0) struct fc_exch_mgr; /* - * tgt_flags - */ -#define FC_TGT_REC_SUPPORTED (1 << 0) - -/* * FC HBA status */ #define FC_PAUSE (1 << 1) #define FC_LINK_UP (1 << 0) -/* for fc_softc */ -#define FC_MAX_OUTSTANDING_COMMANDS 1024 - -/* - * Transport Capabilities - */ -#define TRANS_C_SG (1 << 0) /* Scatter gather */ - enum fc_lport_state { LPORT_ST_NONE = 0, LPORT_ST_FLOGI, @@ -104,6 +89,7 @@ enum fc_lport_event { LPORT_EV_RPORT_NONE = 0, LPORT_EV_RPORT_CREATED, LPORT_EV_RPORT_FAILED, + LPORT_EV_RPORT_STOP, LPORT_EV_RPORT_LOGO }; @@ -163,9 +149,11 @@ struct fc_rport_libfc_priv { struct mutex rp_mutex; struct delayed_work retry_work; enum fc_lport_event event; - void (*event_callback)(struct fc_lport *, u32, + void (*event_callback)(struct fc_lport *, + struct fc_rport *, enum fc_lport_event); - u32 roles; + struct list_head peers; + struct work_struct event_work; }; #define PRIV_TO_RPORT(x) \ @@ -173,8 +161,8 @@ struct fc_rport_libfc_priv { #define RPORT_TO_PRIV(x) \ (struct fc_rport_libfc_priv*)((void *)x + sizeof(struct fc_rport)); -struct fc_rport *fc_rport_dummy_create(struct fc_disc_port *); -void fc_rport_dummy_destroy(struct fc_rport *); +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *); +void fc_rport_rogue_destroy(struct fc_rport *); static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn) { @@ -360,7 +348,7 @@ struct libfc_function_template { int (*lport_reset)(struct fc_lport *); - void (*event_callback)(struct fc_lport *, u32, + void (*event_callback)(struct fc_lport *, struct fc_rport *, enum fc_lport_event); /** @@ -384,15 +372,21 @@ struct libfc_function_template { */ int (*rport_logout)(struct fc_rport *rport); + /* + * Delete the rport and remove it from the transport if + * it had been added. This will not send a LOGO, use + * rport_logout for a gracefull logout. + */ + int (*rport_stop)(struct fc_rport *rport); + + /* + * Recieve a request from a remote port. 
+ */ void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, struct fc_rport *); struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32); - void (*rport_reset)(struct fc_rport *); - - void (*rport_reset_list)(struct fc_lport *); - /** * SCSI interfaces */ @@ -429,6 +423,7 @@ struct fc_lport { struct fc_rport *dns_rp; struct fc_rport *ptp_rp; void *scsi_priv; + struct list_head rports; /* Operational Information */ struct libfc_function_template tt; @@ -442,7 +437,6 @@ struct fc_lport { u64 wwpn; u64 wwnn; - u32 fid; u8 retry_count; unsigned char disc_retry_count; unsigned char disc_delay; @@ -452,8 +446,8 @@ struct fc_lport { unsigned char disc_buf_len; /* Capabilities */ - char ifname[IFNAMSIZ]; - u32 capabilities; + u32 sg_supp:1; /* scatter gather supported */ + u32 seq_offload:1; /* seq offload supported */ u32 mfs; /* max FC payload size */ unsigned int service_params; unsigned int e_d_tov; @@ -484,11 +478,6 @@ static inline int fc_lport_test_ready(struct fc_lport *lp) return lp->state == LPORT_ST_READY; } -static inline u32 fc_lport_get_fid(const struct fc_lport *lp) -{ - return lp->fid; -} - static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn) { lp->wwnn = wwnn; @@ -586,8 +575,6 @@ int fc_set_mfs(struct fc_lport *lp, u32 mfs); *****************************/ int fc_rport_init(struct fc_lport *lp); void fc_rport_terminate_io(struct fc_rport *rp); -void fc_block_rports(struct fc_lport *lp); - /** * DISCOVERY LAYER @@ -776,6 +763,7 @@ void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data); * Functions for fc_functions_template */ void fc_get_host_speed(struct Scsi_Host *shost); +void fc_get_host_port_type(struct Scsi_Host *shost); void fc_get_host_port_state(struct Scsi_Host *shost); void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout); struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); @@ -785,6 +773,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); */ int fc_setup_exch_mgr(void); void fc_destroy_exch_mgr(void); - +int fc_setup_rport(void); +void fc_destroy_rport(void); #endif /* _LIBFC_H_ */
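Finally, with the ifname/capabilities pair (and the TRANS_C_SG flag) gone from struct fc_lport, an LLD now advertises what it can do through the new sg_supp and seq_offload bit-fields before initialization; seq_offload is what fc_fcp_send_data() consults when choosing t_blen. A minimal sketch with an illustrative helper name:

	static void example_set_caps(struct fc_lport *lport,
				     bool can_sg, bool can_seq_offload)
	{
		lport->sg_supp = can_sg;		/* scatter-gather frames supported */
		lport->seq_offload = can_seq_offload;	/* LLD can send a whole burst */
	}
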