From: Vasu Dev Subject: Incremental Open-FCoE for Beta6 References: bnc#438954 Incremental Open-FCoE update for Beta6. Signed-off-by: Vasu Dev Acked-by: Hannes Reinecke --- diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 6f38b13..4922958 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -328,19 +328,6 @@ menuconfig SCSI_LOWLEVEL if SCSI_LOWLEVEL && SCSI -config LIBFC - tristate "LibFC module" - depends on SCSI && SCSI_FC_ATTRS - ---help--- - Fibre Channel library module - -config FCOE - tristate "FCoE module" - depends on SCSI && SCSI_FC_ATTRS - select LIBFC - ---help--- - Fibre Channel over Ethernet module - config ISCSI_TCP tristate "iSCSI Initiator over TCP/IP" depends on SCSI && INET @@ -616,6 +603,20 @@ config SCSI_FLASHPOINT substantial, so users of MultiMaster Host Adapters may not wish to include it. +config LIBFC + tristate "LibFC module" + depends on SCSI && SCSI_FC_ATTRS + ---help--- + Fibre Channel library module + +config FCOE + tristate "FCoE module" + depends on SCSI + select LIBFC + ---help--- + Fibre Channel over Ethernet module + + config SCSI_DMX3191D tristate "DMX3191D SCSI support" depends on PCI && SCSI diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c index ff207b2..bf7fe6f 100644 --- a/drivers/scsi/fcoe/fc_transport_fcoe.c +++ b/drivers/scsi/fcoe/fc_transport_fcoe.c @@ -17,356 +17,430 @@ * Maintained at www.Open-FCoE.org */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "fcoe_def.h" - -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("FCoE"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.3"); +#include +#include +#include -/* - * Static functions and variables definations - */ -#ifdef CONFIG_HOTPLUG_CPU -static int fcoe_cpu_callback(struct notifier_block *, ulong, void *); -#endif /* CONFIG_HOTPLUG_CPU */ -static int fcoe_device_notification(struct notifier_block *, ulong, void *); -static void fcoe_dev_setup(void); -static void fcoe_dev_cleanup(void); - -#ifdef CONFIG_HOTPLUG_CPU -static struct notifier_block fcoe_cpu_notifier = { - .notifier_call = fcoe_cpu_callback, +/* internal fcoe transport */ +struct fcoe_transport_internal { + struct fcoe_transport *t; + struct net_device *netdev; + struct list_head list; }; -#endif /* CONFIG_HOTPLUG_CPU */ -/* - * notification function from net device - */ -static struct notifier_block fcoe_notifier = { - .notifier_call = fcoe_device_notification, -}; +/* fcoe transports list and its lock */ +static LIST_HEAD(fcoe_transports); +static DEFINE_MUTEX(fcoe_transports_lock); -#ifdef CONFIG_HOTPLUG_CPU -/* - * create percpu stats block - * called by cpu add/remove notifier - */ -static void fcoe_create_percpu_data(int cpu) +/** + * fcoe_transport_default - returns ptr to the default transport fcoe_sw + **/ +struct fcoe_transport *fcoe_transport_default(void) { - struct fc_lport *lp; - struct fcoe_softc *fc; - - write_lock_bh(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - lp = fc->lp; - if (lp->dev_stats[cpu] == NULL) - lp->dev_stats[cpu] = kzalloc(sizeof(struct fcoe_dev_stats), - GFP_KERNEL); - } - write_unlock_bh(&fcoe_hostlist_lock); + return &fcoe_sw_transport; } -/* - * destroy percpu stats block - * called by cpu add/remove notifier - */ -static void fcoe_destroy_percpu_data(int cpu) +/** + * fcoe_transport_to_pcidev - get the pci dev from a netdev + * @netdev: the netdev that pci dev will be retrived from + * + * Returns: 
NULL or the corresponding pci_dev + **/ +struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev) { - struct fc_lport *lp; - struct fcoe_softc *fc; - - write_lock_bh(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - lp = fc->lp; - kfree(lp->dev_stats[cpu]); - lp->dev_stats[cpu] = NULL; - } - write_unlock_bh(&fcoe_hostlist_lock); + if (!netdev->dev.parent) + return NULL; + return to_pci_dev(netdev->dev.parent); } -/* - * Get notified when a cpu comes on/off. Be hotplug friendly. - */ -static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +/** + * fcoe_transport_device_lookup - find out if a netdev is managed by the + * given transport + * @t: the transport to search + * @netdev: the netdev to look up + * + * Returns: NULL if not found, otherwise the internal entry binding this + * netdev to the transport + **/ +static struct fcoe_transport_internal *fcoe_transport_device_lookup( struct fcoe_transport *t, struct net_device *netdev) { - unsigned int cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - fcoe_create_percpu_data(cpu); - break; - case CPU_DEAD: - fcoe_destroy_percpu_data(cpu); - break; - default: - break; + struct fcoe_transport_internal *ti; + + /* look for the device in this transport's devlist */ + mutex_lock(&t->devlock); + list_for_each_entry(ti, &t->devlist, list) { + if (ti->netdev == netdev) { + mutex_unlock(&t->devlock); + return ti; + } } - return NOTIFY_OK; + mutex_unlock(&t->devlock); + return NULL; } -#endif /* CONFIG_HOTPLUG_CPU */ - -/* - * function to setup link change notification interface - */ -static void fcoe_dev_setup(void) +/** + * fcoe_transport_device_add - assign a transport to a device + * @netdev: the netdev the transport to be attached to + * + * This will look for an existing offload driver; if none is found, it falls + * back to the default sw hba (fcoe_sw) as its fcoe transport.
+ * + * Returns: 0 for success + **/ +static int fcoe_transport_device_add(struct fcoe_transport *t, + struct net_device *netdev) { - /* - * here setup a interface specific wd time to - * monitor the link state - */ - register_netdevice_notifier(&fcoe_notifier); + struct fcoe_transport_internal *ti; + + ti = fcoe_transport_device_lookup(t, netdev); + if (ti) { + printk(KERN_DEBUG "fcoe_transport_device_add:" + "device %s is already added to transport %s\n", + netdev->name, t->name); + return -EEXIST; + } + /* allocate an internal struct to host the netdev and the list */ + ti = kzalloc(sizeof(*ti), GFP_KERNEL); + if (!ti) + return -ENOMEM; + + ti->t = t; + ti->netdev = netdev; + INIT_LIST_HEAD(&ti->list); + dev_hold(ti->netdev); + + mutex_lock(&t->devlock); + list_add(&ti->list, &t->devlist); + mutex_unlock(&t->devlock); + + printk(KERN_DEBUG "fcoe_transport_device_add:" + "device %s added to transport %s\n", + netdev->name, t->name); + + return 0; } -/* - * function to cleanup link change notification interface - */ -static void fcoe_dev_cleanup(void) +/** + * fcoe_transport_device_remove - remove a device from its transport + * @netdev: the netdev the transport to be attached to + * + * this removes the device from the transport so the given transport will + * not manage this device any more + * + * Returns: 0 for success + **/ +static int fcoe_transport_device_remove(struct fcoe_transport *t, + struct net_device *netdev) { - unregister_netdevice_notifier(&fcoe_notifier); + struct fcoe_transport_internal *ti; + + ti = fcoe_transport_device_lookup(t, netdev); + if (!ti) { + printk(KERN_DEBUG "fcoe_transport_device_remove:" + "device %s is not managed by transport %s\n", + netdev->name, t->name); + return -ENODEV; + } + mutex_lock(&t->devlock); + list_del(&ti->list); + mutex_unlock(&t->devlock); + printk(KERN_DEBUG "fcoe_transport_device_remove:" + "device %s removed from transport %s\n", + netdev->name, t->name); + dev_put(ti->netdev); + kfree(ti); + return 0; } -/* - * This function is called by the ethernet driver - * this is called in case of link change event - */ -static int fcoe_device_notification(struct notifier_block *notifier, - ulong event, void *ptr) +/** + * fcoe_transport_device_remove_all - remove all from transport devlist + * + * this removes the device from the transport so the given transport will + * not manage this device any more + * + * Returns: 0 for success + **/ +static void fcoe_transport_device_remove_all(struct fcoe_transport *t) { - struct fc_lport *lp = NULL; - struct net_device *real_dev = ptr; - struct fcoe_softc *fc; - struct fcoe_dev_stats *stats; - u16 new_status; - u32 mfs; - int rc = NOTIFY_OK; - - read_lock(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->real_dev == real_dev) { - lp = fc->lp; - break; - } - } - read_unlock(&fcoe_hostlist_lock); - if (lp == NULL) { - rc = NOTIFY_DONE; - goto out; - } + struct fcoe_transport_internal *ti, *tmp; - new_status = lp->link_status; - switch (event) { - case NETDEV_DOWN: - case NETDEV_GOING_DOWN: - new_status &= ~FC_LINK_UP; - break; - case NETDEV_UP: - case NETDEV_CHANGE: - new_status &= ~FC_LINK_UP; - if (!fcoe_link_ok(lp)) - new_status |= FC_LINK_UP; - break; - case NETDEV_CHANGEMTU: - mfs = fc->real_dev->mtu - - (sizeof(struct fcoe_hdr) + - sizeof(struct fcoe_crc_eof)); - if (fc->user_mfs && fc->user_mfs < mfs) - mfs = fc->user_mfs; - if (mfs >= FC_MIN_MAX_FRAME) - fc_set_mfs(lp, mfs); - new_status &= ~FC_LINK_UP; - if (!fcoe_link_ok(lp)) - new_status |= FC_LINK_UP; - 
break; - case NETDEV_REGISTER: - break; - default: - FC_DBG("unknown event %ld call", event); - } - if (lp->link_status != new_status) { - if ((new_status & FC_LINK_UP) == FC_LINK_UP) - fc_linkup(lp); - else { - stats = lp->dev_stats[smp_processor_id()]; - if (stats) - stats->LinkFailureCount++; - fc_linkdown(lp); - fcoe_clean_pending_queue(lp); - } + mutex_lock(&t->devlock); + list_for_each_entry_safe(ti, tmp, &t->devlist, list) { + list_del(&ti->list); + kfree(ti); } -out: - return rc; + mutex_unlock(&t->devlock); } -static void trimstr(char *str, int len) +/** + * fcoe_transport_match - use the bus device match function to match the hw + * @t: the fcoe transport + * @netdev: + * + * This function is used to check if the givne transport wants to manage the + * input netdev. if the transports implements the match function, it will be + * called, o.w. we just compare the pci vendor and device id. + * + * Returns: true for match up + **/ +static bool fcoe_transport_match(struct fcoe_transport *t, + struct net_device *netdev) { - char *cp = str + len; - while (--cp >= str && *cp == '\n') - *cp = '\0'; + /* match transport by vendor and device id */ + struct pci_dev *pci; + + pci = fcoe_transport_pcidev(netdev); + + if (pci) { + printk(KERN_DEBUG "fcoe_transport_match:" + "%s:%x:%x -- %s:%x:%x\n", + t->name, t->vendor, t->device, + netdev->name, pci->vendor, pci->device); + + /* if transport supports match */ + if (t->match) + return t->match(netdev); + + /* else just compare the vendor and device id: pci only */ + return (t->vendor == pci->vendor) && (t->device == pci->device); + } + return false; } -static int fcoe_destroy(const char *buffer, struct kernel_param *kp) +/** + * fcoe_transport_lookup - check if the transport is already registered + * @t: the transport to be looked up + * + * This compares the parent device (pci) vendor and device id + * + * Returns: NULL if not found + * + * TODO - return default sw transport if no other transport is found + **/ +static struct fcoe_transport *fcoe_transport_lookup( + struct net_device *netdev) { - struct net_device *netdev; - char ifname[IFNAMSIZ + 2]; - int rc = -ENODEV; - - strlcpy(ifname, buffer, IFNAMSIZ); - trimstr(ifname, strlen(ifname)); - netdev = dev_get_by_name(&init_net, ifname); - if (netdev) { - rc = fcoe_destroy_interface(netdev); - dev_put(netdev); + struct fcoe_transport *t; + + mutex_lock(&fcoe_transports_lock); + list_for_each_entry(t, &fcoe_transports, list) { + if (fcoe_transport_match(t, netdev)) { + mutex_unlock(&fcoe_transports_lock); + return t; + } } - return rc; + mutex_unlock(&fcoe_transports_lock); + + printk(KERN_DEBUG "fcoe_transport_lookup:" + "use default transport for %s\n", netdev->name); + return fcoe_transport_default(); } -static int fcoe_create(const char *buffer, struct kernel_param *kp) +/** + * fcoe_transport_register - adds a fcoe transport to the fcoe transports list + * @t: ptr to the fcoe transport to be added + * + * Returns: 0 for success + **/ +int fcoe_transport_register(struct fcoe_transport *t) { - struct net_device *netdev; - char ifname[IFNAMSIZ + 2]; - int rc = -ENODEV; - - strlcpy(ifname, buffer, IFNAMSIZ); - trimstr(ifname, strlen(ifname)); - netdev = dev_get_by_name(&init_net, ifname); - if (netdev) { - rc = fcoe_create_interface(netdev); - dev_put(netdev); + struct fcoe_transport *tt; + + /* TODO - add fcoe_transport specific initialization here */ + mutex_lock(&fcoe_transports_lock); + list_for_each_entry(tt, &fcoe_transports, list) { + if (tt == t) { + 
mutex_unlock(&fcoe_transports_lock); + return -EEXIST; + } } - return rc; + list_add_tail(&t->list, &fcoe_transports); + mutex_unlock(&fcoe_transports_lock); + + mutex_init(&t->devlock); + INIT_LIST_HEAD(&t->devlist); + + printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name); + + return 0; } +EXPORT_SYMBOL_GPL(fcoe_transport_register); -module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(create, "string"); -MODULE_PARM_DESC(create, "Create fcoe port using net device passed in."); -module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(destroy, "string"); -MODULE_PARM_DESC(destroy, "Destroy fcoe port"); +/** + * fcoe_transport_unregister - remove the transport from the fcoe transports list + * @t: ptr to the fcoe transport to be removed + * + * Returns: 0 for success + **/ +int fcoe_transport_unregister(struct fcoe_transport *t) +{ + struct fcoe_transport *tt, *tmp; + + mutex_lock(&fcoe_transports_lock); + list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) { + if (tt == t) { + list_del(&t->list); + mutex_unlock(&fcoe_transports_lock); + fcoe_transport_device_remove_all(t); + printk(KERN_DEBUG "fcoe_transport_unregister:%s\n", + t->name); + return 0; + } + } + mutex_unlock(&fcoe_transports_lock); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(fcoe_transport_unregister); /* - * Initialization routine - * 1. Will create fc transport software structure - * 2. initialize the link list of port information structure - */ -static int __init fcoe_init(void) + * fcoe_load_transport_driver - load an offload driver by alias name + * @netdev: the target net device + * + * Requests an offload driver module to act as the fcoe transport; if that + * fails, it falls back to using the SW HBA (fcoe_sw) as its transport + * + * TODO - + * 1. supports only PCI device + * 2. needs a fix for VLAN and bonding + * 3.
pure hw fcoe hba may not have netdev + * + * Returns: 0 for success + **/ +int fcoe_load_transport_driver(struct net_device *netdev) { - int cpu; - struct fcoe_percpu_s *p; - - rwlock_init(&fcoe_hostlist_lock); - -#ifdef CONFIG_HOTPLUG_CPU - register_cpu_notifier(&fcoe_cpu_notifier); -#endif /* CONFIG_HOTPLUG_CPU */ - - /* - * initialize per CPU interrupt thread - */ - for_each_online_cpu(cpu) { - p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL); - if (p) { - p->thread = kthread_create(fcoe_percpu_receive_thread, - (void *)p, - "fcoethread/%d", cpu); - - /* - * if there is no error then bind the thread to the cpu - * initialize the semaphore and skb queue head - */ - if (likely(!IS_ERR(p->thread))) { - p->cpu = cpu; - fcoe_percpu[cpu] = p; - skb_queue_head_init(&p->fcoe_rx_list); - kthread_bind(p->thread, cpu); - wake_up_process(p->thread); - } else { - fcoe_percpu[cpu] = NULL; - kfree(p); - - } - } + struct pci_dev *pci; + struct device *dev = netdev->dev.parent; + + if (fcoe_transport_lookup(netdev)) { + /* load default transport */ + printk(KERN_DEBUG "fcoe: already loaded transport for %s\n", + netdev->name); + return -EEXIST; } - /* - * setup link change notification - */ - fcoe_dev_setup(); + pci = to_pci_dev(dev); + if (dev->bus != &pci_bus_type) { + printk(KERN_DEBUG "fcoe: support noly PCI device\n"); + return -ENODEV; + } + printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n", + pci->vendor, pci->device); + + return request_module("fcoe-pci-0x%04x-0x%04x", + pci->vendor, pci->device); - init_timer(&fcoe_timer); - fcoe_timer.data = 0; - fcoe_timer.function = fcoe_watchdog; - fcoe_timer.expires = (jiffies + (10 * HZ)); - add_timer(&fcoe_timer); +} +EXPORT_SYMBOL_GPL(fcoe_load_transport_driver); - if (fcoe_sw_init() != 0) { - FC_DBG("fail to attach fc transport"); - return -1; +/** + * fcoe_transport_attach - load transport to fcoe + * @netdev: the netdev the transport to be attached to + * + * This will look for existing offload driver, if not found, it falls back to + * the default sw hba (fcoe_sw) as its fcoe transport. 
+ * + * Returns: 0 for success + **/ +int fcoe_transport_attach(struct net_device *netdev) +{ + struct fcoe_transport *t; + + /* find the corresponding transport */ + t = fcoe_transport_lookup(netdev); + if (!t) { + printk(KERN_DEBUG "fcoe_transport_attach" + ":no transport found for %s\n", + netdev->name); + return -ENODEV; + } + /* add to the transport */ + if (fcoe_transport_device_add(t, netdev)) { + printk(KERN_DEBUG "fcoe_transport_attach" + ":failed to add %s to transport %s\n", + netdev->name, t->name); + return -EIO; + } + /* transport create function */ + if (t->create) + t->create(netdev); + printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n", + t->name, netdev->name); return 0; } -module_init(fcoe_init); +EXPORT_SYMBOL_GPL(fcoe_transport_attach); -static void __exit fcoe_exit(void) +/** + * fcoe_transport_release - unload transport from fcoe + * @netdev: the net device on which fcoe is to be released + * + * Returns: 0 for success + **/ +int fcoe_transport_release(struct net_device *netdev) { - u32 idx; - struct fcoe_softc *fc, *tmp; - struct fcoe_percpu_s *p; - struct sk_buff *skb; - - /* - * Stop all call back interfaces - */ -#ifdef CONFIG_HOTPLUG_CPU - unregister_cpu_notifier(&fcoe_cpu_notifier); -#endif /* CONFIG_HOTPLUG_CPU */ - fcoe_dev_cleanup(); - - /* - * stop timer - */ - del_timer_sync(&fcoe_timer); - - /* - * assuming that at this time there will be no - * ioctl in prograss, therefore we do not need to lock the - * list. - */ - list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) - fcoe_destroy_interface(fc->real_dev); - - for (idx = 0; idx < NR_CPUS; idx++) { - if (fcoe_percpu[idx]) { - kthread_stop(fcoe_percpu[idx]->thread); - p = fcoe_percpu[idx]; - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - if (fcoe_percpu[idx]->crc_eof_page) - put_page(fcoe_percpu[idx]->crc_eof_page); - kfree(fcoe_percpu[idx]); - } + struct fcoe_transport *t; + + /* find the corresponding transport */ + t = fcoe_transport_lookup(netdev); + if (!t) { + printk(KERN_DEBUG "fcoe_transport_release:" + "no transport found for %s\n", + netdev->name); + return -ENODEV; + } + /* remove the device from the transport */ + if (fcoe_transport_device_remove(t, netdev)) { + printk(KERN_DEBUG "fcoe_transport_release:" + "failed to remove %s from transport %s\n", + netdev->name, t->name); + return -EIO; + } + /* transport destroy function */ + if (t->destroy) + t->destroy(netdev); + + printk(KERN_DEBUG "fcoe_transport_release:" + "device %s detached from transport %s\n", + netdev->name, t->name); + + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_transport_release); + +/** + * fcoe_transport_init - initializes fcoe transport layer + * + * This prepares the fcoe transport layer for use + * + * Returns: 0 for success + **/ +int __init fcoe_transport_init(void) +{ + INIT_LIST_HEAD(&fcoe_transports); + mutex_init(&fcoe_transports_lock); + return 0; +} - fcoe_sw_exit(); +/** + * fcoe_transport_exit - cleans up the fcoe transport layer + * This cleans up the fcoe transport layer, removing every transport on the + * list; note that the transport destroy functions are not called here.
+ * + * Returns: none + **/ +int __exit fcoe_transport_exit(void) +{ + struct fcoe_transport *t, *tmp; + + mutex_lock(&fcoe_transports_lock); + list_for_each_entry_safe(t, tmp, &fcoe_transports, list) { + list_del(&t->list); + mutex_unlock(&fcoe_transports_lock); + fcoe_transport_device_remove_all(t); + mutex_lock(&fcoe_transports_lock); + } + mutex_unlock(&fcoe_transports_lock); + return 0; } -module_exit(fcoe_exit); diff --git a/drivers/scsi/fcoe/fcoe_def.h b/drivers/scsi/fcoe/fcoe_def.h deleted file mode 100644 index b00e14b..0000000 --- a/drivers/scsi/fcoe/fcoe_def.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * Maintained at www.Open-FCoE.org - */ - -#ifndef _FCOE_DEF_H_ -#define _FCOE_DEF_H_ - -#include -#include - -#include - -#include - -#define FCOE_DRIVER_NAME "fcoe" /* driver name for ioctls */ -#define FCOE_DRIVER_VENDOR "Open-FC.org" /* vendor name for ioctls */ - -#define FCOE_MIN_FRAME 36 -#define FCOE_WORD_TO_BYTE 4 - -/* - * this is the main common structure across all instance of fcoe driver. - * There is one to one mapping between hba struct and ethernet nic. - * list of hbas contains pointer to the hba struct, these structures are - * stored in this array using there corresponding if_index. 
- */ - -struct fcoe_percpu_s { - int cpu; - struct task_struct *thread; - struct sk_buff_head fcoe_rx_list; - struct page *crc_eof_page; - int crc_eof_offset; -}; - -extern struct timer_list fcoe_timer; -extern rwlock_t fcoe_hostlist_lock; -extern struct list_head fcoe_hostlist; -extern struct fcoe_percpu_s *fcoe_percpu[]; - -struct fcoe_softc { - struct list_head list; - struct fc_lport *lp; - struct net_device *real_dev; - struct net_device *phys_dev; /* device with ethtool_ops */ - struct packet_type fcoe_packet_type; - struct sk_buff_head fcoe_pending_queue; - u16 user_mfs; /* configured max frame size */ - - u8 dest_addr[ETH_ALEN]; - u8 ctl_src_addr[ETH_ALEN]; - u8 data_src_addr[ETH_ALEN]; - /* - * fcoe protocol address learning related stuff - */ - u16 flogi_oxid; - u8 flogi_progress; - u8 address_mode; -}; - -int fcoe_percpu_receive_thread(void *arg); - -/* - * HBA transport ops prototypes - */ -void fcoe_clean_pending_queue(struct fc_lport *fd); -void fcoe_watchdog(ulong vp); -int fcoe_destroy_interface(struct net_device *); -int fcoe_create_interface(struct net_device *); -int fcoe_xmit(struct fc_lport *, struct fc_frame *); -int fcoe_rcv(struct sk_buff *, struct net_device *, - struct packet_type *, struct net_device *); -int fcoe_link_ok(struct fc_lport *); - -int __init fcoe_sw_init(void); -void __exit fcoe_sw_exit(void); -#endif /* _FCOE_DEF_H_ */ diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c index d7ceb1b..33aebe5 100644 --- a/drivers/scsi/fcoe/fcoe_sw.c +++ b/drivers/scsi/fcoe/fcoe_sw.c @@ -17,19 +17,14 @@ * Maintained at www.Open-FCoE.org */ -/* - * FCOE protocol file - */ - #include #include #include +#include #include #include #include #include -#include -#include #include #include @@ -39,36 +34,25 @@ #include #include -#include - -#include -#include "fcoe_def.h" +#include +#include +#include -#define FCOE_VERSION "0.1" +#define FCOE_SW_VERSION "0.1" +#define FCOE_SW_NAME "fcoesw" +#define FCOE_SW_VENDOR "Open-FCoE.org" #define FCOE_MAX_LUN 255 #define FCOE_MAX_FCP_TARGET 256 #define FCOE_MAX_OUTSTANDING_COMMANDS 1024 -#define FCOE_MIN_XID 0x0004 -#define FCOE_MAX_XID 0x07ef +#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ +#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ -LIST_HEAD(fcoe_hostlist); -DEFINE_RWLOCK(fcoe_hostlist_lock); -DEFINE_TIMER(fcoe_timer, NULL, 0, 0); -struct fcoe_percpu_s *fcoe_percpu[NR_CPUS]; - -static struct scsi_transport_template *fcoe_transport_template; - -static int fcoe_reset(struct Scsi_Host *shost) -{ - struct fc_lport *lport = shost_priv(shost); - fc_lport_reset(lport); - return 0; -} +static struct scsi_transport_template *scsi_transport_fcoe_sw; -struct fc_function_template fcoe_transport_function = { +struct fc_function_template fcoe_sw_transport_function = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -101,60 +85,10 @@ struct fc_function_template fcoe_transport_function = { .terminate_rport_io = fc_rport_terminate_io, }; -static struct fcoe_softc *fcoe_find_fc_lport(const struct net_device *netdev) -{ - struct fcoe_softc *fc; - - read_lock(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->real_dev == netdev) { - read_unlock(&fcoe_hostlist_lock); - return fc; - } - } - read_unlock(&fcoe_hostlist_lock); - return NULL; -} - -/* - * Convert 48-bit IEEE MAC address to 64-bit FC WWN. 
- */ -static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], - unsigned int scheme, unsigned int port) -{ - u64 wwn; - u64 host_mac; - - /* The MAC is in NO, so flip only the low 48 bits */ - host_mac = ((u64) mac[0] << 40) | - ((u64) mac[1] << 32) | - ((u64) mac[2] << 24) | - ((u64) mac[3] << 16) | - ((u64) mac[4] << 8) | - (u64) mac[5]; - - WARN_ON(host_mac >= (1ULL << 48)); - wwn = host_mac | ((u64) scheme << 60); - switch (scheme) { - case 1: - WARN_ON(port != 0); - break; - case 2: - WARN_ON(port >= 0xfff); - wwn |= (u64) port << 48; - break; - default: - WARN_ON(1); - break; - } - - return wwn; -} - -static struct scsi_host_template fcoe_driver_template = { +static struct scsi_host_template fcoe_sw_shost_template = { .module = THIS_MODULE, .name = "FCoE Driver", - .proc_name = FCOE_DRIVER_NAME, + .proc_name = FCOE_SW_NAME, .queuecommand = fc_queuecommand, .eh_abort_handler = fc_eh_abort, .eh_device_reset_handler = fc_eh_device_reset, @@ -170,138 +104,18 @@ static struct scsi_host_template fcoe_driver_template = { .max_sectors = 0xffff, }; -int fcoe_destroy_interface(struct net_device *netdev) -{ - int cpu, idx; - struct fcoe_percpu_s *pp; - struct fcoe_softc *fc; - struct fcoe_rcv_info *fr; - struct sk_buff_head *list; - struct sk_buff *skb, *next; - struct sk_buff *head; - struct fc_lport *lp; - u8 flogi_maddr[ETH_ALEN]; - - fc = fcoe_find_fc_lport(netdev); - if (!fc) - return -ENODEV; - - lp = fc->lp; - - /* Remove the instance from fcoe's list */ - write_lock_bh(&fcoe_hostlist_lock); - list_del(&fc->list); - write_unlock_bh(&fcoe_hostlist_lock); - - /* Don't listen for Ethernet packets anymore */ - dev_remove_pack(&fc->fcoe_packet_type); - - /* Detach from the scsi-ml */ - fc_remove_host(lp->host); - scsi_remove_host(lp->host); - - /* Cleanup the fc_lport */ - fc_lport_destroy(lp); - fc_fcp_destroy(lp); - if (lp->emp) - fc_exch_mgr_free(lp->emp); - - /* Delete secondary MAC addresses */ - rtnl_lock(); - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); - dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN); - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) - dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN); - rtnl_unlock(); - - /* Free the per-CPU revieve threads */ - for (idx = 0; idx < NR_CPUS; idx++) { - if (fcoe_percpu[idx]) { - pp = fcoe_percpu[idx]; - spin_lock_bh(&pp->fcoe_rx_list.lock); - list = &pp->fcoe_rx_list; - head = list->next; - for (skb = head; skb != (struct sk_buff *)list; - skb = next) { - next = skb->next; - fr = fcoe_dev_from_skb(skb); - if (fr->fr_dev == fc->lp) { - __skb_unlink(skb, list); - kfree_skb(skb); - } - } - spin_unlock_bh(&pp->fcoe_rx_list.lock); - } - } - - /* Free existing skbs */ - fcoe_clean_pending_queue(lp); - - /* Free memory used by statistical counters */ - for_each_online_cpu(cpu) - kfree(lp->dev_stats[cpu]); - - /* Release the net_device and Scsi_Host */ - dev_put(fc->real_dev); - scsi_host_put(lp->host); - return 0; -} - /* - * Return zero if link is OK for use by FCoE. - * Any permanently-disqualifying conditions have been previously checked. - * This also updates the speed setting, which may change with link for 100/1000. + * fcoe_sw_lport_config - sets up the fc_lport + * @lp: ptr to the fc_lport + * @shost: ptr to the parent scsi host + * + * Returns: 0 for success * - * This function should probably be checking for PAUSE support at some point - * in the future. Currently Per-priority-pause is not determinable using - * ethtool, so we shouldn't be restrictive until that problem is resolved. 
*/ -int fcoe_link_ok(struct fc_lport *lp) -{ - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv; - struct net_device *dev = fc->real_dev; - struct ethtool_cmd ecmd = { ETHTOOL_GSET }; - int rc = 0; - - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { - dev = fc->phys_dev; - if (dev->ethtool_ops->get_settings) { - dev->ethtool_ops->get_settings(dev, &ecmd); - lp->link_supported_speeds &= - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); - if (ecmd.supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full)) - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; - if (ecmd.supported & SUPPORTED_10000baseT_Full) - lp->link_supported_speeds |= - FC_PORTSPEED_10GBIT; - if (ecmd.speed == SPEED_1000) - lp->link_speed = FC_PORTSPEED_1GBIT; - if (ecmd.speed == SPEED_10000) - lp->link_speed = FC_PORTSPEED_10GBIT; - } - } else - rc = -1; - - return rc; -} - -static struct libfc_function_template fcoe_libfc_fcn_templ = { - .frame_send = fcoe_xmit, -}; - -static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost) +static int fcoe_sw_lport_config(struct fc_lport *lp) { int i = 0; - lp->host = shost; - lp->drv_priv = (void *)(lp + 1); - - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, - FCOE_MIN_XID, FCOE_MAX_XID); - if (!lp->emp) - return -ENOMEM; - lp->link_status = 0; lp->max_retry_count = 3; lp->e_d_tov = 2 * 1000; /* FC-FS default */ @@ -316,25 +130,39 @@ static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost) lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL); - /* Finish fc_lport configuration */ + /* lport fc_lport related configuration */ fc_lport_config(lp); return 0; } -static int net_config(struct fc_lport *lp) +/* + * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network + * related properties + * @lp : ptr to the fc_lport + * @netdev : ptr to the associated netdevice struct + * + * Must be called after fcoe_sw_lport_config() as it will use lport mutex + * + * Returns : 0 for success + * + */ +static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) { u32 mfs; u64 wwnn, wwpn; - struct net_device *net_dev; - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv; + struct fcoe_softc *fc; u8 flogi_maddr[ETH_ALEN]; + /* Setup lport private data to point to fcoe softc */ + fc = lport_priv(lp); + fc->lp = lp; + fc->real_dev = netdev; + fc->phys_dev = netdev; + /* Require support for get_pauseparam ethtool op. 
*/ - net_dev = fc->real_dev; - if (net_dev->priv_flags & IFF_802_1Q_VLAN) - net_dev = vlan_dev_real_dev(net_dev); - fc->phys_dev = net_dev; + if (netdev->priv_flags & IFF_802_1Q_VLAN) + fc->phys_dev = vlan_dev_real_dev(netdev); /* Do not support for bonding device */ if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) || @@ -356,6 +184,7 @@ static int net_config(struct fc_lport *lp) if (!fcoe_link_ok(lp)) lp->link_status |= FC_LINK_UP; + /* offload features support */ if (fc->real_dev->features & NETIF_F_SG) lp->sg_supp = 1; @@ -394,96 +223,210 @@ static int net_config(struct fc_lport *lp) return 0; } -static void shost_config(struct fc_lport *lp) +/* + * fcoe_sw_shost_config - sets up fc_lport->host + * @lp : ptr to the fc_lport + * @shost : ptr to the associated scsi host + * @dev : device associated to scsi host + * + * Must be called after fcoe_sw_lport_config) and fcoe_sw_netdev_config() + * + * Returns : 0 for success + * + */ +static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, + struct device *dev) { + int rc = 0; + + /* lport scsi host config */ + lp->host = shost; + lp->host->max_lun = FCOE_MAX_LUN; lp->host->max_id = FCOE_MAX_FCP_TARGET; lp->host->max_channel = 0; - lp->host->transportt = fcoe_transport_template; + lp->host->transportt = scsi_transport_fcoe_sw; + + /* add the new host to the SCSI-ml */ + rc = scsi_add_host(lp->host, dev); + if (rc) { + FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n"); + return rc; + } + sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", + FCOE_SW_NAME, FCOE_SW_VERSION, + fcoe_netdev(lp)->name); + + return 0; } -static int libfc_config(struct fc_lport *lp) +/* + * fcoe_sw_em_config - allocates em for this lport + * @lp: the port that em is to allocated for + * + * Returns : 0 on success + */ +static inline int fcoe_sw_em_config(struct fc_lport *lp) { - /* Set the function pointers set by the LLDD */ - memcpy(&lp->tt, &fcoe_libfc_fcn_templ, - sizeof(struct libfc_function_template)); + BUG_ON(lp->emp); - if (fc_fcp_init(lp)) + lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, + FCOE_MIN_XID, FCOE_MAX_XID); + if (!lp->emp) return -ENOMEM; - fc_exch_init(lp); - fc_lport_init(lp); - fc_rport_init(lp); - fc_disc_init(lp); return 0; } /* - * This function creates the fcoe interface - * create struct fcdev which is a shared structure between opefc - * and transport level protocol. + * fcoe_sw_destroy - FCoE software HBA tear-down function + * @netdev: ptr to the associated net_device + * + * Returns: 0 if link is OK for use by FCoE. 
*/ -int fcoe_create_interface(struct net_device *netdev) +static int fcoe_sw_destroy(struct net_device *netdev) { + int cpu; + struct fc_lport *lp = NULL; + struct fcoe_softc *fc; + u8 flogi_maddr[ETH_ALEN]; + + BUG_ON(!netdev); + + printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n", + netdev->name); + + lp = fcoe_hostlist_lookup(netdev); + if (!lp) + return -ENODEV; + + fc = fcoe_softc(lp); + + /* Remove the instance from fcoe's list */ + fcoe_hostlist_remove(lp); + + /* Don't listen for Ethernet packets anymore */ + dev_remove_pack(&fc->fcoe_packet_type); + + /* Cleanup the fc_lport */ + fc_lport_destroy(lp); + fc_fcp_destroy(lp); + + /* Detach from the scsi-ml */ + fc_remove_host(lp->host); + scsi_remove_host(lp->host); + + /* There are no more rports or I/O, free the EM */ + if (lp->emp) + fc_exch_mgr_free(lp->emp); + + /* Delete secondary MAC addresses */ + rtnl_lock(); + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); + dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN); + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 })) + dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN); + rtnl_unlock(); + + /* Free the per-CPU revieve threads */ + fcoe_percpu_clean(lp); + + /* Free existing skbs */ + fcoe_clean_pending_queue(lp); + + /* Free memory used by statistical counters */ + for_each_online_cpu(cpu) + kfree(lp->dev_stats[cpu]); + + /* Release the net_device and Scsi_Host */ + dev_put(fc->real_dev); + scsi_host_put(lp->host); + + return 0; +} + +static struct libfc_function_template fcoe_sw_libfc_fcn_templ = { + .frame_send = fcoe_xmit, +}; + +/* + * fcoe_sw_create - this function creates the fcoe interface + * @netdev: pointer the associated netdevice + * + * Creates fc_lport struct and scsi_host for lport, configures lport + * and starts fabric login. 
+ * + * Returns : 0 on success + */ +static int fcoe_sw_create(struct net_device *netdev) +{ + int rc; struct fc_lport *lp = NULL; struct fcoe_softc *fc; struct Scsi_Host *shost; - int rc = 0; - if (fcoe_find_fc_lport(netdev) != NULL) + BUG_ON(!netdev); + + printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n", + netdev->name); + + lp = fcoe_hostlist_lookup(netdev); + if (lp) return -EEXIST; - shost = scsi_host_alloc(&fcoe_driver_template, - sizeof(struct fc_lport) + + shost = fcoe_host_alloc(&fcoe_sw_shost_template, sizeof(struct fcoe_softc)); - if (!shost) { FC_DBG("Could not allocate host structure\n"); return -ENOMEM; } - lp = shost_priv(shost); - rc = lport_config(lp, shost); - if (rc) - goto out_host_put; - - /* Configure the fcoe_softc */ - fc = (struct fcoe_softc *)lp->drv_priv; - fc->lp = lp; - fc->real_dev = netdev; - shost_config(lp); + fc = lport_priv(lp); + /* configure fc_lport, e.g., em */ + rc = fcoe_sw_lport_config(lp); + if (rc) { + FC_DBG("Could not configure lport\n"); + goto out_host_put; + } - /* Add the new host to the SCSI-ml */ - rc = scsi_add_host(lp->host, NULL); + /* configure lport network properties */ + rc = fcoe_sw_netdev_config(lp, netdev); if (rc) { - FC_DBG("error on scsi_add_host\n"); - goto out_lp_destroy; + FC_DBG("Could not configure netdev for lport\n"); + goto out_host_put; } - sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s", - FCOE_DRIVER_NAME, FCOE_VERSION, - netdev->name); + /* configure lport scsi host properties */ + rc = fcoe_sw_shost_config(lp, shost, &netdev->dev); + if (rc) { + FC_DBG("Could not configure shost for lport\n"); + goto out_host_put; + } - /* Configure netdev and networking properties of the lp */ - rc = net_config(lp); - if (rc) - goto out_lp_destroy; + /* lport exch manager allocation */ + rc = fcoe_sw_em_config(lp); + if (rc) { + FC_DBG("Could not configure em for lport\n"); + goto out_host_put; + } /* Initialize the library */ - rc = libfc_config(lp); - if (rc) + rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ); + if (rc) { + FC_DBG("Could not configure libfc for lport!\n"); goto out_lp_destroy; + } - write_lock_bh(&fcoe_hostlist_lock); - list_add_tail(&fc->list, &fcoe_hostlist); - write_unlock_bh(&fcoe_hostlist_lock); + /* add to lports list */ + fcoe_hostlist_add(lp); lp->boot_time = jiffies; fc_fabric_login(lp); dev_hold(netdev); + return rc; out_lp_destroy: @@ -493,28 +436,55 @@ out_host_put: return rc; } -void fcoe_clean_pending_queue(struct fc_lport *lp) +/* + * fcoe_sw_match - the fcoe sw transport match function + * + * Returns : false always + */ +static bool fcoe_sw_match(struct net_device *netdev) { - struct fcoe_softc *fc = lp->drv_priv; - struct sk_buff *skb; - - spin_lock_bh(&fc->fcoe_pending_queue.lock); - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { - spin_unlock_bh(&fc->fcoe_pending_queue.lock); - kfree_skb(skb); - spin_lock_bh(&fc->fcoe_pending_queue.lock); - } - spin_unlock_bh(&fc->fcoe_pending_queue.lock); + /* FIXME - for sw transport, always return false */ + return false; } +/* the sw hba fcoe transport */ +struct fcoe_transport fcoe_sw_transport = { + .name = "fcoesw", + .create = fcoe_sw_create, + .destroy = fcoe_sw_destroy, + .match = fcoe_sw_match, + .vendor = 0x0, + .device = 0xffff, +}; + +/* + * fcoe_sw_init - registers fcoe_sw_transport + * + * Returns : 0 on success + */ int __init fcoe_sw_init(void) { - fcoe_transport_template = - fc_attach_transport(&fcoe_transport_function); - return fcoe_transport_template ? 
0 : -1; + /* attach to scsi transport */ + scsi_transport_fcoe_sw = + fc_attach_transport(&fcoe_sw_transport_function); + if (!scsi_transport_fcoe_sw) { + printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n"); + return -ENODEV; + } + /* register sw transport */ + fcoe_transport_register(&fcoe_sw_transport); + return 0; } -void __exit fcoe_sw_exit(void) +/* + * fcoe_sw_exit - unregisters fcoe_sw_transport + * + * Returns : 0 on success + */ +int __exit fcoe_sw_exit(void) { - fc_release_transport(fcoe_transport_template); + /* dettach the transport */ + fc_release_transport(scsi_transport_fcoe_sw); + fcoe_transport_unregister(&fcoe_sw_transport); + return 0; } diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 45a7d6f..de29ccd 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -17,10 +17,6 @@ * Maintained at www.Open-FCoE.org */ -/* - * FCOE protocol file - */ - #include #include #include @@ -28,9 +24,15 @@ #include #include #include +#include #include +#include #include #include +#include +#include +#include +#include #include #include #include @@ -39,11 +41,10 @@ #include -#include -#include - -#include -#include "fcoe_def.h" +#include +#include +#include +#include static int debug_fcoe; @@ -53,18 +54,129 @@ static int debug_fcoe; #define FCOE_GW_ADDR_MODE 0x00 #define FCOE_FCOUI_ADDR_MODE 0x01 +#define FCOE_WORD_TO_BYTE 4 + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FCoE"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.4"); + +/* fcoe host list */ +LIST_HEAD(fcoe_hostlist); +DEFINE_RWLOCK(fcoe_hostlist_lock); +DEFINE_TIMER(fcoe_timer, NULL, 0, 0); +struct fcoe_percpu_s *fcoe_percpu[NR_CPUS]; + + /* Function Prototyes */ static int fcoe_check_wait_queue(struct fc_lport *); static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *); static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *); static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *); +#ifdef CONFIG_HOTPLUG_CPU +static int fcoe_cpu_callback(struct notifier_block *, ulong, void *); +#endif /* CONFIG_HOTPLUG_CPU */ +static int fcoe_device_notification(struct notifier_block *, ulong, void *); +static void fcoe_dev_setup(void); +static void fcoe_dev_cleanup(void); + +/* notification function from net device */ +static struct notifier_block fcoe_notifier = { + .notifier_call = fcoe_device_notification, +}; + + +#ifdef CONFIG_HOTPLUG_CPU +static struct notifier_block fcoe_cpu_notifier = { + .notifier_call = fcoe_cpu_callback, +}; + +/** + * fcoe_create_percpu_data - creates the associated cpu data + * @cpu: index for the cpu where fcoe cpu data will be created + * + * create percpu stats block, from cpu add notifier + * + * Returns: none + **/ +static void fcoe_create_percpu_data(int cpu) +{ + struct fc_lport *lp; + struct fcoe_softc *fc; -/* - * this is the fcoe receive function - * called by NET_RX_SOFTIRQ - * this function will receive the packet and - * build fc frame and pass it up - */ + write_lock_bh(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + lp = fc->lp; + if (lp->dev_stats[cpu] == NULL) + lp->dev_stats[cpu] = + kzalloc(sizeof(struct fcoe_dev_stats), + GFP_KERNEL); + } + write_unlock_bh(&fcoe_hostlist_lock); +} + +/** + * fcoe_destroy_percpu_data - destroys the associated cpu data + * @cpu: index for the cpu where fcoe cpu data will destroyed + * + * destroy percpu stats block called by cpu add/remove notifier + * + * Retuns: none + **/ +static void 
fcoe_destroy_percpu_data(int cpu) +{ + struct fc_lport *lp; + struct fcoe_softc *fc; + + write_lock_bh(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + lp = fc->lp; + kfree(lp->dev_stats[cpu]); + lp->dev_stats[cpu] = NULL; + } + write_unlock_bh(&fcoe_hostlist_lock); +} + +/** + * fcoe_cpu_callback - fcoe cpu hotplug event callback + * @nfb: callback data block + * @action: event triggering the callback + * @hcpu: index for the cpu of this event + * + * this creates or destroys per cpu data for fcoe + * + * Returns NOTIFY_OK always. + **/ +static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + fcoe_create_percpu_data(cpu); + break; + case CPU_DEAD: + fcoe_destroy_percpu_data(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} +#endif /* CONFIG_HOTPLUG_CPU */ + +/** + * foce_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ + * @skb: the receive skb + * @dev: associated net device + * @ptype: context + * @odldev: last device + * + * this function will receive the packet and build fc frame and pass it up + * + * Returns: 0 for success + **/ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *olddev) { @@ -142,7 +254,14 @@ err2: kfree_skb(skb); return -1; } +EXPORT_SYMBOL_GPL(fcoe_rcv); +/** + * fcoe_start_io - pass to netdev to start xmit for fcoe + * @skb: the skb to be xmitted + * + * Returns: 0 for success + **/ static inline int fcoe_start_io(struct sk_buff *skb) { int rc; @@ -155,6 +274,13 @@ static inline int fcoe_start_io(struct sk_buff *skb) return 0; } +/** + * fcoe_get_paged_crc_eof - in case we need alloc a page for crc_eof + * @skb: the skb to be xmitted + * @tlen: total len + * + * Returns: 0 for success + **/ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; @@ -191,12 +317,53 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) return 0; } -/* - * this is the frame xmit routine - */ +/** + * fcoe_fc_crc - calculates FC CRC in this fcoe skb + * @fp: the fc_frame containg data to be checksummed + * + * This uses crc32() to calculate the crc for fc frame + * Return : 32 bit crc + * + **/ +u32 fcoe_fc_crc(struct fc_frame *fp) +{ + struct sk_buff *skb = fp_skb(fp); + struct skb_frag_struct *frag; + unsigned char *data; + unsigned long off, len, clen; + u32 crc; + unsigned i; + + crc = crc32(~0, skb->data, skb_headlen(skb)); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + off = frag->page_offset; + len = frag->size; + while (len > 0) { + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), + KM_SKB_DATA_SOFTIRQ); + crc = crc32(crc, data + (off & ~PAGE_MASK), clen); + kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); + off += clen; + len -= clen; + } + } + return crc; +} +EXPORT_SYMBOL_GPL(fcoe_fc_crc); + +/** + * fcoe_xmit - FCoE frame transmit function + * @lp: the associated local port + * @fp: the fc_frame to be transmitted + * + * Return : 0 for success + * + **/ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) { - int indx; int wlen, rc = 0; u32 crc; struct ethhdr *eh; @@ -206,15 +373,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) struct fc_frame_header *fh; unsigned int hlen; /* header length implies the version */ unsigned int tlen; /* trailer length */ + unsigned int elen; /* eth 
header, may include vlan */ int flogi_in_progress = 0; struct fcoe_softc *fc; - void *data; u8 sof, eof; struct fcoe_hdr *hp; WARN_ON((fr_len(fp) % sizeof(u32)) != 0); - fc = (struct fcoe_softc *)lp->drv_priv; + fc = fcoe_softc(lp); /* * if it is a flogi then we need to learn gw-addr * and my own fcid @@ -243,45 +410,24 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) sof = fr_sof(fp); eof = fr_eof(fp); - crc = ~0; - crc = crc32(crc, skb->data, skb_headlen(skb)); - - for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[indx]; - unsigned long off = frag->page_offset; - unsigned long len = frag->size; - - while (len > 0) { - unsigned long clen; - - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), - KM_SKB_DATA_SOFTIRQ); - crc = crc32(crc, data + (off & ~PAGE_MASK), - clen); - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); - off += clen; - len -= clen; - } - } - - /* - * Get header and trailer lengths. - * This is temporary code until we get rid of the old protocol. - * Both versions have essentially the same trailer layout but T11 - * has padding afterwards. - */ + elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? + sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; - /* - * copy fc crc and eof to the skb buff - * Use utility buffer in the fc_frame part of the sk_buff for the - * trailer. - * We don't do a get_page for this frag, since that page may not be - * managed that way. So that skb_free() doesn't do that either, we - * setup the destructor to remove this frag. - */ + /* crc offload */ + if (likely(lp->crc_offload)) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum_start = skb_headroom(skb); + skb->csum_offset = skb->len; + crc = 0; + } else { + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + } + + /* copy fc crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; if (fcoe_get_paged_crc_eof(skb, tlen)) { @@ -295,22 +441,27 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); } + memset(cp, 0, sizeof(*cp)); cp->fcoe_eof = eof; cp->fcoe_crc32 = cpu_to_le32(~crc); - if (tlen == sizeof(*cp)) - memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd)); - wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; if (skb_is_nonlinear(skb)) { kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); cp = NULL; } - /* - * Fill in the control structures - */ - skb->ip_summed = CHECKSUM_NONE; - eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr)); + /* adjust skb netowrk/transport offsets to match mac/fcoe/fc */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, elen); + skb_set_transport_header(skb, elen + hlen); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + skb->dev = fc->real_dev; + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); if (fc->address_mode == FCOE_FCOUI_ADDR_MODE) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else @@ -322,24 +473,20 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) else memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN); - eh->h_proto = htons(ETH_P_FCOE); - skb->protocol = htons(ETH_P_802_3); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - - hp = (struct fcoe_hdr *)(eh + 1); + hp = (struct fcoe_hdr *)skb_network_header(skb); 
memset(hp, 0, sizeof(*hp)); if (FC_FCOE_VER) FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); hp->fcoe_sof = sof; + /* update tx stats: regardless if LLD fails */ stats = lp->dev_stats[smp_processor_id()]; if (stats) { stats->TxFrames++; stats->TxWords += wlen; } - skb->dev = fc->real_dev; + /* send down to lld */ fr_dev(fp) = lp; if (fc->fcoe_pending_queue.qlen) rc = fcoe_check_wait_queue(lp); @@ -355,7 +502,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) return 0; } +EXPORT_SYMBOL_GPL(fcoe_xmit); +/* + * fcoe_percpu_receive_thread - recv thread per cpu + * @arg: ptr to the fcoe per cpu struct + * + * Return: 0 for success + * + */ int fcoe_percpu_receive_thread(void *arg) { struct fcoe_percpu_s *p = arg; @@ -368,7 +523,6 @@ int fcoe_percpu_receive_thread(void *arg) struct fc_frame_header *fh; struct sk_buff *skb; struct fcoe_crc_eof *cp; - enum fc_sof sof; struct fc_frame *fp; u8 *mac = NULL; struct fcoe_softc *fc; @@ -411,7 +565,7 @@ int fcoe_percpu_receive_thread(void *arg) /* * Save source MAC address before discarding header. */ - fc = lp->drv_priv; + fc = lport_priv(lp); if (unlikely(fc->flogi_progress)) mac = eth_hdr(skb)->h_source; @@ -422,7 +576,6 @@ int fcoe_percpu_receive_thread(void *arg) * Check the header and pull it off. */ hlen = sizeof(struct fcoe_hdr); - hp = (struct fcoe_hdr *)skb->data; if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { if (stats) { @@ -434,11 +587,10 @@ int fcoe_percpu_receive_thread(void *arg) kfree_skb(skb); continue; } - sof = hp->fcoe_sof; - skb_pull(skb, sizeof(*hp)); - fr_len = skb->len - sizeof(struct fcoe_crc_eof); - skb_trim(skb, fr_len); + skb_pull(skb, sizeof(struct fcoe_hdr)); tlen = sizeof(struct fcoe_crc_eof); + fr_len = skb->len - tlen; + skb_trim(skb, fr_len); if (unlikely(fr_len > skb->len)) { if (stats) { @@ -456,47 +608,61 @@ int fcoe_percpu_receive_thread(void *arg) stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; } - fp = (struct fc_frame *) skb; - fc_frame_init(fp); + fp = (struct fc_frame *)skb; cp = (struct fcoe_crc_eof *)(skb->data + fr_len); + fc_frame_init(fp); fr_eof(fp) = cp->fcoe_eof; - fr_sof(fp) = sof; + fr_sof(fp) = hp->fcoe_sof; fr_dev(fp) = lp; /* - * Check the CRC here, unless it's solicited data for SCSI. - * In that case, the SCSI layer can check it during the copy, - * and it'll be more cache-efficient. + * We only check CRC if no offload is available and if it is + * it's solicited data, in which case, the FCP layer would + * check it during the copy. 
*/ + if (lp->crc_offload) + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + else + fr_flags(fp) |= FCPHF_CRC_UNCHECKED; + fh = fc_frame_header_get(fp); if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { - fr_flags(fp) |= FCPHF_CRC_UNCHECKED; fc_exch_recv(lp, lp->emp, fp); - } else if (le32_to_cpu(cp->fcoe_crc32) == - ~crc32(~0, skb->data, fr_len)) { - if (unlikely(fc->flogi_progress)) - fcoe_recv_flogi(fc, fp, mac); - fc_exch_recv(lp, lp->emp, fp); - } else { - if (debug_fcoe || - (stats && stats->InvalidCRCCount < 5)) { - printk(KERN_WARNING \ - "fcoe: dropping frame with CRC error"); - } - if (stats) { + continue; + } + if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { + if (le32_to_cpu(cp->fcoe_crc32) != + ~crc32(~0, skb->data, fr_len)) { + if (debug_fcoe || stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping " + "frame with CRC error\n"); stats->InvalidCRCCount++; stats->ErrorFrames++; + fc_frame_free(fp); + continue; } - fc_frame_free(fp); + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; } + /* non flogi and non data exchanges are handled here */ + if (unlikely(fc->flogi_progress)) + fcoe_recv_flogi(fc, fp, mac); + fc_exch_recv(lp, lp->emp, fp); } return 0; } -/* - * Snoop potential response to FLOGI or even incoming FLOGI. - */ +/** + * fcoe_recv_flogi - flogi receive function + * @fc: associated fcoe_softc + * @fp: the recieved frame + * @sa: the source address of this flogi + * + * This is responsible to parse the flogi response and sets the corresponding + * mac address for the initiator, eitehr OUI based or GW based. + * + * Returns: none + **/ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) { struct fc_frame_header *fh; @@ -543,6 +709,16 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) } } +/** + * fcoe_watchdog - fcoe timer callback + * @vp: + * + * This checks the pending queue length for fcoe and put fcoe to be paused state + * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the + * fcoe_hostlist. + * + * Returns: 0 for success + **/ void fcoe_watchdog(ulong vp) { struct fc_lport *lp; @@ -567,27 +743,23 @@ void fcoe_watchdog(ulong vp) add_timer(&fcoe_timer); } -/* - * the wait_queue is used when the skb transmit fails. skb will go - * in the wait_queue which will be emptied by the time function OR - * by the next skb transmit. - * - */ -/* - * Function name : fcoe_check_wait_queue() +/** + * fcoe_check_wait_queue - put the skb into fcoe pending xmit queue + * @lp: the fc_port for this skb + * @skb: the associated skb to be xmitted * - * Return Values : 0 or error + * This empties the wait_queue, dequeue the head of the wait_queue queue + * and calls fcoe_start_io() for each packet, if all skb have been + * transmitted, return 0 if a error occurs, then restore wait_queue and + * try again later. * - * Description : empties the wait_queue - * dequeue the head of the wait_queue queue and - * calls fcoe_start_io() for each packet - * if all skb have been transmitted, return 0 - * if a error occurs, then restore wait_queue and try again - * later + * The wait_queue is used when the skb transmit fails. skb will go + * in the wait_queue which will be emptied by the time function OR + * by the next skb transmit. 
* - */ - + * Returns: 0 for success + **/ static int fcoe_check_wait_queue(struct fc_lport *lp) { int rc, unpause = 0; @@ -595,7 +767,7 @@ static int fcoe_check_wait_queue(struct fc_lport *lp) struct sk_buff *skb; struct fcoe_softc *fc; - fc = (struct fcoe_softc *)lp->drv_priv; + fc = fcoe_softc(lp); spin_lock_bh(&fc->fcoe_pending_queue.lock); /* @@ -622,24 +794,714 @@ static int fcoe_check_wait_queue(struct fc_lport *lp) return fc->fcoe_pending_queue.qlen; } +/** + * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head + * @lp: the fc_port for this skb + * @skb: the associated skb to be xmitted + * + * Returns: none + **/ static void fcoe_insert_wait_queue_head(struct fc_lport *lp, struct sk_buff *skb) { struct fcoe_softc *fc; - fc = (struct fcoe_softc *)lp->drv_priv; + fc = fcoe_softc(lp); spin_lock_bh(&fc->fcoe_pending_queue.lock); __skb_queue_head(&fc->fcoe_pending_queue, skb); spin_unlock_bh(&fc->fcoe_pending_queue.lock); } +/** + * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail + * @lp: the fc_port for this skb + * @skb: the associated skb to be xmitted + * + * Returns: none + **/ static void fcoe_insert_wait_queue(struct fc_lport *lp, struct sk_buff *skb) { struct fcoe_softc *fc; - fc = (struct fcoe_softc *)lp->drv_priv; + fc = fcoe_softc(lp); spin_lock_bh(&fc->fcoe_pending_queue.lock); __skb_queue_tail(&fc->fcoe_pending_queue, skb); spin_unlock_bh(&fc->fcoe_pending_queue.lock); } + +/** + * fcoe_dev_setup - setup link change notification interface + * + **/ +static void fcoe_dev_setup(void) +{ + /* + * here setup a interface specific wd time to + * monitor the link state + */ + register_netdevice_notifier(&fcoe_notifier); +} + +/** + * fcoe_dev_setup - cleanup link change notification interface + **/ +static void fcoe_dev_cleanup(void) +{ + unregister_netdevice_notifier(&fcoe_notifier); +} + +/** + * fcoe_device_notification - netdev event notification callback + * @notifier: context of the notification + * @event: type of event + * @ptr: fixed array for output parsed ifname + * + * This function is called by the ethernet driver in case of link change event + * + * Returns: 0 for success + **/ +static int fcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct fc_lport *lp = NULL; + struct net_device *real_dev = ptr; + struct fcoe_softc *fc; + struct fcoe_dev_stats *stats; + u16 new_status; + u32 mfs; + int rc = NOTIFY_OK; + + read_lock(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + if (fc->real_dev == real_dev) { + lp = fc->lp; + break; + } + } + read_unlock(&fcoe_hostlist_lock); + if (lp == NULL) { + rc = NOTIFY_DONE; + goto out; + } + + new_status = lp->link_status; + switch (event) { + case NETDEV_DOWN: + case NETDEV_GOING_DOWN: + new_status &= ~FC_LINK_UP; + break; + case NETDEV_UP: + case NETDEV_CHANGE: + new_status &= ~FC_LINK_UP; + if (!fcoe_link_ok(lp)) + new_status |= FC_LINK_UP; + break; + case NETDEV_CHANGEMTU: + mfs = fc->real_dev->mtu - + (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); + if (fc->user_mfs && fc->user_mfs < mfs) + mfs = fc->user_mfs; + if (mfs >= FC_MIN_MAX_FRAME) + fc_set_mfs(lp, mfs); + new_status &= ~FC_LINK_UP; + if (!fcoe_link_ok(lp)) + new_status |= FC_LINK_UP; + break; + case NETDEV_REGISTER: + break; + default: + FC_DBG("unknown event %ld call", event); + } + if (lp->link_status != new_status) { + if ((new_status & FC_LINK_UP) == FC_LINK_UP) + fc_linkup(lp); + else { + stats = lp->dev_stats[smp_processor_id()]; + if (stats) + 
stats->LinkFailureCount++; + fc_linkdown(lp); + fcoe_clean_pending_queue(lp); + } + } +out: + return rc; +} + +/** + * fcoe_if_to_netdev - parse a name buffer to get netdev + * @ifname: fixed array for output parsed ifname + * @buffer: incoming buffer to be copied + * + * Returns: NULL or ptr to netdeive + **/ +static struct net_device *fcoe_if_to_netdev(const char *buffer) +{ + char *cp; + char ifname[IFNAMSIZ + 2]; + + if (buffer) { + strlcpy(ifname, buffer, IFNAMSIZ); + cp = ifname + strlen(ifname); + while (--cp >= ifname && *cp == '\n') + *cp = '\0'; + return dev_get_by_name(&init_net, ifname); + } + return NULL; +} + +/** + * fcoe_netdev_to_module_owner - finds out the nic drive moddule of the netdev + * @netdev: the target netdev + * + * Returns: ptr to the struct module, NULL for failure + **/ +static struct module *fcoe_netdev_to_module_owner( + const struct net_device *netdev) +{ + struct device *dev; + + if (!netdev) + return NULL; + + dev = netdev->dev.parent; + if (!dev) + return NULL; + + if (!dev->driver) + return NULL; + + return dev->driver->owner; +} + +/** + * fcoe_ethdrv_get - holds the nic driver module by try_module_get() for + * the corresponding netdev. + * @netdev: the target netdev + * + * Returns: 0 for succsss + **/ +static int fcoe_ethdrv_get(const struct net_device *netdev) +{ + struct module *owner; + + owner = fcoe_netdev_to_module_owner(netdev); + if (owner) { + printk(KERN_DEBUG "foce:hold driver module %s for %s\n", + owner->name, netdev->name); + return try_module_get(owner); + } + return -ENODEV; +} + +/** + * fcoe_ethdrv_get - releases the nic driver module by module_put for + * the corresponding netdev. + * @netdev: the target netdev + * + * Returns: 0 for succsss + **/ +static int fcoe_ethdrv_put(const struct net_device *netdev) +{ + struct module *owner; + + owner = fcoe_netdev_to_module_owner(netdev); + if (owner) { + printk(KERN_DEBUG "foce:release driver module %s for %s\n", + owner->name, netdev->name); + module_put(owner); + return 0; + } + return -ENODEV; +} + +/** + * fcoe_destroy- handles the destroy from sysfs + * @buffer: expcted to be a eth if name + * @kp: associated kernel param + * + * Returns: 0 for success + **/ +static int fcoe_destroy(const char *buffer, struct kernel_param *kp) +{ + int rc; + struct net_device *netdev; + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + rc = -ENODEV; + goto out_nodev; + } + /* look for existing lport */ + if (!fcoe_hostlist_lookup(netdev)) { + rc = -ENODEV; + goto out_putdev; + } + /* pass to transport */ + rc = fcoe_transport_release(netdev); + if (rc) { + printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n", + netdev->name); + rc = -EIO; + goto out_putdev; + } + fcoe_ethdrv_put(netdev); + rc = 0; +out_putdev: + dev_put(netdev); +out_nodev: + return rc; +} + +/** + * fcoe_create - handles the create call from sysfs + * @buffer: expcted to be a eth if name + * @kp: associated kernel param + * + * Returns: 0 for success + **/ +static int fcoe_create(const char *buffer, struct kernel_param *kp) +{ + int rc; + struct net_device *netdev; + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + rc = -ENODEV; + goto out_nodev; + } + /* look for existing lport */ + if (fcoe_hostlist_lookup(netdev)) { + rc = -EEXIST; + goto out_putdev; + } + fcoe_ethdrv_get(netdev); + + /* pass to transport */ + rc = fcoe_transport_attach(netdev); + if (rc) { + printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n", + netdev->name); + fcoe_ethdrv_put(netdev); + rc = -EIO; + goto out_putdev; + } + rc = 
0; +out_putdev: + dev_put(netdev); +out_nodev: + return rc; +} + +module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(create, "string"); +MODULE_PARM_DESC(create, "Create fcoe port using net device passed in."); +module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(destroy, "string"); +MODULE_PARM_DESC(destroy, "Destroy fcoe port"); + +/* + * fcoe_link_ok - check if link is ok for the fc_lport + * @lp: ptr to the fc_lport + * + * Any permanently-disqualifying conditions have been previously checked. + * This also updates the speed setting, which may change with link for 100/1000. + * + * This function should probably be checking for PAUSE support at some point + * in the future. Currently Per-priority-pause is not determinable using + * ethtool, so we shouldn't be restrictive until that problem is resolved. + * + * Returns: 0 if link is OK for use by FCoE. + * + */ +int fcoe_link_ok(struct fc_lport *lp) +{ + struct fcoe_softc *fc = fcoe_softc(lp); + struct net_device *dev = fc->real_dev; + struct ethtool_cmd ecmd = { ETHTOOL_GSET }; + int rc = 0; + + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { + dev = fc->phys_dev; + if (dev->ethtool_ops->get_settings) { + dev->ethtool_ops->get_settings(dev, &ecmd); + lp->link_supported_speeds &= + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + if (ecmd.supported & (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full)) + lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; + if (ecmd.supported & SUPPORTED_10000baseT_Full) + lp->link_supported_speeds |= + FC_PORTSPEED_10GBIT; + if (ecmd.speed == SPEED_1000) + lp->link_speed = FC_PORTSPEED_1GBIT; + if (ecmd.speed == SPEED_10000) + lp->link_speed = FC_PORTSPEED_10GBIT; + } + } else + rc = -1; + + return rc; +} +EXPORT_SYMBOL_GPL(fcoe_link_ok); + +/* + * fcoe_percpu_clean - frees skb of the corresponding lport from the per + * cpu queue. + * @lp: the fc_lport + */ +void fcoe_percpu_clean(struct fc_lport *lp) +{ + int idx; + struct fcoe_percpu_s *pp; + struct fcoe_rcv_info *fr; + struct sk_buff_head *list; + struct sk_buff *skb, *next; + struct sk_buff *head; + + for (idx = 0; idx < NR_CPUS; idx++) { + if (fcoe_percpu[idx]) { + pp = fcoe_percpu[idx]; + spin_lock_bh(&pp->fcoe_rx_list.lock); + list = &pp->fcoe_rx_list; + head = list->next; + for (skb = head; skb != (struct sk_buff *)list; + skb = next) { + next = skb->next; + fr = fcoe_dev_from_skb(skb); + if (fr->fr_dev == lp) { + __skb_unlink(skb, list); + kfree_skb(skb); + } + } + spin_unlock_bh(&pp->fcoe_rx_list.lock); + } + } +} +EXPORT_SYMBOL_GPL(fcoe_percpu_clean); + +/** + * fcoe_clean_pending_queue - dequeue skb and free it + * @lp: the corresponding fc_lport + * + * Returns: none + **/ +void fcoe_clean_pending_queue(struct fc_lport *lp) +{ + struct fcoe_softc *fc = lport_priv(lp); + struct sk_buff *skb; + + spin_lock_bh(&fc->fcoe_pending_queue.lock); + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&fc->fcoe_pending_queue.lock); + kfree_skb(skb); + spin_lock_bh(&fc->fcoe_pending_queue.lock); + } + spin_unlock_bh(&fc->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); + +/** + * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport + * @sht: ptr to the scsi host templ + * @priv_size: size of private data after fc_lport + * + * Returns: ptr to Scsi_Host + * TODO - to libfc? 
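A usage note on the create/destroy hooks above: once fcoe.ko is loaded, an instance is bound to an interface by writing its name to the parameter file, e.g. "echo eth0 > /sys/module/fcoe/parameters/create", and removed through the destroy parameter. The fragment below is a minimal, hypothetical illustration of the same write-only module_param_call() pattern, not code from this patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical demo of a write-only module parameter handled by a
 * callback, mirroring the fcoe create/destroy hooks above. */
static int demo_create(const char *val, struct kernel_param *kp)
{
        printk(KERN_INFO "demo: create request for %s\n", val);
        return 0;       /* a real handler would parse val and act on it */
}

module_param_call(create, demo_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Demo create hook");
MODULE_LICENSE("GPL");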
+ */ +static inline struct Scsi_Host *libfc_host_alloc( + struct scsi_host_template *sht, int priv_size) +{ + return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); +} + +/** + * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc + * @sht: ptr to the scsi host templ + * @priv_size: size of private data after fc_lport + * + * Returns: ptr to Scsi_Host + */ +struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size) +{ + return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size); +} +EXPORT_SYMBOL_GPL(fcoe_host_alloc); + +/* + * fcoe_reset - resets the fcoe + * @shost: shost the reset is from + * + * Returns: always 0 + */ +int fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + fc_lport_reset(lport); + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_reset); + +/* + * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN. + * @mac: mac address + * @scheme: check port + * @port: port indicator for converting + * + * Returns: u64 fc world wide name + */ +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], + unsigned int scheme, unsigned int port) +{ + u64 wwn; + u64 host_mac; + + /* The MAC is in NO, so flip only the low 48 bits */ + host_mac = ((u64) mac[0] << 40) | + ((u64) mac[1] << 32) | + ((u64) mac[2] << 24) | + ((u64) mac[3] << 16) | + ((u64) mac[4] << 8) | + (u64) mac[5]; + + WARN_ON(host_mac >= (1ULL << 48)); + wwn = host_mac | ((u64) scheme << 60); + switch (scheme) { + case 1: + WARN_ON(port != 0); + break; + case 2: + WARN_ON(port >= 0xfff); + wwn |= (u64) port << 48; + break; + default: + WARN_ON(1); + break; + } + + return wwn; +} +EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); +/* + * fcoe_hostlist_lookup_softc - find the corresponding lport by a given device + * @device: this is currently ptr to net_device + * + * Returns: NULL or the located fcoe_softc + */ +static struct fcoe_softc *fcoe_hostlist_lookup_softc( + const struct net_device *dev) +{ + struct fcoe_softc *fc; + + read_lock(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + if (fc->real_dev == dev) { + read_unlock(&fcoe_hostlist_lock); + return fc; + } + } + read_unlock(&fcoe_hostlist_lock); + return NULL; +} + +/* + * fcoe_hostlist_lookup - find the corresponding lport by netdev + * @netdev: ptr to net_device + * + * Returns: 0 for success + */ +struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) +{ + struct fcoe_softc *fc; + + fc = fcoe_hostlist_lookup_softc(netdev); + + return (fc) ? 
fc->lp : NULL; +} +EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup); + +/* + * fcoe_hostlist_add - add a lport to lports list + * @lp: ptr to the fc_lport to badded + * + * Returns: 0 for success + */ +int fcoe_hostlist_add(const struct fc_lport *lp) +{ + struct fcoe_softc *fc; + + fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); + if (!fc) { + fc = fcoe_softc(lp); + write_lock_bh(&fcoe_hostlist_lock); + list_add_tail(&fc->list, &fcoe_hostlist); + write_unlock_bh(&fcoe_hostlist_lock); + } + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_hostlist_add); + +/* + * fcoe_hostlist_remove - remove a lport from lports list + * @lp: ptr to the fc_lport to badded + * + * Returns: 0 for success + */ +int fcoe_hostlist_remove(const struct fc_lport *lp) +{ + struct fcoe_softc *fc; + + fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); + BUG_ON(!fc); + write_lock_bh(&fcoe_hostlist_lock); + list_del(&fc->list); + write_unlock_bh(&fcoe_hostlist_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_hostlist_remove); + +/** + * fcoe_libfc_config - sets up libfc related properties for lport + * @lp: ptr to the fc_lport + * @tt: libfc function template + * + * Returns : 0 for success + **/ +int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) +{ + /* Set the function pointers set by the LLDD */ + memcpy(&lp->tt, tt, sizeof(*tt)); + if (fc_fcp_init(lp)) + return -ENOMEM; + fc_exch_init(lp); + fc_elsct_init(lp); + fc_lport_init(lp); + fc_rport_init(lp); + fc_disc_init(lp); + + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_libfc_config); + +/** + * fcoe_init - fcoe module loading initialization + * + * Initialization routine + * 1. Will create fc transport software structure + * 2. initialize the link list of port information structure + * + * Returns 0 on success, negative on failure + **/ +static int __init fcoe_init(void) +{ + int cpu; + struct fcoe_percpu_s *p; + + + INIT_LIST_HEAD(&fcoe_hostlist); + rwlock_init(&fcoe_hostlist_lock); + +#ifdef CONFIG_HOTPLUG_CPU + register_cpu_notifier(&fcoe_cpu_notifier); +#endif /* CONFIG_HOTPLUG_CPU */ + + /* + * initialize per CPU interrupt thread + */ + for_each_online_cpu(cpu) { + p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL); + if (p) { + p->thread = kthread_create(fcoe_percpu_receive_thread, + (void *)p, + "fcoethread/%d", cpu); + + /* + * if there is no error then bind the thread to the cpu + * initialize the semaphore and skb queue head + */ + if (likely(!IS_ERR(p->thread))) { + p->cpu = cpu; + fcoe_percpu[cpu] = p; + skb_queue_head_init(&p->fcoe_rx_list); + kthread_bind(p->thread, cpu); + wake_up_process(p->thread); + } else { + fcoe_percpu[cpu] = NULL; + kfree(p); + + } + } + } + + /* + * setup link change notification + */ + fcoe_dev_setup(); + + init_timer(&fcoe_timer); + fcoe_timer.data = 0; + fcoe_timer.function = fcoe_watchdog; + fcoe_timer.expires = (jiffies + (10 * HZ)); + add_timer(&fcoe_timer); + + /* initiatlize the fcoe transport */ + fcoe_transport_init(); + + fcoe_sw_init(); + + return 0; +} +module_init(fcoe_init); + +/** + * fcoe_exit - fcoe module unloading cleanup + * + * Returns 0 on success, negative on failure + **/ +static void __exit fcoe_exit(void) +{ + u32 idx; + struct fcoe_softc *fc, *tmp; + struct fcoe_percpu_s *p; + struct sk_buff *skb; + + /* + * Stop all call back interfaces + */ +#ifdef CONFIG_HOTPLUG_CPU + unregister_cpu_notifier(&fcoe_cpu_notifier); +#endif /* CONFIG_HOTPLUG_CPU */ + fcoe_dev_cleanup(); + + /* + * stop timer + */ + del_timer_sync(&fcoe_timer); + + /* releases the assocaited fcoe transport for each lport */ + 
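To make fcoe_wwn_from_mac() above concrete, here is a hypothetical usage with a made-up MAC address, assuming the exported prototype is in scope; the constants are for illustration only.

#include <linux/kernel.h>
#include <linux/netdevice.h>    /* MAX_ADDR_LEN */

/* Hypothetical example: the six MAC bytes become the low 48 bits of the
 * WWN (00:1b:21:01:02:03 -> 0x001b21010203), the scheme goes into bits
 * 63:60, and with scheme 2 the port number goes into bits 59:48. */
static void fcoe_wwn_example(void)
{
        unsigned char mac[MAX_ADDR_LEN] = { 0x00, 0x1b, 0x21, 0x01, 0x02, 0x03 };
        u64 a = fcoe_wwn_from_mac(mac, 1, 0);   /* 0x1000001b21010203 */
        u64 b = fcoe_wwn_from_mac(mac, 2, 1);   /* 0x2001001b21010203 */

        printk(KERN_INFO "wwn/scheme1 %llx wwn/scheme2,port1 %llx\n",
               (unsigned long long)a, (unsigned long long)b);
}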
list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) + fcoe_transport_release(fc->real_dev); + + for (idx = 0; idx < NR_CPUS; idx++) { + if (fcoe_percpu[idx]) { + kthread_stop(fcoe_percpu[idx]->thread); + p = fcoe_percpu[idx]; + spin_lock_bh(&p->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + if (fcoe_percpu[idx]->crc_eof_page) + put_page(fcoe_percpu[idx]->crc_eof_page); + kfree(fcoe_percpu[idx]); + } + } + + /* remove sw trasnport */ + fcoe_sw_exit(); + + /* detach the transport */ + fcoe_transport_exit(); +} +module_exit(fcoe_exit); diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile index e6d4086..55f982d 100644 --- a/drivers/scsi/libfc/Makefile +++ b/drivers/scsi/libfc/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_LIBFC) += libfc.o libfc-objs := \ fc_disc.o \ fc_exch.o \ + fc_elsct.o \ fc_frame.o \ fc_lport.o \ fc_rport.o \ diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index d50f1a5..aee2f9c 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -30,11 +30,13 @@ #include -#include +#include #define FC_DISC_RETRY_LIMIT 3 /* max retries */ #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ +#define FC_DISC_DELAY 3 + static int fc_disc_debug; #define FC_DEBUG_DISC(fmt...) \ @@ -43,26 +45,182 @@ static int fc_disc_debug; FC_DBG(fmt); \ } while (0) -static void fc_disc_gpn_ft_req(struct fc_lport *); +static struct mutex disc_list_lock; +static struct list_head disc_list; + +struct fc_disc { + unsigned char retry_count; + unsigned char delay; + unsigned char pending; + unsigned char requested; + unsigned short seq_count; + unsigned char buf_len; + enum fc_disc_event event; + + void (*disc_callback)(struct fc_lport *, + enum fc_disc_event); + + struct list_head rports; + struct fc_lport *lport; + struct mutex disc_mutex; + struct fc_gpn_ft_resp partial_buf; /* partial name buffer */ + struct delayed_work disc_work; + + struct list_head list; +}; + +static void fc_disc_gpn_ft_req(struct fc_disc *); static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); -static int fc_disc_new_target(struct fc_lport *, struct fc_rport *, +static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, struct fc_rport_identifiers *); -static void fc_disc_del_target(struct fc_lport *, struct fc_rport *); -static void fc_disc_done(struct fc_lport *); -static void fc_disc_error(struct fc_lport *, struct fc_frame *); +static void fc_disc_del_target(struct fc_disc *, struct fc_rport *); +static void fc_disc_done(struct fc_disc *); static void fc_disc_timeout(struct work_struct *); -static void fc_disc_single(struct fc_lport *, struct fc_disc_port *); -static int fc_disc_restart(struct fc_lport *); +static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); +static void fc_disc_restart(struct fc_disc *); + +/** + * fc_disc_lookup_rport - lookup a remote port by port_id + * @lport: Fibre Channel host port instance + * @port_id: remote port port_id to match + */ +struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport, + u32 port_id) +{ + struct fc_disc *disc; + struct fc_rport *rport, *found = NULL; + struct fc_rport_libfc_priv *rdata; + int disc_found = 0; + + mutex_lock(&disc_list_lock); + list_for_each_entry(disc, &disc_list, list) { + if (disc->lport == lport) { + list_for_each_entry(rdata, &disc->rports, peers) { + rport = PRIV_TO_RPORT(rdata); + if (rport->port_id == port_id) { + disc_found = 1; + found = 
rport; + get_device(&found->dev); + break; + } + } + } + } + mutex_unlock(&disc_list_lock); + + if (!disc_found) { + FC_DEBUG_DISC("The rport (%6x) for lport (%6x) " + "is not maintained by the discovery layer\n", + port_id, fc_host_port_id(lport->host)); + found = NULL; + } + + return found; +} + +/** + * fc_disc_alloc - Allocate a discovery work object + * @lport: The FC lport associated with the discovery job + */ +static inline struct fc_disc *fc_disc_alloc(struct fc_lport *lport) +{ + struct fc_disc *disc; + + disc = kzalloc(sizeof(struct fc_disc), GFP_KERNEL); + INIT_LIST_HEAD(&disc->list); + INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); + mutex_init(&disc->disc_mutex); + INIT_LIST_HEAD(&disc->rports); + + disc->lport = lport; + disc->delay = FC_DISC_DELAY; + disc->event = DISC_EV_NONE; + + mutex_lock(&disc_list_lock); + list_add_tail(&disc->list, &disc_list); + mutex_unlock(&disc_list_lock); + + return disc; +} + +/** + * fc_disc_stop_rports - delete all the remote ports associated with the lport + * @disc: The discovery job to stop rports on + * + * Locking Note: This function expects that the lport mutex is locked before + * calling it. + */ +void fc_disc_stop_rports(struct fc_disc *disc) +{ + struct fc_lport *lport; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rdata, *next; + + lport = disc->lport; + + mutex_lock(&disc->disc_mutex); + list_for_each_entry_safe(rdata, next, &disc->rports, peers) { + rport = PRIV_TO_RPORT(rdata); + list_del(&rdata->peers); + lport->tt.rport_logoff(rport); + } + + mutex_unlock(&disc->disc_mutex); +} + +/** + * fc_disc_rport_event - Event handler for rport events + * @lport: The lport which is receiving the event + * @rport: The rport which the event has occured on + * @event: The event that occured + * + * Locking Note: The rport lock should not be held when calling + * this function. + */ +static void fc_disc_rport_event(struct fc_lport *lport, + struct fc_rport *rport, + enum fc_lport_event event) +{ + struct fc_rport_libfc_priv *rdata = rport->dd_data; + struct fc_disc *disc; + int found = 0; + + FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event, + rport->port_id); + + if (event == RPORT_EV_CREATED) { + mutex_lock(&disc_list_lock); + list_for_each_entry(disc, &disc_list, list) { + if (disc->lport == lport) { + found = 1; + mutex_lock(&disc->disc_mutex); + list_add_tail(&rdata->peers, &disc->rports); + mutex_unlock(&disc->disc_mutex); + } + } + mutex_unlock(&disc_list_lock); + } + + if (!found) + FC_DEBUG_DISC("The rport (%6x) is not maintained " + "by the discovery layer\n", rport->port_id); +} /** * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN) * @sp: Current sequence of the RSCN exchange * @fp: RSCN Frame * @lport: Fibre Channel host port instance + * + * Locking Note: This function expects that the disc_mutex is locked + * before it is called. 
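The lport-to-discovery association above is resolved by walking the global disc_list under disc_list_lock, and the same walk recurs in several functions below. A hypothetical helper capturing that pattern (it would sit in fc_disc.c next to the list; not part of the patch):

/* Hypothetical helper: return the discovery context registered for
 * @lport, or NULL if none.  Must not be called with disc_list_lock held. */
static struct fc_disc *fc_disc_find(const struct fc_lport *lport)
{
        struct fc_disc *disc, *found = NULL;

        mutex_lock(&disc_list_lock);
        list_for_each_entry(disc, &disc_list, list) {
                if (disc->lport == lport) {
                        found = disc;
                        break;
                }
        }
        mutex_unlock(&disc_list_lock);
        return found;
}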
*/ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lport) + struct fc_disc *disc) { + struct fc_lport *lport; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rdata; struct fc_els_rscn *rp; struct fc_els_rscn_page *pp; struct fc_seq_els_data rjt_data; @@ -70,9 +228,14 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, int redisc = 0; enum fc_els_rscn_ev_qual ev_qual; enum fc_els_rscn_addr_fmt fmt; - LIST_HEAD(disc_list); + LIST_HEAD(disc_ports); struct fc_disc_port *dp, *next; + lport = disc->lport; + + FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n", + fc_host_port_id(lport->host)); + rp = fc_frame_payload_get(fp, sizeof(*rp)); if (!rp || rp->rscn_page_len != sizeof(*pp)) @@ -106,7 +269,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, dp->ids.port_name = -1; dp->ids.node_name = -1; dp->ids.roles = FC_RPORT_ROLE_UNKNOWN; - list_add_tail(&dp->peers, &disc_list); + list_add_tail(&dp->peers, &disc_ports); break; case ELS_ADDR_FMT_AREA: case ELS_ADDR_FMT_DOM: @@ -120,18 +283,20 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); if (redisc) { FC_DEBUG_DISC("RSCN received: rediscovering\n"); - list_for_each_entry_safe(dp, next, &disc_list, peers) { - list_del(&dp->peers); - kfree(dp); - } - fc_disc_restart(lport); + fc_disc_restart(disc); } else { FC_DEBUG_DISC("RSCN received: not rediscovering. " "redisc %d state %d in_prog %d\n", - redisc, lport->state, lport->disc_pending); - list_for_each_entry_safe(dp, next, &disc_list, peers) { + redisc, lport->state, disc->pending); + list_for_each_entry_safe(dp, next, &disc_ports, peers) { list_del(&dp->peers); - fc_disc_single(lport, dp); + rport = lport->tt.rport_lookup(lport, dp->ids.port_id); + if (rport) { + rdata = RPORT_TO_PRIV(rport); + list_del(&rdata->peers); + lport->tt.rport_logoff(rport); + } + fc_disc_single(disc, dp); } } fc_frame_free(fp); @@ -149,16 +314,39 @@ reject: * @sp: Current sequence of the request exchange * @fp: The frame * @lport: The FC local port + * + * Locking Note: This function is called from the EM and will lock + * the disc_mutex before calling the handler for the + * request. */ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, struct fc_lport *lport) { u8 op; + struct fc_disc *disc; + int found = 0; + + mutex_lock(&disc_list_lock); + list_for_each_entry(disc, &disc_list, list) { + if (disc->lport == lport) { + found = 1; + break; + } + } + mutex_unlock(&disc_list_lock); + + if (!found) { + FC_DBG("Received a request for an lport not managed " + "by the discovery engine\n"); + return; + } op = fc_frame_payload_op(fp); switch (op) { case ELS_RSCN: - fc_disc_recv_rscn_req(sp, fp, lport); + mutex_lock(&disc->disc_mutex); + fc_disc_recv_rscn_req(sp, fp, disc); + mutex_unlock(&disc->disc_mutex); break; default: FC_DBG("Received an unsupported request. opcode (%x)\n", op); @@ -168,16 +356,30 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_disc_restart - Restart discovery - * @lport: FC local port + * @lport: FC discovery context + * + * Locking Note: This function expects that the disc mutex + * is already locked. 
*/ -static int fc_disc_restart(struct fc_lport *lport) +static void fc_disc_restart(struct fc_disc *disc) { - if (!lport->disc_requested && !lport->disc_pending) { - schedule_delayed_work(&lport->disc_work, - msecs_to_jiffies(lport->disc_delay * 1000)); + struct fc_rport *rport; + struct fc_rport_libfc_priv *rdata, *next; + struct fc_lport *lport = disc->lport; + + FC_DEBUG_DISC("Restarting discovery for port (%6x)\n", + fc_host_port_id(lport->host)); + + list_for_each_entry_safe(rdata, next, &disc->rports, peers) { + rport = PRIV_TO_RPORT(rdata); + FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id); + list_del(&rdata->peers); + lport->tt.rport_logoff(rport); } - lport->disc_requested = 1; - return 0; + + disc->requested = 1; + if (!disc->pending) + fc_disc_gpn_ft_req(disc); } /** @@ -186,29 +388,58 @@ static int fc_disc_restart(struct fc_lport *lport) * * Returns non-zero if discovery cannot be started. */ -static int fc_disc_start(struct fc_lport *lport) +static void fc_disc_start(void (*disc_callback)(struct fc_lport *, + enum fc_disc_event), + struct fc_lport *lport) { struct fc_rport *rport; - int error; struct fc_rport_identifiers ids; + struct fc_disc *disc; + int found = 0; + + mutex_lock(&disc_list_lock); + list_for_each_entry(disc, &disc_list, list) { + if (disc->lport == lport) { + found = 1; + break; + } + } + mutex_unlock(&disc_list_lock); + + if (!found) { + FC_DEBUG_DISC("No existing discovery job, " + "creating one for lport (%6x)\n", + fc_host_port_id(lport->host)); + disc = fc_disc_alloc(lport); + } else + FC_DEBUG_DISC("Found an existing discovery job " + "for lport (%6x)\n", + fc_host_port_id(lport->host)); + + /* + * At this point we may have a new disc job or an existing + * one. Either way, let's lock when we make changes to it + * and send the GPN_FT request. + */ + mutex_lock(&disc->disc_mutex); + + disc->disc_callback = disc_callback; /* * If not ready, or already running discovery, just set request flag. */ - if (!fc_lport_test_ready(lport) || lport->disc_pending) { - lport->disc_requested = 1; + disc->requested = 1; - return 0; + if (disc->pending) { + mutex_unlock(&disc->disc_mutex); + return; } - lport->disc_pending = 1; - lport->disc_requested = 0; - lport->disc_retry_count = 0; /* * Handle point-to-point mode as a simple discovery - * of the remote port. + * of the remote port. Yucky, yucky, yuck, yuck! 
*/ - rport = lport->ptp_rp; + rport = disc->lport->ptp_rp; if (rport) { ids.port_id = rport->port_id; ids.port_name = rport->port_name; @@ -216,32 +447,16 @@ static int fc_disc_start(struct fc_lport *lport) ids.roles = FC_RPORT_ROLE_UNKNOWN; get_device(&rport->dev); - error = fc_disc_new_target(lport, rport, &ids); + if (!fc_disc_new_target(disc, rport, &ids)) { + disc->event = DISC_EV_SUCCESS; + fc_disc_done(disc); + } put_device(&rport->dev); - if (!error) - fc_disc_done(lport); } else { - fc_disc_gpn_ft_req(lport); /* get ports by FC-4 type */ - error = 0; + fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */ } - return error; -} - -/** - * fc_disc_retry - Retry discovery - * @lport: FC local port - */ -static void fc_disc_retry(struct fc_lport *lport) -{ - unsigned long delay = FC_DISC_RETRY_DELAY; - if (!lport->disc_retry_count) - delay /= 4; /* timeout faster first time */ - if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) - schedule_delayed_work(&lport->disc_work, - msecs_to_jiffies(delay)); - else - fc_disc_done(lport); + mutex_unlock(&disc->disc_mutex); } /** @@ -249,11 +464,15 @@ static void fc_disc_retry(struct fc_lport *lport) * @lport: FC local port * @rport: The previous FC remote port (NULL if new remote port) * @ids: Identifiers for the new FC remote port + * + * Locking Note: This function expects that the disc_mutex is locked + * before it is called. */ -static int fc_disc_new_target(struct fc_lport *lport, +static int fc_disc_new_target(struct fc_disc *disc, struct fc_rport *rport, struct fc_rport_identifiers *ids) { + struct fc_lport *lport = disc->lport; struct fc_rport_libfc_priv *rp; int error = 0; @@ -272,7 +491,7 @@ static int fc_disc_new_target(struct fc_lport *lport, * assigned the same FCID. This should be rare. * Delete the old one and fall thru to re-create. 
*/ - fc_disc_del_target(lport, rport); + fc_disc_del_target(disc, rport); rport = NULL; } } @@ -295,7 +514,7 @@ static int fc_disc_new_target(struct fc_lport *lport, } if (rport) { rp = rport->dd_data; - rp->event_callback = lport->tt.event_callback; + rp->event_callback = fc_disc_rport_event; rp->rp_state = RPORT_ST_INIT; lport->tt.rport_login(rport); } @@ -305,89 +524,111 @@ static int fc_disc_new_target(struct fc_lport *lport, /** * fc_disc_del_target - Delete a target - * @lport: FC local port + * @disc: FC discovery context * @rport: The remote port to be removed */ -static void fc_disc_del_target(struct fc_lport *lport, struct fc_rport *rport) +static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport) { - lport->tt.rport_stop(rport); + struct fc_lport *lport = disc->lport; + struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport); + list_del(&rdata->peers); + lport->tt.rport_logoff(rport); } /** * fc_disc_done - Discovery has been completed - * @lport: FC local port + * @disc: FC discovery context */ -static void fc_disc_done(struct fc_lport *lport) +static void fc_disc_done(struct fc_disc *disc) { - lport->disc_done = 1; - lport->disc_pending = 0; - if (lport->disc_requested) - lport->tt.disc_start(lport); + struct fc_lport *lport = disc->lport; + + FC_DEBUG_DISC("Discovery complete for port (%6x)\n", + fc_host_port_id(lport->host)); + + disc->disc_callback(lport, disc->event); + disc->event = DISC_EV_NONE; + + if (disc->requested) + fc_disc_gpn_ft_req(disc); + else + disc->pending = 0; } /** - * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request - * @lport: FC local port + * fc_disc_error - Handle error on dNS request + * @disc: FC discovery context + * @fp: The frame pointer */ -static void fc_disc_gpn_ft_req(struct fc_lport *lport) +static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) { - struct fc_frame *fp; - struct fc_seq *sp = NULL; - struct req { - struct fc_ct_hdr ct; - struct fc_ns_gid_ft gid; - } *rp; - int error = 0; - - lport->disc_buf_len = 0; - lport->disc_seq_count = 0; - fp = fc_frame_alloc(lport, sizeof(*rp)); - if (!fp) { - error = ENOMEM; - } else { - rp = fc_frame_payload_get(fp, sizeof(*rp)); - fc_fill_dns_hdr(lport, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid)); - rp->gid.fn_fc4_type = FC_TYPE_FCP; - - WARN_ON(!fc_lport_test_ready(lport)); - - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT); - sp = lport->tt.exch_seq_send(lport, fp, - fc_disc_gpn_ft_resp, NULL, - lport, lport->e_d_tov, - fc_host_port_id(lport->host), - FC_FID_DIR_SERV, - FC_FC_SEQ_INIT | FC_FC_END_SEQ); + struct fc_lport *lport = disc->lport; + unsigned long delay = 0; + if (fc_disc_debug) + FC_DBG("Error %ld, retries %d/%d\n", + PTR_ERR(fp), disc->retry_count, + FC_DISC_RETRY_LIMIT); + + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { + /* + * Memory allocation failure, or the exchange timed out, + * retry after delay. 
+ */ + if (disc->retry_count < FC_DISC_RETRY_LIMIT) { + /* go ahead and retry */ + if (!fp) + delay = msecs_to_jiffies(500); + else { + delay = jiffies + + msecs_to_jiffies(lport->e_d_tov); + + /* timeout faster first time */ + if (!disc->retry_count) + delay /= 4; + } + disc->retry_count++; + schedule_delayed_work(&disc->disc_work, + delay); + } else { + /* exceeded retries */ + disc->event = DISC_EV_FAILED; + fc_disc_done(disc); + } } - if (error || !sp) - fc_disc_retry(lport); } /** - * fc_disc_error - Handle error on dNS request - * @lport: FC local port - * @fp: The frame pointer + * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request + * @lport: FC discovery context + * + * Locking Note: This function expects that the disc_mutex is locked + * before it is called. */ -static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp) +static void fc_disc_gpn_ft_req(struct fc_disc *disc) { - long err = PTR_ERR(fp); + struct fc_frame *fp; + struct fc_lport *lport = disc->lport; - FC_DEBUG_DISC("Error %ld, retries %d/%d\n", PTR_ERR(fp), - lport->retry_count, FC_DISC_RETRY_LIMIT); + WARN_ON(!fc_lport_test_ready(lport)); - switch (err) { - case -FC_EX_TIMEOUT: - if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) { - fc_disc_gpn_ft_req(lport); - } else { - fc_disc_done(lport); - } - break; - default: - FC_DBG("Error code %ld not supported\n", err); - fc_disc_done(lport); - break; - } + disc->pending = 1; + disc->requested = 0; + + disc->buf_len = 0; + disc->seq_count = 0; + fp = fc_frame_alloc(lport, + sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_gid_ft)); + if (!fp) + goto err; + + if (lport->tt.elsct_send(lport, NULL, fp, + FC_NS_GPN_FT, + fc_disc_gpn_ft_resp, + disc, lport->e_d_tov)) + return; +err: + fc_disc_error(disc, fp); } /** @@ -396,8 +637,9 @@ static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp) * @buf: GPN_FT response buffer * @len: size of response buffer */ -static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) +static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) { + struct fc_lport *lport; struct fc_gpn_ft_resp *np; char *bp; size_t plen; @@ -407,13 +649,15 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) struct fc_rport *rport; struct fc_rport_libfc_priv *rdata; + lport = disc->lport; + /* * Handle partial name record left over from previous call. 
*/ bp = buf; plen = len; np = (struct fc_gpn_ft_resp *)bp; - tlen = lport->disc_buf_len; + tlen = disc->buf_len; if (tlen) { WARN_ON(tlen >= sizeof(*np)); plen = sizeof(*np) - tlen; @@ -421,7 +665,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) WARN_ON(plen >= sizeof(*np)); if (plen > len) plen = len; - np = &lport->disc_buf; + np = &disc->partial_buf; memcpy((char *)np + tlen, bp, plen); /* @@ -431,9 +675,9 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) bp -= tlen; len += tlen; plen += tlen; - lport->disc_buf_len = (unsigned char) plen; + disc->buf_len = (unsigned char) plen; if (plen == sizeof(*np)) - lport->disc_buf_len = 0; + disc->buf_len = 0; } /* @@ -455,7 +699,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) rport = fc_rport_rogue_create(&dp); if (rport) { rdata = rport->dd_data; - rdata->event_callback = lport->tt.event_callback; + rdata->event_callback = fc_disc_rport_event; rdata->local_port = lport; lport->tt.rport_login(rport); } else @@ -465,7 +709,8 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) } if (np->fp_flags & FC_NS_FID_LAST) { - fc_disc_done(lport); + disc->event = DISC_EV_SUCCESS; + fc_disc_done(disc); len = 0; break; } @@ -479,11 +724,15 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) * Save any partial record at the end of the buffer for next time. */ if (error == 0 && len > 0 && len < sizeof(*np)) { - if (np != &lport->disc_buf) - memcpy(&lport->disc_buf, np, len); - lport->disc_buf_len = (unsigned char) len; + if (np != &disc->partial_buf) { + FC_DEBUG_DISC("Partial buffer remains " + "for discovery by (%6x)\n", + fc_host_port_id(lport->host)); + memcpy(&disc->partial_buf, np, len); + } + disc->buf_len = (unsigned char) len; } else { - lport->disc_buf_len = 0; + disc->buf_len = 0; } return error; } @@ -493,14 +742,13 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len) */ static void fc_disc_timeout(struct work_struct *work) { - struct fc_lport *lport; - - lport = container_of(work, struct fc_lport, disc_work.work); - - if (lport->disc_pending) - fc_disc_gpn_ft_req(lport); - else - lport->tt.disc_start(lport); + struct fc_disc *disc = container_of(work, + struct fc_disc, + disc_work.work); + mutex_lock(&disc->disc_mutex); + if (disc->requested && !disc->pending) + fc_disc_gpn_ft_req(disc); + mutex_unlock(&disc->disc_mutex); } /** @@ -509,12 +757,13 @@ static void fc_disc_timeout(struct work_struct *work) * @fp: response frame * @lp_arg: Fibre Channel host port instance * - * The response may be in multiple frames + * Locking Note: This function expects that the disc_mutex is locked + * before it is called. 
*/ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, - void *lp_arg) + void *disc_arg) { - struct fc_lport *lport = lp_arg; + struct fc_disc *disc = disc_arg; struct fc_ct_hdr *cp; struct fc_frame_header *fh; unsigned int seq_cnt; @@ -522,8 +771,11 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, unsigned int len; int error; + FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n", + fc_host_port_id(disc->lport->host)); + if (IS_ERR(fp)) { - fc_disc_error(lport, fp); + fc_disc_error(disc, fp); return; } @@ -532,9 +784,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, len = fr_len(fp) - sizeof(*fh); seq_cnt = ntohs(fh->fh_seq_cnt); if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && - lport->disc_seq_count == 0) { + disc->seq_count == 0) { cp = fc_frame_payload_get(fp, sizeof(*cp)); - if (cp == NULL) { + if (!cp) { FC_DBG("GPN_FT response too short, len %d\n", fr_len(fp)); } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { @@ -548,25 +800,26 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, FC_DBG("GPN_FT rejected reason %x exp %x " "(check zoning)\n", cp->ct_reason, cp->ct_explan); - fc_disc_done(lport); + disc->event = DISC_EV_FAILED; + fc_disc_done(disc); } else { FC_DBG("GPN_FT unexpected response code %x\n", ntohs(cp->ct_cmd)); } } else if (fr_sof(fp) == FC_SOF_N3 && - seq_cnt == lport->disc_seq_count) { + seq_cnt == disc->seq_count) { buf = fh + 1; } else { FC_DBG("GPN_FT unexpected frame - out of sequence? " "seq_cnt %x expected %x sof %x eof %x\n", - seq_cnt, lport->disc_seq_count, fr_sof(fp), fr_eof(fp)); + seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); } if (buf) { - error = fc_disc_gpn_ft_parse(lport, buf, len); + error = fc_disc_gpn_ft_parse(disc, buf, len); if (error) - fc_disc_retry(lport); + fc_disc_error(disc, fp); else - lport->disc_seq_count++; + disc->seq_count++; } fc_frame_free(fp); } @@ -576,27 +829,31 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, * @lport: FC local port * @dp: The port to rediscover * - * This could be from an RSCN that reported a change for the target. + * Locking Note: This function expects that the disc_mutex is locked + * before it is called. 
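Multi-frame GPN_FT responses are accepted strictly in order above: the first frame carries SOFi3 with sequence count zero, continuation frames carry SOFn3 with the next expected count. A hypothetical predicate restating that test (header path assumed):

#include <linux/types.h>
#include <scsi/fc_frame.h>      /* fr_sof() */

/* Hypothetical restatement of the ordering check in fc_disc_gpn_ft_resp():
 * true when @fp may be parsed as the next frame of the response. */
static bool gpn_ft_frame_in_order(struct fc_frame *fp,
                                  u16 seq_cnt, u16 expected)
{
        if (fr_sof(fp) == FC_SOF_I3)
                return seq_cnt == 0 && expected == 0;
        return fr_sof(fp) == FC_SOF_N3 && seq_cnt == expected;
}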
*/ -static void fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) +static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) { + struct fc_lport *lport; struct fc_rport *rport; struct fc_rport *new_rport; struct fc_rport_libfc_priv *rdata; + lport = disc->lport; + if (dp->ids.port_id == fc_host_port_id(lport->host)) goto out; rport = lport->tt.rport_lookup(lport, dp->ids.port_id); if (rport) { - fc_disc_del_target(lport, rport); + fc_disc_del_target(disc, rport); put_device(&rport->dev); /* hold from lookup */ } new_rport = fc_rport_rogue_create(dp); if (new_rport) { rdata = new_rport->dd_data; - rdata->event_callback = lport->tt.event_callback; + rdata->event_callback = fc_disc_rport_event; kfree(dp); lport->tt.rport_login(new_rport); } @@ -606,19 +863,70 @@ out: } /** + * fc_disc_stop - Stop discovery for a given lport + * @lport: The lport that discovery should stop for + */ +void fc_disc_stop(struct fc_lport *lport) +{ + struct fc_disc *disc, *next; + + mutex_lock(&disc_list_lock); + list_for_each_entry_safe(disc, next, &disc_list, list) { + if (disc->lport == lport) { + cancel_delayed_work_sync(&disc->disc_work); + fc_disc_stop_rports(disc); + } + } + mutex_unlock(&disc_list_lock); +} + +/** + * fc_disc_stop_final - Stop discovery for a given lport + * @lport: The lport that discovery should stop for + * + * This function will block until discovery has been + * completely stopped and all rports have been deleted. + */ +void fc_disc_stop_final(struct fc_lport *lport) +{ + struct fc_disc *disc, *next; + fc_disc_stop(lport); + lport->tt.rport_flush_queue(); + + mutex_lock(&disc_list_lock); + list_for_each_entry_safe(disc, next, &disc_list, list) { + if (disc->lport == lport) { + list_del(&disc->list); + kfree(disc); + } + } + mutex_unlock(&disc_list_lock); +} + +/** * fc_disc_init - Initialize the discovery block * @lport: FC local port */ int fc_disc_init(struct fc_lport *lport) { - INIT_DELAYED_WORK(&lport->disc_work, fc_disc_timeout); + INIT_LIST_HEAD(&disc_list); + mutex_init(&disc_list_lock); if (!lport->tt.disc_start) lport->tt.disc_start = fc_disc_start; + if (!lport->tt.disc_stop) + lport->tt.disc_stop = fc_disc_stop; + + if (!lport->tt.disc_stop_final) + lport->tt.disc_stop_final = fc_disc_stop_final; + if (!lport->tt.disc_recv_req) lport->tt.disc_recv_req = fc_disc_recv_req; + if (!lport->tt.rport_lookup) + lport->tt.rport_lookup = fc_disc_lookup_rport; + return 0; } EXPORT_SYMBOL(fc_disc_init); diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c new file mode 100644 index 0000000..dd47fe6 --- /dev/null +++ b/drivers/scsi/libfc/fc_elsct.c @@ -0,0 +1,71 @@ +/* + * Copyright(c) 2008 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Maintained at www.Open-FCoE.org + */ + +/* + * Provide interface to send ELS/CT FC frames + */ + +#include +#include +#include +#include +#include +#include + +/* + * fc_elsct_send - sends ELS/CT frame + */ +static struct fc_seq *fc_elsct_send(struct fc_lport *lport, + struct fc_rport *rport, + struct fc_frame *fp, + unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void *arg, u32 timer_msec) +{ + enum fc_rctl r_ctl; + u32 did; + enum fc_fh_type fh_type; + int rc; + + /* ELS requests */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) + rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type); + else + /* CT requests */ + rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type); + + if (rc) + return NULL; + + fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); +} + +int fc_elsct_init(struct fc_lport *lport) +{ + if (!lport->tt.elsct_send) + lport->tt.elsct_send = fc_elsct_send; + + return 0; +} +EXPORT_SYMBOL(fc_elsct_init); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 67c5bad..12a1196 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -29,7 +29,8 @@ #include -#include +#include +#include #define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */ @@ -60,66 +61,6 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ */ /* - * Sequence. - */ -struct fc_seq { - u8 id; /* seq ID */ - u16 ssb_stat; /* status flags for sequence status block */ - u16 cnt; /* frames sent so far on sequence */ - u32 f_ctl; /* F_CTL flags for frames */ - u32 rec_data; /* FC-4 value for REC */ -}; - -struct fc_exch; - -#define FC_EX_DONE (1 << 0) /* ep is completed */ -#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */ - -/* - * Exchange. - * - * Locking notes: The ex_lock protects changes to the following fields: - * esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl. - * seq_id - * sequence allocation - * - * If the em_lock and ex_lock must be taken at the same time, the - * em_lock must be taken before the ex_lock. - */ -struct fc_exch { - struct fc_exch_mgr *em; /* exchange manager */ - u32 state; /* internal driver state */ - u16 xid; /* our exchange ID */ - struct list_head ex_list; /* free or busy list linkage */ - spinlock_t ex_lock; /* lock covering exchange state */ - atomic_t ex_refcnt; /* reference counter */ - struct delayed_work timeout_work; /* timer for upper level protocols */ - struct fc_lport *lp; /* fc device instance */ - u16 oxid; /* originator's exchange ID */ - u16 rxid; /* responder's exchange ID */ - u32 oid; /* originator's FCID */ - u32 sid; /* source FCID */ - u32 did; /* destination FCID */ - u32 esb_stat; /* exchange status for ESB */ - u32 r_a_tov; /* r_a_tov from rport (msec) */ - u8 seq_id; /* next sequence ID to use */ - u32 f_ctl; /* F_CTL flags for sequences */ - u8 fh_type; /* frame type */ - enum fc_class class; /* class of service */ - struct fc_seq seq; /* single sequence */ - /* - * Handler for responses to this current exchange. - */ - void (*resp)(struct fc_seq *, struct fc_frame *, void *); - void (*destructor)(struct fc_seq *, void *); - /* - * arg is passed as void pointer to exchange - * resp and destructor handlers - */ - void *arg; -}; - -/* * Exchange manager. * * This structure is the center for creating exchanges and sequences. 
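The new elsct_send template above gives callers a single entry point for both ELS and CT requests; the caller only sizes the payload, while fc_els_fill()/fc_ct_fill() and fc_fill_fc_hdr() complete the frame. A hypothetical caller, modelled on the GPN_FT request earlier in this patch, with error handling and frame ownership simplified:

#include <linux/errno.h>

/* Hypothetical CT request through the new elsct_send template.  @resp is
 * the exchange response handler and @arg its context; elsct_send returns
 * the new sequence or NULL on failure. */
static int send_gpn_ft(struct fc_lport *lport,
                       void (*resp)(struct fc_seq *, struct fc_frame *, void *),
                       void *arg)
{
        struct fc_frame *fp;

        fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
                            sizeof(struct fc_ns_gid_ft));
        if (!fp)
                return -ENOMEM;

        if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_GPN_FT,
                                  resp, arg, lport->e_d_tov))
                return -EIO;
        return 0;
}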
@@ -131,6 +72,8 @@ struct fc_exch_mgr { u16 last_xid; /* last allocated exchange ID */ u16 min_xid; /* min exchange ID */ u16 max_xid; /* max exchange ID */ + u16 max_read; /* max exchange ID for read */ + u16 last_read; /* last xid allocated for read */ u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ struct fc_lport *lp; /* fc device instance */ @@ -151,14 +94,12 @@ struct fc_exch_mgr { } stats; struct fc_exch **exches; /* for exch pointers indexed by xid */ }; - #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) -#define fc_exch_next_xid(mp, id) ((id == mp->max_xid) ? mp->min_xid : id + 1) static void fc_exch_rrq(struct fc_exch *); static void fc_seq_ls_acc(struct fc_seq *); static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, - enum fc_els_rjt_explan); + enum fc_els_rjt_explan); static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp); @@ -274,34 +215,57 @@ static void fc_exch_hold(struct fc_exch *ep) } /* - * Fill in frame header. - * - * The following fields are the responsibility of this routine: - * d_id, s_id, df_ctl, oxid, rxid, cs_ctl, seq_id - * - * The following fields are handled by the caller. - * r_ctl, type, f_ctl, seq_cnt, parm_offset - * - * That should be a complete list. - * - * We may be the originator or responder to the sequence. + * setup fc hdr by initializing few more FC header fields and sof/eof. + * Initialized fields by this func: + * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt + * - sof and eof */ -static void fc_seq_fill_hdr(struct fc_seq *sp, struct fc_frame *fp) +static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, + u32 f_ctl) { struct fc_frame_header *fh = fc_frame_header_get(fp); - struct fc_exch *ep; + u16 fill; - ep = fc_seq_exch(sp); + fr_sof(fp) = ep->class; + if (ep->seq.cnt) + fr_sof(fp) = fc_sof_normal(ep->class); + + if (f_ctl & FC_FC_END_SEQ) { + fr_eof(fp) = FC_EOF_T; + if (fc_sof_needs_ack(ep->class)) + fr_eof(fp) = FC_EOF_N; + /* + * Form f_ctl. + * The number of fill bytes to make the length a 4-byte + * multiple is the low order 2-bits of the f_ctl. + * The fill itself will have been cleared by the frame + * allocation. + * After this, the length will be even, as expected by + * the transport. + */ + fill = fr_len(fp) & 3; + if (fill) { + fill = 4 - fill; + /* TODO, this may be a problem with fragmented skb */ + skb_put(fp_skb(fp), fill); + hton24(fh->fh_f_ctl, f_ctl | fill); + } + } else { + WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */ + fr_eof(fp) = FC_EOF_N; + } - hton24(fh->fh_s_id, ep->sid); - hton24(fh->fh_d_id, ep->did); + /* + * Initialize remainig fh fields + * from fc_fill_fc_hdr + */ fh->fh_ox_id = htons(ep->oxid); fh->fh_rx_id = htons(ep->rxid); - fh->fh_seq_id = sp->id; - fh->fh_cs_ctl = 0; - fh->fh_df_ctl = 0; + fh->fh_seq_id = ep->seq.id; + fh->fh_seq_cnt = htons(ep->seq.cnt); } + /* * Release a reference to an exchange. * If the refcnt goes to zero and the exchange is complete, it is freed. 
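The padding rule in fc_exch_setup_hdr() above deserves a note: the frame must end on a 4-byte boundary, and the number of appended fill bytes is advertised in the two low-order bits of F_CTL. A small hypothetical helper showing just that computation:

#include <linux/types.h>

/* Hypothetical helper: fill bytes needed to pad @len to a 4-byte
 * multiple; the same value is OR-ed into the low bits of F_CTL.
 * e.g. len 61 -> 3 fill bytes, len 64 -> 0. */
static inline unsigned int fc_fill_bytes(size_t len)
{
        unsigned int fill = len & 3;

        return fill ? 4 - fill : 0;
}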
@@ -432,8 +396,9 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec) */ fp = fc_frame_alloc(ep->lp, 0); if (fp) { - fc_frame_setup(fp, FC_RCTL_BA_ABTS, FC_TYPE_BLS); - error = fc_seq_send(ep->lp, sp, fp, FC_FC_END_SEQ); + fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, + FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + error = fc_seq_send(ep->lp, sp, fp); } else error = -ENOBUFS; return error; @@ -508,36 +473,66 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) } /* - * Allocate an exchange. + * fc_em_alloc_xid - returns an xid based on request type + * @lp : ptr to associated lport + * @fp : ptr to the assocated frame * - * if xid is supplied zero then assign next free exchange ID - * from exchange manager, otherwise use supplied xid. - * Returns with exch lock held. + * check the associated fc_fsp_pkt to get scsi command type and + * command direction to decide from which range this exch id + * will be allocated from. + * + * Returns : 0 or an valid xid */ -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid) +static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp) { + u16 xid, min, max; + u16 *plast; struct fc_exch *ep = NULL; - u16 min_xid, max_xid; - min_xid = mp->min_xid; - max_xid = mp->max_xid; - /* - * if xid is supplied then verify its xid range - */ - if (xid) { - if (unlikely((xid < min_xid) || (xid > max_xid))) { - FC_DBG("Invalid xid 0x:%x\n", xid); - goto out; - } - if (unlikely(mp->exches[xid - min_xid] != NULL)) { - FC_DBG("xid 0x:%x is already in use\n", xid); - goto out; + if (mp->max_read) { + if (fc_frame_is_read(fp)) { + min = mp->min_xid; + max = mp->max_read; + plast = &mp->last_read; + } else { + min = mp->max_read + 1; + max = mp->max_xid; + plast = &mp->last_xid; } + } else { + min = mp->min_xid; + max = mp->max_xid; + plast = &mp->last_xid; } + xid = *plast; + do { + xid = (xid == max) ? min : xid + 1; + ep = mp->exches[xid - mp->min_xid]; + } while ((ep != NULL) && (xid != *plast)); - /* - * Allocate new exchange - */ + if (unlikely(ep)) + xid = 0; + else + *plast = xid; + + return xid; +} + +/* + * fc_exch_alloc - allocate an exchange. + * @mp : ptr to the exchange manager + * @xid: input xid + * + * if xid is supplied zero then assign next free exchange ID + * from exchange manager, otherwise use supplied xid. + * Returns with exch lock held. + */ +struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, + struct fc_frame *fp, u16 xid) +{ + struct fc_exch *ep = NULL; + + /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); if (!ep) { atomic_inc(&mp->stats.no_free_exch); @@ -546,40 +541,26 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid) memset(ep, 0, sizeof(*ep)); spin_lock_bh(&mp->em_lock); - - /* - * if xid is zero then assign next free exchange ID - */ + /* alloc xid if input xid 0 */ if (!xid) { - xid = fc_exch_next_xid(mp, mp->last_xid); - /* - * find next free xid using linear search - */ - while (mp->exches[xid - min_xid] != NULL) { - if (xid == mp->last_xid) - break; - xid = fc_exch_next_xid(mp, xid); - } - - if (unlikely(mp->exches[xid - min_xid] != NULL)) + /* alloc a new xid */ + xid = fc_em_alloc_xid(mp, fp); + if (!xid) { + printk(KERN_ERR "fc_em_alloc_xid() failed\n"); goto err; - mp->last_xid = xid; + } } - /* lport lock ? 
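For reference, the exchange ID space used by fc_em_alloc_xid() above splits in two when read offload is enabled. The helper below is a hypothetical restatement, not part of the patch; it assumes the fc_exch_mgr layout local to fc_exch.c, and the example values in the comment are made up.

/* Hypothetical restatement of the range selection in fc_em_alloc_xid():
 * with, say, min_xid 1, max_xid 0x0fff and max_read (lro_xid) 0x07ff,
 * reads allocate from 1..0x07ff and everything else from 0x0800..0x0fff. */
static void fc_exch_xid_range(const struct fc_exch_mgr *mp, bool is_read,
                              u16 *min, u16 *max)
{
        if (mp->max_read && is_read) {
                *min = mp->min_xid;
                *max = mp->max_read;
        } else if (mp->max_read) {
                *min = mp->max_read + 1;
                *max = mp->max_xid;
        } else {
                *min = mp->min_xid;
                *max = mp->max_xid;
        }
}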
*/ - if (mp->lp->state == LPORT_ST_RESET) - goto err; /* don't add new ep during local port reset */ - fc_exch_hold(ep); /* hold for exch in mp */ spin_lock_init(&ep->ex_lock); /* * Hold exch lock for caller to prevent fc_exch_reset() - * from releasing exch while fc_exch_alloc() caller is + * from releasing exch while fc_exch_alloc() caller is * still working on exch. */ spin_lock_bh(&ep->ex_lock); - mp->exches[xid - min_xid] = ep; + mp->exches[xid - mp->min_xid] = ep; list_add_tail(&ep->ex_list, &mp->ex_list); fc_seq_alloc(ep, ep->seq_id++); mp->total_exches++; @@ -874,55 +855,17 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp) } EXPORT_SYMBOL(fc_seq_start_next); -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, - struct fc_frame *fp, u32 f_ctl) +int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp) { struct fc_exch *ep; - struct fc_frame_header *fh; - enum fc_class class; - u16 fill = 0; + struct fc_frame_header *fh = fc_frame_header_get(fp); int error; ep = fc_seq_exch(sp); WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); - fc_seq_fill_hdr(sp, fp); - fh = fc_frame_header_get(fp); - class = ep->class; - fr_sof(fp) = class; - if (sp->cnt) - fr_sof(fp) = fc_sof_normal(class); - - if (f_ctl & FC_FC_END_SEQ) { - fr_eof(fp) = FC_EOF_T; - if (fc_sof_needs_ack(class)) - fr_eof(fp) = FC_EOF_N; - /* - * Form f_ctl. - * The number of fill bytes to make the length a 4-byte - * multiple is the low order 2-bits of the f_ctl. - * The fill itself will have been cleared by the frame - * allocation. - * After this, the length will be even, as expected by - * the transport. Don't include the fill in the f_ctl - * saved in the sequence. - */ - fill = fr_len(fp) & 3; - if (fill) { - fill = 4 - fill; - /* TODO, this may be a problem with fragmented skb */ - skb_put(fp_skb(fp), fill); - } - f_ctl |= sp->f_ctl | ep->f_ctl; - } else { - WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */ - f_ctl |= sp->f_ctl | ep->f_ctl; - f_ctl &= ~FC_FC_SEQ_INIT; - fr_eof(fp) = FC_EOF_N; - } - - hton24(fh->fh_f_ctl, f_ctl | fill); - fh->fh_seq_cnt = htons(sp->cnt); + sp->f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, sp->f_ctl); /* * update sequence count if this frame is carrying @@ -946,12 +889,10 @@ int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, * We can only be called to send once for each sequence. 
*/ spin_lock_bh(&ep->ex_lock); - sp->f_ctl = f_ctl; /* save for possible abort */ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ - if (f_ctl & FC_FC_END_SEQ) { - if (f_ctl & FC_FC_SEQ_INIT) - ep->esb_stat &= ~ESB_ST_SEQ_INIT; - } + sp->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ + if (sp->f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) + ep->esb_stat &= ~ESB_ST_SEQ_INIT; spin_unlock_bh(&ep->ex_lock); return error; } @@ -986,10 +927,11 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, enum fc_rctl rctl, enum fc_fh_type fh_type) { u32 f_ctl; + struct fc_exch *ep = fc_seq_exch(sp); - fc_frame_setup(fp, rctl, fh_type); - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_seq_send(fc_seq_exch(sp)->lp, sp, fp, f_ctl); + f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; + fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); + fc_seq_send(fc_seq_exch(sp)->lp, sp, fp); } /* @@ -1001,7 +943,8 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) struct fc_frame *fp; struct fc_frame_header *rx_fh; struct fc_frame_header *fh; - struct fc_lport *lp = fc_seq_exch(sp)->lp; + struct fc_exch *ep = fc_seq_exch(sp); + struct fc_lport *lp = ep->lp; unsigned int f_ctl; /* @@ -1013,7 +956,6 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) if (!fp) return; - fc_seq_fill_hdr(sp, fp); fh = fc_frame_header_get(fp); fh->fh_r_ctl = FC_RCTL_ACK_1; fh->fh_type = FC_TYPE_BLS; @@ -1034,6 +976,7 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX; hton24(fh->fh_f_ctl, f_ctl); + fc_exch_setup_hdr(ep, fp, f_ctl); fh->fh_seq_id = rx_fh->fh_seq_id; fh->fh_seq_cnt = rx_fh->fh_seq_cnt; fh->fh_parm_offset = htonl(1); /* ack single frame */ @@ -1514,7 +1457,7 @@ static void fc_exch_reset(struct fc_exch *ep) * a deadlock). */ if (cancel_delayed_work(&ep->timeout_work)) - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ resp = ep->resp; ep->resp = NULL; if (ep->esb_stat & ESB_ST_REC_QUAL) @@ -1565,22 +1508,6 @@ restart: } EXPORT_SYMBOL(fc_exch_mgr_reset); -void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid) -{ - struct fc_exch *ep; - - ep = fc_seq_exch(sp); - *oxid = ep->oxid; - *rxid = ep->rxid; -} -EXPORT_SYMBOL(fc_seq_get_xids); - -void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data) -{ - sp->rec_data = rec_data; -} -EXPORT_SYMBOL(fc_seq_set_rec_data); - /* * Handle incoming ELS REC - Read Exchange Concise. * Note that the requesting port may be different than the S_ID in the request. 
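As the hunks above show, senders now build the complete FC header with fc_fill_fc_hdr() and hand the frame to seq_send() without a separate f_ctl argument. A hypothetical single-frame ELS reply written against the new convention, mirroring fc_seq_send_last():

/* Hypothetical single-frame reply on @sp using the new
 * fc_fill_fc_hdr()/seq_send() convention; error handling elided. */
static void send_els_reply(struct fc_lport *lp, struct fc_seq *sp,
                           struct fc_frame *fp)
{
        struct fc_exch *ep = fc_seq_exch(sp);
        u32 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

        fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
                       FC_TYPE_ELS, f_ctl, 0);
        lp->tt.seq_send(lp, sp, fp);
}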
@@ -1648,8 +1575,8 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) hton24(acc->reca_rfid, ep->sid); acc->reca_fc4value = htonl(ep->seq.rec_data); acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP | - ESB_ST_SEQ_INIT | - ESB_ST_COMPLETE)); + ESB_ST_SEQ_INIT | + ESB_ST_COMPLETE)); sp = fc_seq_start_next(sp); fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); out: @@ -1723,7 +1650,6 @@ static void fc_exch_rrq(struct fc_exch *ep) fp = fc_frame_alloc(lp, sizeof(*rrq)); if (!fp) return; - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); rrq = fc_frame_payload_get(fp, sizeof(*rrq)); memset(rrq, 0, sizeof(*rrq)); rrq->rrq_cmd = ELS_RRQ; @@ -1734,9 +1660,13 @@ static void fc_exch_rrq(struct fc_exch *ep) did = ep->did; if (ep->esb_stat & ESB_ST_RESP) did = ep->sid; + + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, + fc_host_port_id(lp->host), FC_TYPE_ELS, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, - lp->e_d_tov, fc_host_port_id(lp->host), did, - FC_FC_SEQ_INIT | FC_FC_END_SEQ); + lp->e_d_tov); if (!rrq_sp) { ep->esb_stat |= ESB_ST_REC_QUAL; fc_exch_timer_set_locked(ep, ep->r_a_tov); @@ -1791,7 +1721,7 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) } if (ep->esb_stat & ESB_ST_COMPLETE) { if (cancel_delayed_work(&ep->timeout_work)) - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ + atomic_dec(&ep->ex_refcnt); /* drop timer hold */ } spin_unlock_bh(&ep->ex_lock); @@ -1827,6 +1757,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, /* * Memory need for EM */ +#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2))) len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *)); len += sizeof(struct fc_exch_mgr); @@ -1837,10 +1768,22 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, mp->class = class; mp->total_exches = 0; mp->exches = (struct fc_exch **)(mp + 1); - mp->last_xid = min_xid - 1; + mp->lp = lp; + /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; - mp->lp = lp; + mp->last_xid = min_xid - 1; + mp->max_read = 0; + mp->last_read = 0; + if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) { + mp->max_read = lp->lro_xid; + mp->last_read = min_xid - 1; + mp->last_xid = mp->max_read; + } else { + /* disable lro if no xid control over read */ + lp->lro_enabled = 0; + } + INIT_LIST_HEAD(&mp->ex_list); spin_lock_init(&mp->em_lock); @@ -1873,7 +1816,8 @@ struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp) { if (!lp || !lp->emp) return NULL; - return fc_exch_alloc(lp->emp, 0); + + return fc_exch_alloc(lp->emp, fp, 0); } EXPORT_SYMBOL(fc_exch_get); @@ -1883,13 +1827,11 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, struct fc_frame *fp, void *arg), void (*destructor)(struct fc_seq *, void *), - void *arg, u32 timer_msec, - u32 sid, u32 did, u32 f_ctl) + void *arg, u32 timer_msec) { struct fc_exch *ep; struct fc_seq *sp = NULL; struct fc_frame_header *fh; - u16 fill; int rc = 1; ep = lp->tt.exch_get(lp, fp); @@ -1898,7 +1840,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, return NULL; } ep->esb_stat |= ESB_ST_SEQ_INIT; - fc_exch_set_addr(ep, sid, did); + fh = fc_frame_header_get(fp); + fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); ep->resp = resp; ep->destructor = destructor; ep->arg = arg; @@ -1907,43 +1850,20 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, sp = &ep->seq; WARN_ON((sp->f_ctl & FC_FC_END_SEQ) != 0); - fr_sof(fp) = ep->class; - 
if (sp->cnt) - fr_sof(fp) = fc_sof_normal(ep->class); - fr_eof(fp) = FC_EOF_T; - if (fc_sof_needs_ack(ep->class)) - fr_eof(fp) = FC_EOF_N; - - fc_seq_fill_hdr(sp, fp); - /* - * Form f_ctl. - * The number of fill bytes to make the length a 4-byte multiple is - * the low order 2-bits of the f_ctl. The fill itself will have been - * cleared by the frame allocation. - * After this, the length will be even, as expected by the transport. - * Don't include the fill in the f_ctl saved in the sequence. - */ - fill = fr_len(fp) & 3; - if (fill) { - fill = 4 - fill; - /* TODO, this may be a problem with fragmented skb */ - skb_put(fp_skb(fp), fill); - } - f_ctl |= ep->f_ctl; - fh = fc_frame_header_get(fp); - hton24(fh->fh_f_ctl, f_ctl | fill); - fh->fh_seq_cnt = htons(sp->cnt++); ep->fh_type = fh->fh_type; /* save for possbile timeout handling */ + ep->f_ctl = ntoh24(fh->fh_f_ctl); + fc_exch_setup_hdr(ep, fp, ep->f_ctl); + sp->cnt++; if (unlikely(lp->tt.frame_send(lp, fp))) goto err; if (timer_msec) fc_exch_timer_set_locked(ep, timer_msec); - sp->f_ctl = f_ctl; /* save for possible abort */ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ + sp->f_ctl = ep->f_ctl; /* save for possible abort */ - if (f_ctl & FC_FC_SEQ_INIT) + if (ep->f_ctl & FC_FC_SEQ_INIT) ep->esb_stat &= ~ESB_ST_SEQ_INIT; spin_unlock_bh(&ep->ex_lock); return sp; @@ -2032,11 +1952,6 @@ int fc_exch_init(struct fc_lport *lp) if (!lp->tt.seq_exch_abort) lp->tt.seq_exch_abort = fc_seq_exch_abort; - if (!lp->tt.seq_get_xids) - lp->tt.seq_get_xids = fc_seq_get_xids; - - if (!lp->tt.seq_set_rec_data) - lp->tt.seq_set_rec_data = fc_seq_set_rec_data; return 0; } EXPORT_SYMBOL(fc_exch_init); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 01e84dc..04ced7f 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -36,12 +36,12 @@ #include -#include +#include +#include MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("libfc"); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.3"); static int fc_fcp_debug; @@ -388,15 +388,23 @@ crc_err: } /* - * Send SCSI data to target. + * fc_fcp_send_data - Send SCSI data to target. + * @fsp: ptr to fc_fcp_pkt + * @sp: ptr to this sequence + * @offset: starting offset for this data request + * @seq_blen: the burst length for this data request + * * Called after receiving a Transfer Ready data descriptor. * if LLD is capable of seq offload then send down seq_blen * size of data in single frame, otherwise send multiple FC * frames of max FC frame payload supported by target port. + * + * Returns : 0 for success. 
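With the hunk above, fc_exch_seq_send() no longer takes sid/did/f_ctl arguments: callers fill the FC header first (via fc_fill_fc_hdr(), added later in this patch) and the exchange layer reads the addresses and F_CTL back out with ntoh24(). A standalone sketch of the 3-byte, big-endian packing those header fields use; hton24()/ntoh24() and the F_CTL defines are local re-implementations for illustration, with bit positions as laid out in fc_fs.h.

#include <stdint.h>
#include <stdio.h>

/* F_CTL bits used by the new callers (FC-FS bit layout) */
#define FC_FC_FIRST_SEQ	(1 << 21)
#define FC_FC_END_SEQ	(1 << 19)
#define FC_FC_SEQ_INIT	(1 << 16)

static void hton24(uint8_t *p, uint32_t v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

static uint32_t ntoh24(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
	uint8_t fh_f_ctl[3];	/* stands in for the header's 3-byte field */
	uint32_t f_ctl = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

	hton24(fh_f_ctl, f_ctl);
	printf("f_ctl on the wire: %02x %02x %02x (round trip 0x%06x)\n",
	       fh_f_ctl[0], fh_f_ctl[1], fh_f_ctl[2],
	       (unsigned)ntoh24(fh_f_ctl));
	return 0;
}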
*/ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, size_t offset, size_t seq_blen) { + struct fc_exch *ep; struct scsi_cmnd *sc; struct scatterlist *sg; struct fc_frame *fp = NULL; @@ -405,7 +413,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, size_t t_blen; size_t tlen; size_t sg_bytes; - size_t frame_offset; + size_t frame_offset, fh_parm_offset; int error; void *data = NULL; void *page_addr; @@ -438,7 +446,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, sc = fsp->cmd; remaining = seq_blen; - frame_offset = offset; + fh_parm_offset = frame_offset = offset; tlen = 0; seq = lp->tt.seq_start_next(seq); f_ctl = FC_FC_REL_OFF; @@ -501,8 +509,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, data = (void *)(fr_hdr(fp)) + sizeof(struct fc_frame_header); } - fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP); - fc_frame_set_offset(fp, frame_offset); + fh_parm_offset = frame_offset; fr_max_payload(fp) = fsp->max_payload; } sg_bytes = min(tlen, sg->length - offset); @@ -539,28 +546,30 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, tlen -= sg_bytes; remaining -= sg_bytes; - if (remaining == 0) { - /* - * Send a request sequence with - * transfer sequence initiative. - */ - f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; - error = lp->tt.seq_send(lp, seq, fp, f_ctl); - } else if (tlen == 0) { - /* - * send fragment using for a sequence. - */ - error = lp->tt.seq_send(lp, seq, fp, f_ctl); - } else { + if (tlen) continue; - } - fp = NULL; + /* + * Send sequence with transfer sequence initiative in case + * this is last FCP frame of the sequence. + */ + if (remaining == 0) + f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; + + ep = fc_seq_exch(seq); + fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, + FC_TYPE_FCP, f_ctl, fh_parm_offset); + + /* + * send fragment using for a sequence. + */ + error = lp->tt.seq_send(lp, seq, fp); if (error) { WARN_ON(1); /* send error should be rare */ fc_fcp_retry_cmd(fsp); return 0; } + fp = NULL; } fsp->xfer_len += seq_blen; /* premature count? 
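The rewritten loop in fc_fcp_send_data() above only transmits once tlen reaches zero, stamps each frame's relative offset into fh_parm_offset through fc_fill_fc_hdr(), and ORs SEQ_INIT | END_SEQ into f_ctl on the final frame of the burst. A user-space model of just that carving logic; send_frame() stands in for lp->tt.seq_send() and is not a libfc call.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void send_frame(size_t offset, size_t len, bool last)
{
	printf("frame @%zu len %zu%s\n", offset, len,
	       last ? " [SEQ_INIT|END_SEQ]" : "");
}

/* carve a transfer-ready burst into frames of at most t_blen bytes */
static void send_burst(size_t offset, size_t seq_blen, size_t t_blen)
{
	size_t remaining = seq_blen;

	while (remaining) {
		size_t flen = remaining < t_blen ? remaining : t_blen;

		send_frame(offset, flen, remaining == flen);
		offset += flen;
		remaining -= flen;
	}
}

int main(void)
{
	/* e.g. a 10000-byte burst sent with 2048-byte frame payloads */
	send_burst(0, 10000, 2048);
	return 0;
}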
*/ return 0; @@ -684,7 +693,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) (size_t) ntohl(dd->ft_data_ro), (size_t) ntohl(dd->ft_burst_len)); if (!rc) - lp->tt.seq_set_rec_data(seq, fsp->xfer_len); + seq->rec_data = fsp->xfer_len; else if (rc == -ENOMEM) fsp->state |= FC_SRB_NOMEM; } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { @@ -694,7 +703,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) */ WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */ fc_fcp_recv_data(fsp, fp); - lp->tt.seq_set_rec_data(seq, fsp->xfer_contig_end); + seq->rec_data = fsp->xfer_contig_end; } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) { WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); @@ -833,6 +842,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) { struct fc_lport *lp = fsp->lp; struct fc_seq *seq; + struct fc_exch *ep; u32 f_ctl; if (fsp->state & FC_SRB_ABORT_PENDING) @@ -864,11 +874,13 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) csp = lp->tt.seq_start_next(seq); conf_frame = fc_frame_alloc(fsp->lp, 0); if (conf_frame) { - fc_frame_setup(conf_frame, - FC_RCTL_DD_SOL_CTL, FC_TYPE_FCP); f_ctl = FC_FC_SEQ_INIT; f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; - lp->tt.seq_send(lp, csp, conf_frame, f_ctl); + ep = fc_seq_exch(seq); + fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, + ep->did, ep->sid, + FC_TYPE_FCP, f_ctl, 0); + lp->tt.seq_send(lp, csp, conf_frame); } } lp->tt.exch_done(seq); @@ -947,7 +959,7 @@ static void fc_fcp_abort_io(struct fc_lport *lp) * This is called by upper layer protocol. * Return : zero for success and -1 for failure * Context : called from queuecommand which can be called from process - * or scsi soft irq. + * or scsi soft irq. * Locks : called with the host lock and irqs disabled. */ static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp) @@ -995,18 +1007,16 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, } memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP); - fc_frame_set_offset(fp, 0); + fr_cmd(fp) = fsp->cmd; rport = fsp->rport; fsp->max_payload = rport->maxframe_size; rp = rport->dd_data; - seq = lp->tt.exch_seq_send(lp, fp, - resp, - fc_fcp_pkt_destroy, - fsp, 0, - fc_host_port_id(rp->local_port->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ); + + fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, + fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); if (!seq) { fc_frame_free(fp); rc = -1; @@ -1018,8 +1028,8 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); fc_fcp_timer_set(fsp, - (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? - FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); + (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? 
+ FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); unlock: fc_fcp_unlock_pkt(fsp); return rc; @@ -1249,50 +1259,33 @@ unlock: static void fc_fcp_rec(struct fc_fcp_pkt *fsp) { struct fc_lport *lp; - struct fc_seq *seq; struct fc_frame *fp; - struct fc_els_rec *rec; struct fc_rport *rport; struct fc_rport_libfc_priv *rp; - u16 ox_id; - u16 rx_id; lp = fsp->lp; rport = fsp->rport; rp = rport->dd_data; - seq = fsp->seq_ptr; - if (!seq || rp->rp_state != RPORT_ST_READY) { + if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) { fsp->status_code = FC_HRD_ERROR; fsp->io_status = SUGGEST_RETRY << 24; fc_fcp_complete_locked(fsp); return; } - lp->tt.seq_get_xids(seq, &ox_id, &rx_id); - fp = fc_frame_alloc(lp, sizeof(*rec)); + fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec)); if (!fp) goto retry; - rec = fc_frame_payload_get(fp, sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); - rec->rec_cmd = ELS_REC; - hton24(rec->rec_s_id, fc_host_port_id(lp->host)); - rec->rec_ox_id = htons(ox_id); - rec->rec_rx_id = htons(rx_id); - - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - fc_frame_set_offset(fp, 0); - seq = lp->tt.exch_seq_send(lp, fp, - fc_fcp_rec_resp, NULL, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV), - fc_host_port_id(rp->local_port->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ); - - if (seq) { + fr_seq(fp) = fsp->seq_ptr; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, + fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp, + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ return; - } else - fc_frame_free(fp); + } + fc_frame_free(fp); retry: if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); @@ -1510,17 +1503,15 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) struct fc_lport *lp = fsp->lp; struct fc_rport *rport; struct fc_rport_libfc_priv *rp; + struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); struct fc_seq *seq; struct fcp_srr *srr; struct fc_frame *fp; u8 cdb_op; - u16 ox_id; - u16 rx_id; rport = fsp->rport; rp = rport->dd_data; cdb_op = fsp->cdb_cmd.fc_cdb[0]; - lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id); if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY) goto retry; /* shouldn't happen */ @@ -1531,19 +1522,17 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) srr = fc_frame_payload_get(fp, sizeof(*srr)); memset(srr, 0, sizeof(*srr)); srr->srr_op = ELS_SRR; - srr->srr_ox_id = htons(ox_id); - srr->srr_rx_id = htons(rx_id); + srr->srr_ox_id = htons(ep->oxid); + srr->srr_rx_id = htons(ep->rxid); srr->srr_r_ctl = r_ctl; srr->srr_rel_off = htonl(offset); - fc_frame_setup(fp, FC_RCTL_ELS4_REQ, FC_TYPE_FCP); - fc_frame_set_offset(fp, 0); - seq = lp->tt.exch_seq_send(lp, fp, - fc_fcp_srr_resp, NULL, - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV), - fc_host_port_id(rp->local_port->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ); + fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, + fc_host_port_id(rp->local_port->host), FC_TYPE_FCP, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); if (!seq) { fc_frame_free(fp); goto retry; @@ -1565,8 +1554,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { struct fc_fcp_pkt *fsp = arg; struct fc_frame_header *fh; - 
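REC and SRR above now pull the exchange IDs straight from fc_seq_exch() (ep->oxid, ep->rxid) instead of the removed seq_get_xids() accessor, so the only remaining subtlety is byte order. Sketched standalone below; struct xid_pair is local to the example rather than an FC-LS layout, and 0xffff mirrors the FC_XID_UNKNOWN value defined later in this patch.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons, ntohs */

struct xid_pair {
	uint16_t ox_id;	/* originator exchange ID, big-endian on the wire */
	uint16_t rx_id;	/* responder exchange ID, big-endian on the wire */
};

static void fill_xids(struct xid_pair *p, uint16_t oxid, uint16_t rxid)
{
	p->ox_id = htons(oxid);
	p->rx_id = htons(rxid);
}

int main(void)
{
	struct xid_pair p;

	fill_xids(&p, 0x1001, 0xffff);	/* 0xffff: responder XID not known */
	printf("ox_id %04x rx_id %04x\n",
	       (unsigned)ntohs(p.ox_id), (unsigned)ntohs(p.rx_id));
	return 0;
}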
u16 ox_id; - u16 rx_id; if (IS_ERR(fp)) { fc_fcp_srr_error(fsp, fp); @@ -1590,8 +1577,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) } fsp->recov_seq = NULL; - - fsp->lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id); switch (fc_frame_payload_op(fp)) { case ELS_LS_ACC: fsp->recov_retry = 0; @@ -2007,7 +1992,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) return SUCCESS; } else { shost_printk(KERN_INFO, shost, "Host reset failed. " - "lport not ready.\n"); + "lport not ready.\n"); return FAILED; } } diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c index 388dc6c..0bbeff2 100644 --- a/drivers/scsi/libfc/fc_frame.c +++ b/drivers/scsi/libfc/fc_frame.c @@ -25,7 +25,7 @@ #include #include -#include +#include /* * Check the CRC in a frame. @@ -82,7 +82,8 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) if (fp) { memset((char *) fr_hdr(fp) + payload_len, 0, fill); /* trim is OK, we just allocated it so there are no fragments */ - skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header)); + skb_trim(fp_skb(fp), + payload_len + sizeof(struct fc_frame_header)); } return fp; } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 7e7c060..083d57b 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -78,7 +78,8 @@ #include -#include +#include +#include /* Fabric IDs to use for point-to-point mode, chosen on whims. */ #define FC_LOCAL_PTP_FID_LO 0x010101 @@ -124,71 +125,59 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) } /** - * fc_lport_lookup_rport - lookup a remote port by port_id - * @lport: Fibre Channel host port instance - * @port_id: remote port port_id to match - */ -struct fc_rport *fc_lport_lookup_rport(const struct fc_lport *lport, - u32 port_id) -{ - struct fc_rport *rport, *found; - struct fc_rport_libfc_priv *rdata; - - found = NULL; - - list_for_each_entry(rdata, &lport->rports, peers) { - rport = PRIV_TO_RPORT(rdata); - if (rport->port_id == port_id) { - found = rport; - get_device(&found->dev); - break; - } - } - return found; -} - - - -/** * fc_lport_rport_event - Event handler for rport events * @lport: The lport which is receiving the event * @rport: The rport which the event has occured on * @event: The event that occured * * Locking Note: The rport lock should not be held when calling - * this function. + * this function. 
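The open-coded fc_lport_lookup_rport() removed above moves behind the lport->tt.rport_lookup() template entry; the walk it performed was a plain list scan keyed on the 24-bit port_id, with a device reference taken on a hit. The same shape as a standalone sketch: struct rp stands in for struct fc_rport, and the reference counting is only noted in a comment.

#include <stdint.h>
#include <stdio.h>

struct rp {
	uint32_t port_id;
	struct rp *next;
};

static struct rp *rport_lookup(struct rp *head, uint32_t port_id)
{
	for (struct rp *r = head; r; r = r->next)
		if (r->port_id == port_id)
			return r;	/* caller takes its reference here */
	return NULL;
}

int main(void)
{
	struct rp b = { 0x010200, NULL }, a = { 0x010100, &b };
	struct rp *hit = rport_lookup(&a, 0x010200);

	printf("lookup 0x010200 -> %s\n", hit ? "found" : "missing");
	return 0;
}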
*/ static void fc_lport_rport_event(struct fc_lport *lport, struct fc_rport *rport, enum fc_lport_event event) { - struct fc_rport_libfc_priv *rdata = rport->dd_data; - FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event, rport->port_id); - mutex_lock(&lport->lp_mutex); switch (event) { - case LPORT_EV_RPORT_CREATED: + case RPORT_EV_CREATED: if (rport->port_id == FC_FID_DIR_SERV) { - lport->dns_rp = rport; - fc_lport_enter_rpn_id(lport); - } else { - list_add_tail(&rdata->peers, &lport->rports); - } + mutex_lock(&lport->lp_mutex); + if (lport->state == LPORT_ST_DNS) { + lport->dns_rp = rport; + fc_lport_enter_rpn_id(lport); + } else { + FC_DEBUG_LPORT("Received an CREATED event on " + "port (%6x) for the directory " + "server, but the lport is not " + "in the DNS state, it's in the " + "%d state", rport->port_id, + lport->state); + lport->tt.rport_logoff(rport); + } + mutex_unlock(&lport->lp_mutex); + } else + FC_DEBUG_LPORT("Received an event for port (%6x) " + "which is not the directory server\n", + rport->port_id); break; - case LPORT_EV_RPORT_LOGO: - case LPORT_EV_RPORT_FAILED: - case LPORT_EV_RPORT_STOP: - if (rport->port_id == FC_FID_DIR_SERV) + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + if (rport->port_id == FC_FID_DIR_SERV) { + mutex_lock(&lport->lp_mutex); lport->dns_rp = NULL; - else - list_del(&rdata->peers); + mutex_unlock(&lport->lp_mutex); + + } else + FC_DEBUG_LPORT("Received an event for port (%6x) " + "which is not the directory server\n", + rport->port_id); break; - case LPORT_EV_RPORT_NONE: + case RPORT_EV_NONE: break; } - mutex_unlock(&lport->lp_mutex); } /** @@ -225,7 +214,7 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; if (lport->ptp_rp) { - lport->tt.rport_stop(lport->ptp_rp); + lport->tt.rport_logoff(lport->ptp_rp); lport->ptp_rp = NULL; } @@ -379,6 +368,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, struct fc_lport *lport) { struct fc_frame *fp; + struct fc_exch *ep = fc_seq_exch(sp); unsigned int len; void *pp; void *dp; @@ -399,9 +389,10 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, memcpy(dp, pp, len); *((u32 *)dp) = htonl(ELS_LS_ACC << 24); sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - lport->tt.seq_send(lport, sp, fp, f_ctl); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); } fc_frame_free(in_fp); } @@ -419,6 +410,7 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, struct fc_lport *lport) { struct fc_frame *fp; + struct fc_exch *ep = fc_seq_exch(sp); struct fc_els_rnid *req; struct { struct fc_els_rnid_resp rnid; @@ -462,9 +454,11 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, sizeof(rp->gen)); } sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - lport->tt.seq_send(lport, sp, fp, f_ctl); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); } } fc_frame_free(in_fp); @@ -492,7 +486,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, * @lport: The lport that should log 
into the fabric * * Locking Note: This function should not be called - * with the lport lock held. + * with the lport lock held. */ int fc_fabric_login(struct fc_lport *lport) { @@ -515,6 +509,9 @@ EXPORT_SYMBOL(fc_fabric_login); */ void fc_linkup(struct fc_lport *lport) { + FC_DEBUG_LPORT("Link is up for port (%6x)\n", + fc_host_port_id(lport->host)); + mutex_lock(&lport->lp_mutex); if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) { lport->link_status |= FC_LINK_UP; @@ -533,13 +530,14 @@ EXPORT_SYMBOL(fc_linkup); void fc_linkdown(struct fc_lport *lport) { mutex_lock(&lport->lp_mutex); + FC_DEBUG_LPORT("Link is down for port (%6x)\n", + fc_host_port_id(lport->host)); if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) { lport->link_status &= ~(FC_LINK_UP); fc_lport_enter_reset(lport); lport->tt.fcp_cleanup(lport); } - mutex_unlock(&lport->lp_mutex); } EXPORT_SYMBOL(fc_linkdown); @@ -577,9 +575,9 @@ EXPORT_SYMBOL(fc_unpause); **/ int fc_fabric_logoff(struct fc_lport *lport) { + lport->tt.disc_stop_final(lport); mutex_lock(&lport->lp_mutex); fc_lport_enter_logo(lport); - lport->tt.fcp_cleanup(lport); mutex_unlock(&lport->lp_mutex); return 0; } @@ -599,9 +597,8 @@ EXPORT_SYMBOL(fc_fabric_logoff); **/ int fc_lport_destroy(struct fc_lport *lport) { - cancel_delayed_work_sync(&lport->disc_work); - lport->tt.fcp_abort_io(lport); lport->tt.frame_send = fc_frame_drop; + lport->tt.fcp_abort_io(lport); lport->tt.exch_mgr_reset(lport->emp, 0, 0); return 0; } @@ -626,10 +623,8 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs) rc = 0; } - if (!rc && mfs < old_mfs) { - lport->disc_done = 0; + if (!rc && mfs < old_mfs) fc_lport_enter_reset(lport); - } mutex_unlock(&lport->lp_mutex); @@ -638,6 +633,31 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs) EXPORT_SYMBOL(fc_set_mfs); /** + * fc_lport_disc_callback - Callback for discovery events + * @lport: FC local port + * @event: The discovery event + */ +void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) +{ + switch (event) { + case DISC_EV_SUCCESS: + FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n", + fc_host_port_id(lport->host)); + break; + case DISC_EV_FAILED: + FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n", + fc_host_port_id(lport->host)); + mutex_lock(&lport->lp_mutex); + fc_lport_enter_reset(lport); + mutex_unlock(&lport->lp_mutex); + break; + case DISC_EV_NONE: + WARN_ON(1); + break; + } +} + +/** * fc_rport_enter_ready - Enter the ready state and start discovery * @lport: Fibre Channel local port that is ready * @@ -651,7 +671,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_READY); - lport->tt.disc_start(lport); + lport->tt.disc_start(fc_lport_disc_callback, lport); } /** @@ -674,6 +694,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, struct fc_frame *fp; struct fc_frame_header *fh; struct fc_seq *sp; + struct fc_exch *ep; struct fc_els_flogi *flp; struct fc_els_flogi *new_flp; u64 remote_wwpn; @@ -724,9 +745,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, * Send the response. If this fails, the originator should * repeat the sequence. 
*/ - f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - lport->tt.seq_send(lport, sp, fp, f_ctl); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; + ep = fc_seq_exch(sp); + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); } else { fc_lport_error(lport, fp); @@ -734,8 +757,8 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, get_unaligned_be64(&flp->fl_wwnn)); - if (lport->tt.disc_start(lport)) - FC_DBG("target discovery start error\n"); + lport->tt.disc_start(fc_lport_disc_callback, lport); + out: sp = fr_seq(rx_fp); fc_frame_free(rx_fp); @@ -751,7 +774,7 @@ out: * if an rport should handle the request. * * Locking Note: This function should not be called with the lport - * lock held becuase it will grab the lock. + * lock held becuase it will grab the lock. */ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) @@ -808,7 +831,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, s_id = ntoh24(fh->fh_s_id); d_id = ntoh24(fh->fh_d_id); - rport = fc_lport_lookup_rport(lport, s_id); + rport = lport->tt.rport_lookup(lport, s_id); if (rport) { lport->tt.rport_recv_req(sp, fp, rport); put_device(&rport->dev); /* hold from lookup */ @@ -840,7 +863,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, * @lport: The lport which should be reset * * Locking Note: This functions should not be called with the - * lport lock held. + * lport lock held. */ int fc_lport_reset(struct fc_lport *lport) { @@ -852,24 +875,6 @@ int fc_lport_reset(struct fc_lport *lport) EXPORT_SYMBOL(fc_lport_reset); /** - * fc_lport_stop_rports - delete all the remote ports associated with the lport - * @lport: libfc local port instance - * - * Locking Note: This function expects that the lport mutex is locked before - * calling it. 
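The lport hunks that follow drop the hand-rolled ELS/CT builders and route requests through lport->tt.elsct_send(), which picks the payload fill routine and destination from the opcode (see fc_els_fill()/fc_ct_fill() added later in this patch). A small standalone model of that dispatch; the enum, struct and els_build() are sketch-local, and the destination IDs are the usual FC well-known addresses.

#include <stdint.h>
#include <stdio.h>

enum els_op { OP_FLOGI, OP_LOGO, OP_SCR };

struct els_req {
	enum els_op op;
	uint32_t did;		/* destination FC_ID for the request */
};

static int els_build(struct els_req *req, enum els_op op, uint32_t rport_did)
{
	req->op = op;
	switch (op) {
	case OP_FLOGI:
		req->did = 0xfffffe;	/* FC_FID_FLOGI: F_Port login server */
		break;
	case OP_SCR:
		req->did = 0xfffffd;	/* FC_FID_FCTRL: fabric controller */
		break;
	case OP_LOGO:
		/* fabric logout unless an rport was given (port logout) */
		req->did = rport_did ? rport_did : 0xfffffe;
		break;
	default:
		return -1;	/* unknown opcode; fc_els_fill returns -EINVAL */
	}
	return 0;
}

int main(void)
{
	struct els_req req;

	if (!els_build(&req, OP_SCR, 0))
		printf("SCR -> d_id %06x\n", (unsigned)req.did);
	return 0;
}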
- */ -void fc_lport_stop_rports(struct fc_lport *lport) -{ - struct fc_rport *rport; - struct fc_rport_libfc_priv *rdata; - - list_for_each_entry(rdata, &lport->rports, peers) { - rport = PRIV_TO_RPORT(rdata); - lport->tt.rport_stop(rport); - } -} - -/** * fc_rport_enter_reset - Reset the local port * @lport: Fibre Channel local port to be reset * @@ -883,17 +888,15 @@ static void fc_lport_enter_reset(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_RESET); - if (lport->dns_rp) { - lport->tt.rport_stop(lport->dns_rp); - lport->dns_rp = NULL; - } + if (lport->dns_rp) + lport->tt.rport_logoff(lport->dns_rp); if (lport->ptp_rp) { - lport->tt.rport_stop(lport->ptp_rp); + lport->tt.rport_logoff(lport->ptp_rp); lport->ptp_rp = NULL; } - fc_lport_stop_rports(lport); + lport->tt.disc_stop(lport); lport->tt.exch_mgr_reset(lport->emp, 0, 0); fc_host_fabric_name(lport->host) = 0; @@ -952,7 +955,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) /** * fc_lport_rft_id_resp - Handle response to Register Fibre - * Channel Types by ID (RPN_ID) request + * Channel Types by ID (RPN_ID) request * @sp: current sequence in RPN_ID exchange * @fp: response frame * @lp_arg: Fibre Channel host port instance @@ -1004,7 +1007,7 @@ err: /** * fc_lport_rpn_id_resp - Handle response to Register Port - * Name by ID (RPN_ID) request + * Name by ID (RPN_ID) request * @sp: current sequence in RPN_ID exchange * @fp: response frame * @lp_arg: Fibre Channel host port instance @@ -1110,32 +1113,20 @@ err: static void fc_lport_enter_scr(struct fc_lport *lport) { struct fc_frame *fp; - struct fc_els_scr *scr; FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n", fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_SCR); - fp = fc_frame_alloc(lport, sizeof(*scr)); + fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr)); if (!fp) { fc_lport_error(lport, fp); return; } - scr = fc_frame_payload_get(fp, sizeof(*scr)); - memset(scr, 0, sizeof(*scr)); - scr->scr_cmd = ELS_SCR; - scr->scr_reg_func = ELS_SCRF_FULL; - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - fc_frame_set_offset(fp, 0); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_lport_scr_resp, NULL, - lport, lport->e_d_tov, - fc_host_port_id(lport->host), - FC_FID_FCTRL, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR, + fc_lport_scr_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1149,11 +1140,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport) static void fc_lport_enter_rft_id(struct fc_lport *lport) { struct fc_frame *fp; - struct req { - struct fc_ct_hdr ct; - struct fc_ns_fid fid; /* port ID object */ - struct fc_ns_fts fts; /* FC4-types object */ - } *req; struct fc_ns_fts *lps; int i; @@ -1170,31 +1156,20 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) if (i < 0) { /* nothing to register, move on to SCR */ fc_lport_enter_scr(lport); - } else { - fp = fc_frame_alloc(lport, sizeof(*req)); - if (!fp) { - fc_lport_error(lport, fp); - return; - } + return; + } - req = fc_frame_payload_get(fp, sizeof(*req)); - fc_fill_dns_hdr(lport, &req->ct, - FC_NS_RFT_ID, - sizeof(*req) - - sizeof(struct fc_ct_hdr)); - hton24(req->fid.fp_fid, fc_host_port_id(lport->host)); - req->fts = *lps; - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_lport_rft_id_resp, NULL, - lport, lport->e_d_tov, - fc_host_port_id(lport->host), - FC_FID_DIR_SERV, - FC_FC_SEQ_INIT | - 
FC_FC_END_SEQ)) - fc_lport_error(lport, fp); + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rft)); + if (!fp) { + fc_lport_error(lport, fp); + return; } + + if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID, + fc_lport_rft_id_resp, + lport, lport->e_d_tov)) + fc_lport_error(lport, fp); } /** @@ -1207,37 +1182,23 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) static void fc_lport_enter_rpn_id(struct fc_lport *lport) { struct fc_frame *fp; - struct req { - struct fc_ct_hdr ct; - struct fc_ns_rn_id rn; - } *req; FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n", fc_host_port_id(lport->host), fc_lport_state(lport)); fc_lport_state_enter(lport, LPORT_ST_RPN_ID); - fp = fc_frame_alloc(lport, sizeof(*req)); + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + + sizeof(struct fc_ns_rn_id)); if (!fp) { fc_lport_error(lport, fp); return; } - req = fc_frame_payload_get(fp, sizeof(*req)); - memset(req, 0, sizeof(*req)); - fc_fill_dns_hdr(lport, &req->ct, FC_NS_RPN_ID, sizeof(req->rn)); - hton24(req->rn.fr_fid.fp_fid, fc_host_port_id(lport->host)); - put_unaligned_be64(lport->wwpn, &req->rn.fr_wwn); - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_lport_rpn_id_resp, NULL, - lport, lport->e_d_tov, - fc_host_port_id(lport->host), - FC_FID_DIR_SERV, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID, + fc_lport_rpn_id_resp, + lport, lport->e_d_tov)) fc_lport_error(lport, fp); - } /** @@ -1264,16 +1225,10 @@ static void fc_lport_enter_dns(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_DNS); - if (!lport->dns_rp) { - /* Set up a rogue rport to directory server */ - rport = fc_rport_rogue_create(&dp); - - if (!rport) - goto err; - lport->dns_rp = rport; - } + rport = fc_rport_rogue_create(&dp); + if (!rport) + goto err; - rport = lport->dns_rp; rdata = rport->dd_data; rdata->event_callback = fc_lport_rport_event; lport->tt.rport_login(rport); @@ -1388,10 +1343,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_LOGO); /* DNS session should be closed so we can release it here */ - if (lport->dns_rp) { - lport->tt.rport_logout(lport->dns_rp); - lport->dns_rp = NULL; - } + if (lport->dns_rp) + lport->tt.rport_logoff(lport->dns_rp); fp = fc_frame_alloc(lport, sizeof(*logo)); if (!fp) { @@ -1399,19 +1352,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport) return; } - logo = fc_frame_payload_get(fp, sizeof(*logo)); - memset(logo, 0, sizeof(*logo)); - logo->fl_cmd = ELS_LOGO; - hton24(logo->fl_n_port_id, fc_host_port_id(lport->host)); - logo->fl_n_port_wwn = htonll(lport->wwpn); - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - fc_frame_set_offset(fp, 0); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_lport_logo_resp, NULL, - lport, lport->e_d_tov, - fc_host_port_id(lport->host), FC_FID_FLOGI, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp, + lport, lport->e_d_tov)) fc_lport_error(lport, fp); } @@ -1496,8 +1438,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (flp) { csp_flags = ntohs(flp->fl_csp.sp_features); if ((csp_flags & FC_SP_FT_FPORT) == 0) { - if (lport->tt.disc_start(lport)) - FC_DBG("Target disc start error\n"); + lport->tt.disc_start(fc_lport_disc_callback, + lport); } } } else { @@ -1520,29 +1462,18 @@ err: void fc_lport_enter_flogi(struct fc_lport *lport) { struct fc_frame 
*fp; - struct fc_els_flogi *flp; FC_DEBUG_LPORT("Processing FLOGI state\n"); fc_lport_state_enter(lport, LPORT_ST_FLOGI); - fp = fc_frame_alloc(lport, sizeof(*flp)); + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) return fc_lport_error(lport, fp); - flp = fc_frame_payload_get(fp, sizeof(*flp)); - fc_lport_flogi_fill(lport, flp, ELS_FLOGI); - - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - fc_frame_set_offset(fp, 0); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_lport_flogi_resp, NULL, - lport, lport->e_d_tov, - 0, FC_FID_FLOGI, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI, + fc_lport_flogi_resp, lport, lport->e_d_tov)) fc_lport_error(lport, fp); - } /* Configure a fc_lport */ @@ -1550,12 +1481,9 @@ int fc_lport_config(struct fc_lport *lport) { INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); - INIT_LIST_HEAD(&lport->rports); fc_lport_state_enter(lport, LPORT_ST_NONE); - lport->disc_delay = DNS_DELAY; - fc_lport_add_fc4_type(lport, FC_TYPE_FCP); fc_lport_add_fc4_type(lport, FC_TYPE_CT); @@ -1571,12 +1499,6 @@ int fc_lport_init(struct fc_lport *lport) if (!lport->tt.lport_reset) lport->tt.lport_reset = fc_lport_reset; - if (!lport->tt.rport_lookup) - lport->tt.rport_lookup = fc_lport_lookup_rport; - - if (!lport->tt.event_callback) - lport->tt.event_callback = fc_lport_rport_event; - fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; fc_host_node_name(lport->host) = lport->wwnn; fc_host_port_name(lport->host) = lport->wwpn; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 2d0bd85..d081af5 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -36,9 +36,9 @@ * The locking strategy is similar to the lport's strategy. The lock protects * the rport's states and is held and released by the entry points to the rport * block. All _enter_* functions correspond to rport states and expect the rport - * mutex to be locked before calling them. This means that rports only handle one - * request or response at a time, since they're not critical for the I/O path - * this potential over-use of the mutex is acceptable. + * mutex to be locked before calling them. This means that rports only handle + * one request or response at a time, since they're not critical for the I/O + * path this potential over-use of the mutex is acceptable. 
*/ #include @@ -49,7 +49,8 @@ #include #include -#include +#include +#include static int fc_rport_debug; @@ -59,7 +60,7 @@ static int fc_rport_debug; FC_DBG(fmt); \ } while (0) -static struct workqueue_struct *rport_event_queue; +struct workqueue_struct *rport_event_queue; static void fc_rport_enter_plogi(struct fc_rport *); static void fc_rport_enter_prli(struct fc_rport *); @@ -122,7 +123,7 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) rdata->local_port = dp->lp; rdata->trans_state = FC_PORTSTATE_ROGUE; rdata->rp_state = RPORT_ST_INIT; - rdata->event = LPORT_EV_RPORT_NONE; + rdata->event = RPORT_EV_NONE; rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; rdata->event_callback = NULL; rdata->e_d_tov = dp->lp->e_d_tov; @@ -196,43 +197,6 @@ fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval) } /** - * fc_lport_plogi_fill - Fill in PLOGI command for request - * @lport: Fibre Channel host port instance - * @plogi: PLOGI command structure to fill (same structure as FLOGI) - * @op: either ELS_PLOGI for a localy generated request, or ELS_LS_ACC - */ -static void -fc_lport_plogi_fill(struct fc_lport *lport, - struct fc_els_flogi *plogi, unsigned int op) -{ - struct fc_els_csp *sp; - struct fc_els_cssp *cp; - - memset(plogi, 0, sizeof(*plogi)); - plogi->fl_cmd = (u8) op; - put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn); - put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn); - - sp = &plogi->fl_csp; - sp->sp_hi_ver = 0x20; - sp->sp_lo_ver = 0x20; - sp->sp_bb_cred = htons(10); /* this gets set by gateway */ - sp->sp_bb_data = htons((u16) lport->mfs); - cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */ - cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); - if (op != ELS_FLOGI) { - sp->sp_features = htons(FC_SP_FT_CIRO); - sp->sp_tot_seq = htons(255); /* seq. 
we accept */ - sp->sp_rel_off = htons(0x1f); - sp->sp_e_d_tov = htonl(lport->e_d_tov); - - cp->cp_rdfs = htons((u16) lport->mfs); - cp->cp_con_seq = htons(255); - cp->cp_open_seq = 1; - } -} - -/** * fc_rport_state_enter - Change the rport's state * @rport: The rport whose state should change * @new: The new state of the rport @@ -263,7 +227,7 @@ static void fc_rport_work(struct work_struct *work) event = rdata->event; event_callback = rdata->event_callback; - if (event == LPORT_EV_RPORT_CREATED) { + if (event == RPORT_EV_CREATED) { struct fc_rport *new_rport; struct fc_rport_libfc_priv *new_rdata; struct fc_rport_identifiers ids; @@ -300,19 +264,20 @@ static void fc_rport_work(struct work_struct *work) } else { FC_DBG("Failed to create the rport for port " "(%6x).\n", ids.port_id); - event = LPORT_EV_RPORT_FAILED; + event = RPORT_EV_FAILED; } fc_rport_rogue_destroy(rport); rport = new_rport; rdata = new_rport->dd_data; - event_callback(lport, rport, event); - } else if ((event == LPORT_EV_RPORT_FAILED) || - (event == LPORT_EV_RPORT_LOGO) || - (event == LPORT_EV_RPORT_STOP)) { - + if (event_callback) + event_callback(lport, rport, event); + } else if ((event == RPORT_EV_FAILED) || + (event == RPORT_EV_LOGO) || + (event == RPORT_EV_STOP)) { trans_state = rdata->trans_state; mutex_unlock(&rdata->rp_mutex); - event_callback(lport, rport, event); + if (event_callback) + event_callback(lport, rport, event); if (trans_state == FC_PORTSTATE_ROGUE) fc_rport_rogue_destroy(rport); else @@ -345,45 +310,32 @@ int fc_rport_login(struct fc_rport *rport) } /** - * fc_rport_logout - Logout of the remote port and delete it - * @rport: Fibre Channel remote port + * fc_rport_logoff - Logoff and remove an rport + * @rport: Fibre Channel remote port to be removed * * Locking Note: Called without the rport lock held. This * function will hold the rport lock, call an _enter_* * function and then unlock the rport. */ -int fc_rport_logout(struct fc_rport *rport) +int fc_rport_logoff(struct fc_rport *rport) { struct fc_rport_libfc_priv *rdata = rport->dd_data; mutex_lock(&rdata->rp_mutex); - FC_DEBUG_RPORT("Logout of port (%6x)\n", rport->port_id); + FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); fc_rport_enter_logo(rport); - mutex_unlock(&rdata->rp_mutex); - - return 0; -} - -/** - * fc_rport_remove - Remove an rport - * @rport: Fibre Channel remote port to be removed - * - * Locking Note: Called without the rport lock held. This - * function will hold the rport lock, call an _enter_* - * function and then unlock the rport. - */ -int fc_rport_stop(struct fc_rport *rport) -{ - struct fc_rport_libfc_priv *rdata = rport->dd_data; - - mutex_lock(&rdata->rp_mutex); + /* + * Change the state to NONE so that we discard + * the response. 
+ */ + fc_rport_state_enter(rport, RPORT_ST_NONE); - FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); + cancel_delayed_work_sync(&rdata->retry_work); - rdata->event = LPORT_EV_RPORT_STOP; + rdata->event = RPORT_EV_STOP; queue_work(rport_event_queue, &rdata->event_work); mutex_unlock(&rdata->rp_mutex); @@ -406,7 +358,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport) FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); - rdata->event = LPORT_EV_RPORT_CREATED; + rdata->event = RPORT_EV_CREATED; queue_work(rport_event_queue, &rdata->event_work); } @@ -441,9 +393,7 @@ static void fc_rport_timeout(struct work_struct *work) break; case RPORT_ST_READY: case RPORT_ST_INIT: - break; case RPORT_ST_NONE: - BUG(); break; } put_device(&rport->dev); @@ -487,8 +437,9 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) case RPORT_ST_PLOGI: case RPORT_ST_PRLI: case RPORT_ST_LOGO: - rdata->event = LPORT_EV_RPORT_FAILED; - queue_work(rport_event_queue, &rdata->event_work); + rdata->event = RPORT_EV_FAILED; + queue_work(rport_event_queue, + &rdata->event_work); break; case RPORT_ST_RTV: fc_rport_enter_ready(rport); @@ -496,7 +447,6 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) case RPORT_ST_NONE: case RPORT_ST_READY: case RPORT_ST_INIT: - BUG(); break; } } @@ -527,7 +477,8 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_DEBUG_RPORT("Received a PLOGI response\n"); + FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", + rport->port_id); if (rdata->rp_state != RPORT_ST_PLOGI) { FC_DBG("Received a PLOGI response, but in state %s\n", @@ -588,7 +539,6 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; - struct fc_els_flogi *plogi; FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", rport->port_id, fc_rport_state(rport)); @@ -596,23 +546,15 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) fc_rport_state_enter(rport, RPORT_ST_PLOGI); rport->maxframe_size = FC_MIN_MAX_PAYLOAD; - fp = fc_frame_alloc(lport, sizeof(*plogi)); + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { fc_rport_error(rport, fp); return; } - - plogi = fc_frame_payload_get(fp, sizeof(*plogi)); - fc_lport_plogi_fill(rdata->local_port, plogi, ELS_PLOGI); rdata->e_d_tov = lport->e_d_tov; - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_rport_plogi_resp, NULL, - rport, lport->e_d_tov, - fc_host_port_id(rdata->local_port->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + + if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, + fc_rport_plogi_resp, rport, lport->e_d_tov)) fc_rport_error(rport, fp); } @@ -641,7 +583,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_DEBUG_RPORT("Received a PRLI response\n"); + FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", + rport->port_id); if (rdata->rp_state != RPORT_ST_PRLI) { FC_DBG("Received a PRLI response, but in state %s\n", @@ -674,7 +617,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, } else { FC_DBG("Bad ELS response\n"); - rdata->event = LPORT_EV_RPORT_FAILED; + rdata->event = RPORT_EV_FAILED; queue_work(rport_event_queue, &rdata->event_work); } @@ -703,25 +646,26 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, 
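Note the reordering in fc_rport_logo_resp() below: the IS_ERR(fp) check now runs before the state check, so an error pointer is never dereferenced, and only a real frame has its first payload byte inspected for LS_ACC. The common shape of these response handlers, rendered as a standalone sketch; struct frame, payload_op() and els_resp() are stand-ins, not libfc API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ELS_LS_ACC 0x02		/* ELS accept opcode */

struct frame {
	unsigned char payload[64];
	size_t len;
};

static unsigned char payload_op(const struct frame *fp)
{
	return fp->len ? fp->payload[0] : 0;
}

static void els_resp(const struct frame *fp, bool is_err)
{
	if (is_err) {
		/* error "frame": run the error path, never touch fp */
		printf("error: retry or fail the exchange\n");
		return;
	}
	if (payload_op(fp) == ELS_LS_ACC)
		printf("LS_ACC: advance the state machine\n");
	else
		printf("reject: record the event and queue the work item\n");
}

int main(void)
{
	struct frame acc = { .payload = { ELS_LS_ACC }, .len = 1 };

	els_resp(&acc, false);
	els_resp(NULL, true);	/* the early return keeps this safe */
	return 0;
}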
mutex_lock(&rdata->rp_mutex); - FC_DEBUG_RPORT("Received a LOGO response\n"); - - if (rdata->rp_state != RPORT_ST_LOGO) { - FC_DBG("Received a LOGO response, but in state %s\n", - fc_rport_state(rport)); - goto out; - } + FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n", + rport->port_id); if (IS_ERR(fp)) { fc_rport_error(rport, fp); goto err; } + if (rdata->rp_state != RPORT_ST_LOGO) { + FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n", + fc_rport_state(rport)); + goto out; + } + op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) { fc_rport_enter_rtv(rport); } else { FC_DBG("Bad ELS response\n"); - rdata->event = LPORT_EV_RPORT_LOGO; + rdata->event = RPORT_EV_LOGO; queue_work(rport_event_queue, &rdata->event_work); } @@ -759,22 +703,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport) return; } - pp = fc_frame_payload_get(fp, sizeof(*pp)); - memset(pp, 0, sizeof(*pp)); - pp->prli.prli_cmd = ELS_PRLI; - pp->prli.prli_spp_len = sizeof(struct fc_els_spp); - pp->prli.prli_len = htons(sizeof(*pp)); - pp->spp.spp_type = FC_TYPE_FCP; - pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR; - pp->spp.spp_params = htonl(lport->service_params); - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_rport_prli_resp, NULL, - rport, lport->e_d_tov, - fc_host_port_id(lport->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, + fc_rport_prli_resp, rport, lport->e_d_tov)) fc_rport_error(rport, fp); } @@ -799,7 +729,8 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, mutex_lock(&rdata->rp_mutex); - FC_DEBUG_RPORT("Received a RTV response\n"); + FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", + rport->port_id); if (rdata->rp_state != RPORT_ST_RTV) { FC_DBG("Received a RTV response, but in state %s\n", @@ -851,7 +782,6 @@ err: */ static void fc_rport_enter_rtv(struct fc_rport *rport) { - struct fc_els_rtv *rtv; struct fc_frame *fp; struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; @@ -861,23 +791,14 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) fc_rport_state_enter(rport, RPORT_ST_RTV); - fp = fc_frame_alloc(lport, sizeof(*rtv)); + fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); if (!fp) { fc_rport_error(rport, fp); return; } - rtv = fc_frame_payload_get(fp, sizeof(*rtv)); - memset(rtv, 0, sizeof(*rtv)); - rtv->rtv_cmd = ELS_RTV; - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_rport_rtv_resp, NULL, - rport, lport->e_d_tov, - fc_host_port_id(lport->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, + fc_rport_rtv_resp, rport, lport->e_d_tov)) fc_rport_error(rport, fp); } @@ -893,32 +814,20 @@ static void fc_rport_enter_logo(struct fc_rport *rport) struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; - struct fc_els_logo *logo; FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", rport->port_id, fc_rport_state(rport)); fc_rport_state_enter(rport, RPORT_ST_LOGO); - fp = fc_frame_alloc(lport, sizeof(*logo)); + fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); if (!fp) { fc_rport_error(rport, fp); return; } - logo = fc_frame_payload_get(fp, sizeof(*logo)); - memset(logo, 0, sizeof(*logo)); - logo->fl_cmd = ELS_LOGO; - hton24(logo->fl_n_port_id, fc_host_port_id(lport->host)); - 
logo->fl_n_port_wwn = htonll(lport->wwpn); - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS); - - if (!lport->tt.exch_seq_send(lport, fp, - fc_rport_logo_resp, NULL, - rport, lport->e_d_tov, - fc_host_port_id(lport->host), - rport->port_id, - FC_FC_SEQ_INIT | FC_FC_END_SEQ)) + if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, + fc_rport_logo_resp, rport, lport->e_d_tov)) fc_rport_error(rport, fp); } @@ -982,7 +891,6 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, } mutex_unlock(&rdata->rp_mutex); - fc_frame_free(fp); } /** @@ -1000,7 +908,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; struct fc_frame *fp = rx_fp; - + struct fc_exch *ep; struct fc_frame_header *fh; struct fc_els_flogi *pl; struct fc_seq_els_data rjt_data; @@ -1089,17 +997,18 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, rport->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); fc_frame_free(rx_fp); - pl = fc_frame_payload_get(fp, sizeof(*pl)); - WARN_ON(!pl); - fc_lport_plogi_fill(lport, pl, ELS_LS_ACC); + fc_plogi_fill(lport, fp, ELS_LS_ACC); /* * Send LS_ACC. If this fails, * the originator should retry. */ - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - lport->tt.seq_send(lport, sp, fp, f_ctl); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; + ep = fc_seq_exch(sp); + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); if (rdata->rp_state == RPORT_ST_PLOGI) fc_rport_enter_prli(rport); } @@ -1120,7 +1029,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, { struct fc_rport_libfc_priv *rdata = rport->dd_data; struct fc_lport *lport = rdata->local_port; - + struct fc_exch *ep; struct fc_frame *fp; struct fc_frame_header *fh; struct { @@ -1234,9 +1143,12 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, /* * Send LS_ACC. If this fails, the originator should retry. */ - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - lport->tt.seq_send(lport, sp, fp, f_ctl); + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; + ep = fc_seq_exch(sp); + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); /* * Get lock and re-check state. 
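fc_rport_init() in the hunk after this one follows libfc's usual template convention: install a default handler only where the LLD left the transport-template slot NULL (rport_login, rport_logoff, rport_recv_req and the new rport_flush_queue). A standalone rendering of that pattern, with sketch-local types in place of the libfc function template.

#include <stdio.h>

struct ops {
	void (*login)(void);
	void (*logoff)(void);
	void (*flush_queue)(void);
};

static void default_login(void)  { puts("default login"); }
static void default_logoff(void) { puts("default logoff"); }
static void default_flush(void)  { puts("default flush"); }

static void ops_init(struct ops *tt)
{
	/* only install a default where the caller left a gap */
	if (!tt->login)
		tt->login = default_login;
	if (!tt->logoff)
		tt->logoff = default_logoff;
	if (!tt->flush_queue)
		tt->flush_queue = default_flush;
}

int main(void)
{
	struct ops tt = { .logoff = default_logoff };	/* LLD override */

	ops_init(&tt);
	tt.login();		/* libfc-style default */
	tt.flush_queue();	/* libfc-style default */
	return 0;
}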
@@ -1307,27 +1219,33 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, "while in state %s\n", ntoh24(fh->fh_s_id), fc_rport_state(rport)); - rdata->event = LPORT_EV_RPORT_LOGO; + rdata->event = RPORT_EV_LOGO; queue_work(rport_event_queue, &rdata->event_work); lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); fc_frame_free(fp); } +static void fc_rport_flush_queue(void) +{ + flush_workqueue(rport_event_queue); +} + + int fc_rport_init(struct fc_lport *lport) { if (!lport->tt.rport_login) lport->tt.rport_login = fc_rport_login; - if (!lport->tt.rport_logout) - lport->tt.rport_logout = fc_rport_logout; - - if (!lport->tt.rport_stop) - lport->tt.rport_stop = fc_rport_stop; + if (!lport->tt.rport_logoff) + lport->tt.rport_logoff = fc_rport_logoff; if (!lport->tt.rport_recv_req) lport->tt.rport_recv_req = fc_rport_recv_req; + if (!lport->tt.rport_flush_queue) + lport->tt.rport_flush_queue = fc_rport_flush_queue; + return 0; } EXPORT_SYMBOL(fc_rport_init); diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h index 59c9d0c..a6118a2 100644 --- a/include/scsi/fc/fc_fcoe.h +++ b/include/scsi/fc/fc_fcoe.h @@ -31,6 +31,10 @@ #define ETH_P_FCOE 0x8906 /* FCOE ether type */ #endif +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + /* * FC_FCOE_OUI hasn't been standardized yet. XXX TBD. */ @@ -81,7 +85,9 @@ struct fcoe_crc_eof { } __attribute__((packed)); /* - * Store OUI + DID into MAC address field. + * fc_fcoe_set_mac - Store OUI + DID into MAC address field. + * @mac: mac address to be set + * @did: fc dest id to use */ static inline void fc_fcoe_set_mac(u8 *mac, u8 *did) { @@ -93,8 +99,4 @@ static inline void fc_fcoe_set_mac(u8 *mac, u8 *did) mac[5] = did[2]; } -#ifndef ETH_P_8021Q -#define ETH_P_8021Q 0x8100 -#endif - #endif /* _FC_FCOE_H_ */ diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h index 3897c6c..3e4801d 100644 --- a/include/scsi/fc/fc_fs.h +++ b/include/scsi/fc/fc_fs.h @@ -82,6 +82,12 @@ enum fc_rctl { FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */ FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */ /* + * Optional Extended Headers + */ + FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */ + FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */ + FC_RCTL_ENCH = 0x52, /* encapsulation header */ + /* * Basic Link Services fh_r_ctl values. */ FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */ @@ -200,6 +206,8 @@ enum fc_fh_type { * Exchange IDs. */ #define FC_XID_UNKNOWN 0xffff /* unknown exchange ID */ +#define FC_XID_MIN 0x0 /* supported min exchange ID */ +#define FC_XID_MAX 0xfffe /* supported max exchange ID */ /* * fh_f_ctl - Frame control flags. diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h new file mode 100644 index 0000000..6300f55 --- /dev/null +++ b/include/scsi/fc_encode.h @@ -0,0 +1,309 @@ +/* + * Copyright(c) 2008 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FC_ENCODE_H_ +#define _FC_ENCODE_H_ +#include + +struct fc_ns_rft { + struct fc_ns_fid fid; /* port ID object */ + struct fc_ns_fts fts; /* FC4-types object */ +}; + +struct fc_ct_req { + struct fc_ct_hdr hdr; + union { + struct fc_ns_gid_ft gid; + struct fc_ns_rn_id rn; + struct fc_ns_rft rft; + } payload; +}; + +/** + * fill FC header fields in specified fc_frame + */ +static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl, + u32 did, u32 sid, enum fc_fh_type type, + u32 f_ctl, u32 parm_offset) +{ + struct fc_frame_header *fh; + + fh = fc_frame_header_get(fp); + WARN_ON(r_ctl == 0); + fh->fh_r_ctl = r_ctl; + hton24(fh->fh_d_id, did); + hton24(fh->fh_s_id, sid); + fh->fh_type = type; + hton24(fh->fh_f_ctl, f_ctl); + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = htonl(parm_offset); +} + +/** + * fc_ct_hdr_fill- fills ct header and reset ct payload + * returns pointer to ct request. + */ +static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp, + unsigned int op, size_t req_size) +{ + struct fc_ct_req *ct; + size_t ct_plen; + + ct_plen = sizeof(struct fc_ct_hdr) + req_size; + ct = fc_frame_payload_get(fp, ct_plen); + memset(ct, 0, ct_plen); + ct->hdr.ct_rev = FC_CT_REV; + ct->hdr.ct_fs_type = FC_FST_DIR; + ct->hdr.ct_fs_subtype = FC_NS_SUBTYPE; + ct->hdr.ct_cmd = htons((u16) op); + return ct; +} + +/** + * fc_ct_fill - Fill in a name service request frame + */ +static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp, + unsigned int op, enum fc_rctl *r_ctl, u32 *did, + enum fc_fh_type *fh_type) +{ + struct fc_ct_req *ct; + + switch (op) { + case FC_NS_GPN_FT: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft)); + ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; + break; + + case FC_NS_RFT_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft)); + hton24(ct->payload.rft.fid.fp_fid, + fc_host_port_id(lport->host)); + ct->payload.rft.fts = lport->fcts; + break; + + case FC_NS_RPN_ID: + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id)); + hton24(ct->payload.rn.fr_fid.fp_fid, + fc_host_port_id(lport->host)); + ct->payload.rft.fts = lport->fcts; + put_unaligned_be64(lport->wwpn, &ct->payload.rn.fr_wwn); + break; + + default: + FC_DBG("Invalid op code %x \n", op); + return -EINVAL; + } + *r_ctl = FC_RCTL_DD_UNSOL_CTL; + *did = FC_FID_DIR_SERV; + *fh_type = FC_TYPE_CT; + return 0; +} + +/** + * fc_plogi_fill - Fill in plogi request frame + */ +static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp, + unsigned int op) +{ + struct fc_els_flogi *plogi; + struct fc_els_csp *csp; + struct fc_els_cssp *cp; + + plogi = fc_frame_payload_get(fp, sizeof(*plogi)); + memset(plogi, 0, sizeof(*plogi)); + plogi->fl_cmd = (u8) op; + put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn); + put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn); + + csp = &plogi->fl_csp; + csp->sp_hi_ver = 0x20; + csp->sp_lo_ver = 0x20; + csp->sp_bb_cred = htons(10); /* this gets set by gateway */ + csp->sp_bb_data = htons((u16) lport->mfs); + cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); + csp->sp_features = htons(FC_SP_FT_CIRO); + csp->sp_tot_seq = htons(255); /* seq. 
we accept */ + csp->sp_rel_off = htons(0x1f); + csp->sp_e_d_tov = htonl(lport->e_d_tov); + + cp->cp_rdfs = htons((u16) lport->mfs); + cp->cp_con_seq = htons(255); + cp->cp_open_seq = 1; +} + +/** + * fc_flogi_fill - Fill in a flogi request frame. + */ +static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_csp *sp; + struct fc_els_cssp *cp; + struct fc_els_flogi *flogi; + + flogi = fc_frame_payload_get(fp, sizeof(*flogi)); + memset(flogi, 0, sizeof(*flogi)); + flogi->fl_cmd = (u8) ELS_FLOGI; + put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn); + put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn); + sp = &flogi->fl_csp; + sp->sp_hi_ver = 0x20; + sp->sp_lo_ver = 0x20; + sp->sp_bb_cred = htons(10); /* this gets set by gateway */ + sp->sp_bb_data = htons((u16) lport->mfs); + cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); +} + +/** + * fc_logo_fill - Fill in a logo request frame. + */ +static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_logo *logo; + + logo = fc_frame_payload_get(fp, sizeof(*logo)); + memset(logo, 0, sizeof(*logo)); + logo->fl_cmd = ELS_LOGO; + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host)); + logo->fl_n_port_wwn = htonll(lport->wwpn); +} + +/** + * fc_rtv_fill - Fill in RTV (read timeout value) request frame. + */ +static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_rtv *rtv; + + rtv = fc_frame_payload_get(fp, sizeof(*rtv)); + memset(rtv, 0, sizeof(*rtv)); + rtv->rtv_cmd = ELS_RTV; +} + +/** + * fc_rec_fill - Fill in rec request frame + */ +static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_rec *rec; + struct fc_exch *ep = fc_seq_exch(fr_seq(fp)); + + rec = fc_frame_payload_get(fp, sizeof(*rec)); + memset(rec, 0, sizeof(*rec)); + rec->rec_cmd = ELS_REC; + hton24(rec->rec_s_id, fc_host_port_id(lport->host)); + rec->rec_ox_id = htons(ep->oxid); + rec->rec_rx_id = htons(ep->rxid); +} + +/** + * fc_prli_fill - Fill in prli request frame + */ +static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct { + struct fc_els_prli prli; + struct fc_els_spp spp; + } *pp; + + pp = fc_frame_payload_get(fp, sizeof(*pp)); + memset(pp, 0, sizeof(*pp)); + pp->prli.prli_cmd = ELS_PRLI; + pp->prli.prli_spp_len = sizeof(struct fc_els_spp); + pp->prli.prli_len = htons(sizeof(*pp)); + pp->spp.spp_type = FC_TYPE_FCP; + pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR; + pp->spp.spp_params = htonl(lport->service_params); +} + +/** + * fc_scr_fill - Fill in a scr request frame. + */ +static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_els_scr *scr; + + scr = fc_frame_payload_get(fp, sizeof(*scr)); + memset(scr, 0, sizeof(*scr)); + scr->scr_cmd = ELS_SCR; + scr->scr_reg_func = ELS_SCRF_FULL; +} + +/** + * fc_els_fill - Fill in an ELS request frame + */ +static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport, + struct fc_frame *fp, unsigned int op, + enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type) +{ + switch (op) { + case ELS_PLOGI: + fc_plogi_fill(lport, fp, ELS_PLOGI); + *did = rport->port_id; + break; + + case ELS_FLOGI: + fc_flogi_fill(lport, fp); + *did = FC_FID_FLOGI; + break; + + case ELS_LOGO: + fc_logo_fill(lport, fp); + *did = FC_FID_FLOGI; + /* + * if rport is valid then it + * is port logo, therefore + * set did to rport id. 
+ */ + if (rport) + *did = rport->port_id; + break; + + case ELS_RTV: + fc_rtv_fill(lport, fp); + *did = rport->port_id; + break; + + case ELS_REC: + fc_rec_fill(lport, fp); + *did = rport->port_id; + break; + + case ELS_PRLI: + fc_prli_fill(lport, fp); + *did = rport->port_id; + break; + + case ELS_SCR: + fc_scr_fill(lport, fp); + *did = FC_FID_FCTRL; + break; + + default: + FC_DBG("Invalid op code %x \n", op); + return -EINVAL; + } + + *r_ctl = FC_RCTL_ELS_REQ; + *fh_type = FC_TYPE_ELS; + return 0; +} +#endif /* _FC_ENCODE_H_ */ diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h new file mode 100644 index 0000000..dc5f734 --- /dev/null +++ b/include/scsi/fc_frame.h @@ -0,0 +1,239 @@ +/* + * Copyright(c) 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _FC_FRAME_H_ +#define _FC_FRAME_H_ + +#include +#include +#include + +#include +#include +#include + +/* + * The fc_frame interface is used to pass frame data between functions. + * The frame includes the data buffer, length, and SOF / EOF delimiter types. + * A pointer to the port structure of the receiving port is also includeded. + */ + +#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */ +#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */ + +/* + * Information about an individual fibre channel frame received or to be sent. + * The buffer may be in up to 4 additional non-contiguous sections, + * but the linear section must hold the frame header. + */ +#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */ + +#define fp_skb(fp) (&((fp)->skb)) +#define fr_hdr(fp) ((fp)->skb.data) +#define fr_len(fp) ((fp)->skb.len) +#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0])) +#define fr_dev(fp) (fr_cb(fp)->fr_dev) +#define fr_seq(fp) (fr_cb(fp)->fr_seq) +#define fr_sof(fp) (fr_cb(fp)->fr_sof) +#define fr_eof(fp) (fr_cb(fp)->fr_eof) +#define fr_flags(fp) (fr_cb(fp)->fr_flags) +#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload) +#define fr_cmd(fp) (fr_cb(fp)->fr_cmd) +#define fr_dir(fp) (fr_cmd(fp)->sc_data_direction) + +struct fc_frame { + struct sk_buff skb; +}; + +struct fcoe_rcv_info { + struct packet_type *ptype; + struct fc_lport *fr_dev; /* transport layer private pointer */ + struct fc_seq *fr_seq; /* for use with exchange manager */ + struct scsi_cmnd *fr_cmd; /* for use of scsi command */ + enum fc_sof fr_sof; /* start of frame delimiter */ + enum fc_eof fr_eof; /* end of frame delimiter */ + u8 fr_flags; /* flags - see below */ + u16 fr_max_payload; /* max FC payload */ +}; + +/* + * Get fc_frame pointer for an skb that's already been imported. + */ +static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb)); + return (struct fcoe_rcv_info *) skb->cb; +} + +/* + * fr_flags. 
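fcoe_dev_from_skb() above overlays struct fcoe_rcv_info on the skb's control buffer and guards the cast with BUILD_BUG_ON(). The same idea in plain C11, using static_assert and sketch-local stand-ins for sk_buff and fcoe_rcv_info; the 48-byte cb[] size matches sk_buff's, everything else is illustrative only.

#include <assert.h>
#include <stdio.h>

struct skb_model {
	char cb[48];		/* sk_buff reserves 48 bytes of cb[] */
	/* ... the rest of the skb ... */
};

struct rcv_info_model {
	void *fr_dev;
	void *fr_seq;
	unsigned char fr_sof, fr_eof, fr_flags;
	unsigned short fr_max_payload;
};

/* compile-time guard, as BUILD_BUG_ON does in the header above */
static_assert(sizeof(struct rcv_info_model) <=
	      sizeof(((struct skb_model *)0)->cb),
	      "receive info must fit in skb->cb");

static struct rcv_info_model *info_from_skb(struct skb_model *skb)
{
	return (struct rcv_info_model *)skb->cb;
}

int main(void)
{
	struct skb_model skb;

	info_from_skb(&skb)->fr_flags = 0x01;	/* e.g. FCPHF_CRC_UNCHECKED */
	printf("fr_flags %#x\n", (unsigned)info_from_skb(&skb)->fr_flags);
	return 0;
}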
+ */ +#define FCPHF_CRC_UNCHECKED 0x01 /* CRC not computed, still appended */ + +/* + * Initialize a frame. + * We don't do a complete memset here for performance reasons. + * The caller must set fr_free, fr_hdr, fr_len, fr_sof, and fr_eof eventually. + */ +static inline void fc_frame_init(struct fc_frame *fp) +{ + fr_dev(fp) = NULL; + fr_seq(fp) = NULL; + fr_flags(fp) = 0; +} + +struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len); + +struct fc_frame *__fc_frame_alloc(size_t payload_len); + +/* + * Get frame for sending via port. + */ +static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev, + size_t payload_len) +{ + return __fc_frame_alloc(payload_len); +} + +/* + * Allocate fc_frame structure and buffer. Set the initial length to + * payload_size + sizeof (struct fc_frame_header). + */ +static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len) +{ + struct fc_frame *fp; + + /* + * Note: Since len will often be a constant multiple of 4, + * this check will usually be evaluated and eliminated at compile time. + */ + if ((len % 4) != 0) + fp = fc_frame_alloc_fill(dev, len); + else + fp = _fc_frame_alloc(dev, len); + return fp; +} + +/* + * Free the fc_frame structure and buffer. + */ +static inline void fc_frame_free(struct fc_frame *fp) +{ + kfree_skb(fp_skb(fp)); +} + +static inline int fc_frame_is_linear(struct fc_frame *fp) +{ + return !skb_is_nonlinear(fp_skb(fp)); +} + +/* + * Get frame header from message in fc_frame structure. + * This hides a cast and provides a place to add some checking. + */ +static inline +struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp) +{ + WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header)); + return (struct fc_frame_header *) fr_hdr(fp); +} + +/* + * Get frame payload from message in fc_frame structure. + * This hides a cast and provides a place to add some checking. + * The len parameter is the minimum length for the payload portion. + * Returns NULL if the frame is too short. + * + * This assumes the interesting part of the payload is in the first part + * of the buffer for received data. This may not be appropriate to use for + * buffers being transmitted. + */ +static inline void *fc_frame_payload_get(const struct fc_frame *fp, + size_t len) +{ + void *pp = NULL; + + if (fr_len(fp) >= sizeof(struct fc_frame_header) + len) + pp = fc_frame_header_get(fp) + 1; + return pp; +} + +/* + * Get frame payload opcode (first byte) from message in fc_frame structure. + * This hides a cast and provides a place to add some checking. Return 0 + * if the frame has no payload. + */ +static inline u8 fc_frame_payload_op(const struct fc_frame *fp) +{ + u8 *cp; + + cp = fc_frame_payload_get(fp, sizeof(u8)); + if (!cp) + return 0; + return *cp; + +} + +/* + * Get FC class from frame. + */ +static inline enum fc_class fc_frame_class(const struct fc_frame *fp) +{ + return fc_sof_class(fr_sof(fp)); +} + +/* + * Check the CRC in a frame. + * The CRC immediately follows the last data item *AFTER* the length. + * The return value is zero if the CRC matches. 
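
For example, an ELS request can be built from these allocation helpers together with fc_els_fill() from fc_encode.h earlier in this patch. This is a hedged sketch only; my_prepare_flogi and its error handling are illustrative, and a real caller would go on to copy r_ctl/did/fh_type into the FC header before sending:

static struct fc_frame *my_prepare_flogi(struct fc_lport *lport)
{
	struct fc_frame *fp;
	enum fc_rctl r_ctl;
	enum fc_fh_type fh_type;
	u32 did;

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return NULL;

	/* fill the FLOGI payload; r_ctl, did and fh_type come back for the header */
	if (fc_els_fill(lport, NULL, fp, ELS_FLOGI, &r_ctl, &did, &fh_type)) {
		fc_frame_free(fp);
		return NULL;
	}
	return fp;
}
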
+ */ +u32 fc_frame_crc_check(struct fc_frame *); + +static inline u8 fc_frame_rctl(const struct fc_frame *fp) +{ + return fc_frame_header_get(fp)->fh_r_ctl; +} + +static inline bool fc_frame_is_cmd(const struct fc_frame *fp) +{ + return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD; +} + +static inline bool fc_frame_is_read(const struct fc_frame *fp) +{ + if (fc_frame_is_cmd(fp) && fr_cmd(fp)) + return fr_dir(fp) == DMA_FROM_DEVICE; + return false; +} + +static inline bool fc_frame_is_write(const struct fc_frame *fp) +{ + if (fc_frame_is_cmd(fp) && fr_cmd(fp)) + return fr_dir(fp) == DMA_TO_DEVICE; + return false; +} + +/* + * Check for leaks. + * Print the frame header of any currently allocated frame, assuming there + * should be none at this point. + */ +void fc_frame_leak_check(void); + +#endif /* _FC_FRAME_H_ */ diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h new file mode 100644 index 0000000..8dca2af --- /dev/null +++ b/include/scsi/fc_transport_fcoe.h @@ -0,0 +1,54 @@ +#ifndef FC_TRANSPORT_FCOE_H +#define FC_TRANSPORT_FCOE_H + +#include +#include +#include +#include + +/** + * struct fcoe_transport - FCoE transport struct for generic transport + * for Ethernet devices as well as pure HBAs + * + * @name: name for this transport + * @bus: physical bus type (pci_bus_type) + * @driver: physical bus driver for network device + * @create: handler that creates an FCoE instance on a netdev + * @destroy: handler that destroys an FCoE instance on a netdev + * @list: list of transports + */ +struct fcoe_transport { + char *name; + unsigned short vendor; + unsigned short device; + struct bus_type *bus; + struct device_driver *driver; + int (*create)(struct net_device *device); + int (*destroy)(struct net_device *device); + bool (*match)(struct net_device *device); + struct list_head list; + struct list_head devlist; + struct mutex devlock; +}; + +/** + * MODULE_ALIAS_FCOE_PCI + * + * Some care must be taken with this: vendor and device MUST be hex values + * preceded with 0x and with letters in lower case (0x12ab, not 0x12AB or 12AB). + */ +#define MODULE_ALIAS_FCOE_PCI(vendor, device) \ + MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device)) + +/* exported funcs */ +int fcoe_transport_attach(struct net_device *netdev); +int fcoe_transport_release(struct net_device *netdev); +int fcoe_transport_register(struct fcoe_transport *t); +int fcoe_transport_unregister(struct fcoe_transport *t); +int fcoe_load_transport_driver(struct net_device *netdev); +int __init fcoe_transport_init(void); +int __exit fcoe_transport_exit(void); + +/* fcoe_sw is the default transport */ +extern struct fcoe_transport fcoe_sw_transport; +#endif /* FC_TRANSPORT_FCOE_H */ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h new file mode 100644 index 0000000..dac03e2 --- /dev/null +++ b/include/scsi/libfc.h @@ -0,0 +1,917 @@ +/* + * Copyright(c) 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
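
To show how a vendor driver is expected to plug into the fc_transport_fcoe.h interface above, here is a hedged registration sketch; the my_* names and the PCI IDs are invented for illustration and are not part of the patch:

static struct fcoe_transport my_fcoe_transport = {
	.name	 = "my_fcoe",
	.vendor	 = 0x1234,		/* illustrative PCI vendor ID */
	.device	 = 0xabcd,		/* illustrative PCI device ID */
	.create	 = my_fcoe_create,	/* bring up FCoE on a matching netdev */
	.destroy = my_fcoe_destroy,
};

static int __init my_fcoe_init(void)
{
	return fcoe_transport_register(&my_fcoe_transport);
}

static void __exit my_fcoe_exit(void)
{
	fcoe_transport_unregister(&my_fcoe_transport);
}

module_init(my_fcoe_init);
module_exit(my_fcoe_exit);
MODULE_ALIAS_FCOE_PCI(0x1234, 0xabcd);	/* allows module autoload by PCI ID */
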
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _LIBFC_H_ +#define _LIBFC_H_ + +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#define LIBFC_DEBUG + +#ifdef LIBFC_DEBUG +/* Log messages */ +#define FC_DBG(fmt, args...) \ + do { \ + printk(KERN_INFO "%s " fmt, __func__, ##args); \ + } while (0) +#else +#define FC_DBG(fmt, args...) +#endif + +/* + * libfc error codes + */ +#define FC_NO_ERR 0 /* no error */ +#define FC_EX_TIMEOUT 1 /* Exchange timeout */ +#define FC_EX_CLOSED 2 /* Exchange closed */ + +/* some helpful macros */ + +#define ntohll(x) be64_to_cpu(x) +#define htonll(x) cpu_to_be64(x) + +#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) + +#define hton24(p, v) do { \ + p[0] = (((v) >> 16) & 0xFF); \ + p[1] = (((v) >> 8) & 0xFF); \ + p[2] = ((v) & 0xFF); \ + } while (0) + +/* + * FC HBA status + */ +#define FC_PAUSE (1 << 1) +#define FC_LINK_UP (1 << 0) + +enum fc_lport_state { + LPORT_ST_NONE = 0, + LPORT_ST_FLOGI, + LPORT_ST_DNS, + LPORT_ST_RPN_ID, + LPORT_ST_RFT_ID, + LPORT_ST_SCR, + LPORT_ST_READY, + LPORT_ST_LOGO, + LPORT_ST_RESET +}; + +enum fc_disc_event { + DISC_EV_NONE = 0, + DISC_EV_SUCCESS, + DISC_EV_FAILED +}; + +enum fc_lport_event { + RPORT_EV_NONE = 0, + RPORT_EV_CREATED, + RPORT_EV_FAILED, + RPORT_EV_STOP, + RPORT_EV_LOGO +}; + +enum fc_rport_state { + RPORT_ST_NONE = 0, + RPORT_ST_INIT, /* initialized */ + RPORT_ST_PLOGI, /* waiting for PLOGI completion */ + RPORT_ST_PRLI, /* waiting for PRLI completion */ + RPORT_ST_RTV, /* waiting for RTV completion */ + RPORT_ST_READY, /* ready for use */ + RPORT_ST_LOGO, /* port logout sent */ +}; + +enum fc_rport_trans_state { + FC_PORTSTATE_ROGUE, + FC_PORTSTATE_REAL, +}; + +/** + * struct fc_disc_port - temporary discovery port to hold rport identifiers + * @lp: Fibre Channel host port instance + * @peers: node for list management during discovery and RSCN processing + * @ids: identifiers structure to pass to fc_remote_port_add() + * @rport_work: work struct for starting the rport state machine + */ +struct fc_disc_port { + struct fc_lport *lp; + struct list_head peers; + struct fc_rport_identifiers ids; + struct work_struct rport_work; +}; + +/** + * struct fc_rport_libfc_priv - libfc internal information about a remote port + * @local_port: Fibre Channel host port instance + * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges + * @flags: REC and RETRY supported flags + * @max_seq: maximum number of concurrent sequences + * @retries: retry count in current state + * @e_d_tov: error detect timeout value (in msec) + * @r_a_tov: resource allocation timeout value (in msec) + * @rp_mutex: mutex protects rport + * @retry_work: + * @event_callback: Callback for rport READY, FAILED or LOGO + */ +struct fc_rport_libfc_priv { + struct fc_lport *local_port; + enum fc_rport_state rp_state; + u16 flags; + #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) + #define FC_RP_FLAGS_RETRY (1 << 1) + u16 max_seq; + unsigned int retries; + unsigned int e_d_tov; + unsigned int r_a_tov; + enum fc_rport_trans_state trans_state; + struct mutex rp_mutex; + struct delayed_work retry_work; + enum fc_lport_event event; + void (*event_callback)(struct fc_lport *, + struct fc_rport *, + enum fc_lport_event); + struct list_head peers; + struct work_struct 
event_work; +}; + +#define PRIV_TO_RPORT(x) \ + (struct fc_rport *)((void *)x - sizeof(struct fc_rport)); +#define RPORT_TO_PRIV(x) \ + (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport)); + +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *); +void fc_rport_rogue_destroy(struct fc_rport *); + +static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn) +{ + rport->node_name = wwnn; + rport->port_name = wwpn; +} + +/* + * fcoe stats structure + */ +struct fcoe_dev_stats { + u64 SecondsSinceLastReset; + u64 TxFrames; + u64 TxWords; + u64 RxFrames; + u64 RxWords; + u64 ErrorFrames; + u64 DumpedFrames; + u64 LinkFailureCount; + u64 LossOfSignalCount; + u64 InvalidTxWordCount; + u64 InvalidCRCCount; + u64 InputRequests; + u64 OutputRequests; + u64 ControlRequests; + u64 InputMegabytes; + u64 OutputMegabytes; +}; + +/* + * els data is used for passing ELS respone specific + * data to send ELS response mainly using infomation + * in exchange and sequence in EM layer. + */ +struct fc_seq_els_data { + struct fc_frame *fp; + enum fc_els_rjt_reason reason; + enum fc_els_rjt_explan explan; +}; + +/* + * FCP request structure, one for each scsi cmd request + */ +struct fc_fcp_pkt { + /* + * housekeeping stuff + */ + struct fc_lport *lp; /* handle to hba struct */ + u16 state; /* scsi_pkt state state */ + u16 tgt_flags; /* target flags */ + atomic_t ref_cnt; /* fcp pkt ref count */ + spinlock_t scsi_pkt_lock; /* Must be taken before the host lock + * if both are held at the same time */ + /* + * SCSI I/O related stuff + */ + struct scsi_cmnd *cmd; /* scsi command pointer. set/clear + * under host lock */ + struct list_head list; /* tracks queued commands. access under + * host lock */ + /* + * timeout related stuff + */ + struct timer_list timer; /* command timer */ + struct completion tm_done; + int wait_for_comp; + unsigned long start_time; /* start jiffie */ + unsigned long end_time; /* end jiffie */ + unsigned long last_pkt_time; /* jiffies of last frame received */ + + /* + * scsi cmd and data transfer information + */ + u32 data_len; + /* + * transport related veriables + */ + struct fcp_cmnd cdb_cmd; + size_t xfer_len; + u32 xfer_contig_end; /* offset of end of contiguous xfer */ + u16 max_payload; /* max payload size in bytes */ + + /* + * scsi/fcp return status + */ + u32 io_status; /* SCSI result upper 24 bits */ + u8 cdb_status; + u8 status_code; /* FCP I/O status */ + /* bit 3 Underrun bit 2: overrun */ + u8 scsi_comp_flags; + u32 req_flags; /* bit 0: read bit:1 write */ + u32 scsi_resid; /* residule length */ + + struct fc_rport *rport; /* remote port pointer */ + struct fc_seq *seq_ptr; /* current sequence pointer */ + /* + * Error Processing + */ + u8 recov_retry; /* count of recovery retries */ + struct fc_seq *recov_seq; /* sequence for REC or SRR */ +}; + +/* + * Structure and function definitions for managing Fibre Channel Exchanges + * and Sequences + * + * fc_exch holds state for one exchange and links to its active sequence. + * + * fc_seq holds the state for an individual sequence. + */ + +struct fc_exch_mgr; + +/* + * Sequence. + */ +struct fc_seq { + u8 id; /* seq ID */ + u16 ssb_stat; /* status flags for sequence status block */ + u16 cnt; /* frames sent so far on sequence */ + u32 f_ctl; /* F_CTL flags for frames */ + u32 rec_data; /* FC-4 value for REC */ +}; + +#define FC_EX_DONE (1 << 0) /* ep is completed */ +#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */ + +/* + * Exchange. 
+ * + * Locking notes: The ex_lock protects changes to the following fields: + * esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl. + * seq_id + * sequence allocation + * + */ +struct fc_exch { + struct fc_exch_mgr *em; /* exchange manager */ + u32 state; /* internal driver state */ + u16 xid; /* our exchange ID */ + struct list_head ex_list; /* free or busy list linkage */ + spinlock_t ex_lock; /* lock covering exchange state */ + atomic_t ex_refcnt; /* reference counter */ + struct delayed_work timeout_work; /* timer for upper level protocols */ + struct fc_lport *lp; /* fc device instance */ + u16 oxid; /* originator's exchange ID */ + u16 rxid; /* responder's exchange ID */ + u32 oid; /* originator's FCID */ + u32 sid; /* source FCID */ + u32 did; /* destination FCID */ + u32 esb_stat; /* exchange status for ESB */ + u32 r_a_tov; /* r_a_tov from rport (msec) */ + u8 seq_id; /* next sequence ID to use */ + u32 f_ctl; /* F_CTL flags for sequences */ + u8 fh_type; /* frame type */ + enum fc_class class; /* class of service */ + struct fc_seq seq; /* single sequence */ + /* + * Handler for responses to this current exchange. + */ + void (*resp)(struct fc_seq *, struct fc_frame *, void *); + void (*destructor)(struct fc_seq *, void *); + /* + * arg is passed as void pointer to exchange + * resp and destructor handlers + */ + void *arg; +}; +#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) + +struct libfc_function_template { + + /** + * Mandatory Fields + * + * These handlers must be implemented by the LLD. + */ + + /* + * Interface to send a FC frame + */ + int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp); + + /** + * Optional Fields + * + * The LLD may choose to implement any of the following handlers. + * If LLD doesn't specify hander and leaves its pointer NULL then + * the default libfc function will be used for that handler. + */ + + /** + * ELS/CT interfaces + */ + + /* + * elsct_send - sends ELS/CT frame + */ + struct fc_seq *(*elsct_send)(struct fc_lport *lport, + struct fc_rport *rport, + struct fc_frame *fp, + unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *fp, + void *arg), + void *arg, u32 timer_msec); + /** + * Exhance Manager interfaces + */ + + /* + * Send the FC frame payload using a new exchange and sequence. + * + * The frame pointer with some of the header's fields must be + * filled before calling exch_seq_send(), those fields are, + * + * - routing control + * - FC port did + * - FC port sid + * - FC header type + * - frame control + * - parameter or relative offset + * + * The exchange response handler is set in this routine to resp() + * function pointer. It can be called in two scenarios: if a timeout + * occurs or if a response frame is received for the exchange. The + * fc_frame pointer in response handler will also indicate timeout + * as error using IS_ERR related macros. + * + * The exchange destructor handler is also set in this routine. + * The destructor handler is invoked by EM layer when exchange + * is about to free, this can be used by caller to free its + * resources along with exchange free. + * + * The arg is passed back to resp and destructor handler. + * + * The timeout value (in msec) for an exchange is set if non zero + * timer_msec argument is specified. The timer is canceled when + * it fires or when the exchange is done. The exchange timeout handler + * is registered by EM layer. 
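
A minimal sketch of the calling convention described above, assuming the payload has already been filled in and that my_els_send/my_els_resp are invented names (the NULL destructor and the 2 * r_a_tov timeout are choices made for the sketch, not requirements of the patch):

static void my_els_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	if (IS_ERR(fp))
		return;				/* exchange timed out or was reset */
	/* ... inspect the response payload here ... */
	fc_frame_free(fp);
}

static int my_els_send(struct fc_lport *lport, struct fc_frame *fp, u32 did)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	/* the header fields the caller must fill, per the comment above */
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, fc_host_port_id(lport->host));
	hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
	fh->fh_parm_offset = htonl(0);

	if (!lport->tt.exch_seq_send(lport, fp, my_els_resp, NULL, lport,
				     2 * lport->r_a_tov)) {
		fc_frame_free(fp);
		return -ENOMEM;
	}
	return 0;
}
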
+ */ + struct fc_seq *(*exch_seq_send)(struct fc_lport *lp, + struct fc_frame *fp, + void (*resp)(struct fc_seq *sp, + struct fc_frame *fp, + void *arg), + void (*destructor)(struct fc_seq *sp, + void *arg), + void *arg, unsigned int timer_msec); + + /* + * send a frame using existing sequence and exchange. + */ + int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp, + struct fc_frame *fp); + + /* + * Send ELS response using mainly infomation + * in exchange and sequence in EM layer. + */ + void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd, + struct fc_seq_els_data *els_data); + + /* + * Abort an exchange and sequence. Generally called because of a + * exchange timeout or an abort from the upper layer. + * + * A timer_msec can be specified for abort timeout, if non-zero + * timer_msec value is specified then exchange resp handler + * will be called with timeout error if no response to abort. + */ + int (*seq_exch_abort)(const struct fc_seq *req_sp, + unsigned int timer_msec); + + /* + * Indicate that an exchange/sequence tuple is complete and the memory + * allocated for the related objects may be freed. + */ + void (*exch_done)(struct fc_seq *sp); + + /* + * Assigns a EM and a free XID for an new exchange and then + * allocates a new exchange and sequence pair. + * The fp can be used to determine free XID. + */ + struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp); + + /* + * Release previously assigned XID by exch_get API. + * The LLD may implement this if XID is assigned by LLD + * in exch_get(). + */ + void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp, + u16 ex_id); + + /* + * Start a new sequence on the same exchange/sequence tuple. + */ + struct fc_seq *(*seq_start_next)(struct fc_seq *sp); + + /* + * Reset an exchange manager, completing all sequences and exchanges. + * If s_id is non-zero, reset only exchanges originating from that FID. + * If d_id is non-zero, reset only exchanges sending to that FID. + */ + void (*exch_mgr_reset)(struct fc_exch_mgr *, + u32 s_id, u32 d_id); + + void (*rport_flush_queue)(void); + /** + * Local Port interfaces + */ + + /* + * Receive a frame to a local port. + */ + void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp, + struct fc_frame *fp); + + int (*lport_reset)(struct fc_lport *); + + /** + * Remote Port interfaces + */ + + /* + * Initiates the RP state machine. It is called from the LP module. + * This function will issue the following commands to the N_Port + * identified by the FC ID provided. + * + * - PLOGI + * - PRLI + * - RTV + */ + int (*rport_login)(struct fc_rport *rport); + + /* + * Logoff, and remove the rport from the transport if + * it had been added. This will send a LOGO to the target. + */ + int (*rport_logoff)(struct fc_rport *rport); + + /* + * Recieve a request from a remote port. + */ + void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, + struct fc_rport *); + + struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32); + + /** + * FCP interfaces + */ + + /* + * Send a fcp cmd from fsp pkt. + * Called with the SCSI host lock unlocked and irqs disabled. + * + * The resp handler is called when FCP_RSP received. 
+ * + */ + int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp, + void (*resp)(struct fc_seq *, struct fc_frame *fp, + void *arg)); + + /* + * Used at least durring linkdown and reset + */ + void (*fcp_cleanup)(struct fc_lport *lp); + + /* + * Abort all I/O on a local port + */ + void (*fcp_abort_io)(struct fc_lport *lp); + + /** + * Discovery interfaces + */ + + void (*disc_recv_req)(struct fc_seq *, + struct fc_frame *, struct fc_lport *); + + /* + * Start discovery for a local port. + */ + void (*disc_start)(void (*disc_callback)(struct fc_lport *, + enum fc_disc_event), + struct fc_lport *); + + /* + * Stop discovery for a given lport. This will remove + * all discovered rports + */ + void (*disc_stop) (struct fc_lport *); + + /* + * Stop discovery for a given lport. This will block + * until all discovered rports are deleted from the + * FC transport class + */ + void (*disc_stop_final) (struct fc_lport *); +}; + +struct fc_lport { + struct list_head list; + + /* Associations */ + struct Scsi_Host *host; + struct fc_exch_mgr *emp; + struct fc_rport *dns_rp; + struct fc_rport *ptp_rp; + void *scsi_priv; + + /* Operational Information */ + struct libfc_function_template tt; + u16 link_status; + enum fc_lport_state state; + unsigned long boot_time; + + struct fc_host_statistics host_stats; + struct fcoe_dev_stats *dev_stats[NR_CPUS]; + u64 wwpn; + u64 wwnn; + u8 retry_count; + + /* Capabilities */ + u32 sg_supp:1; /* scatter gather supported */ + u32 seq_offload:1; /* seq offload supported */ + u32 crc_offload:1; /* crc offload supported */ + u32 lro_enabled:1; /* large receive offload */ + u32 mfs; /* max FC payload size */ + unsigned int service_params; + unsigned int e_d_tov; + unsigned int r_a_tov; + u8 max_retry_count; + u16 link_speed; + u16 link_supported_speeds; + u16 lro_xid; /* max xid for fcoe lro */ + struct fc_ns_fts fcts; /* FC-4 type masks */ + struct fc_els_rnid_gen rnid_gen; /* RNID information */ + + /* Semaphores */ + struct mutex lp_mutex; + + /* Miscellaneous */ + struct delayed_work retry_work; + struct delayed_work disc_work; +}; + +/** + * FC_LPORT HELPER FUNCTIONS + *****************************/ +static inline void *lport_priv(const struct fc_lport *lp) +{ + return (void *)(lp + 1); +} + +static inline int fc_lport_test_ready(struct fc_lport *lp) +{ + return lp->state == LPORT_ST_READY; +} + +static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn) +{ + lp->wwnn = wwnn; +} + +static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwnn) +{ + lp->wwpn = wwnn; +} + +static inline void fc_lport_state_enter(struct fc_lport *lp, + enum fc_lport_state state) +{ + if (state != lp->state) + lp->retry_count = 0; + lp->state = state; +} + + +/** + * LOCAL PORT LAYER + *****************************/ +int fc_lport_init(struct fc_lport *lp); + +/* + * Destroy the specified local port by finding and freeing all + * fc_rports associated with it and then by freeing the fc_lport + * itself. + */ +int fc_lport_destroy(struct fc_lport *lp); + +/* + * Logout the specified local port from the fabric + */ +int fc_fabric_logoff(struct fc_lport *lp); + +/* + * Initiate the LP state machine. This handler will use fc_host_attr + * to store the FLOGI service parameters, so fc_host_attr must be + * initialized before calling this handler. + */ +int fc_fabric_login(struct fc_lport *lp); + +/* + * The link is up for the given local port. + */ +void fc_linkup(struct fc_lport *); + +/* + * Link is down for the given local port. 
+ */ +void fc_linkdown(struct fc_lport *); + +/* + * Pause and unpause traffic. + */ +void fc_pause(struct fc_lport *); +void fc_unpause(struct fc_lport *); + +/* + * Configure the local port. + */ +int fc_lport_config(struct fc_lport *); + +/* + * Reset the local port. + */ +int fc_lport_reset(struct fc_lport *); + +/* + * Set the mfs or reset + */ +int fc_set_mfs(struct fc_lport *lp, u32 mfs); + + +/** + * REMOTE PORT LAYER + *****************************/ +int fc_rport_init(struct fc_lport *lp); +void fc_rport_terminate_io(struct fc_rport *rp); + +/** + * DISCOVERY LAYER + *****************************/ +int fc_disc_init(struct fc_lport *lp); + + +/** + * SCSI LAYER + *****************************/ +/* + * Initialize the SCSI block of libfc + */ +int fc_fcp_init(struct fc_lport *); + +/* + * This section provides an API which allows direct interaction + * with the SCSI-ml. Each of these functions satisfies a function + * pointer defined in Scsi_Host and therefore is always called + * directly from the SCSI-ml. + */ +int fc_queuecommand(struct scsi_cmnd *sc_cmd, + void (*done)(struct scsi_cmnd *)); + +/* + * complete processing of a fcp packet + * + * This function may sleep if a fsp timer is pending. + * The host lock must not be held by caller. + */ +void fc_fcp_complete(struct fc_fcp_pkt *fsp); + +/* + * Send an ABTS frame to the target device. The sc_cmd argument + * is a pointer to the SCSI command to be aborted. + */ +int fc_eh_abort(struct scsi_cmnd *sc_cmd); + +/* + * Reset a LUN by sending send the tm cmd to the target. + */ +int fc_eh_device_reset(struct scsi_cmnd *sc_cmd); + +/* + * Reset the host adapter. + */ +int fc_eh_host_reset(struct scsi_cmnd *sc_cmd); + +/* + * Check rport status. + */ +int fc_slave_alloc(struct scsi_device *sdev); + +/* + * Adjust the queue depth. + */ +int fc_change_queue_depth(struct scsi_device *sdev, int qdepth); + +/* + * Change the tag type. + */ +int fc_change_queue_type(struct scsi_device *sdev, int tag_type); + +/* + * Free memory pools used by the FCP layer. + */ +void fc_fcp_destroy(struct fc_lport *); + +/** + * ELS/CT interface + *****************************/ +/* + * Initializes ELS/CT interface + */ +int fc_elsct_init(struct fc_lport *lp); + + +/** + * EXCHANGE MANAGER LAYER + *****************************/ +/* + * Initializes Exchange Manager related + * function pointers in struct libfc_function_template. + */ +int fc_exch_init(struct fc_lport *lp); + +/* + * Allocates an Exchange Manager (EM). + * + * The EM manages exchanges for their allocation and + * free, also allows exchange lookup for received + * frame. + * + * The class is used for initializing FC class of + * allocated exchange from EM. + * + * The min_xid and max_xid will limit new + * exchange ID (XID) within this range for + * a new exchange. + * The LLD may choose to have multiple EMs, + * e.g. one EM instance per CPU receive thread in LLD. + * The LLD can use exch_get() of struct libfc_function_template + * to specify XID for a new exchange within + * a specified EM instance. + * + * The em_idx to uniquely identify an EM instance. + */ +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, + enum fc_class class, + u16 min_xid, + u16 max_xid); + +/* + * Free an exchange manager. + */ +void fc_exch_mgr_free(struct fc_exch_mgr *mp); + +/* + * Receive a frame on specified local port and exchange manager. 
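
Taken together, these entry points imply roughly the following bring-up sequence in an LLD. This is a hedged sketch only: my_frame_send, the MFS value and the XID range are invented, and the exact init ordering may differ in a real driver:

static int my_lport_setup(struct fc_lport *lport)
{
	int rc;

	lport->tt.frame_send = my_frame_send;	/* mandatory LLD hook */

	rc = fc_lport_config(lport);		/* libfc defaults: e_d_tov, r_a_tov, ... */
	if (rc)
		return rc;
	fc_set_mfs(lport, 2112);		/* usually derived from the link MTU */

	/* let libfc fill in the remaining handlers left NULL in the template */
	fc_exch_init(lport);
	fc_elsct_init(lport);
	fc_lport_init(lport);
	fc_rport_init(lport);
	fc_disc_init(lport);
	fc_fcp_init(lport);

	lport->emp = fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0001, 0x0fff);
	if (!lport->emp)
		return -ENOMEM;

	rc = fc_fabric_login(lport);		/* arms the FLOGI state machine */
	if (rc)
		return rc;
	fc_linkup(lport);			/* report the link so FLOGI can start */
	return 0;
}
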
+ */ +void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, + struct fc_frame *fp); + +/* + * This function is for exch_seq_send function pointer in + * struct libfc_function_template, see comment block on + * exch_seq_send for description of this function. + */ +struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, + struct fc_frame *fp, + void (*resp)(struct fc_seq *sp, + struct fc_frame *fp, + void *arg), + void (*destructor)(struct fc_seq *sp, + void *arg), + void *arg, u32 timer_msec); + +/* + * send a frame using existing sequence and exchange. + */ +int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp); + +/* + * Send ELS response using mainly infomation + * in exchange and sequence in EM layer. + */ +void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, + struct fc_seq_els_data *els_data); + +/* + * This function is for seq_exch_abort function pointer in + * struct libfc_function_template, see comment block on + * seq_exch_abort for description of this function. + */ +int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec); + +/* + * Indicate that an exchange/sequence tuple is complete and the memory + * allocated for the related objects may be freed. + */ +void fc_exch_done(struct fc_seq *sp); + +/* + * Assigns a EM and XID for a frame and then allocates + * a new exchange and sequence pair. + * The fp can be used to determine free XID. + */ +struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp); + +/* + * Allocate a new exchange and sequence pair. + * if ex_id is zero then next free exchange id + * from specified exchange manger mp will be assigned. + */ +struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, + struct fc_frame *fp, u16 ex_id); +/* + * Start a new sequence on the same exchange as the supplied sequence. + */ +struct fc_seq *fc_seq_start_next(struct fc_seq *sp); + +/* + * Reset an exchange manager, completing all sequences and exchanges. + * If s_id is non-zero, reset only exchanges originating from that FID. + * If d_id is non-zero, reset only exchanges sending to that FID. + */ +void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id); + +/* + * Functions for fc_functions_template + */ +void fc_get_host_speed(struct Scsi_Host *shost); +void fc_get_host_port_type(struct Scsi_Host *shost); +void fc_get_host_port_state(struct Scsi_Host *shost); +void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout); +struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); + +/* + * module setup functions. + */ +int fc_setup_exch_mgr(void); +void fc_destroy_exch_mgr(void); +int fc_setup_rport(void); +void fc_destroy_rport(void); + +#endif /* _LIBFC_H_ */ diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h deleted file mode 100644 index 9508e55..0000000 --- a/include/scsi/libfc/fc_frame.h +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright(c) 2007 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * Maintained at www.Open-FCoE.org - */ - -#ifndef _FC_FRAME_H_ -#define _FC_FRAME_H_ - -#include -#include - -#include -#include - -/* - * The fc_frame interface is used to pass frame data between functions. - * The frame includes the data buffer, length, and SOF / EOF delimiter types. - * A pointer to the port structure of the receiving port is also includeded. - */ - -#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */ -#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */ - -/* - * Information about an individual fibre channel frame received or to be sent. - * The buffer may be in up to 4 additional non-contiguous sections, - * but the linear section must hold the frame header. - */ -#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */ - -#define fp_skb(fp) (&((fp)->skb)) -#define fr_hdr(fp) ((fp)->skb.data) -#define fr_len(fp) ((fp)->skb.len) -#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0])) -#define fr_dev(fp) (fr_cb(fp)->fr_dev) -#define fr_seq(fp) (fr_cb(fp)->fr_seq) -#define fr_sof(fp) (fr_cb(fp)->fr_sof) -#define fr_eof(fp) (fr_cb(fp)->fr_eof) -#define fr_flags(fp) (fr_cb(fp)->fr_flags) -#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload) - -struct fc_frame { - struct sk_buff skb; -}; - -struct fcoe_rcv_info { - struct packet_type *ptype; - struct fc_lport *fr_dev; /* transport layer private pointer */ - struct fc_seq *fr_seq; /* for use with exchange manager */ - enum fc_sof fr_sof; /* start of frame delimiter */ - enum fc_eof fr_eof; /* end of frame delimiter */ - u8 fr_flags; /* flags - see below */ - u16 fr_max_payload; /* max FC payload */ -}; - -/* - * Get fc_frame pointer for an skb that's already been imported. - */ -static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb) -{ - BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb)); - return (struct fcoe_rcv_info *) skb->cb; -} - -/* - * fr_flags. - */ -#define FCPHF_CRC_UNCHECKED 0x01 /* CRC not computed, still appended */ - -/* - * Initialize a frame. - * We don't do a complete memset here for performance reasons. - * The caller must set fr_free, fr_hdr, fr_len, fr_sof, and fr_eof eventually. - */ -static inline void fc_frame_init(struct fc_frame *fp) -{ - fr_dev(fp) = NULL; - fr_seq(fp) = NULL; - fr_flags(fp) = 0; -} - -struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len); - -struct fc_frame *__fc_frame_alloc(size_t payload_len); - -/* - * Get frame for sending via port. - */ -static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev, - size_t payload_len) -{ - return __fc_frame_alloc(payload_len); -} - -/* - * Allocate fc_frame structure and buffer. Set the initial length to - * payload_size + sizeof (struct fc_frame_header). - */ -static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len) -{ - struct fc_frame *fp; - - /* - * Note: Since len will often be a constant multiple of 4, - * this check will usually be evaluated and eliminated at compile time. - */ - if ((len % 4) != 0) - fp = fc_frame_alloc_fill(dev, len); - else - fp = _fc_frame_alloc(dev, len); - return fp; -} - -/* - * Free the fc_frame structure and buffer. 
- */ -static inline void fc_frame_free(struct fc_frame *fp) -{ - kfree_skb(fp_skb(fp)); -} - -static inline int fc_frame_is_linear(struct fc_frame *fp) -{ - return !skb_is_nonlinear(fp_skb(fp)); -} - -/* - * Get frame header from message in fc_frame structure. - * This hides a cast and provides a place to add some checking. - */ -static inline -struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp) -{ - WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header)); - return (struct fc_frame_header *) fr_hdr(fp); -} - -/* - * Get frame payload from message in fc_frame structure. - * This hides a cast and provides a place to add some checking. - * The len parameter is the minimum length for the payload portion. - * Returns NULL if the frame is too short. - * - * This assumes the interesting part of the payload is in the first part - * of the buffer for received data. This may not be appropriate to use for - * buffers being transmitted. - */ -static inline void *fc_frame_payload_get(const struct fc_frame *fp, - size_t len) -{ - void *pp = NULL; - - if (fr_len(fp) >= sizeof(struct fc_frame_header) + len) - pp = fc_frame_header_get(fp) + 1; - return pp; -} - -/* - * Get frame payload opcode (first byte) from message in fc_frame structure. - * This hides a cast and provides a place to add some checking. Return 0 - * if the frame has no payload. - */ -static inline u8 fc_frame_payload_op(const struct fc_frame *fp) -{ - u8 *cp; - - cp = fc_frame_payload_get(fp, sizeof(u8)); - if (!cp) - return 0; - return *cp; - -} - -/* - * Get FC class from frame. - */ -static inline enum fc_class fc_frame_class(const struct fc_frame *fp) -{ - return fc_sof_class(fr_sof(fp)); -} - -/* - * Set r_ctl and type in preparation for sending frame. - * This also clears fh_parm_offset. - */ -static inline void fc_frame_setup(struct fc_frame *fp, enum fc_rctl r_ctl, - enum fc_fh_type type) -{ - struct fc_frame_header *fh; - - fh = fc_frame_header_get(fp); - WARN_ON(r_ctl == 0); - fh->fh_r_ctl = r_ctl; - fh->fh_type = type; - fh->fh_parm_offset = htonl(0); -} - -/* - * Set offset in preparation for sending frame. - */ -static inline void -fc_frame_set_offset(struct fc_frame *fp, u32 offset) -{ - struct fc_frame_header *fh; - - fh = fc_frame_header_get(fp); - fh->fh_parm_offset = htonl(offset); -} - -/* - * Check the CRC in a frame. - * The CRC immediately follows the last data item *AFTER* the length. - * The return value is zero if the CRC matches. - */ -u32 fc_frame_crc_check(struct fc_frame *); - -/* - * Check for leaks. - * Print the frame header of any currently allocated frame, assuming there - * should be none at this point. - */ -void fc_frame_leak_check(void); - -#endif /* _FC_FRAME_H_ */ diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h deleted file mode 100644 index 237abd3..0000000 --- a/include/scsi/libfc/libfc.h +++ /dev/null @@ -1,860 +0,0 @@ -/* - * Copyright(c) 2007 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - * Maintained at www.Open-FCoE.org - */ - -#ifndef _LIBFC_H_ -#define _LIBFC_H_ - -#include -#include - -#include -#include - -#include -#include -#include -#include - -#include - -#define LIBFC_DEBUG - -#ifdef LIBFC_DEBUG -/* Log messages */ -#define FC_DBG(fmt, args...) \ - do { \ - printk(KERN_INFO "%s " fmt, __func__, ##args); \ - } while (0) -#else -#define FC_DBG(fmt, args...) -#endif - -/* - * libfc error codes - */ -#define FC_NO_ERR 0 /* no error */ -#define FC_EX_TIMEOUT 1 /* Exchange timeout */ -#define FC_EX_CLOSED 2 /* Exchange closed */ - -/* some helpful macros */ - -#define ntohll(x) be64_to_cpu(x) -#define htonll(x) cpu_to_be64(x) - -#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2])) - -#define hton24(p, v) do { \ - p[0] = (((v) >> 16) & 0xFF); \ - p[1] = (((v) >> 8) & 0xFF); \ - p[2] = ((v) & 0xFF); \ - } while (0) - -struct fc_exch_mgr; - -/* - * FC HBA status - */ -#define FC_PAUSE (1 << 1) -#define FC_LINK_UP (1 << 0) - -enum fc_lport_state { - LPORT_ST_NONE = 0, - LPORT_ST_FLOGI, - LPORT_ST_DNS, - LPORT_ST_RPN_ID, - LPORT_ST_RFT_ID, - LPORT_ST_SCR, - LPORT_ST_READY, - LPORT_ST_LOGO, - LPORT_ST_RESET -}; - -enum fc_lport_event { - LPORT_EV_RPORT_NONE = 0, - LPORT_EV_RPORT_CREATED, - LPORT_EV_RPORT_FAILED, - LPORT_EV_RPORT_STOP, - LPORT_EV_RPORT_LOGO -}; - -enum fc_rport_state { - RPORT_ST_NONE = 0, - RPORT_ST_INIT, /* initialized */ - RPORT_ST_PLOGI, /* waiting for PLOGI completion */ - RPORT_ST_PRLI, /* waiting for PRLI completion */ - RPORT_ST_RTV, /* waiting for RTV completion */ - RPORT_ST_READY, /* ready for use */ - RPORT_ST_LOGO, /* port logout sent */ -}; - -enum fc_rport_trans_state { - FC_PORTSTATE_ROGUE, - FC_PORTSTATE_REAL, -}; - -/** - * struct fc_disc_port - temporary discovery port to hold rport identifiers - * @lp: Fibre Channel host port instance - * @peers: node for list management during discovery and RSCN processing - * @ids: identifiers structure to pass to fc_remote_port_add() - * @rport_work: work struct for starting the rport state machine - */ -struct fc_disc_port { - struct fc_lport *lp; - struct list_head peers; - struct fc_rport_identifiers ids; - struct work_struct rport_work; -}; - -/** - * struct fc_rport_libfc_priv - libfc internal information about a remote port - * @local_port: Fibre Channel host port instance - * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges - * @flags: REC and RETRY supported flags - * @max_seq: maximum number of concurrent sequences - * @retries: retry count in current state - * @e_d_tov: error detect timeout value (in msec) - * @r_a_tov: resource allocation timeout value (in msec) - * @rp_mutex: mutex protects rport - * @retry_work: - * @event_callback: Callback for rport READY, FAILED or LOGO - */ -struct fc_rport_libfc_priv { - struct fc_lport *local_port; - enum fc_rport_state rp_state; - u16 flags; - #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) - #define FC_RP_FLAGS_RETRY (1 << 1) - u16 max_seq; - unsigned int retries; - unsigned int e_d_tov; - unsigned int r_a_tov; - enum fc_rport_trans_state trans_state; - struct mutex rp_mutex; - struct delayed_work retry_work; - enum fc_lport_event event; - void (*event_callback)(struct fc_lport *, - struct fc_rport *, - enum fc_lport_event); - struct list_head peers; - struct work_struct event_work; -}; - -#define 
PRIV_TO_RPORT(x) \ - (struct fc_rport *)((void *)x - sizeof(struct fc_rport)); -#define RPORT_TO_PRIV(x) \ - (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport)); - -struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *); -void fc_rport_rogue_destroy(struct fc_rport *); - -static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn) -{ - rport->node_name = wwnn; - rport->port_name = wwpn; -} - -/* - * fcoe stats structure - */ -struct fcoe_dev_stats { - u64 SecondsSinceLastReset; - u64 TxFrames; - u64 TxWords; - u64 RxFrames; - u64 RxWords; - u64 ErrorFrames; - u64 DumpedFrames; - u64 LinkFailureCount; - u64 LossOfSignalCount; - u64 InvalidTxWordCount; - u64 InvalidCRCCount; - u64 InputRequests; - u64 OutputRequests; - u64 ControlRequests; - u64 InputMegabytes; - u64 OutputMegabytes; -}; - -/* - * els data is used for passing ELS respone specific - * data to send ELS response mainly using infomation - * in exchange and sequence in EM layer. - */ -struct fc_seq_els_data { - struct fc_frame *fp; - enum fc_els_rjt_reason reason; - enum fc_els_rjt_explan explan; -}; - -/* - * FCP request structure, one for each scsi cmd request - */ -struct fc_fcp_pkt { - /* - * housekeeping stuff - */ - struct fc_lport *lp; /* handle to hba struct */ - u16 state; /* scsi_pkt state state */ - u16 tgt_flags; /* target flags */ - atomic_t ref_cnt; /* fcp pkt ref count */ - spinlock_t scsi_pkt_lock; /* Must be taken before the host lock - * if both are held at the same time */ - /* - * SCSI I/O related stuff - */ - struct scsi_cmnd *cmd; /* scsi command pointer. set/clear - * under host lock */ - struct list_head list; /* tracks queued commands. access under - * host lock */ - /* - * timeout related stuff - */ - struct timer_list timer; /* command timer */ - struct completion tm_done; - int wait_for_comp; - unsigned long start_time; /* start jiffie */ - unsigned long end_time; /* end jiffie */ - unsigned long last_pkt_time; /* jiffies of last frame received */ - - /* - * scsi cmd and data transfer information - */ - u32 data_len; - /* - * transport related veriables - */ - struct fcp_cmnd cdb_cmd; - size_t xfer_len; - u32 xfer_contig_end; /* offset of end of contiguous xfer */ - u16 max_payload; /* max payload size in bytes */ - - /* - * scsi/fcp return status - */ - u32 io_status; /* SCSI result upper 24 bits */ - u8 cdb_status; - u8 status_code; /* FCP I/O status */ - /* bit 3 Underrun bit 2: overrun */ - u8 scsi_comp_flags; - u32 req_flags; /* bit 0: read bit:1 write */ - u32 scsi_resid; /* residule length */ - - struct fc_rport *rport; /* remote port pointer */ - struct fc_seq *seq_ptr; /* current sequence pointer */ - /* - * Error Processing - */ - u8 recov_retry; /* count of recovery retries */ - struct fc_seq *recov_seq; /* sequence for REC or SRR */ -}; - -struct libfc_function_template { - - /** - * Mandatory Fields - * - * These handlers must be implemented by the LLD. - */ - - /* - * Interface to send a FC frame - */ - int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp); - - /** - * Optional Fields - * - * The LLD may choose to implement any of the following handlers. - * If LLD doesn't specify hander and leaves its pointer NULL then - * the default libfc function will be used for that handler. - */ - - /** - * Exhance Manager interfaces - */ - - /* - * Send the FC frame payload using a new exchange and sequence. 
- * - * The frame pointer with some of the header's fields must be - * filled before calling exch_seq_send(), those fields are, - * - * - routing control - * - FC header type - * - parameter or relative offset - * - * The exchange response handler is set in this routine to resp() - * function pointer. It can be called in two scenarios: if a timeout - * occurs or if a response frame is received for the exchange. The - * fc_frame pointer in response handler will also indicate timeout - * as error using IS_ERR related macros. - * - * The exchange destructor handler is also set in this routine. - * The destructor handler is invoked by EM layer when exchange - * is about to free, this can be used by caller to free its - * resources along with exchange free. - * - * The arg is passed back to resp and destructor handler. - * - * The timeout value (in msec) for an exchange is set if non zero - * timer_msec argument is specified. The timer is canceled when - * it fires or when the exchange is done. The exchange timeout handler - * is registered by EM layer. - * - * The caller also need to specify FC sid, did and frame control field. - */ - struct fc_seq *(*exch_seq_send)(struct fc_lport *lp, - struct fc_frame *fp, - void (*resp)(struct fc_seq *sp, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *sp, - void *arg), - void *arg, unsigned int timer_msec, - u32 sid, u32 did, u32 f_ctl); - - /* - * send a frame using existing sequence and exchange. - */ - int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp, - struct fc_frame *fp, u32 f_ctl); - - /* - * Send ELS response using mainly infomation - * in exchange and sequence in EM layer. - */ - void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data); - - /* - * Abort an exchange and sequence. Generally called because of a - * exchange timeout or an abort from the upper layer. - * - * A timer_msec can be specified for abort timeout, if non-zero - * timer_msec value is specified then exchange resp handler - * will be called with timeout error if no response to abort. - */ - int (*seq_exch_abort)(const struct fc_seq *req_sp, - unsigned int timer_msec); - - /* - * Indicate that an exchange/sequence tuple is complete and the memory - * allocated for the related objects may be freed. - */ - void (*exch_done)(struct fc_seq *sp); - - /* - * Assigns a EM and a free XID for an new exchange and then - * allocates a new exchange and sequence pair. - * The fp can be used to determine free XID. - */ - struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp); - - /* - * Release previously assigned XID by exch_get API. - * The LLD may implement this if XID is assigned by LLD - * in exch_get(). - */ - void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp, - u16 ex_id); - - /* - * Start a new sequence on the same exchange/sequence tuple. - */ - struct fc_seq *(*seq_start_next)(struct fc_seq *sp); - - /* - * Reset an exchange manager, completing all sequences and exchanges. - * If s_id is non-zero, reset only exchanges originating from that FID. - * If d_id is non-zero, reset only exchanges sending to that FID. - */ - void (*exch_mgr_reset)(struct fc_exch_mgr *, - u32 s_id, u32 d_id); - - /* - * Get exchange Ids of a sequence - */ - void (*seq_get_xids)(struct fc_seq *sp, u16 *oxid, u16 *rxid); - - /* - * Set REC data to a sequence - */ - void (*seq_set_rec_data)(struct fc_seq *sp, u32 rec_data); - - /** - * Local Port interfaces - */ - - /* - * Receive a frame to a local port. 
- */ - void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp, - struct fc_frame *fp); - - int (*lport_reset)(struct fc_lport *); - - void (*event_callback)(struct fc_lport *, struct fc_rport *, - enum fc_lport_event); - - /** - * Remote Port interfaces - */ - - /* - * Initiates the RP state machine. It is called from the LP module. - * This function will issue the following commands to the N_Port - * identified by the FC ID provided. - * - * - PLOGI - * - PRLI - * - RTV - */ - int (*rport_login)(struct fc_rport *rport); - - /* - * Logs the specified local port out of a N_Port identified - * by the ID provided. - */ - int (*rport_logout)(struct fc_rport *rport); - - /* - * Delete the rport and remove it from the transport if - * it had been added. This will not send a LOGO, use - * rport_logout for a gracefull logout. - */ - int (*rport_stop)(struct fc_rport *rport); - - /* - * Recieve a request from a remote port. - */ - void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, - struct fc_rport *); - - struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32); - - /** - * FCP interfaces - */ - - /* - * Send a fcp cmd from fsp pkt. - * Called with the SCSI host lock unlocked and irqs disabled. - * - * The resp handler is called when FCP_RSP received. - * - */ - int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp, - void (*resp)(struct fc_seq *, struct fc_frame *fp, - void *arg)); - - /* - * Used at least durring linkdown and reset - */ - void (*fcp_cleanup)(struct fc_lport *lp); - - /* - * Abort all I/O on a local port - */ - void (*fcp_abort_io)(struct fc_lport *lp); - - /** - * Discovery interfaces - */ - - void (*disc_recv_req)(struct fc_seq *, - struct fc_frame *, struct fc_lport *); - - /* - * Start discovery for a local port. 
- */ - int (*disc_start)(struct fc_lport *); -}; - -struct fc_lport { - struct list_head list; - - /* Associations */ - struct Scsi_Host *host; - struct fc_exch_mgr *emp; - struct fc_rport *dns_rp; - struct fc_rport *ptp_rp; - void *scsi_priv; - struct list_head rports; - - /* Operational Information */ - struct libfc_function_template tt; - u16 link_status; - u8 disc_done; - enum fc_lport_state state; - unsigned long boot_time; - - struct fc_host_statistics host_stats; - struct fcoe_dev_stats *dev_stats[NR_CPUS]; - - u64 wwpn; - u64 wwnn; - u8 retry_count; - unsigned char disc_retry_count; - unsigned char disc_delay; - unsigned char disc_pending; - unsigned char disc_requested; - unsigned short disc_seq_count; - unsigned char disc_buf_len; - - /* Capabilities */ - u32 sg_supp:1; /* scatter gather supported */ - u32 seq_offload:1; /* seq offload supported */ - u32 mfs; /* max FC payload size */ - unsigned int service_params; - unsigned int e_d_tov; - unsigned int r_a_tov; - u8 max_retry_count; - u16 link_speed; - u16 link_supported_speeds; - struct fc_ns_fts fcts; /* FC-4 type masks */ - struct fc_els_rnid_gen rnid_gen; /* RNID information */ - - /* Semaphores */ - struct mutex lp_mutex; - - /* Miscellaneous */ - struct fc_gpn_ft_resp disc_buf; /* partial name buffer */ - struct delayed_work retry_work; - struct delayed_work disc_work; - - void *drv_priv; -}; - -/** - * FC_LPORT HELPER FUNCTIONS - *****************************/ - -static inline int fc_lport_test_ready(struct fc_lport *lp) -{ - return lp->state == LPORT_ST_READY; -} - -static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn) -{ - lp->wwnn = wwnn; -} - -static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwnn) -{ - lp->wwpn = wwnn; -} - -/** - * fc_fill_dns_hdr - Fill in a name service request header - * @lp: Fibre Channel host port instance - * @ct: Common Transport (CT) header structure - * @op: Name Service request code - * @req_size: Full size of Name Service request - */ -static inline void fc_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct, - unsigned int op, unsigned int req_size) -{ - memset(ct, 0, sizeof(*ct) + req_size); - ct->ct_rev = FC_CT_REV; - ct->ct_fs_type = FC_FST_DIR; - ct->ct_fs_subtype = FC_NS_SUBTYPE; - ct->ct_cmd = htons((u16) op); -} - -static inline void fc_lport_state_enter(struct fc_lport *lp, - enum fc_lport_state state) -{ - if (state != lp->state) - lp->retry_count = 0; - lp->state = state; -} - - -/** - * LOCAL PORT LAYER - *****************************/ -int fc_lport_init(struct fc_lport *lp); - -/* - * Destroy the specified local port by finding and freeing all - * fc_rports associated with it and then by freeing the fc_lport - * itself. - */ -int fc_lport_destroy(struct fc_lport *lp); - -/* - * Logout the specified local port from the fabric - */ -int fc_fabric_logoff(struct fc_lport *lp); - -/* - * Initiate the LP state machine. This handler will use fc_host_attr - * to store the FLOGI service parameters, so fc_host_attr must be - * initialized before calling this handler. - */ -int fc_fabric_login(struct fc_lport *lp); - -/* - * The link is up for the given local port. - */ -void fc_linkup(struct fc_lport *); - -/* - * Link is down for the given local port. - */ -void fc_linkdown(struct fc_lport *); - -/* - * Pause and unpause traffic. - */ -void fc_pause(struct fc_lport *); -void fc_unpause(struct fc_lport *); - -/* - * Configure the local port. - */ -int fc_lport_config(struct fc_lport *); - -/* - * Reset the local port. 
- */ -int fc_lport_reset(struct fc_lport *); - -/* - * Set the mfs or reset - */ -int fc_set_mfs(struct fc_lport *lp, u32 mfs); - - -/** - * REMOTE PORT LAYER - *****************************/ -int fc_rport_init(struct fc_lport *lp); -void fc_rport_terminate_io(struct fc_rport *rp); - -/** - * DISCOVERY LAYER - *****************************/ -int fc_disc_init(struct fc_lport *lp); - - -/** - * SCSI LAYER - *****************************/ -/* - * Initialize the SCSI block of libfc - */ -int fc_fcp_init(struct fc_lport *); - -/* - * This section provides an API which allows direct interaction - * with the SCSI-ml. Each of these functions satisfies a function - * pointer defined in Scsi_Host and therefore is always called - * directly from the SCSI-ml. - */ -int fc_queuecommand(struct scsi_cmnd *sc_cmd, - void (*done)(struct scsi_cmnd *)); - -/* - * complete processing of a fcp packet - * - * This function may sleep if a fsp timer is pending. - * The host lock must not be held by caller. - */ -void fc_fcp_complete(struct fc_fcp_pkt *fsp); - -/* - * Send an ABTS frame to the target device. The sc_cmd argument - * is a pointer to the SCSI command to be aborted. - */ -int fc_eh_abort(struct scsi_cmnd *sc_cmd); - -/* - * Reset a LUN by sending send the tm cmd to the target. - */ -int fc_eh_device_reset(struct scsi_cmnd *sc_cmd); - -/* - * Reset the host adapter. - */ -int fc_eh_host_reset(struct scsi_cmnd *sc_cmd); - -/* - * Check rport status. - */ -int fc_slave_alloc(struct scsi_device *sdev); - -/* - * Adjust the queue depth. - */ -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth); - -/* - * Change the tag type. - */ -int fc_change_queue_type(struct scsi_device *sdev, int tag_type); - -/* - * Free memory pools used by the FCP layer. - */ -void fc_fcp_destroy(struct fc_lport *); - - -/** - * EXCHANGE MANAGER LAYER - *****************************/ -/* - * Initializes Exchange Manager related - * function pointers in struct libfc_function_template. - */ -int fc_exch_init(struct fc_lport *lp); - -/* - * Allocates an Exchange Manager (EM). - * - * The EM manages exchanges for their allocation and - * free, also allows exchange lookup for received - * frame. - * - * The class is used for initializing FC class of - * allocated exchange from EM. - * - * The min_xid and max_xid will limit new - * exchange ID (XID) within this range for - * a new exchange. - * The LLD may choose to have multiple EMs, - * e.g. one EM instance per CPU receive thread in LLD. - * The LLD can use exch_get() of struct libfc_function_template - * to specify XID for a new exchange within - * a specified EM instance. - * - * The em_idx to uniquely identify an EM instance. - */ -struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, - enum fc_class class, - u16 min_xid, - u16 max_xid); - -/* - * Free an exchange manager. - */ -void fc_exch_mgr_free(struct fc_exch_mgr *mp); - -/* - * Receive a frame on specified local port and exchange manager. - */ -void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, - struct fc_frame *fp); - -/* - * This function is for exch_seq_send function pointer in - * struct libfc_function_template, see comment block on - * exch_seq_send for description of this function. 
- */ -struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, - struct fc_frame *fp, - void (*resp)(struct fc_seq *sp, - struct fc_frame *fp, - void *arg), - void (*destructor)(struct fc_seq *sp, - void *arg), - void *arg, u32 timer_msec, - u32 sid, u32 did, u32 f_ctl); - -/* - * send a frame using existing sequence and exchange. - */ -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, - struct fc_frame *fp, u32 f_ctl); - -/* - * Send ELS response using mainly infomation - * in exchange and sequence in EM layer. - */ -void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, - struct fc_seq_els_data *els_data); - -/* - * This function is for seq_exch_abort function pointer in - * struct libfc_function_template, see comment block on - * seq_exch_abort for description of this function. - */ -int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec); - -/* - * Indicate that an exchange/sequence tuple is complete and the memory - * allocated for the related objects may be freed. - */ -void fc_exch_done(struct fc_seq *sp); - -/* - * Assigns a EM and XID for a frame and then allocates - * a new exchange and sequence pair. - * The fp can be used to determine free XID. - */ -struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp); - -/* - * Allocate a new exchange and sequence pair. - * if ex_id is zero then next free exchange id - * from specified exchange manger mp will be assigned. - */ -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 ex_id); - -/* - * Start a new sequence on the same exchange as the supplied sequence. - */ -struct fc_seq *fc_seq_start_next(struct fc_seq *sp); - -/* - * Reset an exchange manager, completing all sequences and exchanges. - * If s_id is non-zero, reset only exchanges originating from that FID. - * If d_id is non-zero, reset only exchanges sending to that FID. - */ -void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id); - -/* - * Get exchange Ids of a sequence - */ -void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid); - -/* - * Set REC data to a sequence - */ -void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data); - -/* - * Functions for fc_functions_template - */ -void fc_get_host_speed(struct Scsi_Host *shost); -void fc_get_host_port_type(struct Scsi_Host *shost); -void fc_get_host_port_state(struct Scsi_Host *shost); -void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout); -struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); - -/* - * module setup functions. - */ -int fc_setup_exch_mgr(void); -void fc_destroy_exch_mgr(void); -int fc_setup_rport(void); -void fc_destroy_rport(void); - -#endif /* _LIBFC_H_ */ diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h new file mode 100644 index 0000000..4ca5902 --- /dev/null +++ b/include/scsi/libfcoe.h @@ -0,0 +1,177 @@ +/* + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained at www.Open-FCoE.org + */ + +#ifndef _LIBFCOE_H +#define _LIBFCOE_H + +#include +#include +#include +#include + +/* + * this percpu struct for fcoe + */ +struct fcoe_percpu_s { + int cpu; + struct task_struct *thread; + struct sk_buff_head fcoe_rx_list; + struct page *crc_eof_page; + int crc_eof_offset; +}; + +/* + * the fcoe sw transport private data + */ +struct fcoe_softc { + struct list_head list; + struct fc_lport *lp; + struct net_device *real_dev; + struct net_device *phys_dev; /* device with ethtool_ops */ + struct packet_type fcoe_packet_type; + struct sk_buff_head fcoe_pending_queue; + u16 user_mfs; /* configured max frame size */ + + u8 dest_addr[ETH_ALEN]; + u8 ctl_src_addr[ETH_ALEN]; + u8 data_src_addr[ETH_ALEN]; + /* + * fcoe protocol address learning related stuff + */ + u16 flogi_oxid; + u8 flogi_progress; + u8 address_mode; +}; + +static inline struct fcoe_softc *fcoe_softc( + const struct fc_lport *lp) +{ + return (struct fcoe_softc *)lport_priv(lp); +} + +static inline struct net_device *fcoe_netdev( + const struct fc_lport *lp) +{ + return fcoe_softc(lp)->real_dev; +} + +static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb) +{ + return (struct fcoe_hdr *)skb_network_header(skb); +} + +static inline int skb_fcoe_offset(const struct sk_buff *skb) +{ + return skb_network_offset(skb); +} + +static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb) +{ + return (struct fc_frame_header *)skb_transport_header(skb); +} + +static inline int skb_fc_offset(const struct sk_buff *skb) +{ + return skb_transport_offset(skb); +} + +static inline void skb_reset_fc_header(struct sk_buff *skb) +{ + skb_reset_network_header(skb); + skb_set_transport_header(skb, skb_network_offset(skb) + + sizeof(struct fcoe_hdr)); +} + +static inline bool skb_fc_is_data(const struct sk_buff *skb) +{ + return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA; +} + +static inline bool skb_fc_is_cmd(const struct sk_buff *skb) +{ + return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD; +} + +static inline bool skb_fc_has_exthdr(const struct sk_buff *skb) +{ + return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) || + (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) || + (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH); +} + +static inline bool skb_fc_is_roff(const struct sk_buff *skb) +{ + return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF; +} + +static inline u16 skb_fc_oxid(const struct sk_buff *skb) +{ + return be16_to_cpu(skb_fc_header(skb)->fh_ox_id); +} + +static inline u16 skb_fc_rxid(const struct sk_buff *skb) +{ + return be16_to_cpu(skb_fc_header(skb)->fh_rx_id); +} + +/* FIXME - DMA_BIDIRECTIONAL ? 
*/ +#define skb_cb(skb) ((struct fcoe_rcv_info *)&((skb)->cb[0])) +#define skb_cmd(skb) (skb_cb(skb)->fr_cmd) +#define skb_dir(skb) (skb_cmd(skb)->sc_data_direction) +static inline bool skb_fc_is_read(const struct sk_buff *skb) +{ + if (skb_fc_is_cmd(skb) && skb_cmd(skb)) + return skb_dir(skb) == DMA_FROM_DEVICE; + return false; +} + +static inline bool skb_fc_is_write(const struct sk_buff *skb) +{ + if (skb_fc_is_cmd(skb) && skb_cmd(skb)) + return skb_dir(skb) == DMA_TO_DEVICE; + return false; +} + +/* libfcoe funcs */ +int fcoe_reset(struct Scsi_Host *shost); +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], + unsigned int scheme, unsigned int port); + +u32 fcoe_fc_crc(struct fc_frame *fp); +int fcoe_xmit(struct fc_lport *, struct fc_frame *); +int fcoe_rcv(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); + +int fcoe_percpu_receive_thread(void *arg); +void fcoe_clean_pending_queue(struct fc_lport *lp); +void fcoe_percpu_clean(struct fc_lport *lp); +void fcoe_watchdog(ulong vp); +int fcoe_link_ok(struct fc_lport *lp); + +struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); +int fcoe_hostlist_add(const struct fc_lport *); +int fcoe_hostlist_remove(const struct fc_lport *); + +struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int); +int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); + +/* fcoe sw hba */ +int __init fcoe_sw_init(void); +int __exit fcoe_sw_exit(void); +#endif /* _LIBFCOE_H */
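
A note on the libfc SCSI entry points listed in the removed libfc.h hunk above (fc_queuecommand, fc_eh_abort, fc_eh_device_reset, fc_eh_host_reset, fc_slave_alloc, fc_change_queue_depth, fc_change_queue_type): as the comment there says, each satisfies a function pointer in Scsi_Host, so an LLD wires them straight into its scsi_host_template. A minimal sketch follows; the template name and the tunables (can_queue, sg_tablesize, max_sectors) are illustrative, not values from this patch.

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libfc.h>

/*
 * Sketch only: plug the libfc SCSI-ml handlers into a host template.
 * The numeric tunables are example values, not taken from this patch.
 */
static struct scsi_host_template example_fc_sht = {
	.module			 = THIS_MODULE,
	.name			 = "example libfc host",
	.queuecommand		 = fc_queuecommand,
	.eh_abort_handler	 = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler	 = fc_eh_host_reset,
	.slave_alloc		 = fc_slave_alloc,
	.change_queue_depth	 = fc_change_queue_depth,
	.change_queue_type	 = fc_change_queue_type,
	.this_id		 = -1,
	.can_queue		 = 1024,
	.sg_tablesize		 = SG_ALL,
	.max_sectors		 = 0xffff,
};

Such a template would then be handed to the host allocation path (e.g. fcoe_host_alloc() declared in libfcoe.h above) by the transport's create routine.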
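
The exchange manager comment block above describes fc_exch_mgr_alloc(): the class seeds the FC class of new exchanges, and min_xid/max_xid bound the XIDs the EM will hand out, with one EM per lport (or per receive thread) being typical. A minimal sketch of that allocation, assuming FC_CLASS_3 from fc_fs.h and an arbitrary XID window, neither of which is mandated by this patch:

#include <linux/errno.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/libfc.h>

/*
 * Illustrative only: one exchange manager for a local port, serving a
 * fixed XID window. FC_CLASS_3 and the 0x0001..0x0fff range are example
 * choices, not values taken from this patch.
 */
static int example_setup_em(struct fc_lport *lp)
{
	struct fc_exch_mgr *mp;

	mp = fc_exch_mgr_alloc(lp, FC_CLASS_3, 0x0001, 0x0fff);
	if (!mp)
		return -ENOMEM;

	/*
	 * Received frames within this EM's XID range would then be passed
	 * to it with fc_exch_recv(lp, mp, fp).
	 */

	fc_exch_mgr_free(mp);
	return 0;
}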
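
The skb_fc_*() inlines added in libfcoe.h give the receive path a typed view of the FC header once skb_reset_fc_header() has positioned the network header at the FCoE header and the transport header just past it. A usage sketch under that assumption; the function itself is hypothetical:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <scsi/libfcoe.h>

/*
 * Usage sketch for the new skb_fc_*() accessors. Assumes the caller has
 * already run skb_reset_fc_header() on this skb.
 */
static void example_classify_frame(const struct sk_buff *skb)
{
	const struct fc_frame_header *fh = skb_fc_header(skb);

	if (skb_fc_is_cmd(skb))
		pr_debug("FCP command, ox_id 0x%x\n", skb_fc_oxid(skb));
	else if (skb_fc_is_data(skb))
		pr_debug("solicited data, rx_id 0x%x\n", skb_fc_rxid(skb));
	else
		pr_debug("other frame, r_ctl 0x%x\n", fh->fh_r_ctl);
}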
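
Finally, fcoe_wwn_from_mac() declared above derives a 64-bit world-wide name from the netdev MAC address using a naming scheme selector. The fragment below is only a sketch: the scheme values (1 for the node name, 2 for the port name) and the fc_set_wwnn()/fc_set_wwpn() setters are assumptions about the libfc side of this series, not something shown in this hunk.

#include <linux/netdevice.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/*
 * Illustrative only: derive WWNN/WWPN from the netdev MAC. The scheme
 * values and the fc_set_wwnn()/fc_set_wwpn() helpers are assumptions.
 */
static void example_set_wwns(struct fc_lport *lp, struct net_device *netdev)
{
	u64 wwnn, wwpn;

	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);

	fc_set_wwnn(lp, wwnn);
	fc_set_wwpn(lp, wwpn);
}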